/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif
#include <rte_gro.h>

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/* Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs. */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default). */

/*
 * In a container, it is not possible to terminate a process that is running
 * with the 'stats-period' option. Set a flag so that the stats period loop
 * exits after SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/* Configurable value of RX free threshold. */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/* Configurable value of RX drop enable. */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/* Configurable value of TX free threshold. */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/* Configurable value of TX RS bit threshold. */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/* Configurable value of TX queue flags. */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/* Receive Side Scaling (RSS) configuration. */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/* Port topology configuration. */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/* Avoid flushing all the RX streams before starting forwarding. */
uint8_t no_flush_rx = 0; /* flush by default */

/* Flow API isolated mode. */
uint8_t flow_isolate_all;

/* Avoid checking the link status when starting/stopping a port. */
uint8_t no_link_check = 0; /* check by default */

/* Enable link status change notification. */
uint8_t lsc_interrupt = 1; /* enabled by default */

/* Enable device removal notification. */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
/*
 * NIC bypass mode configuration options.
 */
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS
/* Set when latency stats are enabled on the command line. */
uint8_t latencystats_enabled;

/* Lcore ID used to serve latency statistics. */
lcoreid_t latencystats_lcore_id = -1;
#endif

/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
	.hw_timestamp   = 0, /**< HW timestamp disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* When set, hide zero values in the xstats display (zero values are shown by default). */
uint8_t xstats_hide_zero;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return a positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;

/*
 * Helper function to check whether a socket id has already been discovered.
 * Return a positive value if the socket id is new, zero if it is already known.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
438 * Setup default configuration.
441 set_default_fwd_lcores_config(void)
445 unsigned int sock_num;
448 for (i = 0; i < RTE_MAX_LCORE; i++) {
449 sock_num = rte_lcore_to_socket_id(i);
450 if (new_socket_id(sock_num)) {
451 if (num_sockets >= RTE_MAX_NUMA_NODES) {
452 rte_exit(EXIT_FAILURE,
453 "Total sockets greater than %u\n",
456 socket_ids[num_sockets++] = sock_num;
458 if (!rte_lcore_is_enabled(i))
460 if (i == rte_get_master_lcore())
462 fwd_lcores_cpuids[nb_lc++] = i;
464 nb_lcores = (lcoreid_t) nb_lc;
465 nb_cfg_lcores = nb_lcores;
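/*
 * Set the default Ethernet address of each possible peer port: a locally
 * administered MAC address whose last byte holds the port index.
 */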
470 set_def_peer_eth_addrs(void)
474 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
475 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
476 peer_eth_addrs[i].addr_bytes[5] = i;
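/* By default, forward on all probed ports, keeping the probe order. */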
481 set_default_fwd_ports_config(void)
486 RTE_ETH_FOREACH_DEV(pt_id)
487 fwd_ports_ids[i++] = pt_id;
489 nb_cfg_ports = nb_ports;
490 nb_fwd_ports = nb_ports;
494 set_def_fwd_config(void)
496 set_default_fwd_lcores_config();
497 set_def_peer_eth_addrs();
498 set_default_fwd_ports_config();
502 * Configuration initialisation done once at init time.
505 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
506 unsigned int socket_id)
508 char pool_name[RTE_MEMPOOL_NAMESIZE];
509 struct rte_mempool *rte_mp = NULL;
512 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
513 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
516 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
517 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
520 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
521 mb_size, (unsigned) mb_mempool_cache,
522 sizeof(struct rte_pktmbuf_pool_private),
527 if (rte_mempool_populate_anon(rte_mp) == 0) {
528 rte_mempool_free(rte_mp);
532 rte_pktmbuf_pool_init(rte_mp, NULL);
533 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
535 /* wrapper to rte_mempool_create() */
536 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
541 if (rte_mp == NULL) {
542 rte_exit(EXIT_FAILURE,
543 "Creation of mbuf pool for socket %u failed: %s\n",
544 socket_id, rte_strerror(rte_errno));
545 } else if (verbose_level > 0) {
546 rte_mempool_dump(stdout, rte_mp);
/*
 * Check whether the given socket id is valid in NUMA mode.
 * Return 0 if valid, -1 otherwise.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the port id which has the minimal value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
579 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
581 struct rte_eth_dev_info dev_info;
583 RTE_ETH_FOREACH_DEV(pi) {
584 rte_eth_dev_info_get(pi, &dev_info);
585 if (dev_info.max_rx_queues < allowed_max_rxq) {
586 allowed_max_rxq = dev_info.max_rx_queues;
590 return allowed_max_rxq;
/*
 * Check whether the requested number of RX queues is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
602 queueid_t allowed_max_rxq;
605 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606 if (rxq > allowed_max_rxq) {
607 printf("Fail: input rxq (%u) can't be greater "
608 "than max_rx_queues (%u) of port %u\n",
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the port id which has the minimal value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
625 queueid_t allowed_max_txq = MAX_QUEUE_ID;
627 struct rte_eth_dev_info dev_info;
629 RTE_ETH_FOREACH_DEV(pi) {
630 rte_eth_dev_info_get(pi, &dev_info);
631 if (dev_info.max_tx_queues < allowed_max_txq) {
632 allowed_max_txq = dev_info.max_tx_queues;
636 return allowed_max_txq;
/*
 * Check whether the requested number of TX queues is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port. Return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
648 queueid_t allowed_max_txq;
651 allowed_max_txq = get_allowed_max_nb_txq(&pid);
652 if (txq > allowed_max_txq) {
653 printf("Fail: input txq (%u) can't be greater "
654 "than max_tx_queues (%u) of port %u\n",
667 struct rte_port *port;
668 struct rte_mempool *mbp;
669 unsigned int nb_mbuf_per_pool;
671 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
672 struct rte_gro_param gro_param;
675 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
678 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
683 /* Configuration of logical cores. */
684 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
685 sizeof(struct fwd_lcore *) * nb_lcores,
686 RTE_CACHE_LINE_SIZE);
687 if (fwd_lcores == NULL) {
688 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
689 "failed\n", nb_lcores);
691 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
693 sizeof(struct fwd_lcore),
694 RTE_CACHE_LINE_SIZE);
695 if (fwd_lcores[lc_id] == NULL) {
696 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
699 fwd_lcores[lc_id]->cpuid_idx = lc_id;
702 RTE_ETH_FOREACH_DEV(pid) {
704 rte_eth_dev_info_get(pid, &port->dev_info);
707 if (port_numa[pid] != NUMA_NO_CONFIG)
708 port_per_socket[port_numa[pid]]++;
710 uint32_t socket_id = rte_eth_dev_socket_id(pid);
712 /* if socket_id is invalid, set to 0 */
713 if (check_socket_id(socket_id) < 0)
715 port_per_socket[socket_id]++;
719 /* set flag to initialize port/queue */
720 port->need_reconfig = 1;
721 port->need_reconfig_queues = 1;
725 * Create pools of mbuf.
726 * If NUMA support is disabled, create a single pool of mbuf in
727 * socket 0 memory by default.
728 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
730 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
731 * nb_txd can be configured at run time.
733 if (param_total_num_mbufs)
734 nb_mbuf_per_pool = param_total_num_mbufs;
736 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
737 (nb_lcores * mb_mempool_cache) +
738 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
739 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
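	/*
	 * Rough worked example of the sizing above, assuming the usual
	 * testpmd defaults (RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * mb_mempool_cache = 250, MAX_PKT_BURST = 512) and 4 forwarding lcores:
	 *   2048 + (4 * 250) + 2048 + 512 = 5608 mbufs,
	 * scaled by RTE_MAX_ETHPORTS (32 by default) gives 179456 mbufs per pool.
	 */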
745 for (i = 0; i < num_sockets; i++)
746 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
749 if (socket_num == UMA_NO_CONFIG)
750 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
752 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
758 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
759 DEV_TX_OFFLOAD_GRE_TNL_TSO;
761 * Records which Mbuf pool to use by each logical core, if needed.
763 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
764 mbp = mbuf_pool_find(
765 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
768 mbp = mbuf_pool_find(0);
769 fwd_lcores[lc_id]->mbp = mbp;
770 /* initialize GSO context */
771 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
772 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
773 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
774 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
776 fwd_lcores[lc_id]->gso_ctx.flag = 0;
779 /* Configuration of packet forwarding streams. */
780 if (init_fwd_streams() < 0)
781 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
785 /* create a gro context for each lcore */
786 gro_param.gro_types = RTE_GRO_TCP_IPV4;
787 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
788 gro_param.max_item_per_flow = MAX_PKT_BURST;
789 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
790 gro_param.socket_id = rte_lcore_to_socket_id(
791 fwd_lcores_cpuids[lc_id]);
792 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
793 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
794 rte_exit(EXIT_FAILURE,
795 "rte_gro_ctx_create() failed\n");
802 reconfig(portid_t new_port_id, unsigned socket_id)
804 struct rte_port *port;
806 /* Reconfiguration of Ethernet ports. */
807 port = &ports[new_port_id];
808 rte_eth_dev_info_get(new_port_id, &port->dev_info);
810 /* set flag to initialize port/queue */
811 port->need_reconfig = 1;
812 port->need_reconfig_queues = 1;
813 port->socket_id = socket_id;
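/*
 * Check the requested numbers of RX/TX queues against each port's limits,
 * assign every port a socket id and (re)allocate one forwarding stream per
 * port queue.
 */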
820 init_fwd_streams(void)
823 struct rte_port *port;
824 streamid_t sm_id, nb_fwd_streams_new;
827 /* set socket id according to numa or not */
828 RTE_ETH_FOREACH_DEV(pid) {
830 if (nb_rxq > port->dev_info.max_rx_queues) {
831 printf("Fail: nb_rxq(%d) is greater than "
832 "max_rx_queues(%d)\n", nb_rxq,
833 port->dev_info.max_rx_queues);
836 if (nb_txq > port->dev_info.max_tx_queues) {
837 printf("Fail: nb_txq(%d) is greater than "
838 "max_tx_queues(%d)\n", nb_txq,
839 port->dev_info.max_tx_queues);
843 if (port_numa[pid] != NUMA_NO_CONFIG)
844 port->socket_id = port_numa[pid];
846 port->socket_id = rte_eth_dev_socket_id(pid);
848 /* if socket_id is invalid, set to 0 */
849 if (check_socket_id(port->socket_id) < 0)
854 if (socket_num == UMA_NO_CONFIG)
857 port->socket_id = socket_num;
861 q = RTE_MAX(nb_rxq, nb_txq);
863 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
866 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
867 if (nb_fwd_streams_new == nb_fwd_streams)
870 if (fwd_streams != NULL) {
871 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
872 if (fwd_streams[sm_id] == NULL)
874 rte_free(fwd_streams[sm_id]);
875 fwd_streams[sm_id] = NULL;
877 rte_free(fwd_streams);
882 nb_fwd_streams = nb_fwd_streams_new;
883 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
884 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
885 if (fwd_streams == NULL)
886 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
887 "failed\n", nb_fwd_streams);
889 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
890 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
891 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
892 if (fwd_streams[sm_id] == NULL)
893 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
900 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
902 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
904 unsigned int total_burst;
905 unsigned int nb_burst;
906 unsigned int burst_stats[3];
907 uint16_t pktnb_stats[3];
909 int burst_percent[3];
912 * First compute the total number of packet bursts and the
913 * two highest numbers of bursts of the same number of packets.
916 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
917 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
918 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
919 nb_burst = pbs->pkt_burst_spread[nb_pkt];
922 total_burst += nb_burst;
923 if (nb_burst > burst_stats[0]) {
924 burst_stats[1] = burst_stats[0];
925 pktnb_stats[1] = pktnb_stats[0];
926 burst_stats[0] = nb_burst;
927 pktnb_stats[0] = nb_pkt;
930 if (total_burst == 0)
932 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
933 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
934 burst_percent[0], (int) pktnb_stats[0]);
935 if (burst_stats[0] == total_burst) {
939 if (burst_stats[0] + burst_stats[1] == total_burst) {
940 printf(" + %d%% of %d pkts]\n",
941 100 - burst_percent[0], pktnb_stats[1]);
944 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
945 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
946 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
947 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
950 printf(" + %d%% of %d pkts + %d%% of others]\n",
951 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
953 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
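/* Display the forwarding statistics accumulated for one port. */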
956 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
958 struct rte_port *port;
961 static const char *fwd_stats_border = "----------------------";
963 port = &ports[port_id];
964 printf("\n %s Forward statistics for port %-2d %s\n",
965 fwd_stats_border, port_id, fwd_stats_border);
967 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
968 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
970 stats->ipackets, stats->imissed,
971 (uint64_t) (stats->ipackets + stats->imissed));
973 if (cur_fwd_eng == &csum_fwd_engine)
974 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
975 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
976 if ((stats->ierrors + stats->rx_nombuf) > 0) {
977 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
978 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
981 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
983 stats->opackets, port->tx_dropped,
984 (uint64_t) (stats->opackets + port->tx_dropped));
987 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
989 stats->ipackets, stats->imissed,
990 (uint64_t) (stats->ipackets + stats->imissed));
992 if (cur_fwd_eng == &csum_fwd_engine)
993 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
994 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
995 if ((stats->ierrors + stats->rx_nombuf) > 0) {
996 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
997 printf(" RX-nombufs: %14"PRIu64"\n",
1001 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
1003 stats->opackets, port->tx_dropped,
1004 (uint64_t) (stats->opackets + port->tx_dropped));
1007 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1008 if (port->rx_stream)
1009 pkt_burst_stats_display("RX",
1010 &port->rx_stream->rx_burst_stats);
1011 if (port->tx_stream)
1012 pkt_burst_stats_display("TX",
1013 &port->tx_stream->tx_burst_stats);
1016 if (port->rx_queue_stats_mapping_enabled) {
1018 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1019 printf(" Stats reg %2d RX-packets:%14"PRIu64
1020 " RX-errors:%14"PRIu64
1021 " RX-bytes:%14"PRIu64"\n",
1022 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1026 if (port->tx_queue_stats_mapping_enabled) {
1027 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1028 printf(" Stats reg %2d TX-packets:%14"PRIu64
1029 " TX-bytes:%14"PRIu64"\n",
1030 i, stats->q_opackets[i], stats->q_obytes[i]);
1034 printf(" %s--------------------------------%s\n",
1035 fwd_stats_border, fwd_stats_border);
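/* Display the statistics of one forwarding stream; streams that carried no traffic are skipped. */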
1039 fwd_stream_stats_display(streamid_t stream_id)
1041 struct fwd_stream *fs;
1042 static const char *fwd_top_stats_border = "-------";
1044 fs = fwd_streams[stream_id];
1045 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1046 (fs->fwd_dropped == 0))
1048 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1049 "TX Port=%2d/Queue=%2d %s\n",
1050 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1051 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1052 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1053 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1055 /* if checksum mode */
1056 if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1061 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1062 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1063 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
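/*
 * Drain and free any packets left in the RX queues of the forwarding ports
 * before a new forwarding run starts; each queue is bounded by a one second
 * timer so a constantly busy queue cannot block the flush forever.
 */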
1068 flush_fwd_rx_queues(void)
1070 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1077 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1078 uint64_t timer_period;
1080 /* convert to number of cycles */
1081 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1083 for (j = 0; j < 2; j++) {
1084 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1085 for (rxq = 0; rxq < nb_rxq; rxq++) {
1086 port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do/while loop
				 * below if rte_eth_rx_burst() always returns
				 * a nonzero number of packets. Add a timer so
				 * the loop exits after the one second timer
				 * expires.
				 */
1093 prev_tsc = rte_rdtsc();
1095 nb_rx = rte_eth_rx_burst(port_id, rxq,
1096 pkts_burst, MAX_PKT_BURST);
1097 for (i = 0; i < nb_rx; i++)
1098 rte_pktmbuf_free(pkts_burst[i]);
1100 cur_tsc = rte_rdtsc();
1101 diff_tsc = cur_tsc - prev_tsc;
1102 timer_tsc += diff_tsc;
1103 } while ((nb_rx > 0) &&
1104 (timer_tsc < timer_period));
1108 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
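/*
 * Main forwarding loop of one logical core: apply the packet forwarding
 * callback to every stream assigned to this lcore until it is told to stop,
 * updating the optional bitrate and latency statistics along the way.
 */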
1113 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1115 struct fwd_stream **fsm;
1118 #ifdef RTE_LIBRTE_BITRATE
1119 uint64_t tics_per_1sec;
1120 uint64_t tics_datum;
1121 uint64_t tics_current;
1122 uint8_t idx_port, cnt_ports;
1124 cnt_ports = rte_eth_dev_count();
1125 tics_datum = rte_rdtsc();
1126 tics_per_1sec = rte_get_timer_hz();
1128 fsm = &fwd_streams[fc->stream_idx];
1129 nb_fs = fc->stream_nb;
1131 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1132 (*pkt_fwd)(fsm[sm_id]);
1133 #ifdef RTE_LIBRTE_BITRATE
1134 if (bitrate_enabled != 0 &&
1135 bitrate_lcore_id == rte_lcore_id()) {
1136 tics_current = rte_rdtsc();
1137 if (tics_current - tics_datum >= tics_per_1sec) {
1138 /* Periodic bitrate calculation */
1140 idx_port < cnt_ports;
1142 rte_stats_bitrate_calc(bitrate_data,
1144 tics_datum = tics_current;
1148 #ifdef RTE_LIBRTE_LATENCY_STATS
1149 if (latencystats_enabled != 0 &&
1150 latencystats_lcore_id == rte_lcore_id())
1151 rte_latencystats_update();
1154 } while (! fc->stopped);
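/* lcore entry point launched by launch_packet_forwarding(). */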
1158 start_pkt_forward_on_core(void *fwd_arg)
1160 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1161 cur_fwd_config.fwd_eng->packet_fwd);
1166 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1167 * Used to start communication flows in network loopback test configurations.
1170 run_one_txonly_burst_on_core(void *fwd_arg)
1172 struct fwd_lcore *fwd_lc;
1173 struct fwd_lcore tmp_lcore;
1175 fwd_lc = (struct fwd_lcore *) fwd_arg;
1176 tmp_lcore = *fwd_lc;
1177 tmp_lcore.stopped = 1;
1178 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1183 * Launch packet forwarding:
1184 * - Setup per-port forwarding context.
1185 * - launch logical cores with their forwarding configuration.
1188 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1190 port_fwd_begin_t port_fwd_begin;
1195 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1196 if (port_fwd_begin != NULL) {
1197 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1198 (*port_fwd_begin)(fwd_ports_ids[i]);
1200 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1201 lc_id = fwd_lcores_cpuids[i];
1202 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1203 fwd_lcores[i]->stopped = 0;
1204 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1205 fwd_lcores[i], lc_id);
1207 printf("launch lcore %u failed - diag=%d\n",
1214 * Launch packet forwarding configuration.
1217 start_packet_forwarding(int with_tx_first)
1219 port_fwd_begin_t port_fwd_begin;
1220 port_fwd_end_t port_fwd_end;
1221 struct rte_port *port;
1226 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1227 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1229 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1230 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1232 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1233 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1234 (!nb_rxq || !nb_txq))
1235 rte_exit(EXIT_FAILURE,
1236 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1237 cur_fwd_eng->fwd_mode_name);
1239 if (all_ports_started() == 0) {
1240 printf("Not all ports were started\n");
1243 if (test_done == 0) {
1244 printf("Packet forwarding already started\n");
1248 if (init_fwd_streams() < 0) {
1249 printf("Fail from init_fwd_streams()\n");
1254 for (i = 0; i < nb_fwd_ports; i++) {
1255 pt_id = fwd_ports_ids[i];
1256 port = &ports[pt_id];
1257 if (!port->dcb_flag) {
1258 printf("In DCB mode, all forwarding ports must "
1259 "be configured in this mode.\n");
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
1272 flush_fwd_rx_queues();
1275 pkt_fwd_config_display(&cur_fwd_config);
1276 rxtx_config_display();
1278 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1279 pt_id = fwd_ports_ids[i];
1280 port = &ports[pt_id];
1281 rte_eth_stats_get(pt_id, &port->stats);
1282 port->tx_dropped = 0;
1284 map_port_queue_stats_mapping_registers(pt_id, port);
1286 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1287 fwd_streams[sm_id]->rx_packets = 0;
1288 fwd_streams[sm_id]->tx_packets = 0;
1289 fwd_streams[sm_id]->fwd_dropped = 0;
1290 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1291 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1293 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1294 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1295 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1296 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1297 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1299 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1300 fwd_streams[sm_id]->core_cycles = 0;
1303 if (with_tx_first) {
1304 port_fwd_begin = tx_only_engine.port_fwd_begin;
1305 if (port_fwd_begin != NULL) {
1306 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1307 (*port_fwd_begin)(fwd_ports_ids[i]);
1309 while (with_tx_first--) {
1310 launch_packet_forwarding(
1311 run_one_txonly_burst_on_core);
1312 rte_eal_mp_wait_lcore();
1314 port_fwd_end = tx_only_engine.port_fwd_end;
1315 if (port_fwd_end != NULL) {
1316 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1317 (*port_fwd_end)(fwd_ports_ids[i]);
1320 launch_packet_forwarding(start_pkt_forward_on_core);
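/*
 * Stop packet forwarding: signal the forwarding lcores to stop, wait for them
 * to finish, then aggregate and display the per-stream and per-port statistics.
 */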
1324 stop_packet_forwarding(void)
1326 struct rte_eth_stats stats;
1327 struct rte_port *port;
1328 port_fwd_end_t port_fwd_end;
1333 uint64_t total_recv;
1334 uint64_t total_xmit;
1335 uint64_t total_rx_dropped;
1336 uint64_t total_tx_dropped;
1337 uint64_t total_rx_nombuf;
1338 uint64_t tx_dropped;
1339 uint64_t rx_bad_ip_csum;
1340 uint64_t rx_bad_l4_csum;
1341 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1342 uint64_t fwd_cycles;
1345 static const char *acc_stats_border = "+++++++++++++++";
1348 printf("Packet forwarding not started\n");
1351 printf("Telling cores to stop...");
1352 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1353 fwd_lcores[lc_id]->stopped = 1;
1354 printf("\nWaiting for lcores to finish...\n");
1355 rte_eal_mp_wait_lcore();
1356 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1357 if (port_fwd_end != NULL) {
1358 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1359 pt_id = fwd_ports_ids[i];
1360 (*port_fwd_end)(pt_id);
1363 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1366 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1367 if (cur_fwd_config.nb_fwd_streams >
1368 cur_fwd_config.nb_fwd_ports) {
1369 fwd_stream_stats_display(sm_id);
1370 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1371 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1373 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1375 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1378 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1379 tx_dropped = (uint64_t) (tx_dropped +
1380 fwd_streams[sm_id]->fwd_dropped);
1381 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1384 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1385 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1386 fwd_streams[sm_id]->rx_bad_ip_csum);
1387 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1391 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1392 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1393 fwd_streams[sm_id]->rx_bad_l4_csum);
1394 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1397 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1398 fwd_cycles = (uint64_t) (fwd_cycles +
1399 fwd_streams[sm_id]->core_cycles);
1404 total_rx_dropped = 0;
1405 total_tx_dropped = 0;
1406 total_rx_nombuf = 0;
1407 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1408 pt_id = fwd_ports_ids[i];
1410 port = &ports[pt_id];
1411 rte_eth_stats_get(pt_id, &stats);
1412 stats.ipackets -= port->stats.ipackets;
1413 port->stats.ipackets = 0;
1414 stats.opackets -= port->stats.opackets;
1415 port->stats.opackets = 0;
1416 stats.ibytes -= port->stats.ibytes;
1417 port->stats.ibytes = 0;
1418 stats.obytes -= port->stats.obytes;
1419 port->stats.obytes = 0;
1420 stats.imissed -= port->stats.imissed;
1421 port->stats.imissed = 0;
1422 stats.oerrors -= port->stats.oerrors;
1423 port->stats.oerrors = 0;
1424 stats.rx_nombuf -= port->stats.rx_nombuf;
1425 port->stats.rx_nombuf = 0;
1427 total_recv += stats.ipackets;
1428 total_xmit += stats.opackets;
1429 total_rx_dropped += stats.imissed;
1430 total_tx_dropped += port->tx_dropped;
1431 total_rx_nombuf += stats.rx_nombuf;
1433 fwd_port_stats_display(pt_id, &stats);
1436 printf("\n %s Accumulated forward statistics for all ports"
1438 acc_stats_border, acc_stats_border);
1439 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1441 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1443 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1444 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1445 if (total_rx_nombuf > 0)
1446 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1447 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1449 acc_stats_border, acc_stats_border);
1450 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1452 printf("\n CPU cycles/packet=%u (total cycles="
1453 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1454 (unsigned int)(fwd_cycles / total_recv),
1455 fwd_cycles, total_recv);
1457 printf("\nDone.\n");
1462 dev_set_link_up(portid_t pid)
1464 if (rte_eth_dev_set_link_up(pid) < 0)
1465 printf("\nSet link up fail.\n");
1469 dev_set_link_down(portid_t pid)
1471 if (rte_eth_dev_set_link_down(pid) < 0)
1472 printf("\nSet link down fail.\n");
1476 all_ports_started(void)
1479 struct rte_port *port;
1481 RTE_ETH_FOREACH_DEV(pi) {
1483 /* Check if there is a port which is not started */
1484 if ((port->port_status != RTE_PORT_STARTED) &&
1485 (port->slave_flag == 0))
1489 /* No port is not started */
1494 all_ports_stopped(void)
1497 struct rte_port *port;
1499 RTE_ETH_FOREACH_DEV(pi) {
1501 if ((port->port_status != RTE_PORT_STOPPED) &&
1502 (port->slave_flag == 0))
1510 port_is_started(portid_t port_id)
1512 if (port_id_is_invalid(port_id, ENABLED_WARN))
1515 if (ports[port_id].port_status != RTE_PORT_STARTED)
1522 port_is_closed(portid_t port_id)
1524 if (port_id_is_invalid(port_id, ENABLED_WARN))
1527 if (ports[port_id].port_status != RTE_PORT_CLOSED)
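/*
 * Configure and start the given port, or all ports when pid is RTE_PORT_ALL:
 * (re)configure the device and its RX/TX queues if needed, register the event
 * callbacks, start the device and display its MAC address.
 */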
1534 start_port(portid_t pid)
1536 int diag, need_check_link_status = -1;
1539 struct rte_port *port;
1540 struct ether_addr mac_addr;
1541 enum rte_eth_event_type event_type;
1543 if (port_id_is_invalid(pid, ENABLED_WARN))
1548 RTE_ETH_FOREACH_DEV(pi) {
1549 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1552 need_check_link_status = 0;
1554 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1555 RTE_PORT_HANDLING) == 0) {
1556 printf("Port %d is now not stopped\n", pi);
1560 if (port->need_reconfig > 0) {
1561 port->need_reconfig = 0;
1563 if (flow_isolate_all) {
1564 int ret = port_flow_isolate(pi, 1);
1566 printf("Failed to apply isolated"
1567 " mode on port %d\n", pi);
1572 printf("Configuring Port %d (socket %u)\n", pi,
1574 /* configure port */
1575 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1578 if (rte_atomic16_cmpset(&(port->port_status),
1579 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1580 printf("Port %d can not be set back "
1581 "to stopped\n", pi);
1582 printf("Fail to configure port %d\n", pi);
1583 /* try to reconfigure port next time */
1584 port->need_reconfig = 1;
1588 if (port->need_reconfig_queues > 0) {
1589 port->need_reconfig_queues = 0;
1590 /* setup tx queues */
1591 for (qi = 0; qi < nb_txq; qi++) {
1592 if ((numa_support) &&
1593 (txring_numa[pi] != NUMA_NO_CONFIG))
1594 diag = rte_eth_tx_queue_setup(pi, qi,
1595 nb_txd,txring_numa[pi],
1598 diag = rte_eth_tx_queue_setup(pi, qi,
1599 nb_txd,port->socket_id,
1605 /* Fail to setup tx queue, return */
1606 if (rte_atomic16_cmpset(&(port->port_status),
1608 RTE_PORT_STOPPED) == 0)
1609 printf("Port %d can not be set back "
1610 "to stopped\n", pi);
1611 printf("Fail to configure port %d tx queues\n", pi);
1612 /* try to reconfigure queues next time */
1613 port->need_reconfig_queues = 1;
1616 /* setup rx queues */
1617 for (qi = 0; qi < nb_rxq; qi++) {
1618 if ((numa_support) &&
1619 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1620 struct rte_mempool * mp =
1621 mbuf_pool_find(rxring_numa[pi]);
1623 printf("Failed to setup RX queue:"
1624 "No mempool allocation"
1625 " on the socket %d\n",
1630 diag = rte_eth_rx_queue_setup(pi, qi,
1631 nb_rxd,rxring_numa[pi],
1632 &(port->rx_conf),mp);
1634 struct rte_mempool *mp =
1635 mbuf_pool_find(port->socket_id);
1637 printf("Failed to setup RX queue:"
1638 "No mempool allocation"
1639 " on the socket %d\n",
1643 diag = rte_eth_rx_queue_setup(pi, qi,
1644 nb_rxd,port->socket_id,
1645 &(port->rx_conf), mp);
1650 /* Fail to setup rx queue, return */
1651 if (rte_atomic16_cmpset(&(port->port_status),
1653 RTE_PORT_STOPPED) == 0)
1654 printf("Port %d can not be set back "
1655 "to stopped\n", pi);
1656 printf("Fail to configure port %d rx queues\n", pi);
1657 /* try to reconfigure queues next time */
1658 port->need_reconfig_queues = 1;
1663 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1664 event_type < RTE_ETH_EVENT_MAX;
1666 diag = rte_eth_dev_callback_register(pi,
				printf("Failed to setup event callback for event %d\n",
1678 if (rte_eth_dev_start(pi) < 0) {
1679 printf("Fail to start port %d\n", pi);
1681 /* Fail to setup rx queue, return */
1682 if (rte_atomic16_cmpset(&(port->port_status),
1683 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1684 printf("Port %d can not be set back to "
1689 if (rte_atomic16_cmpset(&(port->port_status),
1690 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1691 printf("Port %d can not be set into started\n", pi);
1693 rte_eth_macaddr_get(pi, &mac_addr);
1694 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1695 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1696 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1697 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1699 /* at least one port started, need checking link status */
1700 need_check_link_status = 1;
1703 if (need_check_link_status == 1 && !no_link_check)
1704 check_all_ports_link_status(RTE_PORT_ALL);
1705 else if (need_check_link_status == 0)
1706 printf("Please stop the ports first\n");
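/* Stop the given port (or all ports), skipping ports that are still forwarding or are bonding slaves. */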
1713 stop_port(portid_t pid)
1716 struct rte_port *port;
1717 int need_check_link_status = 0;
1724 if (port_id_is_invalid(pid, ENABLED_WARN))
1727 printf("Stopping ports...\n");
1729 RTE_ETH_FOREACH_DEV(pi) {
1730 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1733 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1734 printf("Please remove port %d from forwarding configuration.\n", pi);
1738 if (port_is_bonding_slave(pi)) {
1739 printf("Please remove port %d from bonded device.\n", pi);
1744 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1745 RTE_PORT_HANDLING) == 0)
1748 rte_eth_dev_stop(pi);
1750 if (rte_atomic16_cmpset(&(port->port_status),
1751 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1752 printf("Port %d can not be set into stopped\n", pi);
1753 need_check_link_status = 1;
1755 if (need_check_link_status && !no_link_check)
1756 check_all_ports_link_status(RTE_PORT_ALL);
1762 close_port(portid_t pid)
1765 struct rte_port *port;
1767 if (port_id_is_invalid(pid, ENABLED_WARN))
1770 printf("Closing ports...\n");
1772 RTE_ETH_FOREACH_DEV(pi) {
1773 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1776 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1777 printf("Please remove port %d from forwarding configuration.\n", pi);
1781 if (port_is_bonding_slave(pi)) {
1782 printf("Please remove port %d from bonded device.\n", pi);
1787 if (rte_atomic16_cmpset(&(port->port_status),
1788 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1789 printf("Port %d is already closed\n", pi);
1793 if (rte_atomic16_cmpset(&(port->port_status),
1794 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1795 printf("Port %d is now not stopped\n", pi);
1799 if (port->flow_list)
1800 port_flow_flush(pi);
1801 rte_eth_dev_close(pi);
1803 if (rte_atomic16_cmpset(&(port->port_status),
1804 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1805 printf("Port %d cannot be set to closed\n", pi);
1812 reset_port(portid_t pid)
1816 struct rte_port *port;
1818 if (port_id_is_invalid(pid, ENABLED_WARN))
1821 printf("Resetting ports...\n");
1823 RTE_ETH_FOREACH_DEV(pi) {
1824 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1827 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1828 printf("Please remove port %d from forwarding "
1829 "configuration.\n", pi);
1833 if (port_is_bonding_slave(pi)) {
1834 printf("Please remove port %d from bonded device.\n",
1839 diag = rte_eth_dev_reset(pi);
1842 port->need_reconfig = 1;
1843 port->need_reconfig_queues = 1;
1845 printf("Failed to reset port %d. diag=%d\n", pi, diag);
1853 attach_port(char *identifier)
1856 unsigned int socket_id;
1858 printf("Attaching a new port...\n");
1860 if (identifier == NULL) {
1861 printf("Invalid parameters are specified\n");
1865 if (rte_eth_dev_attach(identifier, &pi))
1868 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1869 /* if socket_id is invalid, set to 0 */
1870 if (check_socket_id(socket_id) < 0)
1872 reconfig(pi, socket_id);
1873 rte_eth_promiscuous_enable(pi);
1875 nb_ports = rte_eth_dev_count();
1877 ports[pi].port_status = RTE_PORT_STOPPED;
1879 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1884 detach_port(portid_t port_id)
1886 char name[RTE_ETH_NAME_MAX_LEN];
1888 printf("Detaching a port...\n");
1890 if (!port_is_closed(port_id)) {
1891 printf("Please close port first\n");
1895 if (ports[port_id].flow_list)
1896 port_flow_flush(port_id);
1898 if (rte_eth_dev_detach(port_id, name)) {
1899 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1903 nb_ports = rte_eth_dev_count();
1905 printf("Port '%s' is detached. Now total ports is %d\n",
1917 stop_packet_forwarding();
1919 if (ports != NULL) {
1921 RTE_ETH_FOREACH_DEV(pt_id) {
1922 printf("\nShutting down port %d...\n", pt_id);
1928 printf("\nBye...\n");
1931 typedef void (*cmd_func_t)(void);
1932 struct pmd_test_command {
1933 const char *cmd_name;
1934 cmd_func_t cmd_func;
1937 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1939 /* Check the link status of all ports in up to 9s, and print them finally */
1941 check_all_ports_link_status(uint32_t port_mask)
1943 #define CHECK_INTERVAL 100 /* 100ms */
1944 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1946 uint8_t count, all_ports_up, print_flag = 0;
1947 struct rte_eth_link link;
1949 printf("Checking link statuses...\n");
1951 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1953 RTE_ETH_FOREACH_DEV(portid) {
1954 if ((port_mask & (1 << portid)) == 0)
1956 memset(&link, 0, sizeof(link));
1957 rte_eth_link_get_nowait(portid, &link);
1958 /* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port%d Link Up. speed %u Mbps- %s\n",
						portid, link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
1970 /* clear all_ports_up flag if any link down */
1971 if (link.link_status == ETH_LINK_DOWN) {
1976 /* after finally printing all link status, get out */
1977 if (print_flag == 1)
1980 if (all_ports_up == 0) {
1982 rte_delay_ms(CHECK_INTERVAL);
1985 /* set the print_flag if all ports up or timeout */
1986 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1996 rmv_event_callback(void *arg)
1998 struct rte_eth_dev *dev;
1999 portid_t port_id = (intptr_t)arg;
2001 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2002 dev = &rte_eth_devices[port_id];
2005 close_port(port_id);
2006 printf("removing device %s\n", dev->device->name);
2007 if (rte_eal_dev_detach(dev->device))
2008 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
2012 /* This function is used by the interrupt thread */
2014 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2017 static const char * const event_desc[] = {
2018 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2019 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2020 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2021 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2022 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2023 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2024 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2025 [RTE_ETH_EVENT_MAX] = NULL,
2028 RTE_SET_USED(param);
2029 RTE_SET_USED(ret_param);
2031 if (type >= RTE_ETH_EVENT_MAX) {
2032 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2033 port_id, __func__, type);
2035 } else if (event_print_mask & (UINT32_C(1) << type)) {
2036 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2042 case RTE_ETH_EVENT_INTR_RMV:
2043 if (rte_eal_alarm_set(100000,
2044 rmv_event_callback, (void *)(intptr_t)port_id))
2045 fprintf(stderr, "Could not set up deferred device removal\n");
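/*
 * Program the TX queue to statistics-counter mappings given on the command
 * line into the port registers; returns the PMD status code.
 */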
2054 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2058 uint8_t mapping_found = 0;
2060 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2061 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2062 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2063 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2064 tx_queue_stats_mappings[i].queue_id,
2065 tx_queue_stats_mappings[i].stats_counter_id);
2072 port->tx_queue_stats_mapping_enabled = 1;
2077 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2081 uint8_t mapping_found = 0;
2083 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2084 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2085 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2086 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2087 rx_queue_stats_mappings[i].queue_id,
2088 rx_queue_stats_mappings[i].stats_counter_id);
2095 port->rx_queue_stats_mapping_enabled = 1;
2100 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2104 diag = set_tx_queue_stats_mapping_registers(pi, port);
2106 if (diag == -ENOTSUP) {
2107 port->tx_queue_stats_mapping_enabled = 0;
2108 printf("TX queue stats mapping not supported port id=%d\n", pi);
2111 rte_exit(EXIT_FAILURE,
2112 "set_tx_queue_stats_mapping_registers "
2113 "failed for port id=%d diag=%d\n",
2117 diag = set_rx_queue_stats_mapping_registers(pi, port);
2119 if (diag == -ENOTSUP) {
2120 port->rx_queue_stats_mapping_enabled = 0;
2121 printf("RX queue stats mapping not supported port id=%d\n", pi);
2124 rte_exit(EXIT_FAILURE,
2125 "set_rx_queue_stats_mapping_registers "
2126 "failed for port id=%d diag=%d\n",
2132 rxtx_port_config(struct rte_port *port)
2134 port->rx_conf = port->dev_info.default_rxconf;
2135 port->tx_conf = port->dev_info.default_txconf;
2137 /* Check if any RX/TX parameters have been passed */
2138 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2139 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2141 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2142 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2144 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2145 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2147 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2148 port->rx_conf.rx_free_thresh = rx_free_thresh;
2150 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2151 port->rx_conf.rx_drop_en = rx_drop_en;
2153 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2154 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2156 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2157 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2159 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2160 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2162 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2163 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2165 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2166 port->tx_conf.tx_free_thresh = tx_free_thresh;
2168 if (txq_flags != RTE_PMD_PARAM_UNSET)
2169 port->tx_conf.txq_flags = txq_flags;
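/*
 * Build the default configuration of every probed port: RX mode, RSS, flow
 * director, interrupt flags and the per-queue RX/TX parameters.
 */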
2173 init_port_config(void)
2176 struct rte_port *port;
2178 RTE_ETH_FOREACH_DEV(pid) {
2180 port->dev_conf.rxmode = rx_mode;
2181 port->dev_conf.fdir_conf = fdir_conf;
2183 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2184 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2186 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2187 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2190 if (port->dcb_flag == 0) {
2191 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2192 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2194 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2197 rxtx_port_config(port);
2199 rte_eth_macaddr_get(pid, &port->eth_addr);
2201 map_port_queue_stats_mapping_registers(pid, port);
2202 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2203 rte_pmd_ixgbe_bypass_init(pid);
2206 if (lsc_interrupt &&
2207 (rte_eth_devices[pid].data->dev_flags &
2208 RTE_ETH_DEV_INTR_LSC))
2209 port->dev_conf.intr_conf.lsc = 1;
2210 if (rmv_interrupt &&
2211 (rte_eth_devices[pid].data->dev_flags &
2212 RTE_ETH_DEV_INTR_RMV))
2213 port->dev_conf.intr_conf.rmv = 1;
2215 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2216 /* Detect softnic port */
2217 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2218 port->softnic_enable = 1;
2219 memset(&port->softport, 0, sizeof(struct softnic_port));
2221 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2222 port->softport.tm_flag = 1;
2228 void set_port_slave_flag(portid_t slave_pid)
2230 struct rte_port *port;
2232 port = &ports[slave_pid];
2233 port->slave_flag = 1;
2236 void clear_port_slave_flag(portid_t slave_pid)
2238 struct rte_port *port;
2240 port = &ports[slave_pid];
2241 port->slave_flag = 0;
2244 uint8_t port_is_bonding_slave(portid_t slave_pid)
2246 struct rte_port *port;
2248 port = &ports[slave_pid];
2249 return port->slave_flag;
2252 const uint16_t vlan_tags[] = {
2253 0, 1, 2, 3, 4, 5, 6, 7,
2254 8, 9, 10, 11, 12, 13, 14, 15,
2255 16, 17, 18, 19, 20, 21, 22, 23,
2256 24, 25, 26, 27, 28, 29, 30, 31
2260 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2261 enum dcb_mode_enable dcb_mode,
2262 enum rte_eth_nb_tcs num_tcs,
2268 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2269 * given above, and the number of traffic classes available for use.
2271 if (dcb_mode == DCB_VT_ENABLED) {
2272 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2273 ð_conf->rx_adv_conf.vmdq_dcb_conf;
2274 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2275 ð_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2277 /* VMDQ+DCB RX and TX configurations */
2278 vmdq_rx_conf->enable_default_pool = 0;
2279 vmdq_rx_conf->default_pool = 0;
2280 vmdq_rx_conf->nb_queue_pools =
2281 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2282 vmdq_tx_conf->nb_queue_pools =
2283 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2285 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2286 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2287 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2288 vmdq_rx_conf->pool_map[i].pools =
2289 1 << (i % vmdq_rx_conf->nb_queue_pools);
2291 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2292 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2293 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2296 /* set DCB mode of RX and TX of multiple queues */
2297 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2298 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2300 struct rte_eth_dcb_rx_conf *rx_conf =
2301 ð_conf->rx_adv_conf.dcb_rx_conf;
2302 struct rte_eth_dcb_tx_conf *tx_conf =
2303 ð_conf->tx_adv_conf.dcb_tx_conf;
2305 rx_conf->nb_tcs = num_tcs;
2306 tx_conf->nb_tcs = num_tcs;
2308 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2309 rx_conf->dcb_tc[i] = i % num_tcs;
2310 tx_conf->dcb_tc[i] = i % num_tcs;
2312 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2313 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2314 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2318 eth_conf->dcb_capability_en =
2319 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2321 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
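/*
 * Reconfigure one port for DCB testing: build the DCB configuration, write it
 * to the device and adjust the global RX/TX queue counts to match.
 */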
2327 init_port_dcb_config(portid_t pid,
2328 enum dcb_mode_enable dcb_mode,
2329 enum rte_eth_nb_tcs num_tcs,
2332 struct rte_eth_conf port_conf;
2333 struct rte_port *rte_port;
2337 rte_port = &ports[pid];
2339 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2340 /* Enter DCB configuration status */
2343 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2344 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2347 port_conf.rxmode.hw_vlan_filter = 1;
2350 * Write the configuration into the device.
2351 * Set the numbers of RX & TX queues to 0, so
2352 * the RX & TX queues will not be setup.
2354 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2356 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2358 /* If dev_info.vmdq_pool_base is greater than 0,
2359 * the queue id of vmdq pools is started after pf queues.
2361 if (dcb_mode == DCB_VT_ENABLED &&
2362 rte_port->dev_info.vmdq_pool_base > 0) {
2363 printf("VMDQ_DCB multi-queue mode is nonsensical"
2364 " for port %d.", pid);
2368 /* Assume the ports in testpmd have the same dcb capability
2369 * and has the same number of rxq and txq in dcb mode
2371 if (dcb_mode == DCB_VT_ENABLED) {
2372 if (rte_port->dev_info.max_vfs > 0) {
2373 nb_rxq = rte_port->dev_info.nb_rx_queues;
2374 nb_txq = rte_port->dev_info.nb_tx_queues;
2376 nb_rxq = rte_port->dev_info.max_rx_queues;
2377 nb_txq = rte_port->dev_info.max_tx_queues;
2380 /*if vt is disabled, use all pf queues */
2381 if (rte_port->dev_info.vmdq_pool_base == 0) {
2382 nb_rxq = rte_port->dev_info.max_rx_queues;
2383 nb_txq = rte_port->dev_info.max_tx_queues;
2385 nb_rxq = (queueid_t)num_tcs;
2386 nb_txq = (queueid_t)num_tcs;
2390 rx_free_thresh = 64;
2392 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2394 rxtx_port_config(rte_port);
2396 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2397 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2398 rx_vft_set(pid, vlan_tags[i], 1);
2400 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2401 map_port_queue_stats_mapping_registers(pid, rte_port);
2403 rte_port->dcb_flag = 1;
2411 /* Configuration of Ethernet ports. */
2412 ports = rte_zmalloc("testpmd: ports",
2413 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2414 RTE_CACHE_LINE_SIZE);
2415 if (ports == NULL) {
2416 rte_exit(EXIT_FAILURE,
2417 "rte_zmalloc(%d struct rte_port) failed\n",
2433 const char clr[] = { 27, '[', '2', 'J', '\0' };
2434 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2436 /* Clear screen and move to top left */
2437 printf("%s%s", clr, top_left);
2439 printf("\nPort statistics ====================================");
2440 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2441 nic_stats_display(fwd_ports_ids[i]);
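/* SIGINT/SIGTERM handler: clean up the optional subsystems and re-raise the signal. */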
2445 signal_handler(int signum)
2447 if (signum == SIGINT || signum == SIGTERM) {
2448 printf("\nSignal %d received, preparing to exit...\n",
2450 #ifdef RTE_LIBRTE_PDUMP
2451 /* uninitialize packet capture framework */
2454 #ifdef RTE_LIBRTE_LATENCY_STATS
2455 rte_latencystats_uninit();
2458 /* Set flag to indicate the force termination. */
2460 /* exit with the expected status */
2461 signal(signum, SIG_DFL);
2462 kill(getpid(), signum);
2467 main(int argc, char** argv)
2472 signal(SIGINT, signal_handler);
2473 signal(SIGTERM, signal_handler);
2475 diag = rte_eal_init(argc, argv);
2477 rte_panic("Cannot init EAL\n");
2479 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2480 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2484 #ifdef RTE_LIBRTE_PDUMP
2485 /* initialize packet capture framework */
2486 rte_pdump_init(NULL);
2489 nb_ports = (portid_t) rte_eth_dev_count();
2491 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2493 /* allocate port structures, and init them */
2496 set_def_fwd_config();
2498 rte_panic("Empty set of forwarding logical cores - check the "
2499 "core mask supplied in the command parameters\n");
2501 /* Bitrate/latency stats disabled by default */
2502 #ifdef RTE_LIBRTE_BITRATE
2503 bitrate_enabled = 0;
2505 #ifdef RTE_LIBRTE_LATENCY_STATS
2506 latencystats_enabled = 0;
2512 launch_args_parse(argc, argv);
2514 if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
				"interactive mode.\n");
2518 if (tx_first && lsc_interrupt) {
2519 printf("Warning: lsc_interrupt needs to be off when "
2520 " using tx_first. Disabling.\n");
2524 if (!nb_rxq && !nb_txq)
2525 printf("Warning: Either rx or tx queues should be non-zero\n");
2527 if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
2533 if (start_port(RTE_PORT_ALL) != 0)
2534 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2536 /* set all ports to promiscuous mode by default */
2537 RTE_ETH_FOREACH_DEV(port_id)
2538 rte_eth_promiscuous_enable(port_id);
2540 /* Init metrics library */
2541 rte_metrics_init(rte_socket_id());
2543 #ifdef RTE_LIBRTE_LATENCY_STATS
2544 if (latencystats_enabled != 0) {
2545 int ret = rte_latencystats_init(1, NULL);
2547 printf("Warning: latencystats init()"
2548 " returned error %d\n", ret);
2549 printf("Latencystats running on lcore %d\n",
2550 latencystats_lcore_id);
2554 /* Setup bitrate stats */
2555 #ifdef RTE_LIBRTE_BITRATE
2556 if (bitrate_enabled != 0) {
2557 bitrate_data = rte_stats_bitrate_create();
2558 if (bitrate_data == NULL)
2559 rte_exit(EXIT_FAILURE,
2560 "Could not allocate bitrate data.\n");
2561 rte_stats_bitrate_reg(bitrate_data);
2565 #ifdef RTE_LIBRTE_CMDLINE
2566 if (strlen(cmdline_filename) != 0)
2567 cmdline_read_from_file(cmdline_filename);
2569 if (interactive == 1) {
2571 printf("Start automatic packet forwarding\n");
2572 start_packet_forwarding(0);
2584 printf("No commandline core given, start packet forwarding\n");
2585 start_packet_forwarding(tx_first);
2586 if (stats_period != 0) {
2587 uint64_t prev_time = 0, cur_time, diff_time = 0;
2588 uint64_t timer_period;
2590 /* Convert to number of cycles */
2591 timer_period = stats_period * rte_get_timer_hz();
2593 while (f_quit == 0) {
2594 cur_time = rte_get_timer_cycles();
2595 diff_time += cur_time - prev_time;
2597 if (diff_time >= timer_period) {
2599 /* Reset the timer */
2602 /* Sleep to avoid unnecessary checks */
2603 prev_time = cur_time;
2608 printf("Press enter to exit\n");
2609 rc = read(0, &c, 1);