4 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <sys/types.h>
45 #include <sys/queue.h>
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_launch.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
/* NOTE(review): this excerpt is elided — several original lines are missing
 * between the numbered lines below; text is kept byte-identical. */
93 uint16_t verbose_level = 0; /**< Silent by default. */
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
/* Path of a command-line script file; zero-initialized, i.e. unset by default. */
99 char cmdline_filename[PATH_MAX] = {0};
102 * NUMA support configuration.
103 * When set, the NUMA support attempts to dispatch the allocation of the
104 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
105 * probed ports among the CPU sockets 0 and 1.
106 * Otherwise, all memory is allocated from CPU socket 0.
108 uint8_t numa_support = 1; /**< numa enabled by default */
111 * In UMA mode,all memory is allocated from socket 0 if --socket-num is
114 uint8_t socket_num = UMA_NO_CONFIG;
117 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
122 * Record the Ethernet address of peer target ports to which packets are
124 * Must be instantiated with the ethernet addresses of peer traffic generator
127 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
128 portid_t nb_peer_eth_addrs = 0;
131 * Probed Target Environment.
133 struct rte_port *ports; /**< For all probed ethernet ports. */
134 portid_t nb_ports; /**< Number of probed ethernet ports. */
135 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
136 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
139 * Test Forwarding Configuration.
140 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
141 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
143 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
144 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
145 portid_t nb_cfg_ports; /**< Number of configured ports. */
146 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
148 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
149 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
151 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
152 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
155 * Forwarding engines.
/* Table of available forwarding engines; entries beyond those visible here
 * are elided in this excerpt. Some engines are compiled in conditionally. */
157 struct fwd_engine * fwd_engines[] = {
166 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
168 &softnic_tm_bypass_engine,
170 #ifdef RTE_LIBRTE_IEEE1588
171 &ieee1588_fwd_engine,
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
184 * specified on command-line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
188 * In container, it cannot terminate the process which running with 'stats-period'
189 * option. Set flag to exit stats period loop after received SIGINT/SIGTERM.
194 * Configuration of packet segments used by the "txonly" processing engine.
196 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
197 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
198 TXONLY_DEF_PACKET_LEN,
200 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
202 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
203 /**< Split policy for packets to TX. */
205 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
206 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
208 /* current configuration is in DCB or not,0 means it is not in DCB mode */
209 uint8_t dcb_config = 0;
211 /* Whether the dcb is in testing status */
212 uint8_t dcb_test = 0;
215 * Configurable number of RX/TX queues.
217 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
218 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
221 * Configurable number of RX/TX ring descriptors.
223 #define RTE_TEST_RX_DESC_DEFAULT 128
224 #define RTE_TEST_TX_DESC_DEFAULT 512
225 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
226 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
/* Sentinel: "not set on the command line"; the PMD default is used instead. */
228 #define RTE_PMD_PARAM_UNSET -1
230 * Configurable values of RX and TX ring threshold registers.
233 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
234 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
237 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
238 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
242 * Configurable value of RX free threshold.
244 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
247 * Configurable value of RX drop enable.
249 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
252 * Configurable value of TX free threshold.
254 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
257 * Configurable value of TX RS bit threshold.
259 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
262 * Configurable value of TX queue flags.
264 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
267 * Receive Side Scaling (RSS) configuration.
269 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
272 * Port topology configuration
274 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
277 * Avoids to flush all the RX streams before starts forwarding.
279 uint8_t no_flush_rx = 0; /* flush by default */
282 * Flow API isolated mode.
284 uint8_t flow_isolate_all;
287 * Avoids to check link status when starting/stopping a port.
289 uint8_t no_link_check = 0; /* check by default */
292 * Enable link status change notification
294 uint8_t lsc_interrupt = 1; /* enabled by default */
297 * Enable device removal notification.
299 uint8_t rmv_interrupt = 1; /* enabled by default */
302 * Display or mask ether events
303 * Default to all events except VF_MBOX
/* Bitmask of rte_eth_event_type values whose callbacks print a message. */
305 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
306 (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
307 (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
308 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
309 (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
310 (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
313 * NIC bypass mode configuration options.
316 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
317 /* The NIC bypass watchdog timeout. */
318 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
322 #ifdef RTE_LIBRTE_LATENCY_STATS
325 * Set when latency stats is enabled in the commandline
327 uint8_t latencystats_enabled;
330 * Lcore ID to serive latency statistics.
332 lcoreid_t latencystats_lcore_id = -1;
337 * Ethernet device configuration.
/* Default RX mode applied to every port; fields can be changed at runtime. */
339 struct rte_eth_rxmode rx_mode = {
340 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
342 .header_split = 0, /**< Header Split disabled. */
343 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
344 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
345 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
346 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
347 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
348 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
349 .hw_timestamp = 0, /**< HW timestamp enabled. */
/* Flow Director defaults: disabled, with fully-opaque (all-ones) masks. */
352 struct rte_fdir_conf fdir_conf = {
353 .mode = RTE_FDIR_MODE_NONE,
354 .pballoc = RTE_FDIR_PBALLOC_64K,
355 .status = RTE_FDIR_REPORT_STATUS,
357 .vlan_tci_mask = 0x0,
359 .src_ip = 0xFFFFFFFF,
360 .dst_ip = 0xFFFFFFFF,
363 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
364 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
366 .src_port_mask = 0xFFFF,
367 .dst_port_mask = 0xFFFF,
368 .mac_addr_byte_mask = 0xFF,
369 .tunnel_type_mask = 1,
370 .tunnel_id_mask = 0xFFFFFFFF,
375 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
377 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
378 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
380 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
381 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
383 uint16_t nb_tx_queue_stats_mappings = 0;
384 uint16_t nb_rx_queue_stats_mappings = 0;
387 * Display zero values by default for xstats
389 uint8_t xstats_hide_zero;
391 unsigned int num_sockets = 0;
392 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
394 #ifdef RTE_LIBRTE_BITRATE
395 /* Bitrate statistics */
396 struct rte_stats_bitrates *bitrate_data;
397 lcoreid_t bitrate_lcore_id;
398 uint8_t bitrate_enabled;
401 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
402 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
404 /* Forward function declarations */
405 static void map_port_queue_stats_mapping_registers(portid_t pi,
406 struct rte_port *port);
407 static void check_all_ports_link_status(uint32_t port_mask);
408 static int eth_event_callback(portid_t port_id,
409 enum rte_eth_event_type type,
410 void *param, void *ret_param);
413 * Check if all the ports are started.
414 * If yes, return positive value. If not, return zero.
416 static int all_ports_started(void);
418 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
/* Default GSO segment size: one Ethernet MTU frame without the CRC. */
419 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
422 * Helper function to check if socket is already discovered.
423 * If yes, return positive value. If not, return zero.
426 new_socket_id(unsigned int socket_id)
/* Linear scan over the socket_ids[] table filled in during lcore discovery. */
430 for (i = 0; i < num_sockets; i++) {
431 if (socket_ids[i] == socket_id)
438 * Setup default configuration.
/* Build the default forwarding-lcore list: every enabled lcore except the
 * master, recording each newly seen NUMA socket id along the way. */
441 set_default_fwd_lcores_config(void)
445 unsigned int sock_num;
448 for (i = 0; i < RTE_MAX_LCORE; i++) {
449 sock_num = rte_lcore_to_socket_id(i);
450 if (new_socket_id(sock_num)) {
/* Hard failure: the socket_ids[] table cannot grow past RTE_MAX_NUMA_NODES. */
451 if (num_sockets >= RTE_MAX_NUMA_NODES) {
452 rte_exit(EXIT_FAILURE,
453 "Total sockets greater than %u\n",
456 socket_ids[num_sockets++] = sock_num;
/* Only enabled, non-master lcores become forwarding cores. */
458 if (!rte_lcore_is_enabled(i))
460 if (i == rte_get_master_lcore())
462 fwd_lcores_cpuids[nb_lc++] = i;
464 nb_lcores = (lcoreid_t) nb_lc;
465 nb_cfg_lcores = nb_lcores;
/* Give every possible peer port a distinct locally-administered MAC:
 * first byte = ETHER_LOCAL_ADMIN_ADDR, last byte = the port index. */
470 set_def_peer_eth_addrs(void)
474 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
475 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
476 peer_eth_addrs[i].addr_bytes[5] = i;
/* Default port list: forward on every probed ethdev, in probe order. */
481 set_default_fwd_ports_config(void)
486 RTE_ETH_FOREACH_DEV(pt_id)
487 fwd_ports_ids[i++] = pt_id;
489 nb_cfg_ports = nb_ports;
490 nb_fwd_ports = nb_ports;
/* Install the full default forwarding configuration (lcores, peer MACs,
 * ports) by delegating to the three helpers above. */
494 set_def_fwd_config(void)
496 set_default_fwd_lcores_config();
497 set_def_peer_eth_addrs();
498 set_default_fwd_ports_config();
502 * Configuration initialisation done once at init time.
/* Create one mbuf pool of nb_mbuf elements of mbuf_seg_size bytes on the
 * given NUMA socket; exits the process on failure. */
505 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
506 unsigned int socket_id)
508 char pool_name[RTE_MEMPOOL_NAMESIZE];
509 struct rte_mempool *rte_mp = NULL;
512 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
513 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
516 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
517 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
/* Anonymous-memory path: build an empty pool, then populate it from
 * anonymous mappings and run the standard pktmbuf initializers. */
520 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
521 mb_size, (unsigned) mb_mempool_cache,
522 sizeof(struct rte_pktmbuf_pool_private),
527 if (rte_mempool_populate_anon(rte_mp) == 0) {
528 rte_mempool_free(rte_mp);
532 rte_pktmbuf_pool_init(rte_mp, NULL);
533 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
535 /* wrapper to rte_mempool_create() */
536 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
541 if (rte_mp == NULL) {
542 rte_exit(EXIT_FAILURE,
543 "Creation of mbuf pool for socket %u failed: %s\n",
544 socket_id, rte_strerror(rte_errno));
545 } else if (verbose_level > 0) {
546 rte_mempool_dump(stdout, rte_mp);
551 * Check given socket id is valid or not with NUMA mode,
552 * if valid, return 0, else return -1
555 check_socket_id(const unsigned int socket_id)
/* Warn only once per process run about unconfigured NUMA sockets. */
557 static int warning_once = 0;
559 if (new_socket_id(socket_id)) {
560 if (!warning_once && numa_support)
561 printf("Warning: NUMA should be configured manually by"
562 " using --port-numa-config and"
563 " --ring-numa-config parameters along with"
572 * Get the allowed maximum number of RX queues.
573 * *pid return the port id which has minimal value of
574 * max_rx_queues in all ports.
577 get_allowed_max_nb_rxq(portid_t *pid)
579 queueid_t allowed_max_rxq = MAX_QUEUE_ID;
581 struct rte_eth_dev_info dev_info;
/* Take the minimum of max_rx_queues across every probed port. */
583 RTE_ETH_FOREACH_DEV(pi) {
584 rte_eth_dev_info_get(pi, &dev_info);
585 if (dev_info.max_rx_queues < allowed_max_rxq) {
586 allowed_max_rxq = dev_info.max_rx_queues;
590 return allowed_max_rxq;
594 * Check input rxq is valid or not.
595 * If input rxq is not greater than any of maximum number
596 * of RX queues of all ports, it is valid.
597 * if valid, return 0, else return -1
600 check_nb_rxq(queueid_t rxq)
602 queueid_t allowed_max_rxq;
605 allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606 if (rxq > allowed_max_rxq) {
607 printf("Fail: input rxq (%u) can't be greater "
608 "than max_rx_queues (%u) of port %u\n",
618 * Get the allowed maximum number of TX queues.
619 * *pid return the port id which has minimal value of
620 * max_tx_queues in all ports.
623 get_allowed_max_nb_txq(portid_t *pid)
625 queueid_t allowed_max_txq = MAX_QUEUE_ID;
627 struct rte_eth_dev_info dev_info;
/* TX mirror of get_allowed_max_nb_rxq(): minimum max_tx_queues of all ports. */
629 RTE_ETH_FOREACH_DEV(pi) {
630 rte_eth_dev_info_get(pi, &dev_info);
631 if (dev_info.max_tx_queues < allowed_max_txq) {
632 allowed_max_txq = dev_info.max_tx_queues;
636 return allowed_max_txq;
640 * Check input txq is valid or not.
641 * If input txq is not greater than any of maximum number
642 * of TX queues of all ports, it is valid.
643 * if valid, return 0, else return -1
646 check_nb_txq(queueid_t txq)
648 queueid_t allowed_max_txq;
651 allowed_max_txq = get_allowed_max_nb_txq(&pid);
652 if (txq > allowed_max_txq) {
653 printf("Fail: input txq (%u) can't be greater "
654 "than max_tx_queues (%u) of port %u\n",
/* NOTE(review): the enclosing function's signature is elided from this
 * excerpt; from the body this is presumably testpmd's one-time init_config()
 * — confirm against the full source. It allocates per-lcore contexts,
 * queries every port, creates mbuf pools, and sets up GSO/GRO state. */
667 struct rte_port *port;
668 struct rte_mempool *mbp;
669 unsigned int nb_mbuf_per_pool;
671 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
672 struct rte_gro_param gro_param;
675 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
678 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
683 /* Configuration of logical cores. */
684 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
685 sizeof(struct fwd_lcore *) * nb_lcores,
686 RTE_CACHE_LINE_SIZE);
687 if (fwd_lcores == NULL) {
688 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
689 "failed\n", nb_lcores);
691 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
693 sizeof(struct fwd_lcore),
694 RTE_CACHE_LINE_SIZE);
695 if (fwd_lcores[lc_id] == NULL) {
696 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
699 fwd_lcores[lc_id]->cpuid_idx = lc_id;
/* Per-port discovery: cache dev_info and count ports per NUMA socket. */
702 RTE_ETH_FOREACH_DEV(pid) {
704 rte_eth_dev_info_get(pid, &port->dev_info);
707 if (port_numa[pid] != NUMA_NO_CONFIG)
708 port_per_socket[port_numa[pid]]++;
710 uint32_t socket_id = rte_eth_dev_socket_id(pid);
712 /* if socket_id is invalid, set to 0 */
713 if (check_socket_id(socket_id) < 0)
715 port_per_socket[socket_id]++;
719 /* set flag to initialize port/queue */
720 port->need_reconfig = 1;
721 port->need_reconfig_queues = 1;
725 * Create pools of mbuf.
726 * If NUMA support is disabled, create a single pool of mbuf in
727 * socket 0 memory by default.
728 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
730 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
731 * nb_txd can be configured at run time.
733 if (param_total_num_mbufs)
734 nb_mbuf_per_pool = param_total_num_mbufs;
/* Worst-case sizing: ring descriptors + per-lcore cache + one burst,
 * scaled by the maximum number of ports. */
736 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
737 (nb_lcores * mb_mempool_cache) +
738 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
739 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
745 for (i = 0; i < num_sockets; i++)
746 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
749 if (socket_num == UMA_NO_CONFIG)
750 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0)ignoring;
752 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
758 gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
759 DEV_TX_OFFLOAD_GRE_TNL_TSO;
761 * Records which Mbuf pool to use by each logical core, if needed.
763 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
764 mbp = mbuf_pool_find(
765 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
768 mbp = mbuf_pool_find(0);
769 fwd_lcores[lc_id]->mbp = mbp;
770 /* initialize GSO context */
771 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
772 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
773 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
774 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
776 fwd_lcores[lc_id]->gso_ctx.flag = 0;
779 /* Configuration of packet forwarding streams. */
780 if (init_fwd_streams() < 0)
781 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
785 /* create a gro context for each lcore */
786 gro_param.gro_types = RTE_GRO_TCP_IPV4;
787 gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
788 gro_param.max_item_per_flow = MAX_PKT_BURST;
789 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
790 gro_param.socket_id = rte_lcore_to_socket_id(
791 fwd_lcores_cpuids[lc_id]);
792 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
793 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
794 rte_exit(EXIT_FAILURE,
795 "rte_gro_ctx_create() failed\n");
/* Re-read device info for one port and mark it (and its queues) for
 * reconfiguration on the given NUMA socket. */
802 reconfig(portid_t new_port_id, unsigned socket_id)
804 struct rte_port *port;
806 /* Reconfiguration of Ethernet ports. */
807 port = &ports[new_port_id];
808 rte_eth_dev_info_get(new_port_id, &port->dev_info);
810 /* set flag to initialize port/queue */
811 port->need_reconfig = 1;
812 port->need_reconfig_queues = 1;
813 port->socket_id = socket_id;
/* Validate nb_rxq/nb_txq against every port's limits, assign each port a
 * NUMA socket, then (re)allocate the fwd_streams[] array sized to
 * nb_ports * max(nb_rxq, nb_txq). Returns negative on validation failure. */
820 init_fwd_streams(void)
823 struct rte_port *port;
824 streamid_t sm_id, nb_fwd_streams_new;
827 /* set socket id according to numa or not */
828 RTE_ETH_FOREACH_DEV(pid) {
830 if (nb_rxq > port->dev_info.max_rx_queues) {
831 printf("Fail: nb_rxq(%d) is greater than "
832 "max_rx_queues(%d)\n", nb_rxq,
833 port->dev_info.max_rx_queues);
836 if (nb_txq > port->dev_info.max_tx_queues) {
837 printf("Fail: nb_txq(%d) is greater than "
838 "max_tx_queues(%d)\n", nb_txq,
839 port->dev_info.max_tx_queues);
/* NUMA mode: prefer the user's --port-numa-config, else the device's
 * own socket; fall back to 0 when the socket id is invalid. */
843 if (port_numa[pid] != NUMA_NO_CONFIG)
844 port->socket_id = port_numa[pid];
846 port->socket_id = rte_eth_dev_socket_id(pid);
848 /* if socket_id is invalid, set to 0 */
849 if (check_socket_id(port->socket_id) < 0)
854 if (socket_num == UMA_NO_CONFIG)
857 port->socket_id = socket_num;
861 q = RTE_MAX(nb_rxq, nb_txq);
863 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
866 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
/* No change in stream count: keep the existing array as-is. */
867 if (nb_fwd_streams_new == nb_fwd_streams)
870 if (fwd_streams != NULL) {
871 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
872 if (fwd_streams[sm_id] == NULL)
874 rte_free(fwd_streams[sm_id]);
875 fwd_streams[sm_id] = NULL;
877 rte_free(fwd_streams);
882 nb_fwd_streams = nb_fwd_streams_new;
883 if (nb_fwd_streams) {
884 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
885 sizeof(struct fwd_stream *) * nb_fwd_streams,
886 RTE_CACHE_LINE_SIZE);
887 if (fwd_streams == NULL)
888 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
889 " (struct fwd_stream *)) failed\n",
892 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
893 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
894 " struct fwd_stream", sizeof(struct fwd_stream),
895 RTE_CACHE_LINE_SIZE);
896 if (fwd_streams[sm_id] == NULL)
897 rte_exit(EXIT_FAILURE, "rte_zmalloc"
898 "(struct fwd_stream) failed\n");
905 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/* Print a one-line summary of the burst-size distribution in *pbs:
 * total bursts plus the top-two burst sizes as percentages. rx_tx is
 * the "RX"/"TX" label for the output line. */
907 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
909 unsigned int total_burst;
910 unsigned int nb_burst;
911 unsigned int burst_stats[3];
912 uint16_t pktnb_stats[3];
914 int burst_percent[3];
917 * First compute the total number of packet bursts and the
918 * two highest numbers of bursts of the same number of packets.
921 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
922 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
923 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
924 nb_burst = pbs->pkt_burst_spread[nb_pkt];
927 total_burst += nb_burst;
/* Maintain the two largest counts (burst_stats[0] >= burst_stats[1]). */
928 if (nb_burst > burst_stats[0]) {
929 burst_stats[1] = burst_stats[0];
930 pktnb_stats[1] = pktnb_stats[0];
931 burst_stats[0] = nb_burst;
932 pktnb_stats[0] = nb_pkt;
933 } else if (nb_burst > burst_stats[1]) {
934 burst_stats[1] = nb_burst;
935 pktnb_stats[1] = nb_pkt;
938 if (total_burst == 0)
940 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
941 printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
942 burst_percent[0], (int) pktnb_stats[0]);
943 if (burst_stats[0] == total_burst) {
947 if (burst_stats[0] + burst_stats[1] == total_burst) {
948 printf(" + %d%% of %d pkts]\n",
949 100 - burst_percent[0], pktnb_stats[1]);
952 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
953 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
954 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
955 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
958 printf(" + %d%% of %d pkts + %d%% of others]\n",
959 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
961 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
/* Print the per-port forwarding statistics block: RX/TX packet totals,
 * drops, errors, optional checksum counters (csum engine only), and —
 * when queue-stats mapping is enabled — the per-stats-register counters. */
964 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
966 struct rte_port *port;
969 static const char *fwd_stats_border = "----------------------";
971 port = &ports[port_id];
972 printf("\n  %s Forward statistics for port %-2d %s\n",
973 fwd_stats_border, port_id, fwd_stats_border);
/* Two layouts: the first when neither RX nor TX queue-stats mapping is
 * enabled, the second (different column widths) when either is. */
975 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
976 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
978 stats->ipackets, stats->imissed,
979 (uint64_t) (stats->ipackets + stats->imissed));
981 if (cur_fwd_eng == &csum_fwd_engine)
982 printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
983 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
984 if ((stats->ierrors + stats->rx_nombuf) > 0) {
985 printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
986 printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
989 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
991 stats->opackets, port->tx_dropped,
992 (uint64_t) (stats->opackets + port->tx_dropped));
995 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
997 stats->ipackets, stats->imissed,
998 (uint64_t) (stats->ipackets + stats->imissed));
1000 if (cur_fwd_eng == &csum_fwd_engine)
1001 printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
1002 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
1003 if ((stats->ierrors + stats->rx_nombuf) > 0) {
1004 printf("  RX-error:%"PRIu64"\n", stats->ierrors);
1005 printf("  RX-nombufs:             %14"PRIu64"\n",
1009 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1011 stats->opackets, port->tx_dropped,
1012 (uint64_t) (stats->opackets + port->tx_dropped));
1015 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1016 if (port->rx_stream)
1017 pkt_burst_stats_display("RX",
1018 &port->rx_stream->rx_burst_stats);
1019 if (port->tx_stream)
1020 pkt_burst_stats_display("TX",
1021 &port->tx_stream->tx_burst_stats);
1024 if (port->rx_queue_stats_mapping_enabled) {
1026 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1027 printf("  Stats reg %2d RX-packets:%14"PRIu64
1028 "     RX-errors:%14"PRIu64
1029 "    RX-bytes:%14"PRIu64"\n",
1030 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1034 if (port->tx_queue_stats_mapping_enabled) {
1035 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1036 printf("  Stats reg %2d TX-packets:%14"PRIu64
1037 "                                 TX-bytes:%14"PRIu64"\n",
1038 i, stats->q_opackets[i], stats->q_obytes[i]);
1042 printf("  %s--------------------------------%s\n",
1043 fwd_stats_border, fwd_stats_border);
/* Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue); streams with no activity at all are skipped. */
1047 fwd_stream_stats_display(streamid_t stream_id)
1049 struct fwd_stream *fs;
1050 static const char *fwd_top_stats_border = "-------";
1052 fs = fwd_streams[stream_id];
/* Nothing received, sent, or dropped: no output for this stream. */
1053 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1054 (fs->fwd_dropped == 0))
1056 printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1057 "TX Port=%2d/Queue=%2d %s\n",
1058 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1059 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1060 printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1061 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1063 /* if checksum mode */
1064 if (cur_fwd_eng == &csum_fwd_engine) {
1065 printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1066 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1069 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1070 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1071 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
/* Drain and free any packets left in every forwarding RX queue so a new
 * run starts from empty rings. Two passes over all ports/queues with a
 * 10 ms pause between them; each drain loop is bounded by a 1 s timer. */
1076 flush_fwd_rx_queues(void)
1078 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1085 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1086 uint64_t timer_period;
1088 /* convert to number of cycles */
1089 timer_period = rte_get_timer_hz(); /* 1 second timeout */
1091 for (j = 0; j < 2; j++) {
1092 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1093 for (rxq = 0; rxq < nb_rxq; rxq++) {
1094 port_id = fwd_ports_ids[rxp];
1096 * testpmd can stuck in the below do while loop
1097 * if rte_eth_rx_burst() always returns nonzero
1098 * packets. So timer is added to exit this loop
1099 * after 1sec timer expiry.
1101 prev_tsc = rte_rdtsc();
1103 nb_rx = rte_eth_rx_burst(port_id, rxq,
1104 pkts_burst, MAX_PKT_BURST);
1105 for (i = 0; i < nb_rx; i++)
1106 rte_pktmbuf_free(pkts_burst[i]);
1108 cur_tsc = rte_rdtsc();
1109 diff_tsc = cur_tsc - prev_tsc;
1110 timer_tsc += diff_tsc;
1111 } while ((nb_rx > 0) &&
1112 (timer_tsc < timer_period));
1116 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
/* Main forwarding loop of one lcore: repeatedly invoke pkt_fwd on every
 * stream assigned to this lcore until fc->stopped is set. Periodic
 * bitrate and latency statistics are updated from here when enabled and
 * this lcore is the one designated for them. */
1121 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1123 struct fwd_stream **fsm;
1126 #ifdef RTE_LIBRTE_BITRATE
1127 uint64_t tics_per_1sec;
1128 uint64_t tics_datum;
1129 uint64_t tics_current;
1130 uint8_t idx_port, cnt_ports;
1132 cnt_ports = rte_eth_dev_count();
1133 tics_datum = rte_rdtsc();
1134 tics_per_1sec = rte_get_timer_hz();
1136 fsm = &fwd_streams[fc->stream_idx];
1137 nb_fs = fc->stream_nb;
1139 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1140 (*pkt_fwd)(fsm[sm_id]);
1141 #ifdef RTE_LIBRTE_BITRATE
1142 if (bitrate_enabled != 0 &&
1143 bitrate_lcore_id == rte_lcore_id()) {
1144 tics_current = rte_rdtsc();
/* Recompute bitrates roughly once per second, for every port. */
1145 if (tics_current - tics_datum >= tics_per_1sec) {
1146 /* Periodic bitrate calculation */
1148 idx_port < cnt_ports;
1150 rte_stats_bitrate_calc(bitrate_data,
1152 tics_datum = tics_current;
1156 #ifdef RTE_LIBRTE_LATENCY_STATS
1157 if (latencystats_enabled != 0 &&
1158 latencystats_lcore_id == rte_lcore_id())
1159 rte_latencystats_update();
1162 } while (! fc->stopped);
/* lcore entry point: run the current forwarding engine on this core's
 * fwd_lcore context (fwd_arg). */
1166 start_pkt_forward_on_core(void *fwd_arg)
1168 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1169 cur_fwd_config.fwd_eng->packet_fwd);
1174 * Run the TXONLY packet forwarding engine to send a single burst of packets.
1175 * Used to start communication flows in network loopback test configurations.
1178 run_one_txonly_burst_on_core(void *fwd_arg)
1180 struct fwd_lcore *fwd_lc;
1181 struct fwd_lcore tmp_lcore;
1183 fwd_lc = (struct fwd_lcore *) fwd_arg;
/* Work on a copy with stopped=1 so the fwd loop exits after one pass,
 * leaving the real lcore context untouched. */
1184 tmp_lcore = *fwd_lc;
1185 tmp_lcore.stopped = 1;
1186 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1191 * Launch packet forwarding:
1192 * - Setup per-port forwarding context.
1193 * - launch logical cores with their forwarding configuration.
1196 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1198 port_fwd_begin_t port_fwd_begin;
/* Optional per-engine port setup hook, called once per forwarding port. */
1203 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1204 if (port_fwd_begin != NULL) {
1205 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1206 (*port_fwd_begin)(fwd_ports_ids[i]);
1208 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1209 lc_id = fwd_lcores_cpuids[i];
/* In interactive mode the current (master) lcore keeps the CLI and is
 * not launched as a forwarding worker. */
1210 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1211 fwd_lcores[i]->stopped = 0;
1212 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1213 fwd_lcores[i], lc_id);
1215 printf("launch lcore %u failed - diag=%d\n",
1222 * Update the forward ports list.
1225 update_fwd_ports(portid_t new_pid)
1228 unsigned int new_nb_fwd_ports = 0;
/* Compact fwd_ports_ids[] in place, dropping ports that are no longer
 * valid, then optionally append new_pid (RTE_MAX_ETHPORTS == "none"). */
1231 for (i = 0; i < nb_fwd_ports; ++i) {
1232 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1235 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1239 if (new_pid < RTE_MAX_ETHPORTS)
1240 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1242 nb_fwd_ports = new_nb_fwd_ports;
1243 nb_cfg_ports = new_nb_fwd_ports;
1247 * Launch packet forwarding configuration.
/* Validate the current engine/queue configuration, reset all per-port and
 * per-stream counters, optionally run the TXONLY engine with_tx_first
 * times to prime loopback flows, then launch the forwarding lcores. */
1250 start_packet_forwarding(int with_tx_first)
1252 port_fwd_begin_t port_fwd_begin;
1253 port_fwd_end_t port_fwd_end;
1254 struct rte_port *port;
/* Sanity checks: the selected engine must have the queues it needs. */
1259 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1260 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1262 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1263 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1265 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1266 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1267 (!nb_rxq || !nb_txq))
1268 rte_exit(EXIT_FAILURE,
1269 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1270 cur_fwd_eng->fwd_mode_name);
1272 if (all_ports_started() == 0) {
1273 printf("Not all ports were started\n");
1276 if (test_done == 0) {
1277 printf("Packet forwarding already started\n");
/* DCB-specific constraints on the forwarding ports and lcore count. */
1283 for (i = 0; i < nb_fwd_ports; i++) {
1284 pt_id = fwd_ports_ids[i];
1285 port = &ports[pt_id];
1286 if (!port->dcb_flag) {
1287 printf("In DCB mode, all forwarding ports must "
1288 "be configured in this mode.\n");
1292 if (nb_fwd_lcores == 1) {
1293 printf("In DCB mode,the nb forwarding cores "
1294 "should be larger than 1.\n");
1303 flush_fwd_rx_queues();
1305 pkt_fwd_config_display(&cur_fwd_config);
1306 rxtx_config_display();
/* Snapshot current HW stats as the baseline and clear soft counters. */
1308 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1309 pt_id = fwd_ports_ids[i];
1310 port = &ports[pt_id];
1311 rte_eth_stats_get(pt_id, &port->stats);
1312 port->tx_dropped = 0;
1314 map_port_queue_stats_mapping_registers(pt_id, port);
1316 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1317 fwd_streams[sm_id]->rx_packets = 0;
1318 fwd_streams[sm_id]->tx_packets = 0;
1319 fwd_streams[sm_id]->fwd_dropped = 0;
1320 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1321 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1323 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1324 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1325 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1326 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1327 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1329 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1330 fwd_streams[sm_id]->core_cycles = 0;
1333 if (with_tx_first) {
1334 port_fwd_begin = tx_only_engine.port_fwd_begin;
1335 if (port_fwd_begin != NULL) {
1336 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1337 (*port_fwd_begin)(fwd_ports_ids[i]);
/* Run the TXONLY engine to completion with_tx_first times. */
1339 while (with_tx_first--) {
1340 launch_packet_forwarding(
1341 run_one_txonly_burst_on_core);
1342 rte_eal_mp_wait_lcore();
1344 port_fwd_end = tx_only_engine.port_fwd_end;
1345 if (port_fwd_end != NULL) {
1346 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1347 (*port_fwd_end)(fwd_ports_ids[i]);
1350 launch_packet_forwarding(start_pkt_forward_on_core);
/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait for
 * them, run the engine's per-port "end" hook, aggregate per-stream software
 * counters into the ports, and display per-port plus accumulated statistics
 * as deltas since start_packet_forwarding().
 * NOTE(review): locals such as i, sm_id, pt_id, lc_id and several braces
 * are elided in this excerpt.
 */
1354 stop_packet_forwarding(void)
1356 struct rte_eth_stats stats;
1357 struct rte_port *port;
1358 port_fwd_end_t port_fwd_end;
1363 uint64_t total_recv;
1364 uint64_t total_xmit;
1365 uint64_t total_rx_dropped;
1366 uint64_t total_tx_dropped;
1367 uint64_t total_rx_nombuf;
1368 uint64_t tx_dropped;
1369 uint64_t rx_bad_ip_csum;
1370 uint64_t rx_bad_l4_csum;
1371 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1372 uint64_t fwd_cycles;
1375 static const char *acc_stats_border = "+++++++++++++++";
1378 printf("Packet forwarding not started\n");
/* Ask every forwarding lcore to stop, then wait for all of them. */
1381 printf("Telling cores to stop...");
1382 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1383 fwd_lcores[lc_id]->stopped = 1;
1384 printf("\nWaiting for lcores to finish...\n");
1385 rte_eal_mp_wait_lcore();
/* Run the current engine's optional per-port "end" hook. */
1386 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1387 if (port_fwd_end != NULL) {
1388 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1389 pt_id = fwd_ports_ids[i];
1390 (*port_fwd_end)(pt_id);
1393 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/*
 * Fold each stream's software counters into its ports. Per-stream stats
 * are only displayed when there are more streams than ports.
 */
1396 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1397 if (cur_fwd_config.nb_fwd_streams >
1398 cur_fwd_config.nb_fwd_ports) {
1399 fwd_stream_stats_display(sm_id);
1400 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1401 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1403 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1405 ports[fwd_streams[sm_id]->rx_port].rx_stream =
/* Accumulate stream drop / bad-checksum counters onto the ports. */
1408 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1409 tx_dropped = (uint64_t) (tx_dropped +
1410 fwd_streams[sm_id]->fwd_dropped);
1411 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1414 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1415 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1416 fwd_streams[sm_id]->rx_bad_ip_csum);
1417 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1421 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1422 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1423 fwd_streams[sm_id]->rx_bad_l4_csum);
1424 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1427 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1428 fwd_cycles = (uint64_t) (fwd_cycles +
1429 fwd_streams[sm_id]->core_cycles);
1434 total_rx_dropped = 0;
1435 total_tx_dropped = 0;
1436 total_rx_nombuf = 0;
/*
 * Per port: fetch HW stats, subtract the snapshot taken at start time so
 * only this run's traffic is reported, and accumulate the totals.
 */
1437 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1438 pt_id = fwd_ports_ids[i];
1440 port = &ports[pt_id];
1441 rte_eth_stats_get(pt_id, &stats);
1442 stats.ipackets -= port->stats.ipackets;
1443 port->stats.ipackets = 0;
1444 stats.opackets -= port->stats.opackets;
1445 port->stats.opackets = 0;
1446 stats.ibytes -= port->stats.ibytes;
1447 port->stats.ibytes = 0;
1448 stats.obytes -= port->stats.obytes;
1449 port->stats.obytes = 0;
1450 stats.imissed -= port->stats.imissed;
1451 port->stats.imissed = 0;
1452 stats.oerrors -= port->stats.oerrors;
1453 port->stats.oerrors = 0;
1454 stats.rx_nombuf -= port->stats.rx_nombuf;
1455 port->stats.rx_nombuf = 0;
1457 total_recv += stats.ipackets;
1458 total_xmit += stats.opackets;
1459 total_rx_dropped += stats.imissed;
1460 total_tx_dropped += port->tx_dropped;
1461 total_rx_nombuf += stats.rx_nombuf;
1463 fwd_port_stats_display(pt_id, &stats);
/* Accumulated summary across all forwarding ports. */
1466 printf("\n %s Accumulated forward statistics for all ports"
1468 acc_stats_border, acc_stats_border);
1469 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1471 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1473 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1474 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1475 if (total_rx_nombuf > 0)
1476 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1477 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1479 acc_stats_border, acc_stats_border);
1480 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
/* NOTE(review): division below presumably guarded by total_recv > 0 on an
 * elided line — confirm against the full source. */
1482 printf("\n CPU cycles/packet=%u (total cycles="
1483 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1484 (unsigned int)(fwd_cycles / total_recv),
1485 fwd_cycles, total_recv);
1487 printf("\nDone.\n");
/* Administratively set the link of port `pid` up; print on failure. */
1492 dev_set_link_up(portid_t pid)
1494 if (rte_eth_dev_set_link_up(pid) < 0)
1495 printf("\nSet link up fail.\n");
/* Administratively set the link of port `pid` down; print on failure. */
1499 dev_set_link_down(portid_t pid)
1501 if (rte_eth_dev_set_link_down(pid) < 0)
1502 printf("\nSet link down fail.\n");
/*
 * Return whether every non-slave ethdev port is in RTE_PORT_STARTED state.
 * Bonding slaves are ignored since they are managed by their master port.
 */
1506 all_ports_started(void)
1509 struct rte_port *port;
1511 RTE_ETH_FOREACH_DEV(pi) {
1513 /* Check if there is a port which is not started */
1514 if ((port->port_status != RTE_PORT_STARTED) &&
1515 (port->slave_flag == 0))
1519 /* No port is not started */
/*
 * Return whether every non-slave ethdev port is in RTE_PORT_STOPPED state.
 * Bonding slaves are ignored, mirroring all_ports_started().
 */
1524 all_ports_stopped(void)
1527 struct rte_port *port;
1529 RTE_ETH_FOREACH_DEV(pi) {
1531 if ((port->port_status != RTE_PORT_STOPPED) &&
1532 (port->slave_flag == 0))
/* Return whether the (valid) port `port_id` is in RTE_PORT_STARTED state. */
1540 port_is_started(portid_t port_id)
1542 if (port_id_is_invalid(port_id, ENABLED_WARN))
1545 if (ports[port_id].port_status != RTE_PORT_STARTED)
/* Return whether the (valid) port `port_id` is in RTE_PORT_CLOSED state. */
1552 port_is_closed(portid_t port_id)
1554 if (port_id_is_invalid(port_id, ENABLED_WARN))
1557 if (ports[port_id].port_status != RTE_PORT_CLOSED)
/*
 * Start port `pid`, or every port when pid == RTE_PORT_ALL: (re)configure
 * the device and its RX/TX queues if flagged, register event callbacks,
 * start the device, and finally check link status. Port state transitions
 * are guarded with rte_atomic16_cmpset() on port->port_status.
 * NOTE(review): many original lines (locals pi/qi, braces, continue/return
 * paths) are elided in this excerpt.
 */
1564 start_port(portid_t pid)
1566 int diag, need_check_link_status = -1;
1569 struct rte_port *port;
1570 struct ether_addr mac_addr;
1571 enum rte_eth_event_type event_type;
1573 if (port_id_is_invalid(pid, ENABLED_WARN))
1578 RTE_ETH_FOREACH_DEV(pi) {
1579 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1582 need_check_link_status = 0;
/* Claim the port: STOPPED -> HANDLING, or skip it. */
1584 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1585 RTE_PORT_HANDLING) == 0) {
1586 printf("Port %d is now not stopped\n", pi);
/* Reconfigure the device if a previous change requires it. */
1590 if (port->need_reconfig > 0) {
1591 port->need_reconfig = 0;
1593 if (flow_isolate_all) {
1594 int ret = port_flow_isolate(pi, 1);
1596 printf("Failed to apply isolated"
1597 " mode on port %d\n", pi);
1602 printf("Configuring Port %d (socket %u)\n", pi,
1604 /* configure port */
1605 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
/* On failure, roll the state back to STOPPED and retry next time. */
1608 if (rte_atomic16_cmpset(&(port->port_status),
1609 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1610 printf("Port %d can not be set back "
1611 "to stopped\n", pi);
1612 printf("Fail to configure port %d\n", pi);
1613 /* try to reconfigure port next time */
1614 port->need_reconfig = 1;
/* Re-create RX/TX queues if a previous change requires it. */
1618 if (port->need_reconfig_queues > 0) {
1619 port->need_reconfig_queues = 0;
1620 /* setup tx queues */
1621 for (qi = 0; qi < nb_txq; qi++) {
/* Honor an explicit per-port TX ring NUMA node when configured. */
1622 if ((numa_support) &&
1623 (txring_numa[pi] != NUMA_NO_CONFIG))
1624 diag = rte_eth_tx_queue_setup(pi, qi,
1625 nb_txd,txring_numa[pi],
1628 diag = rte_eth_tx_queue_setup(pi, qi,
1629 nb_txd,port->socket_id,
1635 /* Fail to setup tx queue, return */
1636 if (rte_atomic16_cmpset(&(port->port_status),
1638 RTE_PORT_STOPPED) == 0)
1639 printf("Port %d can not be set back "
1640 "to stopped\n", pi);
1641 printf("Fail to configure port %d tx queues\n", pi);
1642 /* try to reconfigure queues next time */
1643 port->need_reconfig_queues = 1;
1646 /* setup rx queues */
1647 for (qi = 0; qi < nb_rxq; qi++) {
/* RX queues also need a mempool on the chosen NUMA node. */
1648 if ((numa_support) &&
1649 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1650 struct rte_mempool * mp =
1651 mbuf_pool_find(rxring_numa[pi]);
1653 printf("Failed to setup RX queue:"
1654 "No mempool allocation"
1655 " on the socket %d\n",
1660 diag = rte_eth_rx_queue_setup(pi, qi,
1661 nb_rxd,rxring_numa[pi],
1662 &(port->rx_conf),mp);
1664 struct rte_mempool *mp =
1665 mbuf_pool_find(port->socket_id);
1667 printf("Failed to setup RX queue:"
1668 "No mempool allocation"
1669 " on the socket %d\n",
1673 diag = rte_eth_rx_queue_setup(pi, qi,
1674 nb_rxd,port->socket_id,
1675 &(port->rx_conf), mp);
1680 /* Fail to setup rx queue, return */
1681 if (rte_atomic16_cmpset(&(port->port_status),
1683 RTE_PORT_STOPPED) == 0)
1684 printf("Port %d can not be set back "
1685 "to stopped\n", pi);
1686 printf("Fail to configure port %d rx queues\n", pi);
1687 /* try to reconfigure queues next time */
1688 port->need_reconfig_queues = 1;
/* Register eth_event_callback for every known event type on this port. */
1693 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1694 event_type < RTE_ETH_EVENT_MAX;
1696 diag = rte_eth_dev_callback_register(pi,
1701 printf("Failed to setup even callback for event %d\n",
/* Start the device; on failure roll back to STOPPED. */
1708 if (rte_eth_dev_start(pi) < 0) {
1709 printf("Fail to start port %d\n", pi);
1711 /* Fail to setup rx queue, return */
1712 if (rte_atomic16_cmpset(&(port->port_status),
1713 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1714 printf("Port %d can not be set back to "
/* Success: HANDLING -> STARTED, then show the port's MAC address. */
1719 if (rte_atomic16_cmpset(&(port->port_status),
1720 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1721 printf("Port %d can not be set into started\n", pi);
1723 rte_eth_macaddr_get(pi, &mac_addr);
1724 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1725 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1726 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1727 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1729 /* at least one port started, need checking link status */
1730 need_check_link_status = 1;
1733 if (need_check_link_status == 1 && !no_link_check)
1734 check_all_ports_link_status(RTE_PORT_ALL);
1735 else if (need_check_link_status == 0)
1736 printf("Please stop the ports first\n");
/*
 * Stop port `pid`, or every port when pid == RTE_PORT_ALL. Refuses ports
 * still used for forwarding or enslaved to a bonded device. State moves
 * STARTED -> HANDLING -> STOPPED via rte_atomic16_cmpset().
 * NOTE(review): some original lines (locals, continue paths) are elided.
 */
1743 stop_port(portid_t pid)
1746 struct rte_port *port;
1747 int need_check_link_status = 0;
1754 if (port_id_is_invalid(pid, ENABLED_WARN))
1757 printf("Stopping ports...\n");
1759 RTE_ETH_FOREACH_DEV(pi) {
1760 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
/* A port still in the forwarding config must be removed first. */
1763 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1764 printf("Please remove port %d from forwarding configuration.\n", pi);
1768 if (port_is_bonding_slave(pi)) {
1769 printf("Please remove port %d from bonded device.\n", pi);
1774 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1775 RTE_PORT_HANDLING) == 0)
1778 rte_eth_dev_stop(pi);
1780 if (rte_atomic16_cmpset(&(port->port_status),
1781 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1782 printf("Port %d can not be set into stopped\n", pi);
1783 need_check_link_status = 1;
1785 if (need_check_link_status && !no_link_check)
1786 check_all_ports_link_status(RTE_PORT_ALL);
/*
 * Close port `pid`, or every port when pid == RTE_PORT_ALL: flush its flow
 * rules, then rte_eth_dev_close(). Only STOPPED ports can be closed, and
 * forwarding/bonded-slave ports are refused, as in stop_port().
 */
1792 close_port(portid_t pid)
1795 struct rte_port *port;
1797 if (port_id_is_invalid(pid, ENABLED_WARN))
1800 printf("Closing ports...\n");
1802 RTE_ETH_FOREACH_DEV(pi) {
1803 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1806 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1807 printf("Please remove port %d from forwarding configuration.\n", pi);
1811 if (port_is_bonding_slave(pi)) {
1812 printf("Please remove port %d from bonded device.\n", pi);
/* cmpset(CLOSED, CLOSED) == 1 means the port is already closed. */
1817 if (rte_atomic16_cmpset(&(port->port_status),
1818 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1819 printf("Port %d is already closed\n", pi);
1823 if (rte_atomic16_cmpset(&(port->port_status),
1824 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1825 printf("Port %d is now not stopped\n", pi);
/* Flush installed rte_flow rules before closing the device. */
1829 if (port->flow_list)
1830 port_flow_flush(pi);
1831 rte_eth_dev_close(pi);
1833 if (rte_atomic16_cmpset(&(port->port_status),
1834 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1835 printf("Port %d cannot be set to closed\n", pi);
/*
 * Reset port `pid`, or every port when pid == RTE_PORT_ALL, via
 * rte_eth_dev_reset(). On success the port is flagged for full
 * reconfiguration (device and queues) at the next start_port().
 */
1842 reset_port(portid_t pid)
1846 struct rte_port *port;
1848 if (port_id_is_invalid(pid, ENABLED_WARN))
1851 printf("Resetting ports...\n");
1853 RTE_ETH_FOREACH_DEV(pi) {
1854 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1857 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1858 printf("Please remove port %d from forwarding "
1859 "configuration.\n", pi);
1863 if (port_is_bonding_slave(pi)) {
1864 printf("Please remove port %d from bonded device.\n",
1869 diag = rte_eth_dev_reset(pi);
/* diag == 0: mark for reconfig so the next start re-applies everything. */
1872 port->need_reconfig = 1;
1873 port->need_reconfig_queues = 1;
1875 printf("Failed to reset port %d. diag=%d\n", pi, diag);
/*
 * Hot-plug a new port described by `identifier` (a device argument string
 * passed to rte_eth_dev_attach()), configure it, enable promiscuous mode,
 * and add it to the forwarding ports list.
 */
1883 attach_port(char *identifier)
1886 unsigned int socket_id;
1888 printf("Attaching a new port...\n");
1890 if (identifier == NULL) {
1891 printf("Invalid parameters are specified\n");
1895 if (rte_eth_dev_attach(identifier, &pi))
1898 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1899 /* if socket_id is invalid, set to 0 */
1900 if (check_socket_id(socket_id) < 0)
1902 reconfig(pi, socket_id);
1903 rte_eth_promiscuous_enable(pi);
1905 nb_ports = rte_eth_dev_count();
1907 ports[pi].port_status = RTE_PORT_STOPPED;
/* Make the new port part of the forwarding configuration. */
1909 update_fwd_ports(pi);
1911 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
/*
 * Hot-unplug port `port_id`. The port must be closed first; its flow rules
 * are flushed, the device is detached, and the forwarding ports list is
 * compacted (RTE_MAX_ETHPORTS == "no new port" for update_fwd_ports()).
 */
1916 detach_port(portid_t port_id)
1918 char name[RTE_ETH_NAME_MAX_LEN];
1920 printf("Detaching a port...\n");
1922 if (!port_is_closed(port_id)) {
1923 printf("Please close port first\n");
1927 if (ports[port_id].flow_list)
1928 port_flow_flush(port_id);
1930 if (rte_eth_dev_detach(port_id, name)) {
1931 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1935 nb_ports = rte_eth_dev_count();
1937 update_fwd_ports(RTE_MAX_ETHPORTS);
1939 printf("Port '%s' is detached. Now total ports is %d\n",
/*
 * NOTE(review): fragment of the application exit path — the enclosing
 * function's header (presumably pmd_test_exit) is elided from this
 * excerpt. Visible behavior: stop forwarding, then shut down each port.
 */
1951 stop_packet_forwarding();
1953 if (ports != NULL) {
1955 RTE_ETH_FOREACH_DEV(pt_id) {
1956 printf("\nShutting down port %d...\n", pt_id);
1962 printf("\nBye...\n");
/* Simple name -> handler mapping for built-in test commands. */
1965 typedef void (*cmd_func_t)(void);
1966 struct pmd_test_command {
1967 const char *cmd_name;
1968 cmd_func_t cmd_func;
/* Number of entries in the (elided) pmd_test_menu[] table. */
1971 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1973 /* Check the link status of all ports in up to 9s, and print them finally */
1975 check_all_ports_link_status(uint32_t port_mask)
1977 #define CHECK_INTERVAL 100 /* 100ms */
1978 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1980 uint8_t count, all_ports_up, print_flag = 0;
1981 struct rte_eth_link link;
1983 printf("Checking link statuses...\n");
/* Poll all masked ports until every link is up or the timeout expires. */
1985 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1987 RTE_ETH_FOREACH_DEV(portid) {
1988 if ((port_mask & (1 << portid)) == 0)
1990 memset(&link, 0, sizeof(link));
1991 rte_eth_link_get_nowait(portid, &link);
1992 /* print link status if flag set */
1993 if (print_flag == 1) {
1994 if (link.link_status)
1996 "Port%d Link Up. speed %u Mbps- %s\n",
1997 portid, link.link_speed,
1998 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1999 ("full-duplex") : ("half-duplex\n"));
2001 printf("Port %d Link Down\n", portid);
2004 /* clear all_ports_up flag if any link down */
2005 if (link.link_status == ETH_LINK_DOWN) {
2010 /* after finally printing all link status, get out */
2011 if (print_flag == 1)
2014 if (all_ports_up == 0) {
2016 rte_delay_ms(CHECK_INTERVAL);
2019 /* set the print_flag if all ports up or timeout */
2020 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * Deferred handler for a device-removal (RMV) event, scheduled via
 * rte_eal_alarm_set() from eth_event_callback(): close the removed port
 * (with link checking temporarily suppressed) and detach its device.
 */
2030 rmv_event_callback(void *arg)
2032 int org_no_link_check = no_link_check;
2033 struct rte_eth_dev *dev;
2034 portid_t port_id = (intptr_t)arg;
2036 RTE_ETH_VALID_PORTID_OR_RET(port_id);
2037 dev = &rte_eth_devices[port_id];
/* Restore the caller's link-check setting after closing the port. */
2041 no_link_check = org_no_link_check;
2042 close_port(port_id);
2043 printf("removing device %s\n", dev->device->name);
2044 if (rte_eal_dev_detach(dev->device))
2045 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
2049 /* This function is used by the interrupt thread */
/*
 * Generic ethdev event callback: optionally print the event (when its bit
 * is set in event_print_mask) and, for RMV events, schedule the deferred
 * removal handler 100ms later to avoid detaching from interrupt context.
 */
2051 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2054 static const char * const event_desc[] = {
2055 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2056 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2057 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2058 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2059 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2060 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2061 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2062 [RTE_ETH_EVENT_MAX] = NULL,
2065 RTE_SET_USED(param);
2066 RTE_SET_USED(ret_param);
2068 if (type >= RTE_ETH_EVENT_MAX) {
2069 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2070 port_id, __func__, type);
2072 } else if (event_print_mask & (UINT32_C(1) << type)) {
2073 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2079 case RTE_ETH_EVENT_INTR_RMV:
/* Defer actual removal by 100ms; the port id rides in the alarm arg. */
2080 if (rte_eal_alarm_set(100000,
2081 rmv_event_callback, (void *)(intptr_t)port_id))
2082 fprintf(stderr, "Could not set up deferred device removal\n");
/*
 * Apply the user-supplied TX queue -> stats-counter mappings that match
 * `port_id` (and a valid queue id < nb_txq). Marks the port's TX mapping
 * as enabled when at least one mapping was found. Some lines are elided.
 */
2091 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2095 uint8_t mapping_found = 0;
2097 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2098 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2099 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2100 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2101 tx_queue_stats_mappings[i].queue_id,
2102 tx_queue_stats_mappings[i].stats_counter_id);
2109 port->tx_queue_stats_mapping_enabled = 1;
/*
 * RX counterpart of set_tx_queue_stats_mapping_registers(): apply the
 * user-supplied RX queue -> stats-counter mappings for `port_id`.
 */
2114 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2118 uint8_t mapping_found = 0;
2120 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2121 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2122 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2123 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2124 rx_queue_stats_mappings[i].queue_id,
2125 rx_queue_stats_mappings[i].stats_counter_id);
2132 port->rx_queue_stats_mapping_enabled = 1;
/*
 * Apply both TX and RX queue-stats mappings for port `pi`. -ENOTSUP from
 * the PMD is tolerated (the feature is simply disabled for that port);
 * any other failure aborts testpmd via rte_exit().
 */
2137 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2141 diag = set_tx_queue_stats_mapping_registers(pi, port);
2143 if (diag == -ENOTSUP) {
2144 port->tx_queue_stats_mapping_enabled = 0;
2145 printf("TX queue stats mapping not supported port id=%d\n", pi);
2148 rte_exit(EXIT_FAILURE,
2149 "set_tx_queue_stats_mapping_registers "
2150 "failed for port id=%d diag=%d\n",
2154 diag = set_rx_queue_stats_mapping_registers(pi, port);
2156 if (diag == -ENOTSUP) {
2157 port->rx_queue_stats_mapping_enabled = 0;
2158 printf("RX queue stats mapping not supported port id=%d\n", pi);
2161 rte_exit(EXIT_FAILURE,
2162 "set_rx_queue_stats_mapping_registers "
2163 "failed for port id=%d diag=%d\n",
/*
 * Initialize a port's RX/TX queue configuration from the PMD defaults,
 * then overlay any threshold/flag values the user set on the command line
 * (RTE_PMD_PARAM_UNSET marks "not provided, keep the PMD default").
 */
2169 rxtx_port_config(struct rte_port *port)
2171 port->rx_conf = port->dev_info.default_rxconf;
2172 port->tx_conf = port->dev_info.default_txconf;
2174 /* Check if any RX/TX parameters have been passed */
2175 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2176 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2178 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2179 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2181 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2182 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2184 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2185 port->rx_conf.rx_free_thresh = rx_free_thresh;
2187 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2188 port->rx_conf.rx_drop_en = rx_drop_en;
2190 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2191 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2193 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2194 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2196 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2197 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2199 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2200 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2202 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2203 port->tx_conf.tx_free_thresh = tx_free_thresh;
2205 if (txq_flags != RTE_PMD_PARAM_UNSET)
2206 port->tx_conf.txq_flags = txq_flags;
/*
 * Build the default configuration for every ethdev port: RX mode, flow
 * director, RSS (when multiple RX queues exist), queue thresholds, LSC/RMV
 * interrupt flags, and driver-specific setup (ixgbe bypass, softnic).
 * NOTE(review): some original lines (e.g. the nb_rxq>1 condition selecting
 * the RSS branch) are elided in this excerpt.
 */
2210 init_port_config(void)
2213 struct rte_port *port;
2215 RTE_ETH_FOREACH_DEV(pid) {
2217 port->dev_conf.rxmode = rx_mode;
2218 port->dev_conf.fdir_conf = fdir_conf;
/* First branch: RSS enabled with rss_hf; second: RSS disabled. */
2220 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2221 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2223 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2224 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
/* Outside DCB mode, pick the RX multi-queue mode from the RSS setting. */
2227 if (port->dcb_flag == 0) {
2228 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2229 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2231 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2234 rxtx_port_config(port);
2236 rte_eth_macaddr_get(pid, &port->eth_addr);
2238 map_port_queue_stats_mapping_registers(pid, port);
2239 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2240 rte_pmd_ixgbe_bypass_init(pid);
/* Enable LSC/RMV interrupts only if both requested and supported. */
2243 if (lsc_interrupt &&
2244 (rte_eth_devices[pid].data->dev_flags &
2245 RTE_ETH_DEV_INTR_LSC))
2246 port->dev_conf.intr_conf.lsc = 1;
2247 if (rmv_interrupt &&
2248 (rte_eth_devices[pid].data->dev_flags &
2249 RTE_ETH_DEV_INTR_RMV))
2250 port->dev_conf.intr_conf.rmv = 1;
2252 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2253 /* Detect softnic port */
2254 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2255 port->softnic_enable = 1;
2256 memset(&port->softport, 0, sizeof(struct softnic_port));
2258 if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2259 port->softport.tm_flag = 1;
/* Mark port `slave_pid` as a bonding slave (see port_is_bonding_slave()). */
2265 void set_port_slave_flag(portid_t slave_pid)
2267 struct rte_port *port;
2269 port = &ports[slave_pid];
2270 port->slave_flag = 1;
/* Clear the bonding-slave mark set by set_port_slave_flag(). */
2273 void clear_port_slave_flag(portid_t slave_pid)
2275 struct rte_port *port;
2277 port = &ports[slave_pid];
2278 port->slave_flag = 0;
/*
 * Return non-zero if `slave_pid` is a bonding slave, either according to
 * the ethdev BONDED_SLAVE device flag or testpmd's own slave_flag.
 */
2281 uint8_t port_is_bonding_slave(portid_t slave_pid)
2283 struct rte_port *port;
2285 port = &ports[slave_pid];
2286 if ((rte_eth_devices[slave_pid].data->dev_flags &
2287 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
/* VLAN tags used to populate the VMDQ+DCB pool map in get_eth_dcb_conf(). */
2292 const uint16_t vlan_tags[] = {
2293 0, 1, 2, 3, 4, 5, 6, 7,
2294 8, 9, 10, 11, 12, 13, 14, 15,
2295 16, 17, 18, 19, 20, 21, 22, 23,
2296 24, 25, 26, 27, 28, 29, 30, 31
/*
 * Fill `eth_conf` for DCB operation: VMDQ+DCB when dcb_mode is
 * DCB_VT_ENABLED, plain DCB(+RSS) otherwise, for `num_tcs` traffic
 * classes. The pfc_en parameter (elided in this excerpt's signature tail)
 * selects whether priority flow control capability is advertised.
 */
2300 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2301 enum dcb_mode_enable dcb_mode,
2302 enum rte_eth_nb_tcs num_tcs,
2308 * Builds up the correct configuration for dcb+vt based on the vlan tags array
2309 * given above, and the number of traffic classes available for use.
2311 if (dcb_mode == DCB_VT_ENABLED) {
2312 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2313 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2314 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2315 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2317 /* VMDQ+DCB RX and TX configurations */
2318 vmdq_rx_conf->enable_default_pool = 0;
2319 vmdq_rx_conf->default_pool = 0;
/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
2320 vmdq_rx_conf->nb_queue_pools =
2321 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2322 vmdq_tx_conf->nb_queue_pools =
2323 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
/* Map each VLAN tag from vlan_tags[] onto a pool, round-robin. */
2325 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2326 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2327 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2328 vmdq_rx_conf->pool_map[i].pools =
2329 1 << (i % vmdq_rx_conf->nb_queue_pools);
2331 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2332 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2333 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2336 /* set DCB mode of RX and TX of multiple queues */
2337 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2338 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
/* Non-VT branch: plain DCB with RSS on RX. */
2340 struct rte_eth_dcb_rx_conf *rx_conf =
2341 &eth_conf->rx_adv_conf.dcb_rx_conf;
2342 struct rte_eth_dcb_tx_conf *tx_conf =
2343 &eth_conf->tx_adv_conf.dcb_tx_conf;
2345 rx_conf->nb_tcs = num_tcs;
2346 tx_conf->nb_tcs = num_tcs;
2348 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2349 rx_conf->dcb_tc[i] = i % num_tcs;
2350 tx_conf->dcb_tc[i] = i % num_tcs;
2352 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2353 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2354 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
/* Advertise PFC capability only when requested (pfc_en branch, elided). */
2358 eth_conf->dcb_capability_en =
2359 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2361 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
/*
 * Put port `pid` into DCB mode: build the DCB configuration via
 * get_eth_dcb_conf(), apply it with 0 RX/TX queues (queues are set up
 * later), derive nb_rxq/nb_txq from the device capabilities, install the
 * VLAN filter table, and flag the port as DCB-configured.
 * NOTE(review): several lines (locals, return paths) are elided here.
 */
2367 init_port_dcb_config(portid_t pid,
2368 enum dcb_mode_enable dcb_mode,
2369 enum rte_eth_nb_tcs num_tcs,
2372 struct rte_eth_conf port_conf;
2373 struct rte_port *rte_port;
2377 rte_port = &ports[pid];
2379 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2380 /* Enter DCB configuration status */
2383 /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2384 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2387 port_conf.rxmode.hw_vlan_filter = 1;
2390 * Write the configuration into the device.
2391 * Set the numbers of RX & TX queues to 0, so
2392 * the RX & TX queues will not be setup.
2394 rte_eth_dev_configure(pid, 0, 0, &port_conf);
2396 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2398 /* If dev_info.vmdq_pool_base is greater than 0,
2399 * the queue id of vmdq pools is started after pf queues.
2401 if (dcb_mode == DCB_VT_ENABLED &&
2402 rte_port->dev_info.vmdq_pool_base > 0) {
2403 printf("VMDQ_DCB multi-queue mode is nonsensical"
2404 " for port %d.", pid);
2408 /* Assume the ports in testpmd have the same dcb capability
2409 * and has the same number of rxq and txq in dcb mode
2411 if (dcb_mode == DCB_VT_ENABLED) {
2412 if (rte_port->dev_info.max_vfs > 0) {
2413 nb_rxq = rte_port->dev_info.nb_rx_queues;
2414 nb_txq = rte_port->dev_info.nb_tx_queues;
2416 nb_rxq = rte_port->dev_info.max_rx_queues;
2417 nb_txq = rte_port->dev_info.max_tx_queues;
2420 /*if vt is disabled, use all pf queues */
2421 if (rte_port->dev_info.vmdq_pool_base == 0) {
2422 nb_rxq = rte_port->dev_info.max_rx_queues;
2423 nb_txq = rte_port->dev_info.max_tx_queues;
/* With VMDQ pools present, use one queue per traffic class. */
2425 nb_rxq = (queueid_t)num_tcs;
2426 nb_txq = (queueid_t)num_tcs;
2430 rx_free_thresh = 64;
2432 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2434 rxtx_port_config(rte_port);
/* Enable HW VLAN filtering and admit every tag from vlan_tags[]. */
2436 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2437 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2438 rx_vft_set(pid, vlan_tags[i], 1);
2440 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2441 map_port_queue_stats_mapping_registers(pid, rte_port);
2443 rte_port->dcb_flag = 1;
/*
 * NOTE(review): fragment of the port-array initializer — the enclosing
 * function's header (presumably init_port) is elided from this excerpt.
 * Visible behavior: allocate a cache-aligned, zeroed array of rte_port
 * for all possible ports, aborting on allocation failure.
 */
2451 /* Configuration of Ethernet ports. */
2452 ports = rte_zmalloc("testpmd: ports",
2453 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2454 RTE_CACHE_LINE_SIZE);
2455 if (ports == NULL) {
2456 rte_exit(EXIT_FAILURE,
2457 "rte_zmalloc(%d struct rte_port) failed\n",
/*
 * NOTE(review): fragment of a periodic stats display routine — the
 * enclosing function's header is elided. Visible behavior: clear the
 * terminal with ANSI escape sequences, then print NIC statistics for
 * every forwarding port.
 */
2473 const char clr[] = { 27, '[', '2', 'J', '\0' };
2474 const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2476 /* Clear screen and move to top left */
2477 printf("%s%s", clr, top_left);
2479 printf("\nPort statistics ====================================");
2480 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2481 nic_stats_display(fwd_ports_ids[i]);
/*
 * SIGINT/SIGTERM handler: uninitialize optional subsystems (pdump capture,
 * latency stats), flag forced termination, then re-raise the signal with
 * the default disposition so the process exits with the expected status.
 */
2485 signal_handler(int signum)
2487 if (signum == SIGINT || signum == SIGTERM) {
2488 printf("\nSignal %d received, preparing to exit...\n",
2490 #ifdef RTE_LIBRTE_PDUMP
2491 /* uninitialize packet capture framework */
2494 #ifdef RTE_LIBRTE_LATENCY_STATS
2495 rte_latencystats_uninit();
2498 /* Set flag to indicate the force termination. */
2500 /* exit with the expected status */
2501 signal(signum, SIG_DFL);
2502 kill(getpid(), signum);
2507 main(int argc, char** argv)
2512 signal(SIGINT, signal_handler);
2513 signal(SIGTERM, signal_handler);
2515 diag = rte_eal_init(argc, argv);
2517 rte_panic("Cannot init EAL\n");
2519 if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2520 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2524 #ifdef RTE_LIBRTE_PDUMP
2525 /* initialize packet capture framework */
2526 rte_pdump_init(NULL);
2529 nb_ports = (portid_t) rte_eth_dev_count();
2531 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2533 /* allocate port structures, and init them */
2536 set_def_fwd_config();
2538 rte_panic("Empty set of forwarding logical cores - check the "
2539 "core mask supplied in the command parameters\n");
2541 /* Bitrate/latency stats disabled by default */
2542 #ifdef RTE_LIBRTE_BITRATE
2543 bitrate_enabled = 0;
2545 #ifdef RTE_LIBRTE_LATENCY_STATS
2546 latencystats_enabled = 0;
2552 launch_args_parse(argc, argv);
2554 if (tx_first && interactive)
2555 rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
2556 "interactive mode.\n");
2558 if (tx_first && lsc_interrupt) {
2559 printf("Warning: lsc_interrupt needs to be off when "
2560 " using tx_first. Disabling.\n");
2564 if (!nb_rxq && !nb_txq)
2565 printf("Warning: Either rx or tx queues should be non-zero\n");
2567 if (nb_rxq > 1 && nb_rxq > nb_txq)
2568 printf("Warning: nb_rxq=%d enables RSS configuration, "
2569 "but nb_txq=%d will prevent to fully test it.\n",
2573 if (start_port(RTE_PORT_ALL) != 0)
2574 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2576 /* set all ports to promiscuous mode by default */
2577 RTE_ETH_FOREACH_DEV(port_id)
2578 rte_eth_promiscuous_enable(port_id);
2580 /* Init metrics library */
2581 rte_metrics_init(rte_socket_id());
2583 #ifdef RTE_LIBRTE_LATENCY_STATS
2584 if (latencystats_enabled != 0) {
2585 int ret = rte_latencystats_init(1, NULL);
2587 printf("Warning: latencystats init()"
2588 " returned error %d\n", ret);
2589 printf("Latencystats running on lcore %d\n",
2590 latencystats_lcore_id);
2594 /* Setup bitrate stats */
2595 #ifdef RTE_LIBRTE_BITRATE
2596 if (bitrate_enabled != 0) {
2597 bitrate_data = rte_stats_bitrate_create();
2598 if (bitrate_data == NULL)
2599 rte_exit(EXIT_FAILURE,
2600 "Could not allocate bitrate data.\n");
2601 rte_stats_bitrate_reg(bitrate_data);
2605 #ifdef RTE_LIBRTE_CMDLINE
2606 if (strlen(cmdline_filename) != 0)
2607 cmdline_read_from_file(cmdline_filename);
2609 if (interactive == 1) {
2611 printf("Start automatic packet forwarding\n");
2612 start_packet_forwarding(0);
2624 printf("No commandline core given, start packet forwarding\n");
2625 start_packet_forwarding(tx_first);
2626 if (stats_period != 0) {
2627 uint64_t prev_time = 0, cur_time, diff_time = 0;
2628 uint64_t timer_period;
2630 /* Convert to number of cycles */
2631 timer_period = stats_period * rte_get_timer_hz();
2633 while (f_quit == 0) {
2634 cur_time = rte_get_timer_cycles();
2635 diff_time += cur_time - prev_time;
2637 if (diff_time >= timer_period) {
2639 /* Reset the timer */
2642 /* Sleep to avoid unnecessary checks */
2643 prev_time = cur_time;
2648 printf("Press enter to exit\n");
2649 rc = read(0, &c, 1);