New upstream version 17.11.1
[deb_dpdk.git] app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 uint8_t tx_first;
99 char cmdline_filename[PATH_MAX] = {0};
100
101 /*
102  * NUMA support configuration.
103  * When set, the NUMA support attempts to dispatch the allocation of the
104  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
105  * probed ports among the CPU sockets 0 and 1.
106  * Otherwise, all memory is allocated from CPU socket 0.
107  */
108 uint8_t numa_support = 1; /**< numa enabled by default */
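/*
 * A minimal sketch of how these NUMA knobs are consumed later in this file
 * (see the mbuf pool creation calls in init_config()); all names below are
 * the ones defined in this file, nothing new is assumed:
 *
 *	if (numa_support)
 *		for (i = 0; i < num_sockets; i++)
 *			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
 *					 socket_ids[i]);
 *	else
 *		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
 *				 socket_num == UMA_NO_CONFIG ? 0 : socket_num);
 */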
109
110 /*
111  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
112  * not configured.
113  */
114 uint8_t socket_num = UMA_NO_CONFIG;
115
116 /*
117  * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
118  */
119 uint8_t mp_anon = 0;
120
121 /*
122  * Record the Ethernet address of peer target ports to which packets are
123  * forwarded.
124  * Must be instantiated with the ethernet addresses of peer traffic generator
125  * ports.
126  */
127 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
128 portid_t nb_peer_eth_addrs = 0;
129
130 /*
131  * Probed Target Environment.
132  */
133 struct rte_port *ports;        /**< For all probed ethernet ports. */
134 portid_t nb_ports;             /**< Number of probed ethernet ports. */
135 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
136 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
137
138 /*
139  * Test Forwarding Configuration.
140  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
141  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
142  */
143 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
144 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
145 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
146 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
147
148 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
149 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
150
151 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
152 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
153
154 /*
155  * Forwarding engines.
156  */
157 struct fwd_engine * fwd_engines[] = {
158         &io_fwd_engine,
159         &mac_fwd_engine,
160         &mac_swap_engine,
161         &flow_gen_engine,
162         &rx_only_engine,
163         &tx_only_engine,
164         &csum_fwd_engine,
165         &icmp_echo_engine,
166 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
167         &softnic_tm_engine,
168         &softnic_tm_bypass_engine,
169 #endif
170 #ifdef RTE_LIBRTE_IEEE1588
171         &ieee1588_fwd_engine,
172 #endif
173         NULL,
174 };
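/*
 * A minimal lookup sketch (assumed here; the actual selection logic lives in
 * config.c behind the "set fwd <mode>" command): an engine is chosen by
 * matching its fwd_mode_name and installing it as cur_fwd_eng.
 *
 *	struct fwd_engine *fwd_eng;
 *	unsigned int i;
 *
 *	for (i = 0; (fwd_eng = fwd_engines[i]) != NULL; i++)
 *		if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) {
 *			cur_fwd_eng = fwd_eng;
 *			break;
 *		}
 */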
175
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
181
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
184                                       * specified on command-line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
186
187 /*
188  * In a container, the process running with the 'stats-period' option cannot
189  * be terminated directly; set a flag to exit the loop on SIGINT/SIGTERM.
190  */
191 uint8_t f_quit;
192
193 /*
194  * Configuration of packet segments used by the "txonly" processing engine.
195  */
196 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
197 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
198         TXONLY_DEF_PACKET_LEN,
199 };
200 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
201
202 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
203 /**< Split policy for packets to TX. */
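/*
 * Interactive usage sketch (command names as per the testpmd guide; exact
 * syntax assumed): the txonly layout above can be changed at run time, e.g.
 *
 *	testpmd> set txpkts 64,64,128	(three segments per packet)
 *	testpmd> set txsplit rand	(tx_pkt_split policy)
 */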
204
205 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
206 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
207
208 /* whether the current configuration is in DCB mode; 0 means it is not */
209 uint8_t dcb_config = 0;
210
211 /* Whether DCB is currently under test */
212 uint8_t dcb_test = 0;
213
214 /*
215  * Configurable number of RX/TX queues.
216  */
217 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
218 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
219
220 /*
221  * Configurable number of RX/TX ring descriptors.
222  */
223 #define RTE_TEST_RX_DESC_DEFAULT 128
224 #define RTE_TEST_TX_DESC_DEFAULT 512
225 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
226 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
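/*
 * Usage sketch (option/command names as per the testpmd guide; exact syntax
 * assumed): the defaults above can be overridden at startup or at run time:
 *
 *	testpmd -l 0-3 -n 4 -- --rxd=512 --txd=512
 *	testpmd> port config all rxd 256
 */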
227
228 #define RTE_PMD_PARAM_UNSET -1
229 /*
230  * Configurable values of RX and TX ring threshold registers.
231  */
232
233 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
234 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
236
237 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
238 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
240
241 /*
242  * Configurable value of RX free threshold.
243  */
244 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
245
246 /*
247  * Configurable value of RX drop enable.
248  */
249 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
250
251 /*
252  * Configurable value of TX free threshold.
253  */
254 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
255
256 /*
257  * Configurable value of TX RS bit threshold.
258  */
259 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
260
261 /*
262  * Configurable value of TX queue flags.
263  */
264 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
265
266 /*
267  * Receive Side Scaling (RSS) configuration.
268  */
269 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
270
271 /*
272  * Port topology configuration
273  */
274 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
275
276 /*
277  * Avoid flushing all RX queues before starting forwarding.
278  */
279 uint8_t no_flush_rx = 0; /* flush by default */
280
281 /*
282  * Flow API isolated mode.
283  */
284 uint8_t flow_isolate_all;
285
286 /*
287  * Avoid checking the link status when starting/stopping a port.
288  */
289 uint8_t no_link_check = 0; /* check by default */
290
291 /*
292  * Enable link status change notification
293  */
294 uint8_t lsc_interrupt = 1; /* enabled by default */
295
296 /*
297  * Enable device removal notification.
298  */
299 uint8_t rmv_interrupt = 1; /* enabled by default */
300
301 /*
302  * Display or mask ether events
303  * Default to all events except VF_MBOX
304  */
305 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
306                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
307                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
308                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
309                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
310                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
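/*
 * A minimal sketch of how the mask is consulted (the real check sits in the
 * Ethernet event callback further down in this file):
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("Port %u: event %d\n", (unsigned int)port_id, (int)type);
 */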
311
312 /*
313  * NIC bypass mode configuration options.
314  */
315
316 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
317 /* The NIC bypass watchdog timeout. */
318 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
319 #endif
320
321
322 #ifdef RTE_LIBRTE_LATENCY_STATS
323
324 /*
325  * Set when latency stats are enabled on the command line
326  */
327 uint8_t latencystats_enabled;
328
329 /*
330  * Lcore ID to serve latency statistics.
331  */
332 lcoreid_t latencystats_lcore_id = -1;
333
334 #endif
335
336 /*
337  * Ethernet device configuration.
338  */
339 struct rte_eth_rxmode rx_mode = {
340         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
341         .split_hdr_size = 0,
342         .header_split   = 0, /**< Header Split disabled. */
343         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
344         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
345         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
346         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
347         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
348         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
349         .hw_timestamp   = 0, /**< HW timestamp disabled. */
350 };
351
352 struct rte_fdir_conf fdir_conf = {
353         .mode = RTE_FDIR_MODE_NONE,
354         .pballoc = RTE_FDIR_PBALLOC_64K,
355         .status = RTE_FDIR_REPORT_STATUS,
356         .mask = {
357                 .vlan_tci_mask = 0x0,
358                 .ipv4_mask     = {
359                         .src_ip = 0xFFFFFFFF,
360                         .dst_ip = 0xFFFFFFFF,
361                 },
362                 .ipv6_mask     = {
363                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
364                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
365                 },
366                 .src_port_mask = 0xFFFF,
367                 .dst_port_mask = 0xFFFF,
368                 .mac_addr_byte_mask = 0xFF,
369                 .tunnel_type_mask = 1,
370                 .tunnel_id_mask = 0xFFFFFFFF,
371         },
372         .drop_queue = 127,
373 };
374
375 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
376
377 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
378 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
379
380 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
381 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
382
383 uint16_t nb_tx_queue_stats_mappings = 0;
384 uint16_t nb_rx_queue_stats_mappings = 0;
385
386 /*
387  * Display zero values for xstats by default (set xstats_hide_zero to hide them)
388  */
389 uint8_t xstats_hide_zero;
390
391 unsigned int num_sockets = 0;
392 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
393
394 #ifdef RTE_LIBRTE_BITRATE
395 /* Bitrate statistics */
396 struct rte_stats_bitrates *bitrate_data;
397 lcoreid_t bitrate_lcore_id;
398 uint8_t bitrate_enabled;
399 #endif
400
401 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
402 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
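/*
 * Usage sketch (command names as per the testpmd guide; exact syntax
 * assumed): GRO is driven per port from the interactive prompt, e.g.
 *
 *	testpmd> set port 0 gro on
 *	testpmd> set gro flush 2
 */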
403
404 /* Forward function declarations */
405 static void map_port_queue_stats_mapping_registers(portid_t pi,
406                                                    struct rte_port *port);
407 static void check_all_ports_link_status(uint32_t port_mask);
408 static int eth_event_callback(portid_t port_id,
409                               enum rte_eth_event_type type,
410                               void *param, void *ret_param);
411
412 /*
413  * Check if all the ports are started.
414  * If yes, return positive value. If not, return zero.
415  */
416 static int all_ports_started(void);
417
418 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
419 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
420
421 /*
422  * Helper function to check whether the given socket ID has already been
423  * discovered. Return zero if it has, or a positive value if it is new.
424  */
425 int
426 new_socket_id(unsigned int socket_id)
427 {
428         unsigned int i;
429
430         for (i = 0; i < num_sockets; i++) {
431                 if (socket_ids[i] == socket_id)
432                         return 0;
433         }
434         return 1;
435 }
436
437 /*
438  * Setup default configuration.
439  */
440 static void
441 set_default_fwd_lcores_config(void)
442 {
443         unsigned int i;
444         unsigned int nb_lc;
445         unsigned int sock_num;
446
447         nb_lc = 0;
448         for (i = 0; i < RTE_MAX_LCORE; i++) {
449                 sock_num = rte_lcore_to_socket_id(i);
450                 if (new_socket_id(sock_num)) {
451                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
452                                 rte_exit(EXIT_FAILURE,
453                                          "Total sockets greater than %u\n",
454                                          RTE_MAX_NUMA_NODES);
455                         }
456                         socket_ids[num_sockets++] = sock_num;
457                 }
458                 if (!rte_lcore_is_enabled(i))
459                         continue;
460                 if (i == rte_get_master_lcore())
461                         continue;
462                 fwd_lcores_cpuids[nb_lc++] = i;
463         }
464         nb_lcores = (lcoreid_t) nb_lc;
465         nb_cfg_lcores = nb_lcores;
466         nb_fwd_lcores = 1;
467 }
468
469 static void
470 set_def_peer_eth_addrs(void)
471 {
472         portid_t i;
473
474         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
475                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
476                 peer_eth_addrs[i].addr_bytes[5] = i;
477         }
478 }
479
480 static void
481 set_default_fwd_ports_config(void)
482 {
483         portid_t pt_id;
484         int i = 0;
485
486         RTE_ETH_FOREACH_DEV(pt_id)
487                 fwd_ports_ids[i++] = pt_id;
488
489         nb_cfg_ports = nb_ports;
490         nb_fwd_ports = nb_ports;
491 }
492
493 void
494 set_def_fwd_config(void)
495 {
496         set_default_fwd_lcores_config();
497         set_def_peer_eth_addrs();
498         set_default_fwd_ports_config();
499 }
500
501 /*
502  * Create an mbuf pool for a given socket; done once at init time.
503  */
504 static void
505 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
506                  unsigned int socket_id)
507 {
508         char pool_name[RTE_MEMPOOL_NAMESIZE];
509         struct rte_mempool *rte_mp = NULL;
510         uint32_t mb_size;
511
512         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
513         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
514
515         RTE_LOG(INFO, USER1,
516                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
517                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
518
519         if (mp_anon != 0) {
520                 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
521                         mb_size, (unsigned) mb_mempool_cache,
522                         sizeof(struct rte_pktmbuf_pool_private),
523                         socket_id, 0);
524                 if (rte_mp == NULL)
525                         goto err;
526
527                 if (rte_mempool_populate_anon(rte_mp) == 0) {
528                         rte_mempool_free(rte_mp);
529                         rte_mp = NULL;
530                         goto err;
531                 }
532                 rte_pktmbuf_pool_init(rte_mp, NULL);
533                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
534         } else {
535                 /* wrapper to rte_mempool_create() */
536                 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
537                         mb_mempool_cache, 0, mbuf_seg_size, socket_id);
538         }
539
540 err:
541         if (rte_mp == NULL) {
542                 rte_exit(EXIT_FAILURE,
543                         "Creation of mbuf pool for socket %u failed: %s\n",
544                         socket_id, rte_strerror(rte_errno));
545         } else if (verbose_level > 0) {
546                 rte_mempool_dump(stdout, rte_mp);
547         }
548 }
549
550 /*
551  * Check whether the given socket ID is valid in NUMA mode.
552  * Return 0 if valid, -1 otherwise.
553  */
554 static int
555 check_socket_id(const unsigned int socket_id)
556 {
557         static int warning_once = 0;
558
559         if (new_socket_id(socket_id)) {
560                 if (!warning_once && numa_support)
561                         printf("Warning: NUMA should be configured manually by"
562                                " using --port-numa-config and"
563                                " --ring-numa-config parameters along with"
564                                " --numa.\n");
565                 warning_once = 1;
566                 return -1;
567         }
568         return 0;
569 }
570
571 /*
572  * Get the allowed maximum number of RX queues.
573  * *pid returns the port ID that has the minimal value of
574  * max_rx_queues among all ports.
575  */
576 queueid_t
577 get_allowed_max_nb_rxq(portid_t *pid)
578 {
579         queueid_t allowed_max_rxq = MAX_QUEUE_ID;
580         portid_t pi;
581         struct rte_eth_dev_info dev_info;
582
583         RTE_ETH_FOREACH_DEV(pi) {
584                 rte_eth_dev_info_get(pi, &dev_info);
585                 if (dev_info.max_rx_queues < allowed_max_rxq) {
586                         allowed_max_rxq = dev_info.max_rx_queues;
587                         *pid = pi;
588                 }
589         }
590         return allowed_max_rxq;
591 }
592
593 /*
594  * Check whether the requested number of RX queues is valid.
595  * It is valid if it does not exceed the smallest max_rx_queues
596  * value among all ports.
597  * Return 0 if valid, -1 otherwise.
598  */
599 int
600 check_nb_rxq(queueid_t rxq)
601 {
602         queueid_t allowed_max_rxq;
603         portid_t pid = 0;
604
605         allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606         if (rxq > allowed_max_rxq) {
607                 printf("Fail: input rxq (%u) can't be greater "
608                        "than max_rx_queues (%u) of port %u\n",
609                        rxq,
610                        allowed_max_rxq,
611                        pid);
612                 return -1;
613         }
614         return 0;
615 }
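/*
 * A minimal caller sketch (assumed; the real caller is the --rxq/--txq option
 * handling in parameters.c): validate a requested queue count before applying it.
 *
 *	if (check_nb_rxq(n) != 0)
 *		rte_exit(EXIT_FAILURE, "invalid rxq %u\n", n);
 *	nb_rxq = n;
 */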
616
617 /*
618  * Get the allowed maximum number of TX queues.
619  * *pid returns the port ID that has the minimal value of
620  * max_tx_queues among all ports.
621  */
622 queueid_t
623 get_allowed_max_nb_txq(portid_t *pid)
624 {
625         queueid_t allowed_max_txq = MAX_QUEUE_ID;
626         portid_t pi;
627         struct rte_eth_dev_info dev_info;
628
629         RTE_ETH_FOREACH_DEV(pi) {
630                 rte_eth_dev_info_get(pi, &dev_info);
631                 if (dev_info.max_tx_queues < allowed_max_txq) {
632                         allowed_max_txq = dev_info.max_tx_queues;
633                         *pid = pi;
634                 }
635         }
636         return allowed_max_txq;
637 }
638
639 /*
640  * Check whether the requested number of TX queues is valid.
641  * It is valid if it does not exceed the smallest max_tx_queues
642  * value among all ports.
643  * Return 0 if valid, -1 otherwise.
644  */
645 int
646 check_nb_txq(queueid_t txq)
647 {
648         queueid_t allowed_max_txq;
649         portid_t pid = 0;
650
651         allowed_max_txq = get_allowed_max_nb_txq(&pid);
652         if (txq > allowed_max_txq) {
653                 printf("Fail: input txq (%u) can't be greater "
654                        "than max_tx_queues (%u) of port %u\n",
655                        txq,
656                        allowed_max_txq,
657                        pid);
658                 return -1;
659         }
660         return 0;
661 }
662
663 static void
664 init_config(void)
665 {
666         portid_t pid;
667         struct rte_port *port;
668         struct rte_mempool *mbp;
669         unsigned int nb_mbuf_per_pool;
670         lcoreid_t  lc_id;
671         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
672         struct rte_gro_param gro_param;
673         uint32_t gso_types;
674
675         memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
676
677         if (numa_support) {
678                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
679                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
680                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
681         }
682
683         /* Configuration of logical cores. */
684         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
685                                 sizeof(struct fwd_lcore *) * nb_lcores,
686                                 RTE_CACHE_LINE_SIZE);
687         if (fwd_lcores == NULL) {
688                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
689                                                         "failed\n", nb_lcores);
690         }
691         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
692                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
693                                                sizeof(struct fwd_lcore),
694                                                RTE_CACHE_LINE_SIZE);
695                 if (fwd_lcores[lc_id] == NULL) {
696                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
697                                                                 "failed\n");
698                 }
699                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
700         }
701
702         RTE_ETH_FOREACH_DEV(pid) {
703                 port = &ports[pid];
704                 rte_eth_dev_info_get(pid, &port->dev_info);
705
706                 if (numa_support) {
707                         if (port_numa[pid] != NUMA_NO_CONFIG)
708                                 port_per_socket[port_numa[pid]]++;
709                         else {
710                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
711
712                                 /* if socket_id is invalid, set to 0 */
713                                 if (check_socket_id(socket_id) < 0)
714                                         socket_id = 0;
715                                 port_per_socket[socket_id]++;
716                         }
717                 }
718
719                 /* set flag to initialize port/queue */
720                 port->need_reconfig = 1;
721                 port->need_reconfig_queues = 1;
722         }
723
724         /*
725          * Create mbuf pools.
726          * If NUMA support is disabled, create a single mbuf pool in
727          * socket 0 memory by default.
728          * Otherwise, create an mbuf pool in the memory of each detected socket.
729          *
730          * Size the pools for the maximum allowed values of nb_rxd and nb_txd,
731          * so that both can be reconfigured at run time.
732          */
733         if (param_total_num_mbufs)
734                 nb_mbuf_per_pool = param_total_num_mbufs;
735         else {
736                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
737                         (nb_lcores * mb_mempool_cache) +
738                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
739                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
740         }
741
742         if (numa_support) {
743                 uint8_t i;
744
745                 for (i = 0; i < num_sockets; i++)
746                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
747                                          socket_ids[i]);
748         } else {
749                 if (socket_num == UMA_NO_CONFIG)
750                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
751                 else
752                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
753                                                  socket_num);
754         }
755
756         init_port_config();
757
758         gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
759                 DEV_TX_OFFLOAD_GRE_TNL_TSO;
760         /*
761          * Record which mbuf pool each logical core uses, if needed.
762          */
763         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
764                 mbp = mbuf_pool_find(
765                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
766
767                 if (mbp == NULL)
768                         mbp = mbuf_pool_find(0);
769                 fwd_lcores[lc_id]->mbp = mbp;
770                 /* initialize GSO context */
771                 fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
772                 fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
773                 fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
774                 fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
775                         ETHER_CRC_LEN;
776                 fwd_lcores[lc_id]->gso_ctx.flag = 0;
777         }
778
779         /* Configuration of packet forwarding streams. */
780         if (init_fwd_streams() < 0)
781                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
782
783         fwd_config_setup();
784
785         /* create a gro context for each lcore */
786         gro_param.gro_types = RTE_GRO_TCP_IPV4;
787         gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
788         gro_param.max_item_per_flow = MAX_PKT_BURST;
789         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
790                 gro_param.socket_id = rte_lcore_to_socket_id(
791                                 fwd_lcores_cpuids[lc_id]);
792                 fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
793                 if (fwd_lcores[lc_id]->gro_ctx == NULL) {
794                         rte_exit(EXIT_FAILURE,
795                                         "rte_gro_ctx_create() failed\n");
796                 }
797         }
798 }
799
800
801 void
802 reconfig(portid_t new_port_id, unsigned socket_id)
803 {
804         struct rte_port *port;
805
806         /* Reconfiguration of Ethernet ports. */
807         port = &ports[new_port_id];
808         rte_eth_dev_info_get(new_port_id, &port->dev_info);
809
810         /* set flag to initialize port/queue */
811         port->need_reconfig = 1;
812         port->need_reconfig_queues = 1;
813         port->socket_id = socket_id;
814
815         init_port_config();
816 }
817
818
819 int
820 init_fwd_streams(void)
821 {
822         portid_t pid;
823         struct rte_port *port;
824         streamid_t sm_id, nb_fwd_streams_new;
825         queueid_t q;
826
827         /* set the socket ID according to whether NUMA is enabled */
828         RTE_ETH_FOREACH_DEV(pid) {
829                 port = &ports[pid];
830                 if (nb_rxq > port->dev_info.max_rx_queues) {
831                         printf("Fail: nb_rxq(%d) is greater than "
832                                 "max_rx_queues(%d)\n", nb_rxq,
833                                 port->dev_info.max_rx_queues);
834                         return -1;
835                 }
836                 if (nb_txq > port->dev_info.max_tx_queues) {
837                         printf("Fail: nb_txq(%d) is greater than "
838                                 "max_tx_queues(%d)\n", nb_txq,
839                                 port->dev_info.max_tx_queues);
840                         return -1;
841                 }
842                 if (numa_support) {
843                         if (port_numa[pid] != NUMA_NO_CONFIG)
844                                 port->socket_id = port_numa[pid];
845                         else {
846                                 port->socket_id = rte_eth_dev_socket_id(pid);
847
848                                 /* if socket_id is invalid, set to 0 */
849                                 if (check_socket_id(port->socket_id) < 0)
850                                         port->socket_id = 0;
851                         }
852                 }
853                 else {
854                         if (socket_num == UMA_NO_CONFIG)
855                                 port->socket_id = 0;
856                         else
857                                 port->socket_id = socket_num;
858                 }
859         }
860
861         q = RTE_MAX(nb_rxq, nb_txq);
862         if (q == 0) {
863                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
864                 return -1;
865         }
866         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
867         if (nb_fwd_streams_new == nb_fwd_streams)
868                 return 0;
869         /* clear the old */
870         if (fwd_streams != NULL) {
871                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
872                         if (fwd_streams[sm_id] == NULL)
873                                 continue;
874                         rte_free(fwd_streams[sm_id]);
875                         fwd_streams[sm_id] = NULL;
876                 }
877                 rte_free(fwd_streams);
878                 fwd_streams = NULL;
879         }
880
881         /* init new */
882         nb_fwd_streams = nb_fwd_streams_new;
883         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
884                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
885         if (fwd_streams == NULL)
886                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
887                                                 "failed\n", nb_fwd_streams);
888
889         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
890                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
891                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
892                 if (fwd_streams[sm_id] == NULL)
893                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
894                                                                 " failed\n");
895         }
896
897         return 0;
898 }
899
900 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
901 static void
902 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
903 {
904         unsigned int total_burst;
905         unsigned int nb_burst;
906         unsigned int burst_stats[3];
907         uint16_t pktnb_stats[3];
908         uint16_t nb_pkt;
909         int burst_percent[3];
910
911         /*
912          * First compute the total number of packet bursts and the
913          * two highest numbers of bursts of the same number of packets.
914          */
915         total_burst = 0;
916         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
917         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
918         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
919                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
920                 if (nb_burst == 0)
921                         continue;
922                 total_burst += nb_burst;
923                 if (nb_burst > burst_stats[0]) {
924                         burst_stats[1] = burst_stats[0];
925                         pktnb_stats[1] = pktnb_stats[0];
926                         burst_stats[0] = nb_burst;
927                         pktnb_stats[0] = nb_pkt;
928                 }
929         }
930         if (total_burst == 0)
931                 return;
932         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
933         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
934                burst_percent[0], (int) pktnb_stats[0]);
935         if (burst_stats[0] == total_burst) {
936                 printf("]\n");
937                 return;
938         }
939         if (burst_stats[0] + burst_stats[1] == total_burst) {
940                 printf(" + %d%% of %d pkts]\n",
941                        100 - burst_percent[0], pktnb_stats[1]);
942                 return;
943         }
944         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
945         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
946         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
947                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
948                 return;
949         }
950         printf(" + %d%% of %d pkts + %d%% of others]\n",
951                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
952 }
953 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
954
955 static void
956 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
957 {
958         struct rte_port *port;
959         uint8_t i;
960
961         static const char *fwd_stats_border = "----------------------";
962
963         port = &ports[port_id];
964         printf("\n  %s Forward statistics for port %-2d %s\n",
965                fwd_stats_border, port_id, fwd_stats_border);
966
967         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
968                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
969                        "%-"PRIu64"\n",
970                        stats->ipackets, stats->imissed,
971                        (uint64_t) (stats->ipackets + stats->imissed));
972
973                 if (cur_fwd_eng == &csum_fwd_engine)
974                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
975                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
976                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
977                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
978                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
979                 }
980
981                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
982                        "%-"PRIu64"\n",
983                        stats->opackets, port->tx_dropped,
984                        (uint64_t) (stats->opackets + port->tx_dropped));
985         }
986         else {
987                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
988                        "%14"PRIu64"\n",
989                        stats->ipackets, stats->imissed,
990                        (uint64_t) (stats->ipackets + stats->imissed));
991
992                 if (cur_fwd_eng == &csum_fwd_engine)
993                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
994                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
995                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
996                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
997                         printf("  RX-nombufs:             %14"PRIu64"\n",
998                                stats->rx_nombuf);
999                 }
1000
1001                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
1002                        "%14"PRIu64"\n",
1003                        stats->opackets, port->tx_dropped,
1004                        (uint64_t) (stats->opackets + port->tx_dropped));
1005         }
1006
1007 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1008         if (port->rx_stream)
1009                 pkt_burst_stats_display("RX",
1010                         &port->rx_stream->rx_burst_stats);
1011         if (port->tx_stream)
1012                 pkt_burst_stats_display("TX",
1013                         &port->tx_stream->tx_burst_stats);
1014 #endif
1015
1016         if (port->rx_queue_stats_mapping_enabled) {
1017                 printf("\n");
1018                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1019                         printf("  Stats reg %2d RX-packets:%14"PRIu64
1020                                "     RX-errors:%14"PRIu64
1021                                "    RX-bytes:%14"PRIu64"\n",
1022                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
1023                 }
1024                 printf("\n");
1025         }
1026         if (port->tx_queue_stats_mapping_enabled) {
1027                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
1028                         printf("  Stats reg %2d TX-packets:%14"PRIu64
1029                                "                                 TX-bytes:%14"PRIu64"\n",
1030                                i, stats->q_opackets[i], stats->q_obytes[i]);
1031                 }
1032         }
1033
1034         printf("  %s--------------------------------%s\n",
1035                fwd_stats_border, fwd_stats_border);
1036 }
1037
1038 static void
1039 fwd_stream_stats_display(streamid_t stream_id)
1040 {
1041         struct fwd_stream *fs;
1042         static const char *fwd_top_stats_border = "-------";
1043
1044         fs = fwd_streams[stream_id];
1045         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
1046             (fs->fwd_dropped == 0))
1047                 return;
1048         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
1049                "TX Port=%2d/Queue=%2d %s\n",
1050                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
1051                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
1052         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
1053                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
1054
1055         /* if checksum mode */
1056         if (cur_fwd_eng == &csum_fwd_engine) {
1057                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
1058                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
1059         }
1060
1061 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1062         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
1063         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
1064 #endif
1065 }
1066
1067 static void
1068 flush_fwd_rx_queues(void)
1069 {
1070         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
1071         portid_t  rxp;
1072         portid_t port_id;
1073         queueid_t rxq;
1074         uint16_t  nb_rx;
1075         uint16_t  i;
1076         uint8_t   j;
1077         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
1078         uint64_t timer_period;
1079
1080         /* convert to number of cycles */
1081         timer_period = rte_get_timer_hz(); /* 1 second timeout */
1082
1083         for (j = 0; j < 2; j++) {
1084                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
1085                         for (rxq = 0; rxq < nb_rxq; rxq++) {
1086                                 port_id = fwd_ports_ids[rxp];
1087                                 /*
1088                                  * testpmd can get stuck in the do-while loop
1089                                  * below if rte_eth_rx_burst() keeps returning
1090                                  * packets, so a timer is used to exit the loop
1091                                  * after a 1 second timeout.
1092                                  */
1093                                 prev_tsc = rte_rdtsc();
1094                                 do {
1095                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
1096                                                 pkts_burst, MAX_PKT_BURST);
1097                                         for (i = 0; i < nb_rx; i++)
1098                                                 rte_pktmbuf_free(pkts_burst[i]);
1099
1100                                         cur_tsc = rte_rdtsc();
1101                                         diff_tsc = cur_tsc - prev_tsc;
1102                                         timer_tsc += diff_tsc;
1103                                 } while ((nb_rx > 0) &&
1104                                         (timer_tsc < timer_period));
1105                                 timer_tsc = 0;
1106                         }
1107                 }
1108                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
1109         }
1110 }
1111
1112 static void
1113 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
1114 {
1115         struct fwd_stream **fsm;
1116         streamid_t nb_fs;
1117         streamid_t sm_id;
1118 #ifdef RTE_LIBRTE_BITRATE
1119         uint64_t tics_per_1sec;
1120         uint64_t tics_datum;
1121         uint64_t tics_current;
1122         uint8_t idx_port, cnt_ports;
1123
1124         cnt_ports = rte_eth_dev_count();
1125         tics_datum = rte_rdtsc();
1126         tics_per_1sec = rte_get_timer_hz();
1127 #endif
1128         fsm = &fwd_streams[fc->stream_idx];
1129         nb_fs = fc->stream_nb;
1130         do {
1131                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
1132                         (*pkt_fwd)(fsm[sm_id]);
1133 #ifdef RTE_LIBRTE_BITRATE
1134                 if (bitrate_enabled != 0 &&
1135                                 bitrate_lcore_id == rte_lcore_id()) {
1136                         tics_current = rte_rdtsc();
1137                         if (tics_current - tics_datum >= tics_per_1sec) {
1138                                 /* Periodic bitrate calculation */
1139                                 for (idx_port = 0;
1140                                                 idx_port < cnt_ports;
1141                                                 idx_port++)
1142                                         rte_stats_bitrate_calc(bitrate_data,
1143                                                 idx_port);
1144                                 tics_datum = tics_current;
1145                         }
1146                 }
1147 #endif
1148 #ifdef RTE_LIBRTE_LATENCY_STATS
1149                 if (latencystats_enabled != 0 &&
1150                                 latencystats_lcore_id == rte_lcore_id())
1151                         rte_latencystats_update();
1152 #endif
1153
1154         } while (! fc->stopped);
1155 }
1156
1157 static int
1158 start_pkt_forward_on_core(void *fwd_arg)
1159 {
1160         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1161                              cur_fwd_config.fwd_eng->packet_fwd);
1162         return 0;
1163 }
1164
1165 /*
1166  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1167  * Used to start communication flows in network loopback test configurations.
1168  */
1169 static int
1170 run_one_txonly_burst_on_core(void *fwd_arg)
1171 {
1172         struct fwd_lcore *fwd_lc;
1173         struct fwd_lcore tmp_lcore;
1174
1175         fwd_lc = (struct fwd_lcore *) fwd_arg;
1176         tmp_lcore = *fwd_lc;
1177         tmp_lcore.stopped = 1;
1178         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1179         return 0;
1180 }
1181
1182 /*
1183  * Launch packet forwarding:
1184  *     - Setup per-port forwarding context.
1185  *     - Launch logical cores with their forwarding configuration.
1186  */
1187 static void
1188 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1189 {
1190         port_fwd_begin_t port_fwd_begin;
1191         unsigned int i;
1192         unsigned int lc_id;
1193         int diag;
1194
1195         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1196         if (port_fwd_begin != NULL) {
1197                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1198                         (*port_fwd_begin)(fwd_ports_ids[i]);
1199         }
1200         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1201                 lc_id = fwd_lcores_cpuids[i];
1202                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1203                         fwd_lcores[i]->stopped = 0;
1204                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1205                                                      fwd_lcores[i], lc_id);
1206                         if (diag != 0)
1207                                 printf("launch lcore %u failed - diag=%d\n",
1208                                        lc_id, diag);
1209                 }
1210         }
1211 }
1212
1213 /*
1214  * Launch packet forwarding configuration.
1215  */
1216 void
1217 start_packet_forwarding(int with_tx_first)
1218 {
1219         port_fwd_begin_t port_fwd_begin;
1220         port_fwd_end_t  port_fwd_end;
1221         struct rte_port *port;
1222         unsigned int i;
1223         portid_t   pt_id;
1224         streamid_t sm_id;
1225
1226         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1227                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1228
1229         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1230                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1231
1232         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1233                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1234                 (!nb_rxq || !nb_txq))
1235                 rte_exit(EXIT_FAILURE,
1236                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1237                         cur_fwd_eng->fwd_mode_name);
1238
1239         if (all_ports_started() == 0) {
1240                 printf("Not all ports were started\n");
1241                 return;
1242         }
1243         if (test_done == 0) {
1244                 printf("Packet forwarding already started\n");
1245                 return;
1246         }
1247
1248         if (init_fwd_streams() < 0) {
1249                 printf("Fail from init_fwd_streams()\n");
1250                 return;
1251         }
1252
1253         if(dcb_test) {
1254                 for (i = 0; i < nb_fwd_ports; i++) {
1255                         pt_id = fwd_ports_ids[i];
1256                         port = &ports[pt_id];
1257                         if (!port->dcb_flag) {
1258                                 printf("In DCB mode, all forwarding ports must "
1259                                        "be configured in this mode.\n");
1260                                 return;
1261                         }
1262                 }
1263                 if (nb_fwd_lcores == 1) {
1264                         printf("In DCB mode, the number of forwarding "
1265                                "cores should be larger than 1.\n");
1266                         return;
1267                 }
1268         }
1269         test_done = 0;
1270
1271         if(!no_flush_rx)
1272                 flush_fwd_rx_queues();
1273
1274         fwd_config_setup();
1275         pkt_fwd_config_display(&cur_fwd_config);
1276         rxtx_config_display();
1277
1278         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1279                 pt_id = fwd_ports_ids[i];
1280                 port = &ports[pt_id];
1281                 rte_eth_stats_get(pt_id, &port->stats);
1282                 port->tx_dropped = 0;
1283
1284                 map_port_queue_stats_mapping_registers(pt_id, port);
1285         }
1286         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1287                 fwd_streams[sm_id]->rx_packets = 0;
1288                 fwd_streams[sm_id]->tx_packets = 0;
1289                 fwd_streams[sm_id]->fwd_dropped = 0;
1290                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1291                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1292
1293 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1294                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1295                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1296                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1297                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1298 #endif
1299 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1300                 fwd_streams[sm_id]->core_cycles = 0;
1301 #endif
1302         }
1303         if (with_tx_first) {
1304                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1305                 if (port_fwd_begin != NULL) {
1306                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1307                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1308                 }
1309                 while (with_tx_first--) {
1310                         launch_packet_forwarding(
1311                                         run_one_txonly_burst_on_core);
1312                         rte_eal_mp_wait_lcore();
1313                 }
1314                 port_fwd_end = tx_only_engine.port_fwd_end;
1315                 if (port_fwd_end != NULL) {
1316                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1317                                 (*port_fwd_end)(fwd_ports_ids[i]);
1318                 }
1319         }
1320         launch_packet_forwarding(start_pkt_forward_on_core);
1321 }
1322
1323 void
1324 stop_packet_forwarding(void)
1325 {
1326         struct rte_eth_stats stats;
1327         struct rte_port *port;
1328         port_fwd_end_t  port_fwd_end;
1329         int i;
1330         portid_t   pt_id;
1331         streamid_t sm_id;
1332         lcoreid_t  lc_id;
1333         uint64_t total_recv;
1334         uint64_t total_xmit;
1335         uint64_t total_rx_dropped;
1336         uint64_t total_tx_dropped;
1337         uint64_t total_rx_nombuf;
1338         uint64_t tx_dropped;
1339         uint64_t rx_bad_ip_csum;
1340         uint64_t rx_bad_l4_csum;
1341 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1342         uint64_t fwd_cycles;
1343 #endif
1344
1345         static const char *acc_stats_border = "+++++++++++++++";
1346
1347         if (test_done) {
1348                 printf("Packet forwarding not started\n");
1349                 return;
1350         }
1351         printf("Telling cores to stop...");
1352         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1353                 fwd_lcores[lc_id]->stopped = 1;
1354         printf("\nWaiting for lcores to finish...\n");
1355         rte_eal_mp_wait_lcore();
1356         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1357         if (port_fwd_end != NULL) {
1358                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1359                         pt_id = fwd_ports_ids[i];
1360                         (*port_fwd_end)(pt_id);
1361                 }
1362         }
1363 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1364         fwd_cycles = 0;
1365 #endif
1366         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1367                 if (cur_fwd_config.nb_fwd_streams >
1368                     cur_fwd_config.nb_fwd_ports) {
1369                         fwd_stream_stats_display(sm_id);
1370                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1371                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1372                 } else {
1373                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1374                                 fwd_streams[sm_id];
1375                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1376                                 fwd_streams[sm_id];
1377                 }
1378                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1379                 tx_dropped = (uint64_t) (tx_dropped +
1380                                          fwd_streams[sm_id]->fwd_dropped);
1381                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1382
1383                 rx_bad_ip_csum =
1384                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1385                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1386                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1387                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1388                                                         rx_bad_ip_csum;
1389
1390                 rx_bad_l4_csum =
1391                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1392                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1393                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1394                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1395                                                         rx_bad_l4_csum;
1396
1397 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1398                 fwd_cycles = (uint64_t) (fwd_cycles +
1399                                          fwd_streams[sm_id]->core_cycles);
1400 #endif
1401         }
1402         total_recv = 0;
1403         total_xmit = 0;
1404         total_rx_dropped = 0;
1405         total_tx_dropped = 0;
1406         total_rx_nombuf  = 0;
1407         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1408                 pt_id = fwd_ports_ids[i];
1409
1410                 port = &ports[pt_id];
1411                 rte_eth_stats_get(pt_id, &stats);
1412                 stats.ipackets -= port->stats.ipackets;
1413                 port->stats.ipackets = 0;
1414                 stats.opackets -= port->stats.opackets;
1415                 port->stats.opackets = 0;
1416                 stats.ibytes   -= port->stats.ibytes;
1417                 port->stats.ibytes = 0;
1418                 stats.obytes   -= port->stats.obytes;
1419                 port->stats.obytes = 0;
1420                 stats.imissed  -= port->stats.imissed;
1421                 port->stats.imissed = 0;
1422                 stats.oerrors  -= port->stats.oerrors;
1423                 port->stats.oerrors = 0;
1424                 stats.rx_nombuf -= port->stats.rx_nombuf;
1425                 port->stats.rx_nombuf = 0;
1426
1427                 total_recv += stats.ipackets;
1428                 total_xmit += stats.opackets;
1429                 total_rx_dropped += stats.imissed;
1430                 total_tx_dropped += port->tx_dropped;
1431                 total_rx_nombuf  += stats.rx_nombuf;
1432
1433                 fwd_port_stats_display(pt_id, &stats);
1434         }
1435
1436         printf("\n  %s Accumulated forward statistics for all ports"
1437                "%s\n",
1438                acc_stats_border, acc_stats_border);
1439         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64" RX-total: "
1440                "%-"PRIu64"\n"
1441                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64" TX-total: "
1442                "%-"PRIu64"\n",
1443                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1444                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1445         if (total_rx_nombuf > 0)
1446                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1447         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1448                "%s\n",
1449                acc_stats_border, acc_stats_border);
1450 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1451         if (total_recv > 0)
1452                 printf("\n  CPU cycles/packet=%u (total cycles="
1453                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1454                        (unsigned int)(fwd_cycles / total_recv),
1455                        fwd_cycles, total_recv);
1456 #endif
1457         printf("\nDone.\n");
1458         test_done = 1;
1459 }
1460
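/*
 * Administratively bring the link of a port up or down via
 * rte_eth_dev_set_link_up()/rte_eth_dev_set_link_down(); a message is
 * printed when the PMD reports failure (e.g. the operation is not
 * supported by the driver).
 */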
1461 void
1462 dev_set_link_up(portid_t pid)
1463 {
1464         if (rte_eth_dev_set_link_up(pid) < 0)
1465                 printf("\nSet link up failed.\n");
1466 }
1467
1468 void
1469 dev_set_link_down(portid_t pid)
1470 {
1471         if (rte_eth_dev_set_link_down(pid) < 0)
1472                 printf("\nSet link down failed.\n");
1473 }
1474
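/*
 * Return 1 when every non-slave port is in RTE_PORT_STARTED state,
 * 0 otherwise. Bonding slave ports are skipped via their slave_flag.
 */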
1475 static int
1476 all_ports_started(void)
1477 {
1478         portid_t pi;
1479         struct rte_port *port;
1480
1481         RTE_ETH_FOREACH_DEV(pi) {
1482                 port = &ports[pi];
1483                 /* Check if there is a port which is not started */
1484                 if ((port->port_status != RTE_PORT_STARTED) &&
1485                         (port->slave_flag == 0))
1486                         return 0;
1487         }
1488
1489         /* All non-slave ports are started */
1490         return 1;
1491 }
1492
1493 int
1494 all_ports_stopped(void)
1495 {
1496         portid_t pi;
1497         struct rte_port *port;
1498
1499         RTE_ETH_FOREACH_DEV(pi) {
1500                 port = &ports[pi];
1501                 if ((port->port_status != RTE_PORT_STOPPED) &&
1502                         (port->slave_flag == 0))
1503                         return 0;
1504         }
1505
1506         return 1;
1507 }
1508
1509 int
1510 port_is_started(portid_t port_id)
1511 {
1512         if (port_id_is_invalid(port_id, ENABLED_WARN))
1513                 return 0;
1514
1515         if (ports[port_id].port_status != RTE_PORT_STARTED)
1516                 return 0;
1517
1518         return 1;
1519 }
1520
1521 static int
1522 port_is_closed(portid_t port_id)
1523 {
1524         if (port_id_is_invalid(port_id, ENABLED_WARN))
1525                 return 0;
1526
1527         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1528                 return 0;
1529
1530         return 1;
1531 }
1532
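/*
 * Start one port, or all ports when pid == RTE_PORT_ALL. For each selected
 * port this (re)configures the device and its RX/TX queues if flagged as
 * needing it, registers eth_event_callback() for every event type, starts
 * the device, prints its MAC address and finally triggers a link-status
 * check. Returns 0 on success, -1 on a configuration or queue-setup failure.
 */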
1533 int
1534 start_port(portid_t pid)
1535 {
1536         int diag, need_check_link_status = -1;
1537         portid_t pi;
1538         queueid_t qi;
1539         struct rte_port *port;
1540         struct ether_addr mac_addr;
1541         enum rte_eth_event_type event_type;
1542
1543         if (port_id_is_invalid(pid, ENABLED_WARN))
1544                 return 0;
1545
1546         if (dcb_config)
1547                 dcb_test = 1;
1548         RTE_ETH_FOREACH_DEV(pi) {
1549                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1550                         continue;
1551
1552                 need_check_link_status = 0;
1553                 port = &ports[pi];
1554                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1555                                                  RTE_PORT_HANDLING) == 0) {
1556                         printf("Port %d is not stopped\n", pi);
1557                         continue;
1558                 }
1559
1560                 if (port->need_reconfig > 0) {
1561                         port->need_reconfig = 0;
1562
1563                         if (flow_isolate_all) {
1564                                 int ret = port_flow_isolate(pi, 1);
1565                                 if (ret) {
1566                                         printf("Failed to apply isolated"
1567                                                " mode on port %d\n", pi);
1568                                         return -1;
1569                                 }
1570                         }
1571
1572                         printf("Configuring Port %d (socket %u)\n", pi,
1573                                         port->socket_id);
1574                         /* configure port */
1575                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1576                                                 &(port->dev_conf));
1577                         if (diag != 0) {
1578                                 if (rte_atomic16_cmpset(&(port->port_status),
1579                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1580                                         printf("Port %d cannot be set back "
1581                                                         "to stopped\n", pi);
1582                                 printf("Failed to configure port %d\n", pi);
1583                                 /* try to reconfigure port next time */
1584                                 port->need_reconfig = 1;
1585                                 return -1;
1586                         }
1587                 }
1588                 if (port->need_reconfig_queues > 0) {
1589                         port->need_reconfig_queues = 0;
1590                         /* setup tx queues */
1591                         for (qi = 0; qi < nb_txq; qi++) {
1592                                 if ((numa_support) &&
1593                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1594                                         diag = rte_eth_tx_queue_setup(pi, qi,
1595                                                 nb_txd, txring_numa[pi],
1596                                                 &(port->tx_conf));
1597                                 else
1598                                         diag = rte_eth_tx_queue_setup(pi, qi,
1599                                                 nb_txd, port->socket_id,
1600                                                 &(port->tx_conf));
1601
1602                                 if (diag == 0)
1603                                         continue;
1604
1605                                 /* Failed to set up a tx queue; roll back port status and return */
1606                                 if (rte_atomic16_cmpset(&(port->port_status),
1607                                                         RTE_PORT_HANDLING,
1608                                                         RTE_PORT_STOPPED) == 0)
1609                                         printf("Port %d cannot be set back "
1610                                                         "to stopped\n", pi);
1611                                 printf("Failed to configure port %d tx queues\n", pi);
1612                                 /* try to reconfigure queues next time */
1613                                 port->need_reconfig_queues = 1;
1614                                 return -1;
1615                         }
1616                         /* setup rx queues */
1617                         for (qi = 0; qi < nb_rxq; qi++) {
1618                                 if ((numa_support) &&
1619                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1620                                         struct rte_mempool *mp =
1621                                                 mbuf_pool_find(rxring_numa[pi]);
1622                                         if (mp == NULL) {
1623                                                 printf("Failed to setup RX queue:"
1624                                                         " no mempool allocation"
1625                                                         " on socket %d\n",
1626                                                         rxring_numa[pi]);
1627                                                 return -1;
1628                                         }
1629
1630                                         diag = rte_eth_rx_queue_setup(pi, qi,
1631                                              nb_rxd, rxring_numa[pi],
1632                                              &(port->rx_conf), mp);
1633                                 } else {
1634                                         struct rte_mempool *mp =
1635                                                 mbuf_pool_find(port->socket_id);
1636                                         if (mp == NULL) {
1637                                                 printf("Failed to setup RX queue:"
1638                                                         " no mempool allocation"
1639                                                         " on socket %d\n",
1640                                                         port->socket_id);
1641                                                 return -1;
1642                                         }
1643                                         diag = rte_eth_rx_queue_setup(pi, qi,
1644                                              nb_rxd, port->socket_id,
1645                                              &(port->rx_conf), mp);
1646                                 }
1647                                 if (diag == 0)
1648                                         continue;
1649
1650                                 /* Failed to set up an rx queue; roll back port status and return */
1651                                 if (rte_atomic16_cmpset(&(port->port_status),
1652                                                         RTE_PORT_HANDLING,
1653                                                         RTE_PORT_STOPPED) == 0)
1654                                         printf("Port %d cannot be set back "
1655                                                         "to stopped\n", pi);
1656                                 printf("Failed to configure port %d rx queues\n", pi);
1657                                 /* try to reconfigure queues next time */
1658                                 port->need_reconfig_queues = 1;
1659                                 return -1;
1660                         }
1661                 }
1662
1663                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1664                      event_type < RTE_ETH_EVENT_MAX;
1665                      event_type++) {
1666                         diag = rte_eth_dev_callback_register(pi,
1667                                                         event_type,
1668                                                         eth_event_callback,
1669                                                         NULL);
1670                         if (diag) {
1671                                 printf("Failed to setup event callback for event %d\n",
1672                                         event_type);
1673                                 return -1;
1674                         }
1675                 }
1676
1677                 /* start port */
1678                 if (rte_eth_dev_start(pi) < 0) {
1679                         printf("Fail to start port %d\n", pi);
1680
1681                         /* Failed to start the port; roll back its status to stopped */
1682                         if (rte_atomic16_cmpset(&(port->port_status),
1683                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1684                                 printf("Port %d cannot be set back to "
1685                                                         "stopped\n", pi);
1686                         continue;
1687                 }
1688
1689                 if (rte_atomic16_cmpset(&(port->port_status),
1690                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1691                         printf("Port %d cannot be set to started state\n", pi);
1692
1693                 rte_eth_macaddr_get(pi, &mac_addr);
1694                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1695                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1696                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1697                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1698
1699                 /* at least one port started, need checking link status */
1700                 need_check_link_status = 1;
1701         }
1702
1703         if (need_check_link_status == 1 && !no_link_check)
1704                 check_all_ports_link_status(RTE_PORT_ALL);
1705         else if (need_check_link_status == 0)
1706                 printf("Please stop the ports first\n");
1707
1708         printf("Done\n");
1709         return 0;
1710 }
1711
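/*
 * Stop one port, or all ports when pid == RTE_PORT_ALL. Ports that are
 * still part of the forwarding configuration or that act as bonding
 * slaves are skipped; the port state is moved STARTED -> HANDLING ->
 * STOPPED around rte_eth_dev_stop().
 */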
1712 void
1713 stop_port(portid_t pid)
1714 {
1715         portid_t pi;
1716         struct rte_port *port;
1717         int need_check_link_status = 0;
1718
1719         if (dcb_test) {
1720                 dcb_test = 0;
1721                 dcb_config = 0;
1722         }
1723
1724         if (port_id_is_invalid(pid, ENABLED_WARN))
1725                 return;
1726
1727         printf("Stopping ports...\n");
1728
1729         RTE_ETH_FOREACH_DEV(pi) {
1730                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1731                         continue;
1732
1733                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1734                         printf("Please remove port %d from forwarding configuration.\n", pi);
1735                         continue;
1736                 }
1737
1738                 if (port_is_bonding_slave(pi)) {
1739                         printf("Please remove port %d from bonded device.\n", pi);
1740                         continue;
1741                 }
1742
1743                 port = &ports[pi];
1744                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1745                                                 RTE_PORT_HANDLING) == 0)
1746                         continue;
1747
1748                 rte_eth_dev_stop(pi);
1749
1750                 if (rte_atomic16_cmpset(&(port->port_status),
1751                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1752                         printf("Port %d cannot be set to stopped state\n", pi);
1753                 need_check_link_status = 1;
1754         }
1755         if (need_check_link_status && !no_link_check)
1756                 check_all_ports_link_status(RTE_PORT_ALL);
1757
1758         printf("Done\n");
1759 }
1760
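/*
 * Close one port, or all ports when pid == RTE_PORT_ALL. Any flow rules
 * created through the flow API are flushed before rte_eth_dev_close(),
 * and the port state is moved STOPPED -> HANDLING -> CLOSED.
 */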
1761 void
1762 close_port(portid_t pid)
1763 {
1764         portid_t pi;
1765         struct rte_port *port;
1766
1767         if (port_id_is_invalid(pid, ENABLED_WARN))
1768                 return;
1769
1770         printf("Closing ports...\n");
1771
1772         RTE_ETH_FOREACH_DEV(pi) {
1773                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1774                         continue;
1775
1776                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1777                         printf("Please remove port %d from forwarding configuration.\n", pi);
1778                         continue;
1779                 }
1780
1781                 if (port_is_bonding_slave(pi)) {
1782                         printf("Please remove port %d from bonded device.\n", pi);
1783                         continue;
1784                 }
1785
1786                 port = &ports[pi];
1787                 if (rte_atomic16_cmpset(&(port->port_status),
1788                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1789                         printf("Port %d is already closed\n", pi);
1790                         continue;
1791                 }
1792
1793                 if (rte_atomic16_cmpset(&(port->port_status),
1794                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1795                         printf("Port %d is not stopped\n", pi);
1796                         continue;
1797                 }
1798
1799                 if (port->flow_list)
1800                         port_flow_flush(pi);
1801                 rte_eth_dev_close(pi);
1802
1803                 if (rte_atomic16_cmpset(&(port->port_status),
1804                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1805                         printf("Port %d cannot be set to closed\n", pi);
1806         }
1807
1808         printf("Done\n");
1809 }
1810
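/*
 * Reset one port, or all ports when pid == RTE_PORT_ALL, using
 * rte_eth_dev_reset(). On success the port is flagged for full
 * reconfiguration (device and queues) at the next start_port() call.
 */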
1811 void
1812 reset_port(portid_t pid)
1813 {
1814         int diag;
1815         portid_t pi;
1816         struct rte_port *port;
1817
1818         if (port_id_is_invalid(pid, ENABLED_WARN))
1819                 return;
1820
1821         printf("Resetting ports...\n");
1822
1823         RTE_ETH_FOREACH_DEV(pi) {
1824                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1825                         continue;
1826
1827                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1828                         printf("Please remove port %d from forwarding "
1829                                "configuration.\n", pi);
1830                         continue;
1831                 }
1832
1833                 if (port_is_bonding_slave(pi)) {
1834                         printf("Please remove port %d from bonded device.\n",
1835                                pi);
1836                         continue;
1837                 }
1838
1839                 diag = rte_eth_dev_reset(pi);
1840                 if (diag == 0) {
1841                         port = &ports[pi];
1842                         port->need_reconfig = 1;
1843                         port->need_reconfig_queues = 1;
1844                 } else {
1845                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1846                 }
1847         }
1848
1849         printf("Done\n");
1850 }
1851
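/*
 * Hot-plug a new port described by a device identifier string
 * (e.g. a PCI address or a virtual device name). The new port is
 * reconfigured for its NUMA socket, put in promiscuous mode and left
 * in RTE_PORT_STOPPED state.
 */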
1852 void
1853 attach_port(char *identifier)
1854 {
1855         portid_t pi = 0;
1856         unsigned int socket_id;
1857
1858         printf("Attaching a new port...\n");
1859
1860         if (identifier == NULL) {
1861                 printf("Invalid parameter specified\n");
1862                 return;
1863         }
1864
1865         if (rte_eth_dev_attach(identifier, &pi))
1866                 return;
1867
1868         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1869         /* if socket_id is invalid, set to 0 */
1870         if (check_socket_id(socket_id) < 0)
1871                 socket_id = 0;
1872         reconfig(pi, socket_id);
1873         rte_eth_promiscuous_enable(pi);
1874
1875         nb_ports = rte_eth_dev_count();
1876
1877         ports[pi].port_status = RTE_PORT_STOPPED;
1878
1879         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1880         printf("Done\n");
1881 }
1882
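/*
 * Hot-unplug a port. The port must already be closed; its flow rules
 * are flushed and rte_eth_dev_detach() removes the underlying device.
 */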
1883 void
1884 detach_port(portid_t port_id)
1885 {
1886         char name[RTE_ETH_NAME_MAX_LEN];
1887
1888         printf("Detaching a port...\n");
1889
1890         if (!port_is_closed(port_id)) {
1891                 printf("Please close port first\n");
1892                 return;
1893         }
1894
1895         if (ports[port_id].flow_list)
1896                 port_flow_flush(port_id);
1897
1898         if (rte_eth_dev_detach(port_id, name)) {
1899                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1900                 return;
1901         }
1902
1903         nb_ports = rte_eth_dev_count();
1904
1905         printf("Port '%s' is detached. Total number of ports is now %d\n",
1906                         name, nb_ports);
1907         printf("Done\n");
1908         return;
1909 }
1910
1911 void
1912 pmd_test_exit(void)
1913 {
1914         portid_t pt_id;
1915
1916         if (test_done == 0)
1917                 stop_packet_forwarding();
1918
1919         if (ports != NULL) {
1920                 no_link_check = 1;
1921                 RTE_ETH_FOREACH_DEV(pt_id) {
1922                         printf("\nShutting down port %d...\n", pt_id);
1923                         fflush(stdout);
1924                         stop_port(pt_id);
1925                         close_port(pt_id);
1926                 }
1927         }
1928         printf("\nBye...\n");
1929 }
1930
1931 typedef void (*cmd_func_t)(void);
1932 struct pmd_test_command {
1933         const char *cmd_name;
1934         cmd_func_t cmd_func;
1935 };
1936
1937 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1938
1939 /* Check the link status of all ports for up to 9 seconds, then print the final status */
1940 static void
1941 check_all_ports_link_status(uint32_t port_mask)
1942 {
1943 #define CHECK_INTERVAL 100 /* 100ms */
1944 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1945         portid_t portid;
1946         uint8_t count, all_ports_up, print_flag = 0;
1947         struct rte_eth_link link;
1948
1949         printf("Checking link statuses...\n");
1950         fflush(stdout);
1951         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1952                 all_ports_up = 1;
1953                 RTE_ETH_FOREACH_DEV(portid) {
1954                         if ((port_mask & (1 << portid)) == 0)
1955                                 continue;
1956                         memset(&link, 0, sizeof(link));
1957                         rte_eth_link_get_nowait(portid, &link);
1958                         /* print link status if flag set */
1959                         if (print_flag == 1) {
1960                                 if (link.link_status)
1961                                         printf(
1962                                         "Port %d Link Up. speed %u Mbps - %s\n",
1963                                         portid, link.link_speed,
1964                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1965                                         ("full-duplex") : ("half-duplex"));
1966                                 else
1967                                         printf("Port %d Link Down\n", portid);
1968                                 continue;
1969                         }
1970                         /* clear all_ports_up flag if any link down */
1971                         if (link.link_status == ETH_LINK_DOWN) {
1972                                 all_ports_up = 0;
1973                                 break;
1974                         }
1975                 }
1976                 /* after finally printing all link status, get out */
1977                 if (print_flag == 1)
1978                         break;
1979
1980                 if (all_ports_up == 0) {
1981                         fflush(stdout);
1982                         rte_delay_ms(CHECK_INTERVAL);
1983                 }
1984
1985                 /* set the print_flag if all ports up or timeout */
1986                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1987                         print_flag = 1;
1988                 }
1989
1990                 if (lsc_interrupt)
1991                         break;
1992         }
1993 }
1994
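/*
 * Deferred handler for device-removal (RMV) interrupts: stop and close
 * the port, then detach the underlying device. It runs from an EAL alarm
 * scheduled by eth_event_callback() rather than directly in interrupt
 * context.
 */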
1995 static void
1996 rmv_event_callback(void *arg)
1997 {
1998         struct rte_eth_dev *dev;
1999         portid_t port_id = (intptr_t)arg;
2000
2001         RTE_ETH_VALID_PORTID_OR_RET(port_id);
2002         dev = &rte_eth_devices[port_id];
2003
2004         stop_port(port_id);
2005         close_port(port_id);
2006         printf("removing device %s\n", dev->device->name);
2007         if (rte_eal_dev_detach(dev->device))
2008                 RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
2009                         dev->device->name);
2010 }
2011
2012 /* This function is used by the interrupt thread */
2013 static int
2014 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2015                   void *ret_param)
2016 {
2017         static const char * const event_desc[] = {
2018                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2019                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2020                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2021                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2022                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2023                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2024                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2025                 [RTE_ETH_EVENT_MAX] = NULL,
2026         };
2027
2028         RTE_SET_USED(param);
2029         RTE_SET_USED(ret_param);
2030
2031         if (type >= RTE_ETH_EVENT_MAX) {
2032                 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2033                         port_id, __func__, type);
2034                 fflush(stderr);
2035         } else if (event_print_mask & (UINT32_C(1) << type)) {
2036                 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2037                         event_desc[type]);
2038                 fflush(stdout);
2039         }
2040
2041         switch (type) {
2042         case RTE_ETH_EVENT_INTR_RMV:
2043                 if (rte_eal_alarm_set(100000,
2044                                 rmv_event_callback, (void *)(intptr_t)port_id))
2045                         fprintf(stderr, "Could not set up deferred device removal\n");
2046                 break;
2047         default:
2048                 break;
2049         }
2050         return 0;
2051 }
2052
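/*
 * Apply the per-queue statistics counter mappings configured for this run
 * (tx_queue_stats_mappings[] / rx_queue_stats_mappings[]) to a port and
 * record whether any mapping was applied. Returns 0 on success or the
 * negative error code from the ethdev mapping call.
 */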
2053 static int
2054 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2055 {
2056         uint16_t i;
2057         int diag;
2058         uint8_t mapping_found = 0;
2059
2060         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2061                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2062                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2063                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2064                                         tx_queue_stats_mappings[i].queue_id,
2065                                         tx_queue_stats_mappings[i].stats_counter_id);
2066                         if (diag != 0)
2067                                 return diag;
2068                         mapping_found = 1;
2069                 }
2070         }
2071         if (mapping_found)
2072                 port->tx_queue_stats_mapping_enabled = 1;
2073         return 0;
2074 }
2075
2076 static int
2077 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2078 {
2079         uint16_t i;
2080         int diag;
2081         uint8_t mapping_found = 0;
2082
2083         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2084                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2085                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2086                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2087                                         rx_queue_stats_mappings[i].queue_id,
2088                                         rx_queue_stats_mappings[i].stats_counter_id);
2089                         if (diag != 0)
2090                                 return diag;
2091                         mapping_found = 1;
2092                 }
2093         }
2094         if (mapping_found)
2095                 port->rx_queue_stats_mapping_enabled = 1;
2096         return 0;
2097 }
2098
2099 static void
2100 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2101 {
2102         int diag = 0;
2103
2104         diag = set_tx_queue_stats_mapping_registers(pi, port);
2105         if (diag != 0) {
2106                 if (diag == -ENOTSUP) {
2107                         port->tx_queue_stats_mapping_enabled = 0;
2108                         printf("TX queue stats mapping not supported on port id=%d\n", pi);
2109                 }
2110                 else
2111                         rte_exit(EXIT_FAILURE,
2112                                         "set_tx_queue_stats_mapping_registers "
2113                                         "failed for port id=%d diag=%d\n",
2114                                         pi, diag);
2115         }
2116
2117         diag = set_rx_queue_stats_mapping_registers(pi, port);
2118         if (diag != 0) {
2119                 if (diag == -ENOTSUP) {
2120                         port->rx_queue_stats_mapping_enabled = 0;
2121                         printf("RX queue stats mapping not supported on port id=%d\n", pi);
2122                 }
2123                 else
2124                         rte_exit(EXIT_FAILURE,
2125                                         "set_rx_queue_stats_mapping_registers "
2126                                         "failed for port id=%d diag=%d\n",
2127                                         pi, diag);
2128         }
2129 }
2130
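/*
 * Initialize a port's RX/TX queue configuration from the PMD defaults
 * reported in dev_info, then override any threshold or flag for which a
 * value was supplied on the command line (RTE_PMD_PARAM_UNSET means
 * "keep the default").
 */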
2131 static void
2132 rxtx_port_config(struct rte_port *port)
2133 {
2134         port->rx_conf = port->dev_info.default_rxconf;
2135         port->tx_conf = port->dev_info.default_txconf;
2136
2137         /* Check if any RX/TX parameters have been passed */
2138         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2139                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2140
2141         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2142                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2143
2144         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2145                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2146
2147         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2148                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2149
2150         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2151                 port->rx_conf.rx_drop_en = rx_drop_en;
2152
2153         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2154                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2155
2156         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2157                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2158
2159         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2160                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2161
2162         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2163                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2164
2165         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2166                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2167
2168         if (txq_flags != RTE_PMD_PARAM_UNSET)
2169                 port->tx_conf.txq_flags = txq_flags;
2170 }
2171
2172 void
2173 init_port_config(void)
2174 {
2175         portid_t pid;
2176         struct rte_port *port;
2177
2178         RTE_ETH_FOREACH_DEV(pid) {
2179                 port = &ports[pid];
2180                 port->dev_conf.rxmode = rx_mode;
2181                 port->dev_conf.fdir_conf = fdir_conf;
2182                 if (nb_rxq > 1) {
2183                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2184                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
2185                 } else {
2186                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2187                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2188                 }
2189
2190                 if (port->dcb_flag == 0) {
2191                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2192                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2193                         else
2194                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2195                 }
2196
2197                 rxtx_port_config(port);
2198
2199                 rte_eth_macaddr_get(pid, &port->eth_addr);
2200
2201                 map_port_queue_stats_mapping_registers(pid, port);
2202 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
2203                 rte_pmd_ixgbe_bypass_init(pid);
2204 #endif
2205
2206                 if (lsc_interrupt &&
2207                     (rte_eth_devices[pid].data->dev_flags &
2208                      RTE_ETH_DEV_INTR_LSC))
2209                         port->dev_conf.intr_conf.lsc = 1;
2210                 if (rmv_interrupt &&
2211                     (rte_eth_devices[pid].data->dev_flags &
2212                      RTE_ETH_DEV_INTR_RMV))
2213                         port->dev_conf.intr_conf.rmv = 1;
2214
2215 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
2216                 /* Detect softnic port */
2217                 if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
2218                         port->softnic_enable = 1;
2219                         memset(&port->softport, 0, sizeof(struct softnic_port));
2220
2221                         if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
2222                                 port->softport.tm_flag = 1;
2223                 }
2224 #endif
2225         }
2226 }
2227
2228 void set_port_slave_flag(portid_t slave_pid)
2229 {
2230         struct rte_port *port;
2231
2232         port = &ports[slave_pid];
2233         port->slave_flag = 1;
2234 }
2235
2236 void clear_port_slave_flag(portid_t slave_pid)
2237 {
2238         struct rte_port *port;
2239
2240         port = &ports[slave_pid];
2241         port->slave_flag = 0;
2242 }
2243
2244 uint8_t port_is_bonding_slave(portid_t slave_pid)
2245 {
2246         struct rte_port *port;
2247
2248         port = &ports[slave_pid];
2249         return port->slave_flag;
2250 }
2251
2252 const uint16_t vlan_tags[] = {
2253                 0,  1,  2,  3,  4,  5,  6,  7,
2254                 8,  9, 10, 11,  12, 13, 14, 15,
2255                 16, 17, 18, 19, 20, 21, 22, 23,
2256                 24, 25, 26, 27, 28, 29, 30, 31
2257 };
2258
2259 static  int
2260 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2261                  enum dcb_mode_enable dcb_mode,
2262                  enum rte_eth_nb_tcs num_tcs,
2263                  uint8_t pfc_en)
2264 {
2265         uint8_t i;
2266
2267         /*
2268          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2269          * given above, and the number of traffic classes available for use.
2270          */
2271         if (dcb_mode == DCB_VT_ENABLED) {
2272                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2273                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2274                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2275                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2276
2277                 /* VMDQ+DCB RX and TX configurations */
2278                 vmdq_rx_conf->enable_default_pool = 0;
2279                 vmdq_rx_conf->default_pool = 0;
2280                 vmdq_rx_conf->nb_queue_pools =
2281                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2282                 vmdq_tx_conf->nb_queue_pools =
2283                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2284
2285                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2286                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2287                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2288                         vmdq_rx_conf->pool_map[i].pools =
2289                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2290                 }
2291                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2292                         vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
2293                         vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
2294                 }
2295
2296                 /* set DCB mode of RX and TX of multiple queues */
2297                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2298                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2299         } else {
2300                 struct rte_eth_dcb_rx_conf *rx_conf =
2301                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2302                 struct rte_eth_dcb_tx_conf *tx_conf =
2303                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2304
2305                 rx_conf->nb_tcs = num_tcs;
2306                 tx_conf->nb_tcs = num_tcs;
2307
2308                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2309                         rx_conf->dcb_tc[i] = i % num_tcs;
2310                         tx_conf->dcb_tc[i] = i % num_tcs;
2311                 }
2312                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2313                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2314                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2315         }
2316
2317         if (pfc_en)
2318                 eth_conf->dcb_capability_en =
2319                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2320         else
2321                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2322
2323         return 0;
2324 }
2325
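/*
 * Switch a port to DCB operation (optionally combined with VMDq): build
 * the DCB configuration with get_eth_dcb_conf(), reconfigure the device
 * with zero queues, derive the new nb_rxq/nb_txq values, enable VLAN
 * filtering for the test VLAN tags and flag the port as DCB-enabled.
 */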
2326 int
2327 init_port_dcb_config(portid_t pid,
2328                      enum dcb_mode_enable dcb_mode,
2329                      enum rte_eth_nb_tcs num_tcs,
2330                      uint8_t pfc_en)
2331 {
2332         struct rte_eth_conf port_conf;
2333         struct rte_port *rte_port;
2334         int retval;
2335         uint16_t i;
2336
2337         rte_port = &ports[pid];
2338
2339         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2340         /* Enter DCB configuration status */
2341         dcb_config = 1;
2342
2343         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2344         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2345         if (retval < 0)
2346                 return retval;
2347         port_conf.rxmode.hw_vlan_filter = 1;
2348
2349         /**
2350          * Write the configuration into the device.
2351          * Set the numbers of RX & TX queues to 0, so
2352          * the RX & TX queues will not be setup.
2353          */
2354         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2355
2356         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2357
2358         /* If dev_info.vmdq_pool_base is greater than 0,
2359          * the queue id of vmdq pools is started after pf queues.
2360          */
2361         if (dcb_mode == DCB_VT_ENABLED &&
2362             rte_port->dev_info.vmdq_pool_base > 0) {
2363                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2364                         " for port %d.\n", pid);
2365                 return -1;
2366         }
2367
2368         /* Assume all ports in testpmd have the same DCB capability
2369          * and the same number of rxq and txq in DCB mode
2370          */
2371         if (dcb_mode == DCB_VT_ENABLED) {
2372                 if (rte_port->dev_info.max_vfs > 0) {
2373                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2374                         nb_txq = rte_port->dev_info.nb_tx_queues;
2375                 } else {
2376                         nb_rxq = rte_port->dev_info.max_rx_queues;
2377                         nb_txq = rte_port->dev_info.max_tx_queues;
2378                 }
2379         } else {
2380                 /* if vt is disabled, use all pf queues */
2381                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2382                         nb_rxq = rte_port->dev_info.max_rx_queues;
2383                         nb_txq = rte_port->dev_info.max_tx_queues;
2384                 } else {
2385                         nb_rxq = (queueid_t)num_tcs;
2386                         nb_txq = (queueid_t)num_tcs;
2387
2388                 }
2389         }
2390         rx_free_thresh = 64;
2391
2392         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2393
2394         rxtx_port_config(rte_port);
2395         /* VLAN filter */
2396         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2397         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2398                 rx_vft_set(pid, vlan_tags[i], 1);
2399
2400         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2401         map_port_queue_stats_mapping_registers(pid, rte_port);
2402
2403         rte_port->dcb_flag = 1;
2404
2405         return 0;
2406 }
2407
2408 static void
2409 init_port(void)
2410 {
2411         /* Configuration of Ethernet ports. */
2412         ports = rte_zmalloc("testpmd: ports",
2413                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2414                             RTE_CACHE_LINE_SIZE);
2415         if (ports == NULL) {
2416                 rte_exit(EXIT_FAILURE,
2417                                 "rte_zmalloc(%d struct rte_port) failed\n",
2418                                 RTE_MAX_ETHPORTS);
2419         }
2420 }
2421
2422 static void
2423 force_quit(void)
2424 {
2425         pmd_test_exit();
2426         prompt_exit();
2427 }
2428
2429 static void
2430 print_stats(void)
2431 {
2432         uint8_t i;
2433         const char clr[] = { 27, '[', '2', 'J', '\0' };
2434         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2435
2436         /* Clear screen and move to top left */
2437         printf("%s%s", clr, top_left);
2438
2439         printf("\nPort statistics ====================================");
2440         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2441                 nic_stats_display(fwd_ports_ids[i]);
2442 }
2443
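/*
 * SIGINT/SIGTERM handler: tear down the packet-capture and latency-stats
 * frameworks when they are compiled in, stop forwarding and close all
 * ports via force_quit(), then re-raise the signal with the default
 * handler so the process exits with the expected status.
 */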
2444 static void
2445 signal_handler(int signum)
2446 {
2447         if (signum == SIGINT || signum == SIGTERM) {
2448                 printf("\nSignal %d received, preparing to exit...\n",
2449                                 signum);
2450 #ifdef RTE_LIBRTE_PDUMP
2451                 /* uninitialize packet capture framework */
2452                 rte_pdump_uninit();
2453 #endif
2454 #ifdef RTE_LIBRTE_LATENCY_STATS
2455                 rte_latencystats_uninit();
2456 #endif
2457                 force_quit();
2458                 /* Set flag to indicate the force termination. */
2459                 f_quit = 1;
2460                 /* exit with the expected status */
2461                 signal(signum, SIG_DFL);
2462                 kill(getpid(), signum);
2463         }
2464 }
2465
2466 int
2467 main(int argc, char** argv)
2468 {
2469         int  diag;
2470         portid_t port_id;
2471
2472         signal(SIGINT, signal_handler);
2473         signal(SIGTERM, signal_handler);
2474
2475         diag = rte_eal_init(argc, argv);
2476         if (diag < 0)
2477                 rte_panic("Cannot init EAL\n");
2478
2479         if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
2480                 RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
2481                         strerror(errno));
2482         }
2483
2484 #ifdef RTE_LIBRTE_PDUMP
2485         /* initialize packet capture framework */
2486         rte_pdump_init(NULL);
2487 #endif
2488
2489         nb_ports = (portid_t) rte_eth_dev_count();
2490         if (nb_ports == 0)
2491                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2492
2493         /* allocate port structures, and init them */
2494         init_port();
2495
2496         set_def_fwd_config();
2497         if (nb_lcores == 0)
2498                 rte_panic("Empty set of forwarding logical cores - check the "
2499                           "core mask supplied in the command parameters\n");
2500
2501         /* Bitrate/latency stats disabled by default */
2502 #ifdef RTE_LIBRTE_BITRATE
2503         bitrate_enabled = 0;
2504 #endif
2505 #ifdef RTE_LIBRTE_LATENCY_STATS
2506         latencystats_enabled = 0;
2507 #endif
2508
2509         argc -= diag;
2510         argv += diag;
2511         if (argc > 1)
2512                 launch_args_parse(argc, argv);
2513
2514         if (tx_first && interactive)
2515                 rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
2516                                 "interactive mode.\n");
2517
2518         if (tx_first && lsc_interrupt) {
2519                 printf("Warning: lsc_interrupt needs to be off when "
2520                                 "using tx_first. Disabling.\n");
2521                 lsc_interrupt = 0;
2522         }
2523
2524         if (!nb_rxq && !nb_txq)
2525                 printf("Warning: Either rx or tx queues should be non-zero\n");
2526
2527         if (nb_rxq > 1 && nb_rxq > nb_txq)
2528                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2529                        "but nb_txq=%d will prevent it from being fully tested.\n",
2530                        nb_rxq, nb_txq);
2531
2532         init_config();
2533         if (start_port(RTE_PORT_ALL) != 0)
2534                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2535
2536         /* set all ports to promiscuous mode by default */
2537         RTE_ETH_FOREACH_DEV(port_id)
2538                 rte_eth_promiscuous_enable(port_id);
2539
2540         /* Init metrics library */
2541         rte_metrics_init(rte_socket_id());
2542
2543 #ifdef RTE_LIBRTE_LATENCY_STATS
2544         if (latencystats_enabled != 0) {
2545                 int ret = rte_latencystats_init(1, NULL);
2546                 if (ret)
2547                         printf("Warning: latencystats init()"
2548                                 " returned error %d\n", ret);
2549                 printf("Latencystats running on lcore %d\n",
2550                         latencystats_lcore_id);
2551         }
2552 #endif
2553
2554         /* Setup bitrate stats */
2555 #ifdef RTE_LIBRTE_BITRATE
2556         if (bitrate_enabled != 0) {
2557                 bitrate_data = rte_stats_bitrate_create();
2558                 if (bitrate_data == NULL)
2559                         rte_exit(EXIT_FAILURE,
2560                                 "Could not allocate bitrate data.\n");
2561                 rte_stats_bitrate_reg(bitrate_data);
2562         }
2563 #endif
2564
2565 #ifdef RTE_LIBRTE_CMDLINE
2566         if (strlen(cmdline_filename) != 0)
2567                 cmdline_read_from_file(cmdline_filename);
2568
2569         if (interactive == 1) {
2570                 if (auto_start) {
2571                         printf("Start automatic packet forwarding\n");
2572                         start_packet_forwarding(0);
2573                 }
2574                 prompt();
2575                 pmd_test_exit();
2576         } else
2577 #endif
2578         {
2579                 char c;
2580                 int rc;
2581
2582                 f_quit = 0;
2583
2584                 printf("No interactive command line requested, starting packet forwarding\n");
2585                 start_packet_forwarding(tx_first);
2586                 if (stats_period != 0) {
2587                         uint64_t prev_time = 0, cur_time, diff_time = 0;
2588                         uint64_t timer_period;
2589
2590                         /* Convert to number of cycles */
2591                         timer_period = stats_period * rte_get_timer_hz();
2592
2593                         while (f_quit == 0) {
2594                                 cur_time = rte_get_timer_cycles();
2595                                 diff_time += cur_time - prev_time;
2596
2597                                 if (diff_time >= timer_period) {
2598                                         print_stats();
2599                                         /* Reset the timer */
2600                                         diff_time = 0;
2601                                 }
2602                                 /* Sleep to avoid unnecessary checks */
2603                                 prev_time = cur_time;
2604                                 sleep(1);
2605                         }
2606                 }
2607
2608                 printf("Press enter to exit\n");
2609                 rc = read(0, &c, 1);
2610                 pmd_test_exit();
2611                 if (rc < 0)
2612                         return 1;
2613         }
2614
2615         return 0;
2616 }