New upstream version 17.11.4
[deb_dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 uint8_t tx_first;
99 char cmdline_filename[PATH_MAX] = {0};
100
101 /*
102  * NUMA support configuration.
103  * When set, the NUMA support attempts to dispatch the allocation of the
104  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
105  * probed ports among the CPU sockets 0 and 1.
106  * Otherwise, all memory is allocated from CPU socket 0.
107  */
108 uint8_t numa_support = 1; /**< numa enabled by default */
109
110 /*
111  * In UMA mode,all memory is allocated from socket 0 if --socket-num is
112  * not configured.
113  */
114 uint8_t socket_num = UMA_NO_CONFIG;
115
116 /*
117  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
118  */
119 uint8_t mp_anon = 0;
120
121 /*
122  * Record the Ethernet address of peer target ports to which packets are
123  * forwarded.
124  * Must be instantiated with the ethernet addresses of peer traffic generator
125  * ports.
126  */
127 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
128 portid_t nb_peer_eth_addrs = 0;
129
130 /*
131  * Probed Target Environment.
132  */
133 struct rte_port *ports;        /**< For all probed ethernet ports. */
134 portid_t nb_ports;             /**< Number of probed ethernet ports. */
135 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
136 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
137
138 /*
139  * Test Forwarding Configuration.
140  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
141  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
142  */
143 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
144 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
145 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
146 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
147
148 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
149 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
150
151 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
152 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
153
154 /*
155  * Forwarding engines.
156  */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,        /* forward packets unchanged (default) */
	&mac_fwd_engine,       /* rewrite src/dst MAC addresses */
	&mac_swap_engine,      /* swap src and dst MAC addresses */
	&flow_gen_engine,      /* generate multi-flow traffic */
	&rx_only_engine,       /* receive and drop */
	&tx_only_engine,       /* transmit generated packets only */
	&csum_fwd_engine,      /* checksum offload validation */
	&icmp_echo_engine,     /* reply to ICMP echo requests */
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,        /* softnic traffic management */
	&softnic_tm_bypass_engine, /* softnic TM bypass path */
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,  /* PTP timestamping test */
#endif
	NULL,                  /* sentinel: keep last */
};
175
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
181
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
184                                       * specified on command-line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
186
187 /*
 * In a container it is not possible to terminate a process that is
 * running with the 'stats-period' option. Set this flag to exit the
 * stats-period loop after SIGINT/SIGTERM is received.
190  */
191 uint8_t f_quit;
192
193 /*
194  * Configuration of packet segments used by the "txonly" processing engine.
195  */
196 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
197 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
198         TXONLY_DEF_PACKET_LEN,
199 };
200 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
201
202 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
203 /**< Split policy for packets to TX. */
204
205 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
206 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
207
208 /* current configuration is in DCB or not,0 means it is not in DCB mode */
209 uint8_t dcb_config = 0;
210
211 /* Whether the dcb is in testing status */
212 uint8_t dcb_test = 0;
213
214 /*
215  * Configurable number of RX/TX queues.
216  */
217 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
218 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
219
220 /*
221  * Configurable number of RX/TX ring descriptors.
222  */
223 #define RTE_TEST_RX_DESC_DEFAULT 128
224 #define RTE_TEST_TX_DESC_DEFAULT 512
225 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
226 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
227
228 #define RTE_PMD_PARAM_UNSET -1
229 /*
230  * Configurable values of RX and TX ring threshold registers.
231  */
232
233 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
234 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
236
237 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
238 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
240
241 /*
242  * Configurable value of RX free threshold.
243  */
244 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
245
246 /*
247  * Configurable value of RX drop enable.
248  */
249 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
250
251 /*
252  * Configurable value of TX free threshold.
253  */
254 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
255
256 /*
257  * Configurable value of TX RS bit threshold.
258  */
259 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
260
261 /*
262  * Configurable value of TX queue flags.
263  */
264 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
265
266 /*
267  * Receive Side Scaling (RSS) configuration.
268  */
269 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
270
271 /*
272  * Port topology configuration
273  */
274 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
275
276 /*
277  * Avoids to flush all the RX streams before starts forwarding.
278  */
279 uint8_t no_flush_rx = 0; /* flush by default */
280
281 /*
282  * Flow API isolated mode.
283  */
284 uint8_t flow_isolate_all;
285
286 /*
287  * Avoids to check link status when starting/stopping a port.
288  */
289 uint8_t no_link_check = 0; /* check by default */
290
291 /*
292  * Enable link status change notification
293  */
294 uint8_t lsc_interrupt = 1; /* enabled by default */
295
296 /*
297  * Enable device removal notification.
298  */
299 uint8_t rmv_interrupt = 1; /* enabled by default */
300
301 /*
302  * Display or mask ether events
303  * Default to all events except VF_MBOX
304  */
305 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
306                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
307                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
308                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
309                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
310                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
311
312 /*
313  * NIC bypass mode configuration options.
314  */
315
316 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
317 /* The NIC bypass watchdog timeout. */
318 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
319 #endif
320
321
322 #ifdef RTE_LIBRTE_LATENCY_STATS
323
324 /*
325  * Set when latency stats is enabled in the commandline
326  */
327 uint8_t latencystats_enabled;
328
329 /*
 * Lcore ID to service latency statistics.
331  */
332 lcoreid_t latencystats_lcore_id = -1;
333
334 #endif
335
336 /*
337  * Ethernet device configuration.
338  */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
	.hw_timestamp   = 0, /**< HW timestamp disabled. */
};
351
/* Flow director default configuration: disabled, full-match masks. */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE, /* flow director off by default */
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		/*
		 * NOTE(review): 0xFFEF clears bit 4 of the VLAN ID field.
		 * If the intent was to ignore the CFI/DEI bit of the TCI,
		 * the mask would be 0xEFFF — confirm against the PMDs.
		 */
		.vlan_tci_mask = 0xFFEF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127, /* queue used by flow-director drop mode */
};
374
375 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
376
377 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
378 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
379
380 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
381 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
382
383 uint16_t nb_tx_queue_stats_mappings = 0;
384 uint16_t nb_rx_queue_stats_mappings = 0;
385
386 /*
387  * Display zero values by default for xstats
388  */
389 uint8_t xstats_hide_zero;
390
391 unsigned int num_sockets = 0;
392 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
393
394 #ifdef RTE_LIBRTE_BITRATE
395 /* Bitrate statistics */
396 struct rte_stats_bitrates *bitrate_data;
397 lcoreid_t bitrate_lcore_id;
398 uint8_t bitrate_enabled;
399 #endif
400
401 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
402 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
403
404 /* Forward function declarations */
405 static void map_port_queue_stats_mapping_registers(portid_t pi,
406                                                    struct rte_port *port);
407 static void check_all_ports_link_status(uint32_t port_mask);
408 static int eth_event_callback(portid_t port_id,
409                               enum rte_eth_event_type type,
410                               void *param, void *ret_param);
411
412 /*
413  * Check if all the ports are started.
414  * If yes, return positive value. If not, return zero.
415  */
416 static int all_ports_started(void);
417
418 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
419 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
420
421 /*
422  * Helper function to check if socket is already discovered.
423  * If yes, return positive value. If not, return zero.
424  */
425 int
426 new_socket_id(unsigned int socket_id)
427 {
428         unsigned int i;
429
430         for (i = 0; i < num_sockets; i++) {
431                 if (socket_ids[i] == socket_id)
432                         return 0;
433         }
434         return 1;
435 }
436
437 /*
438  * Setup default configuration.
439  */
440 static void
441 set_default_fwd_lcores_config(void)
442 {
443         unsigned int i;
444         unsigned int nb_lc;
445         unsigned int sock_num;
446
447         nb_lc = 0;
448         for (i = 0; i < RTE_MAX_LCORE; i++) {
449                 sock_num = rte_lcore_to_socket_id(i);
450                 if (new_socket_id(sock_num)) {
451                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
452                                 rte_exit(EXIT_FAILURE,
453                                          "Total sockets greater than %u\n",
454                                          RTE_MAX_NUMA_NODES);
455                         }
456                         socket_ids[num_sockets++] = sock_num;
457                 }
458                 if (!rte_lcore_is_enabled(i))
459                         continue;
460                 if (i == rte_get_master_lcore())
461                         continue;
462                 fwd_lcores_cpuids[nb_lc++] = i;
463         }
464         nb_lcores = (lcoreid_t) nb_lc;
465         nb_cfg_lcores = nb_lcores;
466         nb_fwd_lcores = 1;
467 }
468
469 static void
470 set_def_peer_eth_addrs(void)
471 {
472         portid_t i;
473
474         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
475                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
476                 peer_eth_addrs[i].addr_bytes[5] = i;
477         }
478 }
479
/* Default port configuration: forward on every probed port, in probe order. */
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	/* Record the id of every attached ethdev as a forwarding port. */
	RTE_ETH_FOREACH_DEV(pt_id)
		fwd_ports_ids[i++] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
492
/*
 * Install the default forwarding configuration:
 * lcore list, peer MAC addresses and port list.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
500
501 /*
502  * Configuration initialisation done once at init time.
503  */
/*
 * Create the mbuf pool for @socket_id holding @nb_mbuf mbufs with
 * @mbuf_seg_size data bytes each. Exits the application on failure.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		/*
		 * Anonymous-memory mode: build the pool manually so it can
		 * be populated from anonymous mappings instead of hugepages.
		 */
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		/* populate_anon() returns the number of objects added;
		 * 0 means nothing could be mapped. */
		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	/* Success paths fall through here too; only NULL means failure. */
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
549
550 /*
551  * Check given socket id is valid or not with NUMA mode,
552  * if valid, return 0, else return -1
553  */
554 static int
555 check_socket_id(const unsigned int socket_id)
556 {
557         static int warning_once = 0;
558
559         if (new_socket_id(socket_id)) {
560                 if (!warning_once && numa_support)
561                         printf("Warning: NUMA should be configured manually by"
562                                " using --port-numa-config and"
563                                " --ring-numa-config parameters along with"
564                                " --numa.\n");
565                 warning_once = 1;
566                 return -1;
567         }
568         return 0;
569 }
570
571 /*
572  * Get the allowed maximum number of RX queues.
573  * *pid return the port id which has minimal value of
574  * max_rx_queues in all ports.
575  */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	/* Start from the theoretical maximum and shrink per port. */
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	/* Take the minimum of max_rx_queues over all attached ports;
	 * remember which port imposed the limit in *pid. */
	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}
592
593 /*
594  * Check input rxq is valid or not.
595  * If input rxq is not greater than any of maximum number
596  * of RX queues of all ports, it is valid.
597  * if valid, return 0, else return -1
598  */
599 int
600 check_nb_rxq(queueid_t rxq)
601 {
602         queueid_t allowed_max_rxq;
603         portid_t pid = 0;
604
605         allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
606         if (rxq > allowed_max_rxq) {
607                 printf("Fail: input rxq (%u) can't be greater "
608                        "than max_rx_queues (%u) of port %u\n",
609                        rxq,
610                        allowed_max_rxq,
611                        pid);
612                 return -1;
613         }
614         return 0;
615 }
616
617 /*
618  * Get the allowed maximum number of TX queues.
619  * *pid return the port id which has minimal value of
620  * max_tx_queues in all ports.
621  */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	/* Start from the theoretical maximum and shrink per port. */
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	/* Take the minimum of max_tx_queues over all attached ports;
	 * remember which port imposed the limit in *pid. */
	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}
638
639 /*
640  * Check input txq is valid or not.
641  * If input txq is not greater than any of maximum number
642  * of TX queues of all ports, it is valid.
643  * if valid, return 0, else return -1
644  */
645 int
646 check_nb_txq(queueid_t txq)
647 {
648         queueid_t allowed_max_txq;
649         portid_t pid = 0;
650
651         allowed_max_txq = get_allowed_max_nb_txq(&pid);
652         if (txq > allowed_max_txq) {
653                 printf("Fail: input txq (%u) can't be greater "
654                        "than max_tx_queues (%u) of port %u\n",
655                        txq,
656                        allowed_max_txq,
657                        pid);
658                 return -1;
659         }
660         return 0;
661 }
662
/*
 * One-time configuration at startup: allocate per-lcore state, query
 * port capabilities, create mbuf pools, set up forwarding streams and
 * per-lcore GSO/GRO contexts. Exits the application on any failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Reset per-port/per-ring NUMA tables to "unset" so the
	 * socket id is derived from the device below. */
	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/* Query each attached port and count ports per NUMA socket. */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case per-port demand: RX ring + TX ring + one
		 * burst in flight + per-lcore mempool caches. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One pool per discovered NUMA socket. */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		/* Prefer the pool on the lcore's own socket, fall back
		 * to socket 0 otherwise. */
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	/* NOTE(review): max_flow_num is set from GRO_MAX_FLUSH_CYCLES,
	 * which reads like a flush-cycle constant — confirm this reuse
	 * of the constant is intentional. */
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
799
800
/*
 * Re-query a (hot-plugged) port's capabilities, bind it to @socket_id
 * and mark it so the port and its queues get reconfigured on next start.
 */
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
817
818
/*
 * Validate nb_rxq/nb_txq against every port's limits, assign each port
 * a NUMA socket, then (re)allocate the fwd_streams[] array sized
 * nb_ports * max(nb_rxq, nb_txq). Returns 0 on success, -1 on error
 * (exits the application if allocation fails).
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the
			 * socket reported by the device. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			/* UMA mode: everything on socket 0 unless
			 * --socket-num was given. */
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per (port, queue) pair; need at least one queue. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0; /* already sized correctly: keep existing streams */
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
904
905 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Display a summary of the recorded burst-size distribution for one
 * direction ("RX" or "TX"): the total number of bursts and the share
 * taken by the two most frequent burst sizes.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;	/* sum of all recorded bursts */
	unsigned int nb_burst;		/* bursts seen for one packet count */
	unsigned int burst_stats[3];	/* [0]=highest count, [1]=2nd highest */
	uint16_t pktnb_stats[3];	/* packet counts matching burst_stats[] */
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			/* New maximum: demote previous max to second place. */
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	/* Nothing recorded: print nothing. */
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	/* All bursts had the same size: the top entry covers 100%. */
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	/* Top two entries cover everything: show them as complements. */
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	/* Percentages rounded down to 0: lump the remainder as "others". */
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
961 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
962
/*
 * Print the forwarding statistics of one port. Two layouts are used:
 * a compact one when no RX/TX queue-stats mapping is enabled, and a
 * wide, column-aligned one when a mapping is active (per-queue stats
 * registers are then dumped as well).
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout: queue-stats mapping disabled for both directions. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum engine keeps extra per-port error counters. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		/* Only show error lines when there is something to report. */
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout: columns aligned with the per-queue dump below. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* Burst spread of the streams attached to this port, if any. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue stats registers (only meaningful when mapping is on). */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
1045
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue). Streams that handled no traffic are skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing received, sent or dropped: stay silent. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode: extra per-stream checksum error counters */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
1074
/*
 * Drain any stale packets from every RX queue of every forwarding port
 * before a run starts, so old traffic does not pollute the statistics.
 * Each queue is drained until empty or until a 1-second timeout; the
 * whole sweep is done twice with a 10 ms pause in between.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				* testpmd can stuck in the below do while loop
				* if rte_eth_rx_burst() always returns nonzero
				* packets. So timer is added to exit this loop
				* after 1sec timer expiry.
				*/
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					/* Drop everything we drained. */
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				/* Reset the accumulated time for the next queue. */
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
1119
/*
 * Main forwarding loop of one logical core: repeatedly run the engine's
 * packet_fwd callback over every stream assigned to this lcore until
 * fc->stopped is set. Optional bitrate (once per second) and latency
 * statistics are updated from the designated lcores only.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* Streams handled by this lcore: a contiguous slice of fwd_streams. */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Only the bitrate lcore does the periodic computation. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		/* Only the latency-stats lcore refreshes those metrics. */
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1164
1165 static int
1166 start_pkt_forward_on_core(void *fwd_arg)
1167 {
1168         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1169                              cur_fwd_config.fwd_eng->packet_fwd);
1170         return 0;
1171 }
1172
1173 /*
1174  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1175  * Used to start communication flows in network loopback test configurations.
1176  */
1177 static int
1178 run_one_txonly_burst_on_core(void *fwd_arg)
1179 {
1180         struct fwd_lcore *fwd_lc;
1181         struct fwd_lcore tmp_lcore;
1182
1183         fwd_lc = (struct fwd_lcore *) fwd_arg;
1184         tmp_lcore = *fwd_lc;
1185         tmp_lcore.stopped = 1;
1186         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1187         return 0;
1188 }
1189
1190 /*
1191  * Launch packet forwarding:
1192  *     - Setup per-port forwarding context.
1193  *     - launch logical cores with their forwarding configuration.
1194  */
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	/* Give the engine a chance to prepare each port before launch. */
	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		/* In interactive mode the current lcore stays on the CLI. */
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
1220
1221 /*
1222  * Update the forward ports list.
1223  */
1224 void
1225 update_fwd_ports(portid_t new_pid)
1226 {
1227         unsigned int i;
1228         unsigned int new_nb_fwd_ports = 0;
1229         int move = 0;
1230
1231         for (i = 0; i < nb_fwd_ports; ++i) {
1232                 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1233                         move = 1;
1234                 else if (move)
1235                         fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1236                 else
1237                         new_nb_fwd_ports++;
1238         }
1239         if (new_pid < RTE_MAX_ETHPORTS)
1240                 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1241
1242         nb_fwd_ports = new_nb_fwd_ports;
1243         nb_cfg_ports = new_nb_fwd_ports;
1244 }
1245
1246 /*
1247  * Launch packet forwarding configuration.
1248  */
/*
 * Launch packet forwarding configuration: validate the queue/engine
 * combination and port state, set up the forwarding config, reset all
 * per-port and per-stream counters, optionally run one (or more)
 * TX-only warm-up bursts, then launch the forwarding lcores.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* Engine/queue sanity: each engine needs its direction's queues. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* DCB mode requires all ports in DCB and more than one lcore. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets unless flushing was disabled on the CLI. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot current HW stats so the run's delta can be computed. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Zero every per-stream software counter. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally emit with_tx_first TX-only bursts before forwarding. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1352
/*
 * Stop packet forwarding: signal every forwarding lcore to stop, wait
 * for them, run the engine's per-port end callback, fold per-stream
 * counters back into their ports, then display per-port and accumulated
 * statistics for the finished run.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* Flag every forwarding lcore, then block until all have exited. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/*
		 * With multiple streams per port, show per-stream stats and
		 * detach the streams; otherwise keep the 1:1 stream<->port
		 * link so per-port display can include burst stats.
		 */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		/* Accumulate stream counters into the owning ports. */
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/*
		 * Subtract the snapshot taken at start so only this run's
		 * traffic is reported; clear the snapshot afterwards.
		 */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1490
1491 void
1492 dev_set_link_up(portid_t pid)
1493 {
1494         if (rte_eth_dev_set_link_up(pid) < 0)
1495                 printf("\nSet link up fail.\n");
1496 }
1497
1498 void
1499 dev_set_link_down(portid_t pid)
1500 {
1501         if (rte_eth_dev_set_link_down(pid) < 0)
1502                 printf("\nSet link down fail.\n");
1503 }
1504
1505 static int
1506 all_ports_started(void)
1507 {
1508         portid_t pi;
1509         struct rte_port *port;
1510
1511         RTE_ETH_FOREACH_DEV(pi) {
1512                 port = &ports[pi];
1513                 /* Check if there is a port which is not started */
1514                 if ((port->port_status != RTE_PORT_STARTED) &&
1515                         (port->slave_flag == 0))
1516                         return 0;
1517         }
1518
1519         /* No port is not started */
1520         return 1;
1521 }
1522
1523 int
1524 all_ports_stopped(void)
1525 {
1526         portid_t pi;
1527         struct rte_port *port;
1528
1529         RTE_ETH_FOREACH_DEV(pi) {
1530                 port = &ports[pi];
1531                 if ((port->port_status != RTE_PORT_STOPPED) &&
1532                         (port->slave_flag == 0))
1533                         return 0;
1534         }
1535
1536         return 1;
1537 }
1538
1539 int
1540 port_is_started(portid_t port_id)
1541 {
1542         if (port_id_is_invalid(port_id, ENABLED_WARN))
1543                 return 0;
1544
1545         if (ports[port_id].port_status != RTE_PORT_STARTED)
1546                 return 0;
1547
1548         return 1;
1549 }
1550
1551 static int
1552 port_is_closed(portid_t port_id)
1553 {
1554         if (port_id_is_invalid(port_id, ENABLED_WARN))
1555                 return 0;
1556
1557         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1558                 return 0;
1559
1560         return 1;
1561 }
1562
1563 int
1564 start_port(portid_t pid)
1565 {
1566         int diag, need_check_link_status = -1;
1567         portid_t pi;
1568         queueid_t qi;
1569         struct rte_port *port;
1570         struct ether_addr mac_addr;
1571         enum rte_eth_event_type event_type;
1572
1573         if (port_id_is_invalid(pid, ENABLED_WARN))
1574                 return 0;
1575
1576         if(dcb_config)
1577                 dcb_test = 1;
1578         RTE_ETH_FOREACH_DEV(pi) {
1579                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1580                         continue;
1581
1582                 need_check_link_status = 0;
1583                 port = &ports[pi];
1584                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1585                                                  RTE_PORT_HANDLING) == 0) {
1586                         printf("Port %d is now not stopped\n", pi);
1587                         continue;
1588                 }
1589
1590                 if (port->need_reconfig > 0) {
1591                         port->need_reconfig = 0;
1592
1593                         if (flow_isolate_all) {
1594                                 int ret = port_flow_isolate(pi, 1);
1595                                 if (ret) {
1596                                         printf("Failed to apply isolated"
1597                                                " mode on port %d\n", pi);
1598                                         return -1;
1599                                 }
1600                         }
1601
1602                         printf("Configuring Port %d (socket %u)\n", pi,
1603                                         port->socket_id);
1604                         /* configure port */
1605                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1606                                                 &(port->dev_conf));
1607                         if (diag != 0) {
1608                                 if (rte_atomic16_cmpset(&(port->port_status),
1609                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1610                                         printf("Port %d can not be set back "
1611                                                         "to stopped\n", pi);
1612                                 printf("Fail to configure port %d\n", pi);
1613                                 /* try to reconfigure port next time */
1614                                 port->need_reconfig = 1;
1615                                 return -1;
1616                         }
1617                 }
1618                 if (port->need_reconfig_queues > 0) {
1619                         port->need_reconfig_queues = 0;
1620                         /* setup tx queues */
1621                         for (qi = 0; qi < nb_txq; qi++) {
1622                                 if ((numa_support) &&
1623                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1624                                         diag = rte_eth_tx_queue_setup(pi, qi,
1625                                                 nb_txd,txring_numa[pi],
1626                                                 &(port->tx_conf));
1627                                 else
1628                                         diag = rte_eth_tx_queue_setup(pi, qi,
1629                                                 nb_txd,port->socket_id,
1630                                                 &(port->tx_conf));
1631
1632                                 if (diag == 0)
1633                                         continue;
1634
1635                                 /* Fail to setup tx queue, return */
1636                                 if (rte_atomic16_cmpset(&(port->port_status),
1637                                                         RTE_PORT_HANDLING,
1638                                                         RTE_PORT_STOPPED) == 0)
1639                                         printf("Port %d can not be set back "
1640                                                         "to stopped\n", pi);
1641                                 printf("Fail to configure port %d tx queues\n", pi);
1642                                 /* try to reconfigure queues next time */
1643                                 port->need_reconfig_queues = 1;
1644                                 return -1;
1645                         }
1646                         /* setup rx queues */
1647                         for (qi = 0; qi < nb_rxq; qi++) {
1648                                 if ((numa_support) &&
1649                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1650                                         struct rte_mempool * mp =
1651                                                 mbuf_pool_find(rxring_numa[pi]);
1652                                         if (mp == NULL) {
1653                                                 printf("Failed to setup RX queue:"
1654                                                         "No mempool allocation"
1655                                                         " on the socket %d\n",
1656                                                         rxring_numa[pi]);
1657                                                 return -1;
1658                                         }
1659
1660                                         diag = rte_eth_rx_queue_setup(pi, qi,
1661                                              nb_rxd,rxring_numa[pi],
1662                                              &(port->rx_conf),mp);
1663                                 } else {
1664                                         struct rte_mempool *mp =
1665                                                 mbuf_pool_find(port->socket_id);
1666                                         if (mp == NULL) {
1667                                                 printf("Failed to setup RX queue:"
1668                                                         "No mempool allocation"
1669                                                         " on the socket %d\n",
1670                                                         port->socket_id);
1671                                                 return -1;
1672                                         }
1673                                         diag = rte_eth_rx_queue_setup(pi, qi,
1674                                              nb_rxd,port->socket_id,
1675                                              &(port->rx_conf), mp);
1676                                 }
1677                                 if (diag == 0)
1678                                         continue;
1679
1680                                 /* Fail to setup rx queue, return */
1681                                 if (rte_atomic16_cmpset(&(port->port_status),
1682                                                         RTE_PORT_HANDLING,
1683                                                         RTE_PORT_STOPPED) == 0)
1684                                         printf("Port %d can not be set back "
1685                                                         "to stopped\n", pi);
1686                                 printf("Fail to configure port %d rx queues\n", pi);
1687                                 /* try to reconfigure queues next time */
1688                                 port->need_reconfig_queues = 1;
1689                                 return -1;
1690                         }
1691                 }
1692
1693                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1694                      event_type < RTE_ETH_EVENT_MAX;
1695                      event_type++) {
1696                         diag = rte_eth_dev_callback_register(pi,
1697                                                         event_type,
1698                                                         eth_event_callback,
1699                                                         NULL);
1700                         if (diag) {
1701                                 printf("Failed to setup even callback for event %d\n",
1702                                         event_type);
1703                                 return -1;
1704                         }
1705                 }
1706
1707                 /* start port */
1708                 if (rte_eth_dev_start(pi) < 0) {
1709                         printf("Fail to start port %d\n", pi);
1710
1711                         /* Fail to setup rx queue, return */
1712                         if (rte_atomic16_cmpset(&(port->port_status),
1713                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1714                                 printf("Port %d can not be set back to "
1715                                                         "stopped\n", pi);
1716                         continue;
1717                 }
1718
1719                 if (rte_atomic16_cmpset(&(port->port_status),
1720                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1721                         printf("Port %d can not be set into started\n", pi);
1722
1723                 rte_eth_macaddr_get(pi, &mac_addr);
1724                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1725                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1726                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1727                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1728
1729                 /* at least one port started, need checking link status */
1730                 need_check_link_status = 1;
1731         }
1732
1733         if (need_check_link_status == 1 && !no_link_check)
1734                 check_all_ports_link_status(RTE_PORT_ALL);
1735         else if (need_check_link_status == 0)
1736                 printf("Please stop the ports first\n");
1737
1738         printf("Done\n");
1739         return 0;
1740 }
1741
/*
 * Stop the given port, or all ports when pid == RTE_PORT_ALL.
 * Ports that are still part of the forwarding configuration or that act
 * as bonding slaves are skipped with a warning.  The per-port status is
 * driven through the STARTED -> HANDLING -> STOPPED state machine using
 * atomic compare-and-set so concurrent state changes are detected.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Leaving DCB test mode: clear both DCB flags. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		/* Only act on the requested port (or on every port). */
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		/* Refuse to stop a port that is actively forwarding. */
		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Claim the port: only a STARTED port can be stopped. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		/* Release the port into the STOPPED state. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1790
/*
 * Close (release) the given port, or all ports when pid == RTE_PORT_ALL.
 * A port must already be STOPPED; ports still forwarding or acting as
 * bonding slaves are skipped with a warning.  Any rte_flow rules held by
 * the port are flushed before the device is closed.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		/* Only act on the requested port (or on every port). */
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset(CLOSED, CLOSED) acts as an atomic read: detect already-closed. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		/* Claim the port: only a STOPPED port may be closed. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Drop any rte_flow rules still attached to this port. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
1840
1841 void
1842 reset_port(portid_t pid)
1843 {
1844         int diag;
1845         portid_t pi;
1846         struct rte_port *port;
1847
1848         if (port_id_is_invalid(pid, ENABLED_WARN))
1849                 return;
1850
1851         printf("Resetting ports...\n");
1852
1853         RTE_ETH_FOREACH_DEV(pi) {
1854                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1855                         continue;
1856
1857                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1858                         printf("Please remove port %d from forwarding "
1859                                "configuration.\n", pi);
1860                         continue;
1861                 }
1862
1863                 if (port_is_bonding_slave(pi)) {
1864                         printf("Please remove port %d from bonded device.\n",
1865                                pi);
1866                         continue;
1867                 }
1868
1869                 diag = rte_eth_dev_reset(pi);
1870                 if (diag == 0) {
1871                         port = &ports[pi];
1872                         port->need_reconfig = 1;
1873                         port->need_reconfig_queues = 1;
1874                 } else {
1875                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1876                 }
1877         }
1878
1879         printf("Done\n");
1880 }
1881
/*
 * Hot-plug a new port from a device argument string (e.g. a PCI address
 * or a virtual device name) and prepare it for use: reconfigure it for
 * its NUMA socket, enable promiscuous mode, leave it in STOPPED state
 * and refresh the forwarding configuration.
 */
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	/* On success rte_eth_dev_attach() stores the new port id in pi. */
	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	/* The global port count changed; re-read it. */
	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	/* Rebuild the set of ports used for forwarding to include pi. */
	update_fwd_ports(pi);

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
1914
1915 void
1916 detach_port(portid_t port_id)
1917 {
1918         char name[RTE_ETH_NAME_MAX_LEN];
1919
1920         printf("Detaching a port...\n");
1921
1922         if (!port_is_closed(port_id)) {
1923                 printf("Please close port first\n");
1924                 return;
1925         }
1926
1927         if (ports[port_id].flow_list)
1928                 port_flow_flush(port_id);
1929
1930         if (rte_eth_dev_detach(port_id, name)) {
1931                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1932                 return;
1933         }
1934
1935         nb_ports = rte_eth_dev_count();
1936
1937         update_fwd_ports(RTE_MAX_ETHPORTS);
1938
1939         printf("Port '%s' is detached. Now total ports is %d\n",
1940                         name, nb_ports);
1941         printf("Done\n");
1942         return;
1943 }
1944
1945 void
1946 pmd_test_exit(void)
1947 {
1948         portid_t pt_id;
1949
1950         if (test_done == 0)
1951                 stop_packet_forwarding();
1952
1953         if (ports != NULL) {
1954                 no_link_check = 1;
1955                 RTE_ETH_FOREACH_DEV(pt_id) {
1956                         printf("\nShutting down port %d...\n", pt_id);
1957                         fflush(stdout);
1958                         stop_port(pt_id);
1959                         close_port(pt_id);
1960                 }
1961         }
1962         printf("\nBye...\n");
1963 }
1964
/* Signature of a parameter-less test-menu command handler. */
typedef void (*cmd_func_t)(void);
/* One entry of the PMD test menu: a command name and its handler. */
struct pmd_test_command {
	const char *cmd_name;	/* command string entered by the user */
	cmd_func_t cmd_func;	/* handler invoked for this command */
};

/* Number of entries in pmd_test_menu[] (defined elsewhere in this file). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1972
1973 /* Check the link status of all ports in up to 9s, and print them finally */
1974 static void
1975 check_all_ports_link_status(uint32_t port_mask)
1976 {
1977 #define CHECK_INTERVAL 100 /* 100ms */
1978 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1979         portid_t portid;
1980         uint8_t count, all_ports_up, print_flag = 0;
1981         struct rte_eth_link link;
1982
1983         printf("Checking link statuses...\n");
1984         fflush(stdout);
1985         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1986                 all_ports_up = 1;
1987                 RTE_ETH_FOREACH_DEV(portid) {
1988                         if ((port_mask & (1 << portid)) == 0)
1989                                 continue;
1990                         memset(&link, 0, sizeof(link));
1991                         rte_eth_link_get_nowait(portid, &link);
1992                         /* print link status if flag set */
1993                         if (print_flag == 1) {
1994                                 if (link.link_status)
1995                                         printf(
1996                                         "Port%d Link Up. speed %u Mbps- %s\n",
1997                                         portid, link.link_speed,
1998                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1999                                         ("full-duplex") : ("half-duplex\n"));
2000                                 else
2001                                         printf("Port %d Link Down\n", portid);
2002                                 continue;
2003                         }
2004                         /* clear all_ports_up flag if any link down */
2005                         if (link.link_status == ETH_LINK_DOWN) {
2006                                 all_ports_up = 0;
2007                                 break;
2008                         }
2009                 }
2010                 /* after finally printing all link status, get out */
2011                 if (print_flag == 1)
2012                         break;
2013
2014                 if (all_ports_up == 0) {
2015                         fflush(stdout);
2016                         rte_delay_ms(CHECK_INTERVAL);
2017                 }
2018
2019                 /* set the print_flag if all ports up or timeout */
2020                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2021                         print_flag = 1;
2022                 }
2023
2024                 if (lsc_interrupt)
2025                         break;
2026         }
2027 }
2028
/*
 * Deferred handler for a device-removal (RTE_ETH_EVENT_INTR_RMV) event.
 * Scheduled via rte_eal_alarm_set() from eth_event_callback(), so it
 * runs outside the interrupt thread and may safely stop, close and
 * detach the port.  arg carries the port id cast through intptr_t.
 */
static void
rmv_event_callback(void *arg)
{
	/* Remember the user's link-check setting so it can be restored. */
	int org_no_link_check = no_link_check;
	struct rte_eth_dev *dev;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* The device is going away; polling its link would be pointless. */
	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
			dev->device->name);
}
2048
2049 /* This function is used by the interrupt thread */
2050 static int
2051 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2052                   void *ret_param)
2053 {
2054         static const char * const event_desc[] = {
2055                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2056                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2057                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2058                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2059                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2060                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2061                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2062                 [RTE_ETH_EVENT_MAX] = NULL,
2063         };
2064
2065         RTE_SET_USED(param);
2066         RTE_SET_USED(ret_param);
2067
2068         if (type >= RTE_ETH_EVENT_MAX) {
2069                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
2070                         port_id, __func__, type);
2071                 fflush(stderr);
2072         } else if (event_print_mask & (UINT32_C(1) << type)) {
2073                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
2074                         event_desc[type]);
2075                 fflush(stdout);
2076         }
2077
2078         switch (type) {
2079         case RTE_ETH_EVENT_INTR_RMV:
2080                 if (rte_eal_alarm_set(100000,
2081                                 rmv_event_callback, (void *)(intptr_t)port_id))
2082                         fprintf(stderr, "Could not set up deferred device removal\n");
2083                 break;
2084         default:
2085                 break;
2086         }
2087         return 0;
2088 }
2089
2090 static int
2091 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2092 {
2093         uint16_t i;
2094         int diag;
2095         uint8_t mapping_found = 0;
2096
2097         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2098                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2099                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2100                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2101                                         tx_queue_stats_mappings[i].queue_id,
2102                                         tx_queue_stats_mappings[i].stats_counter_id);
2103                         if (diag != 0)
2104                                 return diag;
2105                         mapping_found = 1;
2106                 }
2107         }
2108         if (mapping_found)
2109                 port->tx_queue_stats_mapping_enabled = 1;
2110         return 0;
2111 }
2112
2113 static int
2114 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2115 {
2116         uint16_t i;
2117         int diag;
2118         uint8_t mapping_found = 0;
2119
2120         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2121                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2122                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2123                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2124                                         rx_queue_stats_mappings[i].queue_id,
2125                                         rx_queue_stats_mappings[i].stats_counter_id);
2126                         if (diag != 0)
2127                                 return diag;
2128                         mapping_found = 1;
2129                 }
2130         }
2131         if (mapping_found)
2132                 port->rx_queue_stats_mapping_enabled = 1;
2133         return 0;
2134 }
2135
2136 static void
2137 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2138 {
2139         int diag = 0;
2140
2141         diag = set_tx_queue_stats_mapping_registers(pi, port);
2142         if (diag != 0) {
2143                 if (diag == -ENOTSUP) {
2144                         port->tx_queue_stats_mapping_enabled = 0;
2145                         printf("TX queue stats mapping not supported port id=%d\n", pi);
2146                 }
2147                 else
2148                         rte_exit(EXIT_FAILURE,
2149                                         "set_tx_queue_stats_mapping_registers "
2150                                         "failed for port id=%d diag=%d\n",
2151                                         pi, diag);
2152         }
2153
2154         diag = set_rx_queue_stats_mapping_registers(pi, port);
2155         if (diag != 0) {
2156                 if (diag == -ENOTSUP) {
2157                         port->rx_queue_stats_mapping_enabled = 0;
2158                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2159                 }
2160                 else
2161                         rte_exit(EXIT_FAILURE,
2162                                         "set_rx_queue_stats_mapping_registers "
2163                                         "failed for port id=%d diag=%d\n",
2164                                         pi, diag);
2165         }
2166 }
2167
/*
 * Initialize a port's RX/TX queue configuration from the driver
 * defaults, then override each field for which a value was supplied on
 * the command line (RTE_PMD_PARAM_UNSET means "keep the default").
 */
static void
rxtx_port_config(struct rte_port *port)
{
	/* Start from the PMD-recommended defaults. */
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
2208
/*
 * Build the initial device configuration for every port: RX mode, flow
 * director, RSS (enabled only when more than one RX queue is in use),
 * queue thresholds, MAC address and queue-stats mappings, plus optional
 * LSC/RMV interrupt flags and driver-specific setup.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		/* RSS only makes sense with multiple RX queues. */
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Without DCB, pick plain RSS or no multi-queue at all. */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Enable LSC/RMV interrupts only where the device supports them. */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;

#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
		/* Detect softnic port */
		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
			port->softnic_enable = 1;
			memset(&port->softport, 0, sizeof(struct softnic_port));

			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
				port->softport.tm_flag = 1;
		}
#endif
	}
}
2264
2265 void set_port_slave_flag(portid_t slave_pid)
2266 {
2267         struct rte_port *port;
2268
2269         port = &ports[slave_pid];
2270         port->slave_flag = 1;
2271 }
2272
2273 void clear_port_slave_flag(portid_t slave_pid)
2274 {
2275         struct rte_port *port;
2276
2277         port = &ports[slave_pid];
2278         port->slave_flag = 0;
2279 }
2280
2281 uint8_t port_is_bonding_slave(portid_t slave_pid)
2282 {
2283         struct rte_port *port;
2284
2285         port = &ports[slave_pid];
2286         if ((rte_eth_devices[slave_pid].data->dev_flags &
2287             RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2288                 return 1;
2289         return 0;
2290 }
2291
/* VLAN ids used to populate the VMDQ+DCB pool map in get_eth_dcb_conf(). */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2298
/*
 * Fill eth_conf with a DCB configuration for the given port.
 * Two variants: DCB with virtualization (VMDQ+DCB, pools mapped from
 * the vlan_tags[] array above) or plain DCB combined with RSS (reusing
 * the port's current RSS hash configuration).
 * Returns 0 on success, or the error from reading the RSS config.
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Map one VLAN tag per pool, cycling over the pools. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Spread the user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		/* Reuse the RSS hash configuration currently on the port. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Spread the user priorities round-robin over the TCs. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	/* Priority-group support is always on; PFC only when requested. */
	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2372
2373 int
2374 init_port_dcb_config(portid_t pid,
2375                      enum dcb_mode_enable dcb_mode,
2376                      enum rte_eth_nb_tcs num_tcs,
2377                      uint8_t pfc_en)
2378 {
2379         struct rte_eth_conf port_conf;
2380         struct rte_port *rte_port;
2381         int retval;
2382         uint16_t i;
2383
2384         rte_port = &ports[pid];
2385
2386         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2387         /* Enter DCB configuration status */
2388         dcb_config = 1;
2389
2390         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2391         retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2392         if (retval < 0)
2393                 return retval;
2394         port_conf.rxmode.hw_vlan_filter = 1;
2395
2396         /**
2397          * Write the configuration into the device.
2398          * Set the numbers of RX & TX queues to 0, so
2399          * the RX & TX queues will not be setup.
2400          */
2401         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2402
2403         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2404
2405         /* If dev_info.vmdq_pool_base is greater than 0,
2406          * the queue id of vmdq pools is started after pf queues.
2407          */
2408         if (dcb_mode == DCB_VT_ENABLED &&
2409             rte_port->dev_info.vmdq_pool_base > 0) {
2410                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2411                         " for port %d.", pid);
2412                 return -1;
2413         }
2414
2415         /* Assume the ports in testpmd have the same dcb capability
2416          * and has the same number of rxq and txq in dcb mode
2417          */
2418         if (dcb_mode == DCB_VT_ENABLED) {
2419                 if (rte_port->dev_info.max_vfs > 0) {
2420                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2421                         nb_txq = rte_port->dev_info.nb_tx_queues;
2422                 } else {
2423                         nb_rxq = rte_port->dev_info.max_rx_queues;
2424                         nb_txq = rte_port->dev_info.max_tx_queues;
2425                 }
2426         } else {
2427                 /*if vt is disabled, use all pf queues */
2428                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2429                         nb_rxq = rte_port->dev_info.max_rx_queues;
2430                         nb_txq = rte_port->dev_info.max_tx_queues;
2431                 } else {
2432                         nb_rxq = (queueid_t)num_tcs;
2433                         nb_txq = (queueid_t)num_tcs;
2434
2435                 }
2436         }
2437         rx_free_thresh = 64;
2438
2439         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2440
2441         rxtx_port_config(rte_port);
2442         /* VLAN filter */
2443         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2444         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2445                 rx_vft_set(pid, vlan_tags[i], 1);
2446
2447         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2448         map_port_queue_stats_mapping_registers(pid, rte_port);
2449
2450         rte_port->dcb_flag = 1;
2451
2452         return 0;
2453 }
2454
2455 static void
2456 init_port(void)
2457 {
2458         /* Configuration of Ethernet ports. */
2459         ports = rte_zmalloc("testpmd: ports",
2460                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2461                             RTE_CACHE_LINE_SIZE);
2462         if (ports == NULL) {
2463                 rte_exit(EXIT_FAILURE,
2464                                 "rte_zmalloc(%d struct rte_port) failed\n",
2465                                 RTE_MAX_ETHPORTS);
2466         }
2467 }
2468
/*
 * Forced shutdown path used by the signal handler: close/detach all
 * ports via pmd_test_exit(), then terminate the interactive prompt.
 */
static void
force_quit(void)
{
	/* Order matters: release port resources before tearing down
	 * the command line. */
	pmd_test_exit();
	prompt_exit();
}
2475
2476 static void
2477 print_stats(void)
2478 {
2479         uint8_t i;
2480         const char clr[] = { 27, '[', '2', 'J', '\0' };
2481         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2482
2483         /* Clear screen and move to top left */
2484         printf("%s%s", clr, top_left);
2485
2486         printf("\nPort statistics ====================================");
2487         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2488                 nic_stats_display(fwd_ports_ids[i]);
2489 }
2490
/*
 * SIGINT/SIGTERM handler: tear down the optional pdump/latency-stats
 * frameworks, force port shutdown, set the f_quit flag polled by the
 * non-interactive stats loop in main(), then restore the default
 * disposition and re-raise the signal so the process exits with the
 * conventional "killed by signal" status.
 *
 * NOTE(review): printf(), the *_uninit() calls and force_quit() are not
 * async-signal-safe; presumably acceptable here because the handler
 * terminates the process immediately afterwards — confirm before
 * extending this handler.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
2512
/*
 * testpmd entry point.
 *
 * Initialization order (order-sensitive): install signal handlers,
 * init the EAL, lock memory, init optional packet capture, count and
 * allocate ports, parse testpmd's own arguments (those after the EAL
 * separator), configure and start all ports, then either run the
 * interactive command prompt or forward packets until a signal or
 * stdin EOF/newline terminates the run.
 *
 * Returns 0 on normal termination, 1 if reading stdin fails in the
 * non-interactive path; rte_panic()/rte_exit() abort earlier on fatal
 * setup errors.
 */
int
main(int argc, char** argv)
{
	int  diag;
	portid_t port_id;

	/* Install handlers before any init so Ctrl-C during startup
	 * still triggers an orderly shutdown. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	/* Pin all current and future pages in RAM to avoid datapath
	 * page faults; failure is logged but non-fatal. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* rte_eal_init() consumed `diag` arguments; what remains are
	 * testpmd's own options. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	/* Link-state interrupts can race with the initial tx_first
	 * burst, so disable them in that combination. */
	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* Optionally replay commands from a file before (or instead
	 * of) the interactive prompt. */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			/* Poll-and-sleep loop: redraw stats every
			 * stats_period seconds until signal_handler()
			 * sets f_quit. */
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}