New upstream version 17.11.5
[deb_dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/mman.h>
42 #include <sys/types.h>
43 #include <errno.h>
44
45 #include <sys/queue.h>
46 #include <sys/stat.h>
47
48 #include <stdint.h>
49 #include <unistd.h>
50 #include <inttypes.h>
51
52 #include <rte_common.h>
53 #include <rte_errno.h>
54 #include <rte_byteorder.h>
55 #include <rte_log.h>
56 #include <rte_debug.h>
57 #include <rte_cycles.h>
58 #include <rte_memory.h>
59 #include <rte_memcpy.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_IXGBE_PMD
77 #include <rte_pmd_ixgbe.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* use master core for command line ? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 uint8_t tx_first;
99 char cmdline_filename[PATH_MAX] = {0};
100
101 /*
102  * NUMA support configuration.
103  * When set, the NUMA support attempts to dispatch the allocation of the
104  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
105  * probed ports among the CPU sockets 0 and 1.
106  * Otherwise, all memory is allocated from CPU socket 0.
107  */
108 uint8_t numa_support = 1; /**< numa enabled by default */
109
110 /*
111  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
112  * not configured.
113  */
114 uint8_t socket_num = UMA_NO_CONFIG;
115
116 /*
117  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
118  */
119 uint8_t mp_anon = 0;
120
121 /*
122  * Record the Ethernet address of peer target ports to which packets are
123  * forwarded.
124  * Must be instantiated with the ethernet addresses of peer traffic generator
125  * ports.
126  */
127 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
128 portid_t nb_peer_eth_addrs = 0;
129
130 /*
131  * Probed Target Environment.
132  */
133 struct rte_port *ports;        /**< For all probed ethernet ports. */
134 portid_t nb_ports;             /**< Number of probed ethernet ports. */
135 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
136 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
137
138 /*
139  * Test Forwarding Configuration.
140  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
141  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
142  */
143 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
144 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
145 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
146 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
147
148 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
149 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
150
151 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
152 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
153
154 /*
155  * Forwarding engines.
156  */
157 struct fwd_engine * fwd_engines[] = {
158         &io_fwd_engine,
159         &mac_fwd_engine,
160         &mac_swap_engine,
161         &flow_gen_engine,
162         &rx_only_engine,
163         &tx_only_engine,
164         &csum_fwd_engine,
165         &icmp_echo_engine,
166 #if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
167         &softnic_tm_engine,
168         &softnic_tm_bypass_engine,
169 #endif
170 #ifdef RTE_LIBRTE_IEEE1588
171         &ieee1588_fwd_engine,
172 #endif
173         NULL,
174 };
175
176 struct fwd_config cur_fwd_config;
177 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
178 uint32_t retry_enabled;
179 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
180 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
181
182 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
183 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
184                                       * specified on command-line. */
185 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
186
187 /*
188  * In a container, a process running with the 'stats-period' option cannot be
189  * terminated; set this flag to exit the stats period loop on SIGINT/SIGTERM.
190  */
191 uint8_t f_quit;
192
193 /*
194  * Configuration of packet segments used by the "txonly" processing engine.
195  */
196 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
197 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
198         TXONLY_DEF_PACKET_LEN,
199 };
200 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
201
202 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
203 /**< Split policy for packets to TX. */
204
205 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
206 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
207
208 /* Whether the current configuration is in DCB mode; 0 means not in DCB mode */
209 uint8_t dcb_config = 0;
210
211 /* Whether the dcb is in testing status */
212 uint8_t dcb_test = 0;
213
214 /*
215  * Configurable number of RX/TX queues.
216  */
217 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
218 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
219
220 /*
221  * Configurable number of RX/TX ring descriptors.
222  */
223 #define RTE_TEST_RX_DESC_DEFAULT 128
224 #define RTE_TEST_TX_DESC_DEFAULT 512
225 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
226 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
227
228 #define RTE_PMD_PARAM_UNSET -1
229 /*
230  * Configurable values of RX and TX ring threshold registers.
231  */
232
233 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
234 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
235 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
236
237 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
238 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
239 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
240
241 /*
242  * Configurable value of RX free threshold.
243  */
244 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
245
246 /*
247  * Configurable value of RX drop enable.
248  */
249 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
250
251 /*
252  * Configurable value of TX free threshold.
253  */
254 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
255
256 /*
257  * Configurable value of TX RS bit threshold.
258  */
259 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
260
261 /*
262  * Configurable value of TX queue flags.
263  */
264 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
265
266 /*
267  * Receive Side Scaling (RSS) configuration.
268  */
269 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
270
271 /*
272  * Port topology configuration
273  */
274 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
275
276 /*
277  * Avoids to flush all the RX streams before starts forwarding.
278  */
279 uint8_t no_flush_rx = 0; /* flush by default */
280
281 /*
282  * Flow API isolated mode.
283  */
284 uint8_t flow_isolate_all;
285
286 /*
287  * Avoids to check link status when starting/stopping a port.
288  */
289 uint8_t no_link_check = 0; /* check by default */
290
291 /*
292  * Enable link status change notification
293  */
294 uint8_t lsc_interrupt = 1; /* enabled by default */
295
296 /*
297  * Enable device removal notification.
298  */
299 uint8_t rmv_interrupt = 1; /* enabled by default */
300
301 /*
302  * Display or mask ether events
303  * Default to all events except VF_MBOX
304  */
305 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
306                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
307                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
308                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
309                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
310                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
311
312 /*
313  * NIC bypass mode configuration options.
314  */
315
316 #if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
317 /* The NIC bypass watchdog timeout. */
318 uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
319 #endif
320
321
322 #ifdef RTE_LIBRTE_LATENCY_STATS
323
324 /*
325  * Set when latency stats is enabled in the commandline
326  */
327 uint8_t latencystats_enabled;
328
329 /*
330  * Lcore ID to serive latency statistics.
331  */
332 lcoreid_t latencystats_lcore_id = -1;
333
334 #endif
335
336 /*
337  * Ethernet device configuration.
338  */
339 struct rte_eth_rxmode rx_mode = {
340         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
341         .split_hdr_size = 0,
342         .header_split   = 0, /**< Header Split disabled. */
343         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
344         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
345         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
346         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
347         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
348         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
349         .hw_timestamp   = 0, /**< HW timestamp enabled. */
350 };
351
352 struct rte_fdir_conf fdir_conf = {
353         .mode = RTE_FDIR_MODE_NONE,
354         .pballoc = RTE_FDIR_PBALLOC_64K,
355         .status = RTE_FDIR_REPORT_STATUS,
356         .mask = {
357                 .vlan_tci_mask = 0xFFEF,
358                 .ipv4_mask     = {
359                         .src_ip = 0xFFFFFFFF,
360                         .dst_ip = 0xFFFFFFFF,
361                 },
362                 .ipv6_mask     = {
363                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
364                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
365                 },
366                 .src_port_mask = 0xFFFF,
367                 .dst_port_mask = 0xFFFF,
368                 .mac_addr_byte_mask = 0xFF,
369                 .tunnel_type_mask = 1,
370                 .tunnel_id_mask = 0xFFFFFFFF,
371         },
372         .drop_queue = 127,
373 };
374
375 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
376
377 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
378 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
379
380 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
381 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
382
383 uint16_t nb_tx_queue_stats_mappings = 0;
384 uint16_t nb_rx_queue_stats_mappings = 0;
385
386 /*
387  * Display zero values by default for xstats
388  */
389 uint8_t xstats_hide_zero;
390
391 unsigned int num_sockets = 0;
392 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
393
394 #ifdef RTE_LIBRTE_BITRATE
395 /* Bitrate statistics */
396 struct rte_stats_bitrates *bitrate_data;
397 lcoreid_t bitrate_lcore_id;
398 uint8_t bitrate_enabled;
399 #endif
400
401 struct gro_status gro_ports[RTE_MAX_ETHPORTS];
402 uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
403
404 /* Forward function declarations */
405 static void map_port_queue_stats_mapping_registers(portid_t pi,
406                                                    struct rte_port *port);
407 static void check_all_ports_link_status(uint32_t port_mask);
408 static int eth_event_callback(portid_t port_id,
409                               enum rte_eth_event_type type,
410                               void *param, void *ret_param);
411
412 /*
413  * Check if all the ports are started.
414  * If yes, return positive value. If not, return zero.
415  */
416 static int all_ports_started(void);
417
418 struct gso_status gso_ports[RTE_MAX_ETHPORTS];
419 uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
420
421 /*
422  * Helper function to check if socket is already discovered.
423  * If yes, return positive value. If not, return zero.
424  */
425 int
426 new_socket_id(unsigned int socket_id)
427 {
428         unsigned int i;
429
430         for (i = 0; i < num_sockets; i++) {
431                 if (socket_ids[i] == socket_id)
432                         return 0;
433         }
434         return 1;
435 }
436
437 /*
438  * Setup default configuration.
439  */
440 static void
441 set_default_fwd_lcores_config(void)
442 {
443         unsigned int i;
444         unsigned int nb_lc;
445         unsigned int sock_num;
446
447         nb_lc = 0;
448         for (i = 0; i < RTE_MAX_LCORE; i++) {
449                 if (!rte_lcore_is_enabled(i))
450                         continue;
451                 sock_num = rte_lcore_to_socket_id(i);
452                 if (new_socket_id(sock_num)) {
453                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
454                                 rte_exit(EXIT_FAILURE,
455                                          "Total sockets greater than %u\n",
456                                          RTE_MAX_NUMA_NODES);
457                         }
458                         socket_ids[num_sockets++] = sock_num;
459                 }
460                 if (i == rte_get_master_lcore())
461                         continue;
462                 fwd_lcores_cpuids[nb_lc++] = i;
463         }
464         nb_lcores = (lcoreid_t) nb_lc;
465         nb_cfg_lcores = nb_lcores;
466         nb_fwd_lcores = 1;
467 }
468
469 static void
470 set_def_peer_eth_addrs(void)
471 {
472         portid_t i;
473
474         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
475                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
476                 peer_eth_addrs[i].addr_bytes[5] = i;
477         }
478 }
479
480 static void
481 set_default_fwd_ports_config(void)
482 {
483         portid_t pt_id;
484         int i = 0;
485
486         RTE_ETH_FOREACH_DEV(pt_id) {
487                 fwd_ports_ids[i++] = pt_id;
488
489                 /* Update sockets info according to the attached device */
490                 int socket_id = rte_eth_dev_socket_id(pt_id);
491                 if (socket_id >= 0 && new_socket_id(socket_id)) {
492                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
493                                 rte_exit(EXIT_FAILURE,
494                                          "Total sockets greater than %u\n",
495                                          RTE_MAX_NUMA_NODES);
496                         }
497                         socket_ids[num_sockets++] = socket_id;
498                 }
499         }
500
501         nb_cfg_ports = nb_ports;
502         nb_fwd_ports = nb_ports;
503 }
504
/*
 * Install the complete default forwarding configuration:
 * lcores first (discovers sockets), then peer MAC addresses,
 * then the port list (may discover more sockets).
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
512
/*
 * Create the mbuf pool for one NUMA socket; done once at init time.
 *
 * @mbuf_seg_size: data-room size of each mbuf (headroom included).
 * @nb_mbuf:       number of mbufs to allocate in the pool.
 * @socket_id:     NUMA socket to allocate the pool memory on.
 *
 * With --mp-anon (mp_anon != 0) the pool is backed by anonymous mapped
 * memory; otherwise the standard rte_pktmbuf_pool_create() wrapper is
 * used.  On any failure the process exits via rte_exit().
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* total object size = mbuf header + data room */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		/* anonymous-memory path: create empty pool, then populate */
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		/* populate returns the number of objects added; 0 is failure */
		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

	/* NOTE: the else-branch falls through here too; err doubles as the
	 * common success/failure check on rte_mp. */
err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
561
562 /*
563  * Check given socket id is valid or not with NUMA mode,
564  * if valid, return 0, else return -1
565  */
566 static int
567 check_socket_id(const unsigned int socket_id)
568 {
569         static int warning_once = 0;
570
571         if (new_socket_id(socket_id)) {
572                 if (!warning_once && numa_support)
573                         printf("Warning: NUMA should be configured manually by"
574                                " using --port-numa-config and"
575                                " --ring-numa-config parameters along with"
576                                " --numa.\n");
577                 warning_once = 1;
578                 return -1;
579         }
580         return 0;
581 }
582
583 /*
584  * Get the allowed maximum number of RX queues.
585  * *pid return the port id which has minimal value of
586  * max_rx_queues in all ports.
587  */
588 queueid_t
589 get_allowed_max_nb_rxq(portid_t *pid)
590 {
591         queueid_t allowed_max_rxq = MAX_QUEUE_ID;
592         portid_t pi;
593         struct rte_eth_dev_info dev_info;
594
595         RTE_ETH_FOREACH_DEV(pi) {
596                 rte_eth_dev_info_get(pi, &dev_info);
597                 if (dev_info.max_rx_queues < allowed_max_rxq) {
598                         allowed_max_rxq = dev_info.max_rx_queues;
599                         *pid = pi;
600                 }
601         }
602         return allowed_max_rxq;
603 }
604
605 /*
606  * Check input rxq is valid or not.
607  * If input rxq is not greater than any of maximum number
608  * of RX queues of all ports, it is valid.
609  * if valid, return 0, else return -1
610  */
611 int
612 check_nb_rxq(queueid_t rxq)
613 {
614         queueid_t allowed_max_rxq;
615         portid_t pid = 0;
616
617         allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
618         if (rxq > allowed_max_rxq) {
619                 printf("Fail: input rxq (%u) can't be greater "
620                        "than max_rx_queues (%u) of port %u\n",
621                        rxq,
622                        allowed_max_rxq,
623                        pid);
624                 return -1;
625         }
626         return 0;
627 }
628
629 /*
630  * Get the allowed maximum number of TX queues.
631  * *pid return the port id which has minimal value of
632  * max_tx_queues in all ports.
633  */
634 queueid_t
635 get_allowed_max_nb_txq(portid_t *pid)
636 {
637         queueid_t allowed_max_txq = MAX_QUEUE_ID;
638         portid_t pi;
639         struct rte_eth_dev_info dev_info;
640
641         RTE_ETH_FOREACH_DEV(pi) {
642                 rte_eth_dev_info_get(pi, &dev_info);
643                 if (dev_info.max_tx_queues < allowed_max_txq) {
644                         allowed_max_txq = dev_info.max_tx_queues;
645                         *pid = pi;
646                 }
647         }
648         return allowed_max_txq;
649 }
650
651 /*
652  * Check input txq is valid or not.
653  * If input txq is not greater than any of maximum number
654  * of TX queues of all ports, it is valid.
655  * if valid, return 0, else return -1
656  */
657 int
658 check_nb_txq(queueid_t txq)
659 {
660         queueid_t allowed_max_txq;
661         portid_t pid = 0;
662
663         allowed_max_txq = get_allowed_max_nb_txq(&pid);
664         if (txq > allowed_max_txq) {
665                 printf("Fail: input txq (%u) can't be greater "
666                        "than max_tx_queues (%u) of port %u\n",
667                        txq,
668                        allowed_max_txq,
669                        pid);
670                 return -1;
671         }
672         return 0;
673 }
674
/*
 * One-time testpmd initialisation: allocate per-lcore state, query all
 * probed ports, create the per-socket mbuf pools, set up forwarding
 * streams and per-lcore GSO/GRO contexts.  Exits the process on any
 * allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	/* port_per_socket is a uint8_t array, so a byte count equal to the
	 * element count is the full array size */
	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/* Query each probed port and tally ports per NUMA socket. */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* worst-case sizing: full RX/TX rings plus per-lcore cache
		 * plus one burst, for every possible port */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* one mbuf pool per discovered socket */
		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* fall back to the socket-0 pool when the lcore's socket
		 * has no pool of its own */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	/* NOTE(review): max_flow_num is set from GRO_MAX_FLUSH_CYCLES, a
	 * flush-cycle constant — looks like the wrong macro; verify against
	 * the intended GRO flow limit before changing. */
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
					"rte_gro_ctx_create() failed\n");
		}
	}
}
808
809
810 void
811 reconfig(portid_t new_port_id, unsigned socket_id)
812 {
813         struct rte_port *port;
814
815         /* Reconfiguration of Ethernet ports. */
816         port = &ports[new_port_id];
817         rte_eth_dev_info_get(new_port_id, &port->dev_info);
818
819         /* set flag to initialize port/queue */
820         port->need_reconfig = 1;
821         port->need_reconfig_queues = 1;
822         port->socket_id = socket_id;
823
824         init_port_config();
825 }
826
827
/*
 * (Re)allocate the global forwarding-stream array.
 *
 * Validates nb_rxq/nb_txq against every port's limits, assigns each
 * port a NUMA socket, then sizes fwd_streams[] as nb_ports * max(nb_rxq,
 * nb_txq).  If the required count is unchanged the existing array is
 * kept; otherwise the old streams are freed and a new zeroed array is
 * allocated.
 *
 * Returns 0 on success, -1 on invalid queue counts; exits the process
 * on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* explicit --port-numa-config wins over the
			 * device-reported socket */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
916
917 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Display the burst-size distribution recorded for one direction.
 *
 * @param rx_tx  Label printed in front of the stats ("RX" or "TX").
 * @param pbs    Burst spread table: pkt_burst_spread[n] counts how many
 *               bursts of exactly n packets were observed.
 *
 * Only the two most frequent burst sizes are reported individually; all
 * remaining bursts are lumped into an "others" percentage.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;	/* total number of bursts recorded */
	unsigned int nb_burst;		/* occurrence count of one burst size */
	unsigned int burst_stats[3];	/* [0] = highest count, [1] = second;
					 * [2] is initialized but never set */
	uint16_t pktnb_stats[3];	/* burst sizes matching burst_stats[] */
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			/* New maximum: demote previous best to second place. */
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		/* A single burst size accounts for everything. */
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		/* The top two burst sizes cover all observed bursts. */
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		/* Integer truncation rounded one share down to 0%:
		 * fold it into the "others" bucket. */
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
973 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
974
/*
 * Display the forwarding statistics of one port.
 *
 * @param port_id  Port whose statistics are displayed.
 * @param stats    Ethdev statistics, already reduced by the caller to the
 *                 interval between "start" and "stop".
 *
 * Two layouts are used: a compact one when no queue-stats mapping is
 * enabled on the port, and a wide column-aligned one (followed by
 * per-stats-register lines) when RX and/or TX queue-stats mapping is on.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		/* Compact layout: no queue-stats mapping on this port. */
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout: columns aligned with the per-queue lines below. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	/* rx_stream/tx_stream are attached by stop_packet_forwarding() only
	 * when streams map 1:1 to ports; otherwise they are NULL. */
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
1057
/*
 * Display the statistics of one forwarding stream (an RX port/queue
 * paired with a TX port/queue). Streams that saw no RX, no TX and no
 * drops are skipped silently.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Completely idle stream: nothing worth printing. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
1086
/*
 * Drain all RX queues of all forwarding ports and free the received mbufs,
 * so a new forwarding run starts from empty queues. Two passes are made,
 * 10 ms apart, to also catch packets that were still in flight during the
 * first pass.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				* testpmd can stuck in the below do while loop
				* if rte_eth_rx_burst() always returns nonzero
				* packets. So timer is added to exit this loop
				* after 1sec timer expiry.
				*/
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					/*
					 * NOTE(review): prev_tsc is never
					 * advanced, so each iteration adds the
					 * full elapsed-since-start delta and
					 * timer_tsc crosses timer_period well
					 * before one real second. Conservative
					 * (exits early); "1sec" above is an
					 * upper bound, not a guarantee.
					 */
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
1131
/*
 * Forwarding loop of one lcore: repeatedly invoke the engine callback on
 * every stream assigned to this lcore until fc->stopped is set (done by
 * stop_packet_forwarding(), or preset for single-shot runs).
 *
 * @param fc       Forwarding lcore context: slice of fwd_streams[] owned by
 *                 this lcore plus its stop flag.
 * @param pkt_fwd  Per-stream forwarding callback of the active engine.
 *
 * When the corresponding libraries are compiled in, the designated lcore
 * also recomputes bitrate stats once per second and refreshes latency
 * stats on every loop iteration.
 */
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	/* This lcore owns streams [stream_idx, stream_idx + stream_nb). */
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		/* Only the dedicated bitrate lcore does this bookkeeping. */
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
1176
1177 static int
1178 start_pkt_forward_on_core(void *fwd_arg)
1179 {
1180         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1181                              cur_fwd_config.fwd_eng->packet_fwd);
1182         return 0;
1183 }
1184
1185 /*
1186  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1187  * Used to start communication flows in network loopback test configurations.
1188  */
1189 static int
1190 run_one_txonly_burst_on_core(void *fwd_arg)
1191 {
1192         struct fwd_lcore *fwd_lc;
1193         struct fwd_lcore tmp_lcore;
1194
1195         fwd_lc = (struct fwd_lcore *) fwd_arg;
1196         tmp_lcore = *fwd_lc;
1197         tmp_lcore.stopped = 1;
1198         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1199         return 0;
1200 }
1201
1202 /*
1203  * Launch packet forwarding:
1204  *     - Setup per-port forwarding context.
1205  *     - launch logical cores with their forwarding configuration.
1206  */
1207 static void
1208 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1209 {
1210         port_fwd_begin_t port_fwd_begin;
1211         unsigned int i;
1212         unsigned int lc_id;
1213         int diag;
1214
1215         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1216         if (port_fwd_begin != NULL) {
1217                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1218                         (*port_fwd_begin)(fwd_ports_ids[i]);
1219         }
1220         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1221                 lc_id = fwd_lcores_cpuids[i];
1222                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1223                         fwd_lcores[i]->stopped = 0;
1224                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1225                                                      fwd_lcores[i], lc_id);
1226                         if (diag != 0)
1227                                 printf("launch lcore %u failed - diag=%d\n",
1228                                        lc_id, diag);
1229                 }
1230         }
1231 }
1232
1233 /*
1234  * Update the forward ports list.
1235  */
1236 void
1237 update_fwd_ports(portid_t new_pid)
1238 {
1239         unsigned int i;
1240         unsigned int new_nb_fwd_ports = 0;
1241         int move = 0;
1242
1243         for (i = 0; i < nb_fwd_ports; ++i) {
1244                 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1245                         move = 1;
1246                 else if (move)
1247                         fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1248                 else
1249                         new_nb_fwd_ports++;
1250         }
1251         if (new_pid < RTE_MAX_ETHPORTS)
1252                 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1253
1254         nb_fwd_ports = new_nb_fwd_ports;
1255         nb_cfg_ports = new_nb_fwd_ports;
1256 }
1257
/*
 * Launch packet forwarding configuration.
 *
 * Validates that the selected engine can run with the configured queue
 * counts and that all ports are started, resets all per-stream counters,
 * optionally primes the test with TX-only bursts, and finally launches
 * the forwarding loop on every forwarding lcore.
 *
 * @param with_tx_first  Number of preliminary TX-only bursts to send
 *                       before starting the configured engine (0 = none).
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* Each engine needs the matching direction(s) to have queues. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* DCB mode requires every forwarding port to be DCB-configured and
	 * more than one forwarding core. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Start from empty RX queues unless flushing was disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot the current HW stats so the final report only covers
	 * this run (stop_packet_forwarding() subtracts the snapshot). */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Clear all software per-stream counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		/* Prime the loopback: run the TX-only engine for the
		 * requested number of single bursts before the real engine. */
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1364
/*
 * Stop packet forwarding: tell every forwarding lcore to stop, wait for
 * them to finish, run the engine's per-port end callback, fold the
 * per-stream counters into the per-port ones, and display per-stream,
 * per-port and accumulated statistics for the finished run.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif

	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* The lcore forwarding loops poll this flag and exit on their own. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/*
		 * With more streams than ports, show per-stream stats and
		 * detach the port<->stream links; with a 1:1 mapping, attach
		 * each stream to its ports so fwd_port_stats_display() can
		 * report its burst spread.
		 */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		/* Fold the stream's drop/checksum counters into its ports. */
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/*
		 * Subtract the snapshot taken by start_packet_forwarding()
		 * so only traffic of this run is reported, then clear the
		 * snapshot for the next run.
		 */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1502
1503 void
1504 dev_set_link_up(portid_t pid)
1505 {
1506         if (rte_eth_dev_set_link_up(pid) < 0)
1507                 printf("\nSet link up fail.\n");
1508 }
1509
1510 void
1511 dev_set_link_down(portid_t pid)
1512 {
1513         if (rte_eth_dev_set_link_down(pid) < 0)
1514                 printf("\nSet link down fail.\n");
1515 }
1516
1517 static int
1518 all_ports_started(void)
1519 {
1520         portid_t pi;
1521         struct rte_port *port;
1522
1523         RTE_ETH_FOREACH_DEV(pi) {
1524                 port = &ports[pi];
1525                 /* Check if there is a port which is not started */
1526                 if ((port->port_status != RTE_PORT_STARTED) &&
1527                         (port->slave_flag == 0))
1528                         return 0;
1529         }
1530
1531         /* No port is not started */
1532         return 1;
1533 }
1534
1535 int
1536 all_ports_stopped(void)
1537 {
1538         portid_t pi;
1539         struct rte_port *port;
1540
1541         RTE_ETH_FOREACH_DEV(pi) {
1542                 port = &ports[pi];
1543                 if ((port->port_status != RTE_PORT_STOPPED) &&
1544                         (port->slave_flag == 0))
1545                         return 0;
1546         }
1547
1548         return 1;
1549 }
1550
1551 int
1552 port_is_started(portid_t port_id)
1553 {
1554         if (port_id_is_invalid(port_id, ENABLED_WARN))
1555                 return 0;
1556
1557         if (ports[port_id].port_status != RTE_PORT_STARTED)
1558                 return 0;
1559
1560         return 1;
1561 }
1562
1563 static int
1564 port_is_closed(portid_t port_id)
1565 {
1566         if (port_id_is_invalid(port_id, ENABLED_WARN))
1567                 return 0;
1568
1569         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1570                 return 0;
1571
1572         return 1;
1573 }
1574
1575 int
1576 start_port(portid_t pid)
1577 {
1578         int diag, need_check_link_status = -1;
1579         portid_t pi;
1580         queueid_t qi;
1581         struct rte_port *port;
1582         struct ether_addr mac_addr;
1583         enum rte_eth_event_type event_type;
1584
1585         if (port_id_is_invalid(pid, ENABLED_WARN))
1586                 return 0;
1587
1588         if(dcb_config)
1589                 dcb_test = 1;
1590         RTE_ETH_FOREACH_DEV(pi) {
1591                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1592                         continue;
1593
1594                 need_check_link_status = 0;
1595                 port = &ports[pi];
1596                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1597                                                  RTE_PORT_HANDLING) == 0) {
1598                         printf("Port %d is now not stopped\n", pi);
1599                         continue;
1600                 }
1601
1602                 if (port->need_reconfig > 0) {
1603                         port->need_reconfig = 0;
1604
1605                         if (flow_isolate_all) {
1606                                 int ret = port_flow_isolate(pi, 1);
1607                                 if (ret) {
1608                                         printf("Failed to apply isolated"
1609                                                " mode on port %d\n", pi);
1610                                         return -1;
1611                                 }
1612                         }
1613
1614                         printf("Configuring Port %d (socket %u)\n", pi,
1615                                         port->socket_id);
1616                         /* configure port */
1617                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1618                                                 &(port->dev_conf));
1619                         if (diag != 0) {
1620                                 if (rte_atomic16_cmpset(&(port->port_status),
1621                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1622                                         printf("Port %d can not be set back "
1623                                                         "to stopped\n", pi);
1624                                 printf("Fail to configure port %d\n", pi);
1625                                 /* try to reconfigure port next time */
1626                                 port->need_reconfig = 1;
1627                                 return -1;
1628                         }
1629                 }
1630                 if (port->need_reconfig_queues > 0) {
1631                         port->need_reconfig_queues = 0;
1632                         /* setup tx queues */
1633                         for (qi = 0; qi < nb_txq; qi++) {
1634                                 if ((numa_support) &&
1635                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1636                                         diag = rte_eth_tx_queue_setup(pi, qi,
1637                                                 nb_txd,txring_numa[pi],
1638                                                 &(port->tx_conf));
1639                                 else
1640                                         diag = rte_eth_tx_queue_setup(pi, qi,
1641                                                 nb_txd,port->socket_id,
1642                                                 &(port->tx_conf));
1643
1644                                 if (diag == 0)
1645                                         continue;
1646
1647                                 /* Fail to setup tx queue, return */
1648                                 if (rte_atomic16_cmpset(&(port->port_status),
1649                                                         RTE_PORT_HANDLING,
1650                                                         RTE_PORT_STOPPED) == 0)
1651                                         printf("Port %d can not be set back "
1652                                                         "to stopped\n", pi);
1653                                 printf("Fail to configure port %d tx queues\n", pi);
1654                                 /* try to reconfigure queues next time */
1655                                 port->need_reconfig_queues = 1;
1656                                 return -1;
1657                         }
1658                         /* setup rx queues */
1659                         for (qi = 0; qi < nb_rxq; qi++) {
1660                                 if ((numa_support) &&
1661                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1662                                         struct rte_mempool * mp =
1663                                                 mbuf_pool_find(rxring_numa[pi]);
1664                                         if (mp == NULL) {
1665                                                 printf("Failed to setup RX queue:"
1666                                                         "No mempool allocation"
1667                                                         " on the socket %d\n",
1668                                                         rxring_numa[pi]);
1669                                                 return -1;
1670                                         }
1671
1672                                         diag = rte_eth_rx_queue_setup(pi, qi,
1673                                              nb_rxd,rxring_numa[pi],
1674                                              &(port->rx_conf),mp);
1675                                 } else {
1676                                         struct rte_mempool *mp =
1677                                                 mbuf_pool_find(port->socket_id);
1678                                         if (mp == NULL) {
1679                                                 printf("Failed to setup RX queue:"
1680                                                         "No mempool allocation"
1681                                                         " on the socket %d\n",
1682                                                         port->socket_id);
1683                                                 return -1;
1684                                         }
1685                                         diag = rte_eth_rx_queue_setup(pi, qi,
1686                                              nb_rxd,port->socket_id,
1687                                              &(port->rx_conf), mp);
1688                                 }
1689                                 if (diag == 0)
1690                                         continue;
1691
1692                                 /* Fail to setup rx queue, return */
1693                                 if (rte_atomic16_cmpset(&(port->port_status),
1694                                                         RTE_PORT_HANDLING,
1695                                                         RTE_PORT_STOPPED) == 0)
1696                                         printf("Port %d can not be set back "
1697                                                         "to stopped\n", pi);
1698                                 printf("Fail to configure port %d rx queues\n", pi);
1699                                 /* try to reconfigure queues next time */
1700                                 port->need_reconfig_queues = 1;
1701                                 return -1;
1702                         }
1703                 }
1704
1705                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1706                      event_type < RTE_ETH_EVENT_MAX;
1707                      event_type++) {
1708                         diag = rte_eth_dev_callback_register(pi,
1709                                                         event_type,
1710                                                         eth_event_callback,
1711                                                         NULL);
1712                         if (diag) {
1713                                 printf("Failed to setup even callback for event %d\n",
1714                                         event_type);
1715                                 return -1;
1716                         }
1717                 }
1718
1719                 /* start port */
1720                 if (rte_eth_dev_start(pi) < 0) {
1721                         printf("Fail to start port %d\n", pi);
1722
1723                         /* Fail to setup rx queue, return */
1724                         if (rte_atomic16_cmpset(&(port->port_status),
1725                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1726                                 printf("Port %d can not be set back to "
1727                                                         "stopped\n", pi);
1728                         continue;
1729                 }
1730
1731                 if (rte_atomic16_cmpset(&(port->port_status),
1732                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1733                         printf("Port %d can not be set into started\n", pi);
1734
1735                 rte_eth_macaddr_get(pi, &mac_addr);
1736                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1737                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1738                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1739                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1740
1741                 /* at least one port started, need checking link status */
1742                 need_check_link_status = 1;
1743         }
1744
1745         if (need_check_link_status == 1 && !no_link_check)
1746                 check_all_ports_link_status(RTE_PORT_ALL);
1747         else if (need_check_link_status == 0)
1748                 printf("Please stop the ports first\n");
1749
1750         printf("Done\n");
1751         return 0;
1752 }
1753
1754 void
1755 stop_port(portid_t pid)
1756 {
1757         portid_t pi;
1758         struct rte_port *port;
1759         int need_check_link_status = 0;
1760
1761         if (dcb_test) {
1762                 dcb_test = 0;
1763                 dcb_config = 0;
1764         }
1765
1766         if (port_id_is_invalid(pid, ENABLED_WARN))
1767                 return;
1768
1769         printf("Stopping ports...\n");
1770
1771         RTE_ETH_FOREACH_DEV(pi) {
1772                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1773                         continue;
1774
1775                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1776                         printf("Please remove port %d from forwarding configuration.\n", pi);
1777                         continue;
1778                 }
1779
1780                 if (port_is_bonding_slave(pi)) {
1781                         printf("Please remove port %d from bonded device.\n", pi);
1782                         continue;
1783                 }
1784
1785                 port = &ports[pi];
1786                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1787                                                 RTE_PORT_HANDLING) == 0)
1788                         continue;
1789
1790                 rte_eth_dev_stop(pi);
1791
1792                 if (rte_atomic16_cmpset(&(port->port_status),
1793                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1794                         printf("Port %d can not be set into stopped\n", pi);
1795                 need_check_link_status = 1;
1796         }
1797         if (need_check_link_status && !no_link_check)
1798                 check_all_ports_link_status(RTE_PORT_ALL);
1799
1800         printf("Done\n");
1801 }
1802
/*
 * Close one port, or every port when pid == RTE_PORT_ALL.
 *
 * Ports still used for forwarding or acting as bonding slaves are
 * skipped.  An eligible port must currently be STOPPED; it is moved
 * through HANDLING while rte_eth_dev_close() runs and finally marked
 * CLOSED.  Any flow rules are flushed before the device is closed.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		/* Honour the single-port / all-ports selection. */
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset(CLOSED, CLOSED) succeeding means already closed. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		/* Claim the port: only a STOPPED port may be closed. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		/* Drop remaining flow rules before closing the device. */
		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
1852
1853 void
1854 reset_port(portid_t pid)
1855 {
1856         int diag;
1857         portid_t pi;
1858         struct rte_port *port;
1859
1860         if (port_id_is_invalid(pid, ENABLED_WARN))
1861                 return;
1862
1863         printf("Resetting ports...\n");
1864
1865         RTE_ETH_FOREACH_DEV(pi) {
1866                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1867                         continue;
1868
1869                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1870                         printf("Please remove port %d from forwarding "
1871                                "configuration.\n", pi);
1872                         continue;
1873                 }
1874
1875                 if (port_is_bonding_slave(pi)) {
1876                         printf("Please remove port %d from bonded device.\n",
1877                                pi);
1878                         continue;
1879                 }
1880
1881                 diag = rte_eth_dev_reset(pi);
1882                 if (diag == 0) {
1883                         port = &ports[pi];
1884                         port->need_reconfig = 1;
1885                         port->need_reconfig_queues = 1;
1886                 } else {
1887                         printf("Failed to reset port %d. diag=%d\n", pi, diag);
1888                 }
1889         }
1890
1891         printf("Done\n");
1892 }
1893
1894 void
1895 attach_port(char *identifier)
1896 {
1897         portid_t pi = 0;
1898         unsigned int socket_id;
1899
1900         printf("Attaching a new port...\n");
1901
1902         if (identifier == NULL) {
1903                 printf("Invalid parameters are specified\n");
1904                 return;
1905         }
1906
1907         if (rte_eth_dev_attach(identifier, &pi))
1908                 return;
1909
1910         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1911         /* if socket_id is invalid, set to the first available socket. */
1912         if (check_socket_id(socket_id) < 0)
1913                 socket_id = socket_ids[0];
1914         reconfig(pi, socket_id);
1915         rte_eth_promiscuous_enable(pi);
1916
1917         nb_ports = rte_eth_dev_count();
1918
1919         ports[pi].port_status = RTE_PORT_STOPPED;
1920
1921         update_fwd_ports(pi);
1922
1923         printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1924         printf("Done\n");
1925 }
1926
1927 void
1928 detach_port(portid_t port_id)
1929 {
1930         char name[RTE_ETH_NAME_MAX_LEN];
1931
1932         printf("Detaching a port...\n");
1933
1934         if (!port_is_closed(port_id)) {
1935                 printf("Please close port first\n");
1936                 return;
1937         }
1938
1939         if (ports[port_id].flow_list)
1940                 port_flow_flush(port_id);
1941
1942         if (rte_eth_dev_detach(port_id, name)) {
1943                 RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
1944                 return;
1945         }
1946
1947         nb_ports = rte_eth_dev_count();
1948
1949         update_fwd_ports(RTE_MAX_ETHPORTS);
1950
1951         printf("Port '%s' is detached. Now total ports is %d\n",
1952                         name, nb_ports);
1953         printf("Done\n");
1954         return;
1955 }
1956
1957 void
1958 pmd_test_exit(void)
1959 {
1960         portid_t pt_id;
1961
1962         if (test_done == 0)
1963                 stop_packet_forwarding();
1964
1965         if (ports != NULL) {
1966                 no_link_check = 1;
1967                 RTE_ETH_FOREACH_DEV(pt_id) {
1968                         printf("\nShutting down port %d...\n", pt_id);
1969                         fflush(stdout);
1970                         stop_port(pt_id);
1971                         close_port(pt_id);
1972                 }
1973         }
1974         printf("\nBye...\n");
1975 }
1976
/* Handler type for a parameter-less interactive test command. */
typedef void (*cmd_func_t)(void);
/* Binds a command name to the function that executes it. */
struct pmd_test_command {
	const char *cmd_name;	/* command string typed by the user */
	cmd_func_t cmd_func;	/* handler invoked for this command */
};

/* Number of entries in the pmd_test_menu[] command table. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1984
1985 /* Check the link status of all ports in up to 9s, and print them finally */
1986 static void
1987 check_all_ports_link_status(uint32_t port_mask)
1988 {
1989 #define CHECK_INTERVAL 100 /* 100ms */
1990 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1991         portid_t portid;
1992         uint8_t count, all_ports_up, print_flag = 0;
1993         struct rte_eth_link link;
1994
1995         printf("Checking link statuses...\n");
1996         fflush(stdout);
1997         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1998                 all_ports_up = 1;
1999                 RTE_ETH_FOREACH_DEV(portid) {
2000                         if ((port_mask & (1 << portid)) == 0)
2001                                 continue;
2002                         memset(&link, 0, sizeof(link));
2003                         rte_eth_link_get_nowait(portid, &link);
2004                         /* print link status if flag set */
2005                         if (print_flag == 1) {
2006                                 if (link.link_status)
2007                                         printf(
2008                                         "Port%d Link Up. speed %u Mbps- %s\n",
2009                                         portid, link.link_speed,
2010                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
2011                                         ("full-duplex") : ("half-duplex\n"));
2012                                 else
2013                                         printf("Port %d Link Down\n", portid);
2014                                 continue;
2015                         }
2016                         /* clear all_ports_up flag if any link down */
2017                         if (link.link_status == ETH_LINK_DOWN) {
2018                                 all_ports_up = 0;
2019                                 break;
2020                         }
2021                 }
2022                 /* after finally printing all link status, get out */
2023                 if (print_flag == 1)
2024                         break;
2025
2026                 if (all_ports_up == 0) {
2027                         fflush(stdout);
2028                         rte_delay_ms(CHECK_INTERVAL);
2029                 }
2030
2031                 /* set the print_flag if all ports up or timeout */
2032                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
2033                         print_flag = 1;
2034                 }
2035
2036                 if (lsc_interrupt)
2037                         break;
2038         }
2039 }
2040
/*
 * Deferred handler for RTE_ETH_EVENT_INTR_RMV, run from an EAL alarm
 * scheduled by eth_event_callback().  Stops and closes the removed
 * port, then detaches its underlying device.  Link checking is
 * suppressed while stopping, since the device is already gone.
 */
static void
rmv_event_callback(void *arg)
{
	int org_no_link_check = no_link_check;	/* save user's setting */
	struct rte_eth_dev *dev;
	portid_t port_id = (intptr_t)arg;	/* port id passed through the void * */

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;	/* restore user's setting */
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
			dev->device->name);
}
2060
2061 /* This function is used by the interrupt thread */
2062 static int
2063 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
2064                   void *ret_param)
2065 {
2066         static const char * const event_desc[] = {
2067                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
2068                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
2069                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
2070                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
2071                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
2072                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
2073                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
2074                 [RTE_ETH_EVENT_MAX] = NULL,
2075         };
2076
2077         RTE_SET_USED(param);
2078         RTE_SET_USED(ret_param);
2079
2080         if (type >= RTE_ETH_EVENT_MAX) {
2081                 fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
2082                         port_id, __func__, type);
2083                 fflush(stderr);
2084         } else if (event_print_mask & (UINT32_C(1) << type)) {
2085                 printf("\nPort %" PRIu16 ": %s event\n", port_id,
2086                         event_desc[type]);
2087                 fflush(stdout);
2088         }
2089
2090         switch (type) {
2091         case RTE_ETH_EVENT_INTR_RMV:
2092                 if (rte_eal_alarm_set(100000,
2093                                 rmv_event_callback, (void *)(intptr_t)port_id))
2094                         fprintf(stderr, "Could not set up deferred device removal\n");
2095                 break;
2096         default:
2097                 break;
2098         }
2099         return 0;
2100 }
2101
2102 static int
2103 set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2104 {
2105         uint16_t i;
2106         int diag;
2107         uint8_t mapping_found = 0;
2108
2109         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2110                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2111                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
2112                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
2113                                         tx_queue_stats_mappings[i].queue_id,
2114                                         tx_queue_stats_mappings[i].stats_counter_id);
2115                         if (diag != 0)
2116                                 return diag;
2117                         mapping_found = 1;
2118                 }
2119         }
2120         if (mapping_found)
2121                 port->tx_queue_stats_mapping_enabled = 1;
2122         return 0;
2123 }
2124
2125 static int
2126 set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
2127 {
2128         uint16_t i;
2129         int diag;
2130         uint8_t mapping_found = 0;
2131
2132         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2133                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2134                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
2135                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
2136                                         rx_queue_stats_mappings[i].queue_id,
2137                                         rx_queue_stats_mappings[i].stats_counter_id);
2138                         if (diag != 0)
2139                                 return diag;
2140                         mapping_found = 1;
2141                 }
2142         }
2143         if (mapping_found)
2144                 port->rx_queue_stats_mapping_enabled = 1;
2145         return 0;
2146 }
2147
2148 static void
2149 map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
2150 {
2151         int diag = 0;
2152
2153         diag = set_tx_queue_stats_mapping_registers(pi, port);
2154         if (diag != 0) {
2155                 if (diag == -ENOTSUP) {
2156                         port->tx_queue_stats_mapping_enabled = 0;
2157                         printf("TX queue stats mapping not supported port id=%d\n", pi);
2158                 }
2159                 else
2160                         rte_exit(EXIT_FAILURE,
2161                                         "set_tx_queue_stats_mapping_registers "
2162                                         "failed for port id=%d diag=%d\n",
2163                                         pi, diag);
2164         }
2165
2166         diag = set_rx_queue_stats_mapping_registers(pi, port);
2167         if (diag != 0) {
2168                 if (diag == -ENOTSUP) {
2169                         port->rx_queue_stats_mapping_enabled = 0;
2170                         printf("RX queue stats mapping not supported port id=%d\n", pi);
2171                 }
2172                 else
2173                         rte_exit(EXIT_FAILURE,
2174                                         "set_rx_queue_stats_mapping_registers "
2175                                         "failed for port id=%d diag=%d\n",
2176                                         pi, diag);
2177         }
2178 }
2179
2180 static void
2181 rxtx_port_config(struct rte_port *port)
2182 {
2183         port->rx_conf = port->dev_info.default_rxconf;
2184         port->tx_conf = port->dev_info.default_txconf;
2185
2186         /* Check if any RX/TX parameters have been passed */
2187         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
2188                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
2189
2190         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
2191                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
2192
2193         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
2194                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
2195
2196         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
2197                 port->rx_conf.rx_free_thresh = rx_free_thresh;
2198
2199         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
2200                 port->rx_conf.rx_drop_en = rx_drop_en;
2201
2202         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
2203                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
2204
2205         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
2206                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
2207
2208         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
2209                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
2210
2211         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
2212                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
2213
2214         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
2215                 port->tx_conf.tx_free_thresh = tx_free_thresh;
2216
2217         if (txq_flags != RTE_PMD_PARAM_UNSET)
2218                 port->tx_conf.txq_flags = txq_flags;
2219 }
2220
/*
 * Apply the global testpmd configuration (RX mode, flow director, RSS,
 * queue thresholds, interrupt flags) to every known port and fetch
 * each port's MAC address.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		/* RSS only makes sense with more than one RX queue. */
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Outside DCB mode, pick plain RSS or no multi-queue. */
		if (port->dcb_flag == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		/* Enable interrupt-driven events only where the PMD
		 * advertises support for them. */
		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;

#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
		/* Detect softnic port */
		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
			port->softnic_enable = 1;
			memset(&port->softport, 0, sizeof(struct softnic_port));

			/* traffic-manager forwarding needs its own flag */
			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
				port->softport.tm_flag = 1;
		}
#endif
	}
}
2276
2277 void set_port_slave_flag(portid_t slave_pid)
2278 {
2279         struct rte_port *port;
2280
2281         port = &ports[slave_pid];
2282         port->slave_flag = 1;
2283 }
2284
2285 void clear_port_slave_flag(portid_t slave_pid)
2286 {
2287         struct rte_port *port;
2288
2289         port = &ports[slave_pid];
2290         port->slave_flag = 0;
2291 }
2292
2293 uint8_t port_is_bonding_slave(portid_t slave_pid)
2294 {
2295         struct rte_port *port;
2296
2297         port = &ports[slave_pid];
2298         if ((rte_eth_devices[slave_pid].data->dev_flags &
2299             RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
2300                 return 1;
2301         return 0;
2302 }
2303
/* VLAN IDs used by get_eth_dcb_conf() to fill the VMDq+DCB pool map. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
2310
/*
 * Fill *eth_conf for DCB operation on port "pid".
 *
 * In DCB_VT_ENABLED mode a VMDq+DCB configuration is built from the
 * vlan_tags[] table; otherwise plain DCB(+RSS) is configured, reusing
 * the port's current RSS hash configuration.  pfc_en additionally
 * advertises priority flow control capability.
 *
 * Returns 0 on success or the rte_eth_dev_rss_hash_conf_get() error.
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
                 enum dcb_mode_enable dcb_mode,
                 enum rte_eth_nb_tcs num_tcs,
                 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools, 8 TCs for only 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* Spread the VLAN tags round-robin across the pools. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Map each user priority onto a traffic class. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		/* Keep the port's current RSS hash settings. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Map each user priority onto a traffic class. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	/* Advertise PFC capability only when requested. */
	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
2384
2385 int
2386 init_port_dcb_config(portid_t pid,
2387                      enum dcb_mode_enable dcb_mode,
2388                      enum rte_eth_nb_tcs num_tcs,
2389                      uint8_t pfc_en)
2390 {
2391         struct rte_eth_conf port_conf;
2392         struct rte_port *rte_port;
2393         int retval;
2394         uint16_t i;
2395
2396         rte_port = &ports[pid];
2397
2398         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2399         /* Enter DCB configuration status */
2400         dcb_config = 1;
2401
2402         /*set configuration of DCB in vt mode and DCB in non-vt mode*/
2403         retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
2404         if (retval < 0)
2405                 return retval;
2406         port_conf.rxmode.hw_vlan_filter = 1;
2407
2408         /**
2409          * Write the configuration into the device.
2410          * Set the numbers of RX & TX queues to 0, so
2411          * the RX & TX queues will not be setup.
2412          */
2413         rte_eth_dev_configure(pid, 0, 0, &port_conf);
2414
2415         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2416
2417         /* If dev_info.vmdq_pool_base is greater than 0,
2418          * the queue id of vmdq pools is started after pf queues.
2419          */
2420         if (dcb_mode == DCB_VT_ENABLED &&
2421             rte_port->dev_info.vmdq_pool_base > 0) {
2422                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2423                         " for port %d.", pid);
2424                 return -1;
2425         }
2426
2427         /* Assume the ports in testpmd have the same dcb capability
2428          * and has the same number of rxq and txq in dcb mode
2429          */
2430         if (dcb_mode == DCB_VT_ENABLED) {
2431                 if (rte_port->dev_info.max_vfs > 0) {
2432                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2433                         nb_txq = rte_port->dev_info.nb_tx_queues;
2434                 } else {
2435                         nb_rxq = rte_port->dev_info.max_rx_queues;
2436                         nb_txq = rte_port->dev_info.max_tx_queues;
2437                 }
2438         } else {
2439                 /*if vt is disabled, use all pf queues */
2440                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2441                         nb_rxq = rte_port->dev_info.max_rx_queues;
2442                         nb_txq = rte_port->dev_info.max_tx_queues;
2443                 } else {
2444                         nb_rxq = (queueid_t)num_tcs;
2445                         nb_txq = (queueid_t)num_tcs;
2446
2447                 }
2448         }
2449         rx_free_thresh = 64;
2450
2451         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2452
2453         rxtx_port_config(rte_port);
2454         /* VLAN filter */
2455         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2456         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2457                 rx_vft_set(pid, vlan_tags[i], 1);
2458
2459         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2460         map_port_queue_stats_mapping_registers(pid, rte_port);
2461
2462         rte_port->dcb_flag = 1;
2463
2464         return 0;
2465 }
2466
2467 static void
2468 init_port(void)
2469 {
2470         /* Configuration of Ethernet ports. */
2471         ports = rte_zmalloc("testpmd: ports",
2472                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2473                             RTE_CACHE_LINE_SIZE);
2474         if (ports == NULL) {
2475                 rte_exit(EXIT_FAILURE,
2476                                 "rte_zmalloc(%d struct rte_port) failed\n",
2477                                 RTE_MAX_ETHPORTS);
2478         }
2479
2480         /* Initialize ports NUMA structures */
2481         memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2482         memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2483         memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
2484 }
2485
/*
 * Forced shutdown path, reached from signal_handler().
 * pmd_test_exit() — defined elsewhere in this file — presumably stops
 * and releases the ports; prompt_exit() then terminates the command
 * line. The order (ports first, prompt second) is preserved from the
 * original code.
 * NOTE(review): runs in signal-handler context, so everything reached
 * from here would need to be async-signal-safe — confirm.
 */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2492
2493 static void
2494 print_stats(void)
2495 {
2496         uint8_t i;
2497         const char clr[] = { 27, '[', '2', 'J', '\0' };
2498         const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
2499
2500         /* Clear screen and move to top left */
2501         printf("%s%s", clr, top_left);
2502
2503         printf("\nPort statistics ====================================");
2504         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
2505                 nic_stats_display(fwd_ports_ids[i]);
2506 }
2507
2508 static void
2509 signal_handler(int signum)
2510 {
2511         if (signum == SIGINT || signum == SIGTERM) {
2512                 printf("\nSignal %d received, preparing to exit...\n",
2513                                 signum);
2514 #ifdef RTE_LIBRTE_PDUMP
2515                 /* uninitialize packet capture framework */
2516                 rte_pdump_uninit();
2517 #endif
2518 #ifdef RTE_LIBRTE_LATENCY_STATS
2519                 rte_latencystats_uninit();
2520 #endif
2521                 force_quit();
2522                 /* Set flag to indicate the force termination. */
2523                 f_quit = 1;
2524                 /* exit with the expected status */
2525                 signal(signum, SIG_DFL);
2526                 kill(getpid(), signum);
2527         }
2528 }
2529
/*
 * testpmd entry point.
 *
 * Sequence: install signal handlers, initialize the EAL, probe ports,
 * parse the application-specific command line, configure and start all
 * ports, then either run the interactive command prompt or forward
 * packets non-interactively until the user quits. The initialization
 * order below is deliberate; do not reorder.
 */
int
main(int argc, char** argv)
{
	int  diag;
	portid_t port_id;

	/* Install handlers before any init so Ctrl-C always cleans up. */
	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* EAL consumes its own arguments; diag is how many it used. */
	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	/* Pin pages in RAM to avoid page faults on the fast path; a
	 * failure here is logged but not fatal. */
	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	/* Skip the arguments the EAL consumed; the remainder belongs to
	 * testpmd itself. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	/* --tx-first only makes sense in non-interactive mode. */
	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
				"interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
				" using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
				" returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
			latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				"Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	/* Optionally replay commands from a file before going
	 * interactive. */
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		/* Non-interactive mode: forward until the user presses
		 * enter or a termination signal sets f_quit. */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			/* Print stats roughly every stats_period seconds
			 * until a signal sets f_quit. */
			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		/* Block until the user hits enter (or stdin errors). */
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}