Fix d/p/fix-vhost-user-socket-permission.patch
deb_dpdk.git: app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_alarm.h>
63 #include <rte_per_lcore.h>
64 #include <rte_lcore.h>
65 #include <rte_atomic.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82 #include <rte_flow.h>
83 #include <rte_metrics.h>
84 #ifdef RTE_LIBRTE_BITRATE
85 #include <rte_bitrate.h>
86 #endif
87 #ifdef RTE_LIBRTE_LATENCY_STATS
88 #include <rte_latencystats.h>
89 #endif
90
91 #include "testpmd.h"
92
93 uint16_t verbose_level = 0; /**< Silent by default. */
94
95 /* use master core for command line? */
96 uint8_t interactive = 0;
97 uint8_t auto_start = 0;
98 char cmdline_filename[PATH_MAX] = {0};
99
100 /*
101  * NUMA support configuration.
102  * When set, the NUMA support attempts to dispatch the allocation of the
103  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
104  * probed ports among the CPU sockets 0 and 1.
105  * Otherwise, all memory is allocated from CPU socket 0.
106  */
107 uint8_t numa_support = 1; /**< numa enabled by default */
108
109 /*
110  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
111  * not configured.
112  */
113 uint8_t socket_num = UMA_NO_CONFIG;
114
115 /*
116  * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
117  */
118 uint8_t mp_anon = 0;
119
120 /*
121  * Record the Ethernet address of peer target ports to which packets are
122  * forwarded.
123  * Must be instantiated with the ethernet addresses of peer traffic generator
124  * ports.
125  */
126 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
127 portid_t nb_peer_eth_addrs = 0;
128
129 /*
130  * Probed Target Environment.
131  */
132 struct rte_port *ports;        /**< For all probed ethernet ports. */
133 portid_t nb_ports;             /**< Number of probed ethernet ports. */
134 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
135 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
136
137 /*
138  * Test Forwarding Configuration.
139  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
140  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
141  */
142 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
143 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
144 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
145 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
146
147 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
148 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
149
150 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
151 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
152
153 /*
154  * Forwarding engines.
155  */
156 struct fwd_engine * fwd_engines[] = {
157         &io_fwd_engine,
158         &mac_fwd_engine,
159         &mac_swap_engine,
160         &flow_gen_engine,
161         &rx_only_engine,
162         &tx_only_engine,
163         &csum_fwd_engine,
164         &icmp_echo_engine,
165 #ifdef RTE_LIBRTE_IEEE1588
166         &ieee1588_fwd_engine,
167 #endif
168         NULL,
169 };
170
171 struct fwd_config cur_fwd_config;
172 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
173 uint32_t retry_enabled;
174 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
175 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
176
177 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
178 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools, if
179                                       * specified on the command line. */
180
181 /*
182  * Configuration of packet segments used by the "txonly" processing engine.
183  */
184 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
185 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
186         TXONLY_DEF_PACKET_LEN,
187 };
188 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
189
190 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
191 /**< Split policy for packets to TX. */
192
193 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
194 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
195
196 /* whether the current configuration is in DCB mode; 0 means it is not */
197 uint8_t dcb_config = 0;
198
199 /* Whether DCB is being tested */
200 uint8_t dcb_test = 0;
201
202 /*
203  * Configurable number of RX/TX queues.
204  */
205 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
206 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
207
208 /*
209  * Configurable number of RX/TX ring descriptors.
210  */
211 #define RTE_TEST_RX_DESC_DEFAULT 128
212 #define RTE_TEST_TX_DESC_DEFAULT 512
213 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
214 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
215
216 #define RTE_PMD_PARAM_UNSET -1
217 /*
218  * Configurable values of RX and TX ring threshold registers.
219  */
220
221 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
222 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
223 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
224
225 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
226 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
227 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
228
229 /*
230  * Configurable value of RX free threshold.
231  */
232 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
233
234 /*
235  * Configurable value of RX drop enable.
236  */
237 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
238
239 /*
240  * Configurable value of TX free threshold.
241  */
242 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
243
244 /*
245  * Configurable value of TX RS bit threshold.
246  */
247 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
248
249 /*
250  * Configurable value of TX queue flags.
251  */
252 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
253
254 /*
255  * Receive Side Scaling (RSS) configuration.
256  */
257 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
258
259 /*
260  * Port topology configuration
261  */
262 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
263
264 /*
265  * Avoid flushing all the RX streams before starting forwarding.
266  */
267 uint8_t no_flush_rx = 0; /* flush by default */
268
269 /*
270  * Avoid checking the link status when starting/stopping a port.
271  */
272 uint8_t no_link_check = 0; /* check by default */
273
274 /*
275  * Enable link status change notification
276  */
277 uint8_t lsc_interrupt = 1; /* enabled by default */
278
279 /*
280  * Enable device removal notification.
281  */
282 uint8_t rmv_interrupt = 1; /* enabled by default */
283
284 /*
285  * Display or mask ether events
286  * Default to all events except VF_MBOX
287  */
288 uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
289                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
290                             (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
291                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
292                             (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
293                             (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
294
295 /*
296  * NIC bypass mode configuration options.
297  */
298 #ifdef RTE_NIC_BYPASS
299
300 /* The NIC bypass watchdog timeout. */
301 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
302
303 #endif
304
305 #ifdef RTE_LIBRTE_LATENCY_STATS
306
307 /*
308  * Set when latency stats are enabled on the command line.
309  */
310 uint8_t latencystats_enabled;
311
312 /*
313  * Lcore ID to serve latency statistics.
314  */
315 lcoreid_t latencystats_lcore_id = -1;
316
317 #endif
318
319 /*
320  * Ethernet device configuration.
321  */
322 struct rte_eth_rxmode rx_mode = {
323         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
324         .split_hdr_size = 0,
325         .header_split   = 0, /**< Header Split disabled. */
326         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
327         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
328         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
329         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
330         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
331         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
332 };
333
334 struct rte_fdir_conf fdir_conf = {
335         .mode = RTE_FDIR_MODE_NONE,
336         .pballoc = RTE_FDIR_PBALLOC_64K,
337         .status = RTE_FDIR_REPORT_STATUS,
338         .mask = {
339                 .vlan_tci_mask = 0x0,
340                 .ipv4_mask     = {
341                         .src_ip = 0xFFFFFFFF,
342                         .dst_ip = 0xFFFFFFFF,
343                 },
344                 .ipv6_mask     = {
345                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
346                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
347                 },
348                 .src_port_mask = 0xFFFF,
349                 .dst_port_mask = 0xFFFF,
350                 .mac_addr_byte_mask = 0xFF,
351                 .tunnel_type_mask = 1,
352                 .tunnel_id_mask = 0xFFFFFFFF,
353         },
354         .drop_queue = 127,
355 };
356
357 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
358
359 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
360 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
361
362 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
363 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
364
365 uint16_t nb_tx_queue_stats_mappings = 0;
366 uint16_t nb_rx_queue_stats_mappings = 0;
367
368 unsigned int num_sockets = 0;
369 unsigned int socket_ids[RTE_MAX_NUMA_NODES];
370
371 #ifdef RTE_LIBRTE_BITRATE
372 /* Bitrate statistics */
373 struct rte_stats_bitrates *bitrate_data;
374 lcoreid_t bitrate_lcore_id;
375 uint8_t bitrate_enabled;
376 #endif
377
378 /* Forward function declarations */
379 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
380 static void check_all_ports_link_status(uint32_t port_mask);
381 static void eth_event_callback(uint8_t port_id,
382                                enum rte_eth_event_type type,
383                                void *param);
384
385 /*
386  * Check if all the ports are started.
387  * If yes, return positive value. If not, return zero.
388  */
389 static int all_ports_started(void);
390
391 /*
392  * Helper function to check if a socket is already discovered.
393  * If yes, return positive value. If not, return zero.
394  */
395 int
396 new_socket_id(unsigned int socket_id)
397 {
398         unsigned int i;
399
400         for (i = 0; i < num_sockets; i++) {
401                 if (socket_ids[i] == socket_id)
402                         return 0;
403         }
404         return 1;
405 }
406
407 /*
408  * Setup default configuration.
409  */
410 static void
411 set_default_fwd_lcores_config(void)
412 {
413         unsigned int i;
414         unsigned int nb_lc;
415         unsigned int sock_num;
416
417         nb_lc = 0;
418         for (i = 0; i < RTE_MAX_LCORE; i++) {
419                 sock_num = rte_lcore_to_socket_id(i);
420                 if (new_socket_id(sock_num)) {
421                         if (num_sockets >= RTE_MAX_NUMA_NODES) {
422                                 rte_exit(EXIT_FAILURE,
423                                          "Total sockets greater than %u\n",
424                                          RTE_MAX_NUMA_NODES);
425                         }
426                         socket_ids[num_sockets++] = sock_num;
427                 }
428                 if (!rte_lcore_is_enabled(i))
429                         continue;
430                 if (i == rte_get_master_lcore())
431                         continue;
432                 fwd_lcores_cpuids[nb_lc++] = i;
433         }
434         nb_lcores = (lcoreid_t) nb_lc;
435         nb_cfg_lcores = nb_lcores;
436         nb_fwd_lcores = 1;
437 }
438
439 static void
440 set_def_peer_eth_addrs(void)
441 {
442         portid_t i;
443
444         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
445                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
446                 peer_eth_addrs[i].addr_bytes[5] = i;
447         }
448 }
449
450 static void
451 set_default_fwd_ports_config(void)
452 {
453         portid_t pt_id;
454
455         for (pt_id = 0; pt_id < nb_ports; pt_id++)
456                 fwd_ports_ids[pt_id] = pt_id;
457
458         nb_cfg_ports = nb_ports;
459         nb_fwd_ports = nb_ports;
460 }
461
462 void
463 set_def_fwd_config(void)
464 {
465         set_default_fwd_lcores_config();
466         set_def_peer_eth_addrs();
467         set_default_fwd_ports_config();
468 }
469
470 /*
471  * Configuration initialisation done once at init time.
472  */
473 static void
474 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
475                  unsigned int socket_id)
476 {
477         char pool_name[RTE_MEMPOOL_NAMESIZE];
478         struct rte_mempool *rte_mp = NULL;
479         uint32_t mb_size;
480
481         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
482         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
483
484         RTE_LOG(INFO, USER1,
485                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
486                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
487
488 #ifdef RTE_LIBRTE_PMD_XENVIRT
489         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
490                 (unsigned) mb_mempool_cache,
491                 sizeof(struct rte_pktmbuf_pool_private),
492                 rte_pktmbuf_pool_init, NULL,
493                 rte_pktmbuf_init, NULL,
494                 socket_id, 0);
495 #endif
496
497         /* if the former XEN allocation failed, fall back to normal allocation */
498         if (rte_mp == NULL) {
499                 if (mp_anon != 0) {
500                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
501                                 mb_size, (unsigned) mb_mempool_cache,
502                                 sizeof(struct rte_pktmbuf_pool_private),
503                                 socket_id, 0);
504                         if (rte_mp == NULL)
505                                 goto err;
506
507                         if (rte_mempool_populate_anon(rte_mp) == 0) {
508                                 rte_mempool_free(rte_mp);
509                                 rte_mp = NULL;
510                                 goto err;
511                         }
512                         rte_pktmbuf_pool_init(rte_mp, NULL);
513                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
514                 } else {
515                         /* wrapper to rte_mempool_create() */
516                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
517                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
518                 }
519         }
520
521 err:
522         if (rte_mp == NULL) {
523                 rte_exit(EXIT_FAILURE,
524                         "Creation of mbuf pool for socket %u failed: %s\n",
525                         socket_id, rte_strerror(rte_errno));
526         } else if (verbose_level > 0) {
527                 rte_mempool_dump(stdout, rte_mp);
528         }
529 }
530
531 /*
532  * Check whether the given socket ID is valid in NUMA mode;
533  * if valid, return 0, otherwise return -1.
534  */
535 static int
536 check_socket_id(const unsigned int socket_id)
537 {
538         static int warning_once = 0;
539
540         if (new_socket_id(socket_id)) {
541                 if (!warning_once && numa_support)
542                         printf("Warning: NUMA should be configured manually by"
543                                " using --port-numa-config and"
544                                " --ring-numa-config parameters along with"
545                                " --numa.\n");
546                 warning_once = 1;
547                 return -1;
548         }
549         return 0;
550 }
551
552 static void
553 init_config(void)
554 {
555         portid_t pid;
556         struct rte_port *port;
557         struct rte_mempool *mbp;
558         unsigned int nb_mbuf_per_pool;
559         lcoreid_t  lc_id;
560         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
561
562         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
563
564         if (numa_support) {
565                 memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
566                 memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
567                 memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
568         }
569
570         /* Configuration of logical cores. */
571         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
572                                 sizeof(struct fwd_lcore *) * nb_lcores,
573                                 RTE_CACHE_LINE_SIZE);
574         if (fwd_lcores == NULL) {
575                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
576                                                         "failed\n", nb_lcores);
577         }
578         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
579                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
580                                                sizeof(struct fwd_lcore),
581                                                RTE_CACHE_LINE_SIZE);
582                 if (fwd_lcores[lc_id] == NULL) {
583                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
584                                                                 "failed\n");
585                 }
586                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
587         }
588
589         RTE_ETH_FOREACH_DEV(pid) {
590                 port = &ports[pid];
591                 rte_eth_dev_info_get(pid, &port->dev_info);
592
593                 if (numa_support) {
594                         if (port_numa[pid] != NUMA_NO_CONFIG)
595                                 port_per_socket[port_numa[pid]]++;
596                         else {
597                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
598
599                                 /* if socket_id is invalid, set to 0 */
600                                 if (check_socket_id(socket_id) < 0)
601                                         socket_id = 0;
602                                 port_per_socket[socket_id]++;
603                         }
604                 }
605
606                 /* set flag to initialize port/queue */
607                 port->need_reconfig = 1;
608                 port->need_reconfig_queues = 1;
609         }
610
611         /*
612          * Create mbuf pools.
613          * If NUMA support is disabled, create a single mbuf pool in
614          * socket 0 memory by default.
615          * Otherwise, create an mbuf pool in the memory of each detected socket.
616          *
617          * Use the maximum values of nb_rxd and nb_txd here, so that nb_rxd
618          * and nb_txd can be re-configured at run time.
619          */
620         if (param_total_num_mbufs)
621                 nb_mbuf_per_pool = param_total_num_mbufs;
622         else {
623                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
624                         (nb_lcores * mb_mempool_cache) +
625                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
626                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
627         }
628
629         if (numa_support) {
630                 uint8_t i;
631
632                 for (i = 0; i < num_sockets; i++)
633                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
634                                          socket_ids[i]);
635         } else {
636                 if (socket_num == UMA_NO_CONFIG)
637                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
638                 else
639                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
640                                                  socket_num);
641         }
642
643         init_port_config();
644
645         /*
646          * Record which mbuf pool each logical core should use, if needed.
647          */
648         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
649                 mbp = mbuf_pool_find(
650                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
651
652                 if (mbp == NULL)
653                         mbp = mbuf_pool_find(0);
654                 fwd_lcores[lc_id]->mbp = mbp;
655         }
656
657         /* Configuration of packet forwarding streams. */
658         if (init_fwd_streams() < 0)
659                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
660
661         fwd_config_setup();
662 }
663
664
665 void
666 reconfig(portid_t new_port_id, unsigned socket_id)
667 {
668         struct rte_port *port;
669
670         /* Reconfiguration of Ethernet ports. */
671         port = &ports[new_port_id];
672         rte_eth_dev_info_get(new_port_id, &port->dev_info);
673
674         /* set flag to initialize port/queue */
675         port->need_reconfig = 1;
676         port->need_reconfig_queues = 1;
677         port->socket_id = socket_id;
678
679         init_port_config();
680 }
681
682
683 int
684 init_fwd_streams(void)
685 {
686         portid_t pid;
687         struct rte_port *port;
688         streamid_t sm_id, nb_fwd_streams_new;
689         queueid_t q;
690
691         /* set the socket id according to whether NUMA is enabled */
692         RTE_ETH_FOREACH_DEV(pid) {
693                 port = &ports[pid];
694                 if (nb_rxq > port->dev_info.max_rx_queues) {
695                         printf("Fail: nb_rxq(%d) is greater than "
696                                 "max_rx_queues(%d)\n", nb_rxq,
697                                 port->dev_info.max_rx_queues);
698                         return -1;
699                 }
700                 if (nb_txq > port->dev_info.max_tx_queues) {
701                         printf("Fail: nb_txq(%d) is greater than "
702                                 "max_tx_queues(%d)\n", nb_txq,
703                                 port->dev_info.max_tx_queues);
704                         return -1;
705                 }
706                 if (numa_support) {
707                         if (port_numa[pid] != NUMA_NO_CONFIG)
708                                 port->socket_id = port_numa[pid];
709                         else {
710                                 port->socket_id = rte_eth_dev_socket_id(pid);
711
712                                 /* if socket_id is invalid, set to 0 */
713                                 if (check_socket_id(port->socket_id) < 0)
714                                         port->socket_id = 0;
715                         }
716                 }
717                 else {
718                         if (socket_num == UMA_NO_CONFIG)
719                                 port->socket_id = 0;
720                         else
721                                 port->socket_id = socket_num;
722                 }
723         }
724
725         q = RTE_MAX(nb_rxq, nb_txq);
726         if (q == 0) {
727                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
728                 return -1;
729         }
730         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
731         if (nb_fwd_streams_new == nb_fwd_streams)
732                 return 0;
733         /* clear the old */
734         if (fwd_streams != NULL) {
735                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
736                         if (fwd_streams[sm_id] == NULL)
737                                 continue;
738                         rte_free(fwd_streams[sm_id]);
739                         fwd_streams[sm_id] = NULL;
740                 }
741                 rte_free(fwd_streams);
742                 fwd_streams = NULL;
743         }
744
745         /* init new */
746         nb_fwd_streams = nb_fwd_streams_new;
747         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
748                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
749         if (fwd_streams == NULL)
750                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
751                                                 "failed\n", nb_fwd_streams);
752
753         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
754                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
755                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
756                 if (fwd_streams[sm_id] == NULL)
757                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
758                                                                 " failed\n");
759         }
760
761         return 0;
762 }
763
764 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
765 static void
766 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
767 {
768         unsigned int total_burst;
769         unsigned int nb_burst;
770         unsigned int burst_stats[3];
771         uint16_t pktnb_stats[3];
772         uint16_t nb_pkt;
773         int burst_percent[3];
774
775         /*
776          * First compute the total number of packet bursts and the
777          * two highest numbers of bursts of the same number of packets.
778          */
779         total_burst = 0;
780         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
781         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
782         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
783                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
784                 if (nb_burst == 0)
785                         continue;
786                 total_burst += nb_burst;
787                 if (nb_burst > burst_stats[0]) {
788                         burst_stats[1] = burst_stats[0];
789                         pktnb_stats[1] = pktnb_stats[0];
790                         burst_stats[0] = nb_burst;
791                         pktnb_stats[0] = nb_pkt;
792                 }
793         }
794         if (total_burst == 0)
795                 return;
796         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
797         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
798                burst_percent[0], (int) pktnb_stats[0]);
799         if (burst_stats[0] == total_burst) {
800                 printf("]\n");
801                 return;
802         }
803         if (burst_stats[0] + burst_stats[1] == total_burst) {
804                 printf(" + %d%% of %d pkts]\n",
805                        100 - burst_percent[0], pktnb_stats[1]);
806                 return;
807         }
808         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
809         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
810         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
811                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
812                 return;
813         }
814         printf(" + %d%% of %d pkts + %d%% of others]\n",
815                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
816 }
817 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
818
819 static void
820 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
821 {
822         struct rte_port *port;
823         uint8_t i;
824
825         static const char *fwd_stats_border = "----------------------";
826
827         port = &ports[port_id];
828         printf("\n  %s Forward statistics for port %-2d %s\n",
829                fwd_stats_border, port_id, fwd_stats_border);
830
831         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
832                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
833                        "%-"PRIu64"\n",
834                        stats->ipackets, stats->imissed,
835                        (uint64_t) (stats->ipackets + stats->imissed));
836
837                 if (cur_fwd_eng == &csum_fwd_engine)
838                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
839                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
840                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
841                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
842                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
843                 }
844
845                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
846                        "%-"PRIu64"\n",
847                        stats->opackets, port->tx_dropped,
848                        (uint64_t) (stats->opackets + port->tx_dropped));
849         }
850         else {
851                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
852                        "%14"PRIu64"\n",
853                        stats->ipackets, stats->imissed,
854                        (uint64_t) (stats->ipackets + stats->imissed));
855
856                 if (cur_fwd_eng == &csum_fwd_engine)
857                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
858                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
859                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
860                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
861                         printf("  RX-nombufs:             %14"PRIu64"\n",
862                                stats->rx_nombuf);
863                 }
864
865                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
866                        "%14"PRIu64"\n",
867                        stats->opackets, port->tx_dropped,
868                        (uint64_t) (stats->opackets + port->tx_dropped));
869         }
870
871 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
872         if (port->rx_stream)
873                 pkt_burst_stats_display("RX",
874                         &port->rx_stream->rx_burst_stats);
875         if (port->tx_stream)
876                 pkt_burst_stats_display("TX",
877                         &port->tx_stream->tx_burst_stats);
878 #endif
879
880         if (port->rx_queue_stats_mapping_enabled) {
881                 printf("\n");
882                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
883                         printf("  Stats reg %2d RX-packets:%14"PRIu64
884                                "     RX-errors:%14"PRIu64
885                                "    RX-bytes:%14"PRIu64"\n",
886                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
887                 }
888                 printf("\n");
889         }
890         if (port->tx_queue_stats_mapping_enabled) {
891                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
892                         printf("  Stats reg %2d TX-packets:%14"PRIu64
893                                "                                 TX-bytes:%14"PRIu64"\n",
894                                i, stats->q_opackets[i], stats->q_obytes[i]);
895                 }
896         }
897
898         printf("  %s--------------------------------%s\n",
899                fwd_stats_border, fwd_stats_border);
900 }
901
902 static void
903 fwd_stream_stats_display(streamid_t stream_id)
904 {
905         struct fwd_stream *fs;
906         static const char *fwd_top_stats_border = "-------";
907
908         fs = fwd_streams[stream_id];
909         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
910             (fs->fwd_dropped == 0))
911                 return;
912         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
913                "TX Port=%2d/Queue=%2d %s\n",
914                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
915                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
916         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
917                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
918
919         /* if checksum mode */
920         if (cur_fwd_eng == &csum_fwd_engine) {
921                printf("  RX-bad IP checksum: %-14u  RX-bad L4 checksum: "
922                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
923         }
924
925 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
926         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
927         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
928 #endif
929 }
930
931 static void
932 flush_fwd_rx_queues(void)
933 {
934         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
935         portid_t  rxp;
936         portid_t port_id;
937         queueid_t rxq;
938         uint16_t  nb_rx;
939         uint16_t  i;
940         uint8_t   j;
941         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
942         uint64_t timer_period;
943
944         /* convert to number of cycles */
945         timer_period = rte_get_timer_hz(); /* 1 second timeout */
946
947         for (j = 0; j < 2; j++) {
948                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
949                         for (rxq = 0; rxq < nb_rxq; rxq++) {
950                                 port_id = fwd_ports_ids[rxp];
951                                 /**
952                                 * testpmd can get stuck in the do-while loop below
953                                 * if rte_eth_rx_burst() always returns nonzero
954                                 * packets, so a timer is added to exit the loop
955                                 * after the 1-second timeout expires.
956                                 */
957                                 prev_tsc = rte_rdtsc();
958                                 do {
959                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
960                                                 pkts_burst, MAX_PKT_BURST);
961                                         for (i = 0; i < nb_rx; i++)
962                                                 rte_pktmbuf_free(pkts_burst[i]);
963
964                                         cur_tsc = rte_rdtsc();
965                                         diff_tsc = cur_tsc - prev_tsc;
966                                         timer_tsc += diff_tsc;
967                                 } while ((nb_rx > 0) &&
968                                         (timer_tsc < timer_period));
969                                 timer_tsc = 0;
970                         }
971                 }
972                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
973         }
974 }
975
976 static void
977 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
978 {
979         struct fwd_stream **fsm;
980         streamid_t nb_fs;
981         streamid_t sm_id;
982 #ifdef RTE_LIBRTE_BITRATE
983         uint64_t tics_per_1sec;
984         uint64_t tics_datum;
985         uint64_t tics_current;
986         uint8_t idx_port, cnt_ports;
987
988         cnt_ports = rte_eth_dev_count();
989         tics_datum = rte_rdtsc();
990         tics_per_1sec = rte_get_timer_hz();
991 #endif
992         fsm = &fwd_streams[fc->stream_idx];
993         nb_fs = fc->stream_nb;
994         do {
995                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
996                         (*pkt_fwd)(fsm[sm_id]);
997 #ifdef RTE_LIBRTE_BITRATE
998                 if (bitrate_enabled != 0 &&
999                                 bitrate_lcore_id == rte_lcore_id()) {
1000                         tics_current = rte_rdtsc();
1001                         if (tics_current - tics_datum >= tics_per_1sec) {
1002                                 /* Periodic bitrate calculation */
1003                                 for (idx_port = 0;
1004                                                 idx_port < cnt_ports;
1005                                                 idx_port++)
1006                                         rte_stats_bitrate_calc(bitrate_data,
1007                                                 idx_port);
1008                                 tics_datum = tics_current;
1009                         }
1010                 }
1011 #endif
1012 #ifdef RTE_LIBRTE_LATENCY_STATS
1013                 if (latencystats_enabled != 0 &&
1014                                 latencystats_lcore_id == rte_lcore_id())
1015                         rte_latencystats_update();
1016 #endif
1017
1018         } while (! fc->stopped);
1019 }
1020
1021 static int
1022 start_pkt_forward_on_core(void *fwd_arg)
1023 {
1024         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
1025                              cur_fwd_config.fwd_eng->packet_fwd);
1026         return 0;
1027 }
1028
1029 /*
1030  * Run the TXONLY packet forwarding engine to send a single burst of packets.
1031  * Used to start communication flows in network loopback test configurations.
1032  */
1033 static int
1034 run_one_txonly_burst_on_core(void *fwd_arg)
1035 {
1036         struct fwd_lcore *fwd_lc;
1037         struct fwd_lcore tmp_lcore;
1038
1039         fwd_lc = (struct fwd_lcore *) fwd_arg;
1040         tmp_lcore = *fwd_lc;
1041         tmp_lcore.stopped = 1;
1042         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
1043         return 0;
1044 }
1045
1046 /*
1047  * Launch packet forwarding:
1048  *     - Setup per-port forwarding context.
1049  *     - launch logical cores with their forwarding configuration.
1050  */
1051 static void
1052 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
1053 {
1054         port_fwd_begin_t port_fwd_begin;
1055         unsigned int i;
1056         unsigned int lc_id;
1057         int diag;
1058
1059         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
1060         if (port_fwd_begin != NULL) {
1061                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1062                         (*port_fwd_begin)(fwd_ports_ids[i]);
1063         }
1064         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
1065                 lc_id = fwd_lcores_cpuids[i];
1066                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
1067                         fwd_lcores[i]->stopped = 0;
1068                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
1069                                                      fwd_lcores[i], lc_id);
1070                         if (diag != 0)
1071                                 printf("launch lcore %u failed - diag=%d\n",
1072                                        lc_id, diag);
1073                 }
1074         }
1075 }
1076
1077 /*
1078  * Launch packet forwarding configuration.
1079  */
1080 void
1081 start_packet_forwarding(int with_tx_first)
1082 {
1083         port_fwd_begin_t port_fwd_begin;
1084         port_fwd_end_t  port_fwd_end;
1085         struct rte_port *port;
1086         unsigned int i;
1087         portid_t   pt_id;
1088         streamid_t sm_id;
1089
1090         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1091                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
1092
1093         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1094                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
1095
1096         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1097                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1098                 (!nb_rxq || !nb_txq))
1099                 rte_exit(EXIT_FAILURE,
1100                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
1101                         cur_fwd_eng->fwd_mode_name);
1102
1103         if (all_ports_started() == 0) {
1104                 printf("Not all ports were started\n");
1105                 return;
1106         }
1107         if (test_done == 0) {
1108                 printf("Packet forwarding already started\n");
1109                 return;
1110         }
1111
1112         if (init_fwd_streams() < 0) {
1113                 printf("Fail from init_fwd_streams()\n");
1114                 return;
1115         }
1116
1117         if(dcb_test) {
1118                 for (i = 0; i < nb_fwd_ports; i++) {
1119                         pt_id = fwd_ports_ids[i];
1120                         port = &ports[pt_id];
1121                         if (!port->dcb_flag) {
1122                                 printf("In DCB mode, all forwarding ports must "
1123                                        "be configured in this mode.\n");
1124                                 return;
1125                         }
1126                 }
1127                 if (nb_fwd_lcores == 1) {
1128                         printf("In DCB mode, the number of forwarding cores "
1129                                "should be larger than 1.\n");
1130                         return;
1131                 }
1132         }
1133         test_done = 0;
1134
1135         if(!no_flush_rx)
1136                 flush_fwd_rx_queues();
1137
1138         fwd_config_setup();
1139         pkt_fwd_config_display(&cur_fwd_config);
1140         rxtx_config_display();
1141
1142         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1143                 pt_id = fwd_ports_ids[i];
1144                 port = &ports[pt_id];
1145                 rte_eth_stats_get(pt_id, &port->stats);
1146                 port->tx_dropped = 0;
1147
1148                 map_port_queue_stats_mapping_registers(pt_id, port);
1149         }
1150         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1151                 fwd_streams[sm_id]->rx_packets = 0;
1152                 fwd_streams[sm_id]->tx_packets = 0;
1153                 fwd_streams[sm_id]->fwd_dropped = 0;
1154                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1155                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1156
1157 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1158                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1159                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1160                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1161                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1162 #endif
1163 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1164                 fwd_streams[sm_id]->core_cycles = 0;
1165 #endif
1166         }
1167         if (with_tx_first) {
1168                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1169                 if (port_fwd_begin != NULL) {
1170                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1171                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1172                 }
1173                 while (with_tx_first--) {
1174                         launch_packet_forwarding(
1175                                         run_one_txonly_burst_on_core);
1176                         rte_eal_mp_wait_lcore();
1177                 }
1178                 port_fwd_end = tx_only_engine.port_fwd_end;
1179                 if (port_fwd_end != NULL) {
1180                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1181                                 (*port_fwd_end)(fwd_ports_ids[i]);
1182                 }
1183         }
1184         launch_packet_forwarding(start_pkt_forward_on_core);
1185 }
1186
1187 void
1188 stop_packet_forwarding(void)
1189 {
1190         struct rte_eth_stats stats;
1191         struct rte_port *port;
1192         port_fwd_end_t  port_fwd_end;
1193         int i;
1194         portid_t   pt_id;
1195         streamid_t sm_id;
1196         lcoreid_t  lc_id;
1197         uint64_t total_recv;
1198         uint64_t total_xmit;
1199         uint64_t total_rx_dropped;
1200         uint64_t total_tx_dropped;
1201         uint64_t total_rx_nombuf;
1202         uint64_t tx_dropped;
1203         uint64_t rx_bad_ip_csum;
1204         uint64_t rx_bad_l4_csum;
1205 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1206         uint64_t fwd_cycles;
1207 #endif
1208         static const char *acc_stats_border = "+++++++++++++++";
1209
1210         if (test_done) {
1211                 printf("Packet forwarding not started\n");
1212                 return;
1213         }
1214         printf("Telling cores to stop...");
1215         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1216                 fwd_lcores[lc_id]->stopped = 1;
1217         printf("\nWaiting for lcores to finish...\n");
1218         rte_eal_mp_wait_lcore();
1219         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1220         if (port_fwd_end != NULL) {
1221                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1222                         pt_id = fwd_ports_ids[i];
1223                         (*port_fwd_end)(pt_id);
1224                 }
1225         }
1226 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1227         fwd_cycles = 0;
1228 #endif
1229         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1230                 if (cur_fwd_config.nb_fwd_streams >
1231                     cur_fwd_config.nb_fwd_ports) {
1232                         fwd_stream_stats_display(sm_id);
1233                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1234                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1235                 } else {
1236                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1237                                 fwd_streams[sm_id];
1238                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1239                                 fwd_streams[sm_id];
1240                 }
1241                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1242                 tx_dropped = (uint64_t) (tx_dropped +
1243                                          fwd_streams[sm_id]->fwd_dropped);
1244                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1245
1246                 rx_bad_ip_csum =
1247                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1248                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1249                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1250                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1251                                                         rx_bad_ip_csum;
1252
1253                 rx_bad_l4_csum =
1254                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1255                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1256                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1257                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1258                                                         rx_bad_l4_csum;
1259
1260 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1261                 fwd_cycles = (uint64_t) (fwd_cycles +
1262                                          fwd_streams[sm_id]->core_cycles);
1263 #endif
1264         }
1265         total_recv = 0;
1266         total_xmit = 0;
1267         total_rx_dropped = 0;
1268         total_tx_dropped = 0;
1269         total_rx_nombuf  = 0;
1270         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1271                 pt_id = fwd_ports_ids[i];
1272
1273                 port = &ports[pt_id];
1274                 rte_eth_stats_get(pt_id, &stats);
1275                 stats.ipackets -= port->stats.ipackets;
1276                 port->stats.ipackets = 0;
1277                 stats.opackets -= port->stats.opackets;
1278                 port->stats.opackets = 0;
1279                 stats.ibytes   -= port->stats.ibytes;
1280                 port->stats.ibytes = 0;
1281                 stats.obytes   -= port->stats.obytes;
1282                 port->stats.obytes = 0;
1283                 stats.imissed  -= port->stats.imissed;
1284                 port->stats.imissed = 0;
1285                 stats.oerrors  -= port->stats.oerrors;
1286                 port->stats.oerrors = 0;
1287                 stats.rx_nombuf -= port->stats.rx_nombuf;
1288                 port->stats.rx_nombuf = 0;
1289
1290                 total_recv += stats.ipackets;
1291                 total_xmit += stats.opackets;
1292                 total_rx_dropped += stats.imissed;
1293                 total_tx_dropped += port->tx_dropped;
1294                 total_rx_nombuf  += stats.rx_nombuf;
1295
1296                 fwd_port_stats_display(pt_id, &stats);
1297         }
1298         printf("\n  %s Accumulated forward statistics for all ports"
1299                "%s\n",
1300                acc_stats_border, acc_stats_border);
1301         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1302                "%-"PRIu64"\n"
1303                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1304                "%-"PRIu64"\n",
1305                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1306                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1307         if (total_rx_nombuf > 0)
1308                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1309         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1310                "%s\n",
1311                acc_stats_border, acc_stats_border);
1312 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1313         if (total_recv > 0)
1314                 printf("\n  CPU cycles/packet=%u (total cycles="
1315                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1316                        (unsigned int)(fwd_cycles / total_recv),
1317                        fwd_cycles, total_recv);
1318 #endif
1319         printf("\nDone.\n");
1320         test_done = 1;
1321 }
1322
1323 void
1324 dev_set_link_up(portid_t pid)
1325 {
1326         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1327                 printf("\nSet link up fail.\n");
1328 }
1329
1330 void
1331 dev_set_link_down(portid_t pid)
1332 {
1333         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1334                 printf("\nSet link down fail.\n");
1335 }
1336
1337 static int
1338 all_ports_started(void)
1339 {
1340         portid_t pi;
1341         struct rte_port *port;
1342
1343         RTE_ETH_FOREACH_DEV(pi) {
1344                 port = &ports[pi];
1345                 /* Check if there is a port which is not started */
1346                 if ((port->port_status != RTE_PORT_STARTED) &&
1347                         (port->slave_flag == 0))
1348                         return 0;
1349         }
1350
1351         /* All ports are started */
1352         return 1;
1353 }
1354
1355 int
1356 all_ports_stopped(void)
1357 {
1358         portid_t pi;
1359         struct rte_port *port;
1360
1361         RTE_ETH_FOREACH_DEV(pi) {
1362                 port = &ports[pi];
1363                 if ((port->port_status != RTE_PORT_STOPPED) &&
1364                         (port->slave_flag == 0))
1365                         return 0;
1366         }
1367
1368         return 1;
1369 }
1370
1371 int
1372 port_is_started(portid_t port_id)
1373 {
1374         if (port_id_is_invalid(port_id, ENABLED_WARN))
1375                 return 0;
1376
1377         if (ports[port_id].port_status != RTE_PORT_STARTED)
1378                 return 0;
1379
1380         return 1;
1381 }
1382
1383 static int
1384 port_is_closed(portid_t port_id)
1385 {
1386         if (port_id_is_invalid(port_id, ENABLED_WARN))
1387                 return 0;
1388
1389         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1390                 return 0;
1391
1392         return 1;
1393 }
1394
1395 int
1396 start_port(portid_t pid)
1397 {
1398         int diag, need_check_link_status = -1;
1399         portid_t pi;
1400         queueid_t qi;
1401         struct rte_port *port;
1402         struct ether_addr mac_addr;
1403         enum rte_eth_event_type event_type;
1404
1405         if (port_id_is_invalid(pid, ENABLED_WARN))
1406                 return 0;
1407
1408         if(dcb_config)
1409                 dcb_test = 1;
1410         RTE_ETH_FOREACH_DEV(pi) {
1411                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1412                         continue;
1413
1414                 need_check_link_status = 0;
1415                 port = &ports[pi];
1416                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1417                                                  RTE_PORT_HANDLING) == 0) {
1418                         printf("Port %d is not stopped\n", pi);
1419                         continue;
1420                 }
1421
1422                 if (port->need_reconfig > 0) {
1423                         port->need_reconfig = 0;
1424
1425                         printf("Configuring Port %d (socket %u)\n", pi,
1426                                         port->socket_id);
1427                         /* configure port */
1428                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1429                                                 &(port->dev_conf));
1430                         if (diag != 0) {
1431                                 if (rte_atomic16_cmpset(&(port->port_status),
1432                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1433                                         printf("Port %d can not be set back "
1434                                                         "to stopped\n", pi);
1435                                 printf("Fail to configure port %d\n", pi);
1436                                 /* try to reconfigure port next time */
1437                                 port->need_reconfig = 1;
1438                                 return -1;
1439                         }
1440                 }
1441                 if (port->need_reconfig_queues > 0) {
1442                         port->need_reconfig_queues = 0;
1443                         /* setup tx queues */
1444                         for (qi = 0; qi < nb_txq; qi++) {
1445                                 if ((numa_support) &&
1446                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1447                                         diag = rte_eth_tx_queue_setup(pi, qi,
1448                                                 nb_txd, txring_numa[pi],
1449                                                 &(port->tx_conf));
1450                                 else
1451                                         diag = rte_eth_tx_queue_setup(pi, qi,
1452                                                 nb_txd, port->socket_id,
1453                                                 &(port->tx_conf));
1454
1455                                 if (diag == 0)
1456                                         continue;
1457
1458                                 /* Failed to set up a TX queue; revert port state and return */
1459                                 if (rte_atomic16_cmpset(&(port->port_status),
1460                                                         RTE_PORT_HANDLING,
1461                                                         RTE_PORT_STOPPED) == 0)
1462                                         printf("Port %d can not be set back "
1463                                                         "to stopped\n", pi);
1464                                 printf("Failed to configure port %d TX queues\n", pi);
1465                                 /* try to reconfigure queues next time */
1466                                 port->need_reconfig_queues = 1;
1467                                 return -1;
1468                         }
1469                         /* setup rx queues */
1470                         for (qi = 0; qi < nb_rxq; qi++) {
1471                                 if ((numa_support) &&
1472                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1473                                         struct rte_mempool *mp =
1474                                                 mbuf_pool_find(rxring_numa[pi]);
1475                                         if (mp == NULL) {
1476                                                 printf("Failed to set up RX queue: "
1477                                                         "no mempool allocated"
1478                                                         " on socket %d\n",
1479                                                         rxring_numa[pi]);
1480                                                 return -1;
1481                                         }
1482
1483                                         diag = rte_eth_rx_queue_setup(pi, qi,
1484                                              nb_rxd, rxring_numa[pi],
1485                                              &(port->rx_conf), mp);
1486                                 } else {
1487                                         struct rte_mempool *mp =
1488                                                 mbuf_pool_find(port->socket_id);
1489                                         if (mp == NULL) {
1490                                                 printf("Failed to set up RX queue: "
1491                                                         "no mempool allocated"
1492                                                         " on socket %d\n",
1493                                                         port->socket_id);
1494                                                 return -1;
1495                                         }
1496                                         diag = rte_eth_rx_queue_setup(pi, qi,
1497                                              nb_rxd, port->socket_id,
1498                                              &(port->rx_conf), mp);
1499                                 }
1500                                 if (diag == 0)
1501                                         continue;
1502
1503                                 /* Failed to set up an RX queue; revert port state and return */
1504                                 if (rte_atomic16_cmpset(&(port->port_status),
1505                                                         RTE_PORT_HANDLING,
1506                                                         RTE_PORT_STOPPED) == 0)
1507                                         printf("Port %d can not be set back "
1508                                                         "to stopped\n", pi);
1509                                 printf("Failed to configure port %d RX queues\n", pi);
1510                                 /* try to reconfigure queues next time */
1511                                 port->need_reconfig_queues = 1;
1512                                 return -1;
1513                         }
1514                 }
1515
1516                 for (event_type = RTE_ETH_EVENT_UNKNOWN;
1517                      event_type < RTE_ETH_EVENT_MAX;
1518                      event_type++) {
1519                         diag = rte_eth_dev_callback_register(pi,
1520                                                         event_type,
1521                                                         eth_event_callback,
1522                                                         NULL);
1523                         if (diag) {
1524                                 printf("Failed to set up event callback for event %d\n",
1525                                         event_type);
1526                                 return -1;
1527                         }
1528                 }
1529
1530                 /* start port */
1531                 if (rte_eth_dev_start(pi) < 0) {
1532                         printf("Failed to start port %d\n", pi);
1533
1534                         /* Failed to start the port; set it back to stopped */
1535                         if (rte_atomic16_cmpset(&(port->port_status),
1536                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1537                                 printf("Port %d can not be set back to "
1538                                                         "stopped\n", pi);
1539                         continue;
1540                 }
1541
1542                 if (rte_atomic16_cmpset(&(port->port_status),
1543                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1544                         printf("Port %d cannot be set to started state\n", pi);
1545
1546                 rte_eth_macaddr_get(pi, &mac_addr);
1547                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1548                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1549                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1550                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1551
1552                 /* at least one port started, need checking link status */
1553                 need_check_link_status = 1;
1554         }
1555
1556         if (need_check_link_status == 1 && !no_link_check)
1557                 check_all_ports_link_status(RTE_PORT_ALL);
1558         else if (need_check_link_status == 0)
1559                 printf("Please stop the ports first\n");
1560
1561         printf("Done\n");
1562         return 0;
1563 }
1564
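/*
 * Stop one port or all ports (RTE_PORT_ALL). Ports that are still part of the
 * forwarding configuration, or that act as bonding slaves, are skipped with a
 * hint to the user. Typical interactive usage (assuming the usual testpmd CLI
 * syntax):
 *   testpmd> port stop all
 */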
1565 void
1566 stop_port(portid_t pid)
1567 {
1568         portid_t pi;
1569         struct rte_port *port;
1570         int need_check_link_status = 0;
1571
1572         if (dcb_test) {
1573                 dcb_test = 0;
1574                 dcb_config = 0;
1575         }
1576
1577         if (port_id_is_invalid(pid, ENABLED_WARN))
1578                 return;
1579
1580         printf("Stopping ports...\n");
1581
1582         RTE_ETH_FOREACH_DEV(pi) {
1583                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1584                         continue;
1585
1586                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1587                         printf("Please remove port %d from forwarding configuration.\n", pi);
1588                         continue;
1589                 }
1590
1591                 if (port_is_bonding_slave(pi)) {
1592                         printf("Please remove port %d from bonded device.\n", pi);
1593                         continue;
1594                 }
1595
1596                 port = &ports[pi];
1597                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1598                                                 RTE_PORT_HANDLING) == 0)
1599                         continue;
1600
1601                 rte_eth_dev_stop(pi);
1602
1603                 if (rte_atomic16_cmpset(&(port->port_status),
1604                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1605                         printf("Port %d cannot be set to stopped state\n", pi);
1606                 need_check_link_status = 1;
1607         }
1608         if (need_check_link_status && !no_link_check)
1609                 check_all_ports_link_status(RTE_PORT_ALL);
1610
1611         printf("Done\n");
1612 }
1613
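/*
 * Close one port or all ports (RTE_PORT_ALL). The port must already be
 * stopped; any flow rules created through rte_flow are flushed before
 * rte_eth_dev_close() is called. Typical interactive usage (assuming the
 * usual testpmd CLI syntax):
 *   testpmd> port stop 0
 *   testpmd> port close 0
 */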
1614 void
1615 close_port(portid_t pid)
1616 {
1617         portid_t pi;
1618         struct rte_port *port;
1619
1620         if (port_id_is_invalid(pid, ENABLED_WARN))
1621                 return;
1622
1623         printf("Closing ports...\n");
1624
1625         RTE_ETH_FOREACH_DEV(pi) {
1626                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1627                         continue;
1628
1629                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1630                         printf("Please remove port %d from forwarding configuration.\n", pi);
1631                         continue;
1632                 }
1633
1634                 if (port_is_bonding_slave(pi)) {
1635                         printf("Please remove port %d from bonded device.\n", pi);
1636                         continue;
1637                 }
1638
1639                 port = &ports[pi];
1640                 if (rte_atomic16_cmpset(&(port->port_status),
1641                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1642                         printf("Port %d is already closed\n", pi);
1643                         continue;
1644                 }
1645
1646                 if (rte_atomic16_cmpset(&(port->port_status),
1647                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1648                         printf("Port %d is not in stopped state\n", pi);
1649                         continue;
1650                 }
1651
1652                 if (port->flow_list)
1653                         port_flow_flush(pi);
1654                 rte_eth_dev_close(pi);
1655
1656                 if (rte_atomic16_cmpset(&(port->port_status),
1657                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1658                         printf("Port %d cannot be set to closed\n", pi);
1659         }
1660
1661         printf("Done\n");
1662 }
1663
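/*
 * Hot-plug a new port described by a devargs identifier, e.g. a PCI address
 * or a virtual device string (the examples below are illustrative, not an
 * exhaustive list of supported formats):
 *   testpmd> port attach 0000:02:00.0
 *   testpmd> port attach net_pcap0,iface=eth0
 * On success the port is reconfigured, promiscuous mode is enabled and it is
 * left in the STOPPED state so it can be started with "port start".
 */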
1664 void
1665 attach_port(char *identifier)
1666 {
1667         portid_t pi = 0;
1668         unsigned int socket_id;
1669
1670         printf("Attaching a new port...\n");
1671
1672         if (identifier == NULL) {
1673                 printf("Invalid parameters are specified\n");
1674                 return;
1675         }
1676
1677         if (rte_eth_dev_attach(identifier, &pi))
1678                 return;
1679
1680         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1681         /* if socket_id is invalid, set to 0 */
1682         if (check_socket_id(socket_id) < 0)
1683                 socket_id = 0;
1684         reconfig(pi, socket_id);
1685         rte_eth_promiscuous_enable(pi);
1686
1687         nb_ports = rte_eth_dev_count();
1688
1689         ports[pi].port_status = RTE_PORT_STOPPED;
1690
1691         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1692         printf("Done\n");
1693 }
1694
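/*
 * Hot-unplug a port. The port must be closed first ("port stop" followed by
 * "port close"); otherwise the request is rejected with a hint. Typical
 * interactive usage (assuming the usual testpmd CLI syntax):
 *   testpmd> port detach 0
 */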
1695 void
1696 detach_port(uint8_t port_id)
1697 {
1698         char name[RTE_ETH_NAME_MAX_LEN];
1699
1700         printf("Detaching a port...\n");
1701
1702         if (!port_is_closed(port_id)) {
1703                 printf("Please close port first\n");
1704                 return;
1705         }
1706
1707         if (ports[port_id].flow_list)
1708                 port_flow_flush(port_id);
1709
1710         if (rte_eth_dev_detach(port_id, name))
1711                 return;
1712
1713         nb_ports = rte_eth_dev_count();
1714
1715         printf("Port '%s' is detached. Total number of ports is now %d\n",
1716                         name, nb_ports);
1717         printf("Done\n");
1718         return;
1719 }
1720
1721 void
1722 pmd_test_exit(void)
1723 {
1724         portid_t pt_id;
1725
1726         if (test_done == 0)
1727                 stop_packet_forwarding();
1728
1729         if (ports != NULL) {
1730                 no_link_check = 1;
1731                 RTE_ETH_FOREACH_DEV(pt_id) {
1732                         printf("\nShutting down port %d...\n", pt_id);
1733                         fflush(stdout);
1734                         stop_port(pt_id);
1735                         close_port(pt_id);
1736                 }
1737         }
1738         printf("\nBye...\n");
1739 }
1740
1741 typedef void (*cmd_func_t)(void);
1742 struct pmd_test_command {
1743         const char *cmd_name;
1744         cmd_func_t cmd_func;
1745 };
1746
1747 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1748
1749 /* Check the link status of all ports, waiting up to 9s, and print the final status */
1750 static void
1751 check_all_ports_link_status(uint32_t port_mask)
1752 {
1753 #define CHECK_INTERVAL 100 /* 100ms */
1754 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1755         uint8_t portid, count, all_ports_up, print_flag = 0;
1756         struct rte_eth_link link;
1757
1758         printf("Checking link statuses...\n");
1759         fflush(stdout);
1760         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1761                 all_ports_up = 1;
1762                 RTE_ETH_FOREACH_DEV(portid) {
1763                         if ((port_mask & (1 << portid)) == 0)
1764                                 continue;
1765                         memset(&link, 0, sizeof(link));
1766                         rte_eth_link_get_nowait(portid, &link);
1767                         /* print link status if flag set */
1768                         if (print_flag == 1) {
1769                                 if (link.link_status)
1770                                         printf("Port %d Link Up - speed %u "
1771                                                 "Mbps - %s\n", (uint8_t)portid,
1772                                                 (unsigned)link.link_speed,
1773                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1774                                         ("full-duplex") : ("half-duplex"));
1775                                 else
1776                                         printf("Port %d Link Down\n",
1777                                                 (uint8_t)portid);
1778                                 continue;
1779                         }
1780                         /* clear all_ports_up flag if any link down */
1781                         if (link.link_status == ETH_LINK_DOWN) {
1782                                 all_ports_up = 0;
1783                                 break;
1784                         }
1785                 }
1786                 /* once all link statuses have been printed, get out */
1787                 if (print_flag == 1)
1788                         break;
1789
1790                 if (all_ports_up == 0) {
1791                         fflush(stdout);
1792                         rte_delay_ms(CHECK_INTERVAL);
1793                 }
1794
1795                 /* set the print_flag if all ports up or timeout */
1796                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1797                         print_flag = 1;
1798                 }
1799
1800                 if (lsc_interrupt)
1801                         break;
1802         }
1803 }
1804
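/*
 * Deferred handler for device-removal (RMV) interrupts. It runs from an EAL
 * alarm scheduled by eth_event_callback() below: the port is stopped and
 * closed, the underlying device name is rebuilt from its devargs (virtual
 * device name or PCI address), and the device is finally detached from EAL.
 */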
1805 static void
1806 rmv_event_callback(void *arg)
1807 {
1808         struct rte_eth_dev *dev;
1809         struct rte_devargs *da;
1810         char name[32] = "";
1811         uint8_t port_id = (intptr_t)arg;
1812
1813         RTE_ETH_VALID_PORTID_OR_RET(port_id);
1814         dev = &rte_eth_devices[port_id];
1815         da = dev->device->devargs;
1816
1817         stop_port(port_id);
1818         close_port(port_id);
1819         if (da->type == RTE_DEVTYPE_VIRTUAL)
1820                 snprintf(name, sizeof(name), "%s", da->virt.drv_name);
1821         else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
1822                 rte_pci_device_name(&da->pci.addr, name, sizeof(name));
1823         printf("removing device %s\n", name);
1824         rte_eal_dev_detach(name);
1825         dev->state = RTE_ETH_DEV_UNUSED;
1826 }
1827
1828 /* This function is used by the interrupt thread */
1829 static void
1830 eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
1831 {
1832         static const char * const event_desc[] = {
1833                 [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
1834                 [RTE_ETH_EVENT_INTR_LSC] = "LSC",
1835                 [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
1836                 [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
1837                 [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
1838                 [RTE_ETH_EVENT_MACSEC] = "MACsec",
1839                 [RTE_ETH_EVENT_INTR_RMV] = "device removal",
1840                 [RTE_ETH_EVENT_MAX] = NULL,
1841         };
1842
1843         RTE_SET_USED(param);
1844
1845         if (type >= RTE_ETH_EVENT_MAX) {
1846                 fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
1847                         port_id, __func__, type);
1848                 fflush(stderr);
1849         } else if (event_print_mask & (UINT32_C(1) << type)) {
1850                 printf("\nPort %" PRIu8 ": %s event\n", port_id,
1851                         event_desc[type]);
1852                 fflush(stdout);
1853         }
1854
1855         switch (type) {
1856         case RTE_ETH_EVENT_INTR_RMV:
1857                 if (rte_eal_alarm_set(100000,
1858                                 rmv_event_callback, (void *)(intptr_t)port_id))
1859                         fprintf(stderr, "Could not set up deferred device removal\n");
1860                 break;
1861         default:
1862                 break;
1863         }
1864 }
1865
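/*
 * Program the per-queue statistics counter mappings requested on the command
 * line (e.g., assuming the usual testpmd option names, via
 * --tx-queue-stats-mapping=(port,queue,counter) and the RX equivalent).
 * Returns 0 on success or the error reported by the ethdev layer; the caller
 * treats -ENOTSUP as "this PMD has no such mapping registers".
 */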
1866 static int
1867 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1868 {
1869         uint16_t i;
1870         int diag;
1871         uint8_t mapping_found = 0;
1872
1873         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1874                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1875                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1876                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1877                                         tx_queue_stats_mappings[i].queue_id,
1878                                         tx_queue_stats_mappings[i].stats_counter_id);
1879                         if (diag != 0)
1880                                 return diag;
1881                         mapping_found = 1;
1882                 }
1883         }
1884         if (mapping_found)
1885                 port->tx_queue_stats_mapping_enabled = 1;
1886         return 0;
1887 }
1888
1889 static int
1890 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1891 {
1892         uint16_t i;
1893         int diag;
1894         uint8_t mapping_found = 0;
1895
1896         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1897                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1898                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1899                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1900                                         rx_queue_stats_mappings[i].queue_id,
1901                                         rx_queue_stats_mappings[i].stats_counter_id);
1902                         if (diag != 0)
1903                                 return diag;
1904                         mapping_found = 1;
1905                 }
1906         }
1907         if (mapping_found)
1908                 port->rx_queue_stats_mapping_enabled = 1;
1909         return 0;
1910 }
1911
1912 static void
1913 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1914 {
1915         int diag = 0;
1916
1917         diag = set_tx_queue_stats_mapping_registers(pi, port);
1918         if (diag != 0) {
1919                 if (diag == -ENOTSUP) {
1920                         port->tx_queue_stats_mapping_enabled = 0;
1921                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1922                 }
1923                 else
1924                         rte_exit(EXIT_FAILURE,
1925                                         "set_tx_queue_stats_mapping_registers "
1926                                         "failed for port id=%d diag=%d\n",
1927                                         pi, diag);
1928         }
1929
1930         diag = set_rx_queue_stats_mapping_registers(pi, port);
1931         if (diag != 0) {
1932                 if (diag == -ENOTSUP) {
1933                         port->rx_queue_stats_mapping_enabled = 0;
1934                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1935                 }
1936                 else
1937                         rte_exit(EXIT_FAILURE,
1938                                         "set_rx_queue_stats_mapping_registers "
1939                                         "failed for port id=%d diag=%d\n",
1940                                         pi, diag);
1941         }
1942 }
1943
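/*
 * Seed the per-port RX/TX queue configuration from the PMD defaults reported
 * in dev_info, then apply any threshold overrides given on the command line
 * (e.g., assuming the usual testpmd option names, --rxfreet, --txfreet,
 * --txrst or the prefetch/host/write-back threshold options).
 * RTE_PMD_PARAM_UNSET marks values the user did not override.
 */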
1944 static void
1945 rxtx_port_config(struct rte_port *port)
1946 {
1947         port->rx_conf = port->dev_info.default_rxconf;
1948         port->tx_conf = port->dev_info.default_txconf;
1949
1950         /* Check if any RX/TX parameters have been passed */
1951         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1952                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1953
1954         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1955                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1956
1957         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1958                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1959
1960         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1961                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1962
1963         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1964                 port->rx_conf.rx_drop_en = rx_drop_en;
1965
1966         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1967                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1968
1969         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1970                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1971
1972         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1973                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1974
1975         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1976                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1977
1978         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1979                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1980
1981         if (txq_flags != RTE_PMD_PARAM_UNSET)
1982                 port->tx_conf.txq_flags = txq_flags;
1983 }
1984
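/*
 * Build the default rte_eth_conf for every port: RSS is enabled only when
 * more than one RX queue is requested, and the link-state-change / removal
 * interrupts are enabled only when the user asked for them and the device
 * advertises the corresponding RTE_ETH_DEV_INTR_* flag.
 */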
1985 void
1986 init_port_config(void)
1987 {
1988         portid_t pid;
1989         struct rte_port *port;
1990
1991         RTE_ETH_FOREACH_DEV(pid) {
1992                 port = &ports[pid];
1993                 port->dev_conf.rxmode = rx_mode;
1994                 port->dev_conf.fdir_conf = fdir_conf;
1995                 if (nb_rxq > 1) {
1996                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1997                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1998                 } else {
1999                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
2000                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
2001                 }
2002
2003                 if (port->dcb_flag == 0) {
2004                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
2005                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
2006                         else
2007                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
2008                 }
2009
2010                 rxtx_port_config(port);
2011
2012                 rte_eth_macaddr_get(pid, &port->eth_addr);
2013
2014                 map_port_queue_stats_mapping_registers(pid, port);
2015 #ifdef RTE_NIC_BYPASS
2016                 rte_eth_dev_bypass_init(pid);
2017 #endif
2018
2019                 if (lsc_interrupt &&
2020                     (rte_eth_devices[pid].data->dev_flags &
2021                      RTE_ETH_DEV_INTR_LSC))
2022                         port->dev_conf.intr_conf.lsc = 1;
2023                 if (rmv_interrupt &&
2024                     (rte_eth_devices[pid].data->dev_flags &
2025                      RTE_ETH_DEV_INTR_RMV))
2026                         port->dev_conf.intr_conf.rmv = 1;
2027         }
2028 }
2029
2030 void set_port_slave_flag(portid_t slave_pid)
2031 {
2032         struct rte_port *port;
2033
2034         port = &ports[slave_pid];
2035         port->slave_flag = 1;
2036 }
2037
2038 void clear_port_slave_flag(portid_t slave_pid)
2039 {
2040         struct rte_port *port;
2041
2042         port = &ports[slave_pid];
2043         port->slave_flag = 0;
2044 }
2045
2046 uint8_t port_is_bonding_slave(portid_t slave_pid)
2047 {
2048         struct rte_port *port;
2049
2050         port = &ports[slave_pid];
2051         return port->slave_flag;
2052 }
2053
2054 const uint16_t vlan_tags[] = {
2055                 0,  1,  2,  3,  4,  5,  6,  7,
2056                 8,  9, 10, 11,  12, 13, 14, 15,
2057                 16, 17, 18, 19, 20, 21, 22, 23,
2058                 24, 25, 26, 27, 28, 29, 30, 31
2059 };
2060
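/*
 * Fill an rte_eth_conf for DCB operation. With VT (VMDq) enabled, the pool
 * count is derived from the TC count (4 TCs -> 32 pools, otherwise 16 pools)
 * and each VLAN tag from vlan_tags[] is mapped to one pool; without VT a
 * plain DCB+RSS configuration is produced. PFC support is advertised only
 * when pfc_en is non-zero.
 */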
2061 static int
2062 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
2063                  enum dcb_mode_enable dcb_mode,
2064                  enum rte_eth_nb_tcs num_tcs,
2065                  uint8_t pfc_en)
2066 {
2067         uint8_t i;
2068
2069         /*
2070          * Builds up the correct configuration for dcb+vt based on the vlan tags array
2071          * given above, and the number of traffic classes available for use.
2072          */
2073         if (dcb_mode == DCB_VT_ENABLED) {
2074                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
2075                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
2076                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
2077                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
2078
2079                 /* VMDQ+DCB RX and TX configurations */
2080                 vmdq_rx_conf->enable_default_pool = 0;
2081                 vmdq_rx_conf->default_pool = 0;
2082                 vmdq_rx_conf->nb_queue_pools =
2083                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2084                 vmdq_tx_conf->nb_queue_pools =
2085                         (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
2086
2087                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
2088                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
2089                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
2090                         vmdq_rx_conf->pool_map[i].pools =
2091                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
2092                 }
2093                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2094                         vmdq_rx_conf->dcb_tc[i] = i;
2095                         vmdq_tx_conf->dcb_tc[i] = i;
2096                 }
2097
2098                 /* set DCB mode of RX and TX of multiple queues */
2099                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
2100                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
2101         } else {
2102                 struct rte_eth_dcb_rx_conf *rx_conf =
2103                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
2104                 struct rte_eth_dcb_tx_conf *tx_conf =
2105                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
2106
2107                 rx_conf->nb_tcs = num_tcs;
2108                 tx_conf->nb_tcs = num_tcs;
2109
2110                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
2111                         rx_conf->dcb_tc[i] = i % num_tcs;
2112                         tx_conf->dcb_tc[i] = i % num_tcs;
2113                 }
2114                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
2115                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
2116                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
2117         }
2118
2119         if (pfc_en)
2120                 eth_conf->dcb_capability_en =
2121                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
2122         else
2123                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
2124
2125         return 0;
2126 }
2127
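/*
 * Switch one port into DCB mode: build the DCB configuration, write it to the
 * device with zero queues, then derive nb_rxq/nb_txq from the device limits
 * and re-apply the RX/TX queue configuration and VLAN filters. Reached from
 * the interactive prompt via a command of roughly this form (assuming the
 * usual testpmd CLI syntax):
 *   testpmd> port config 0 dcb vt off 4 pfc on
 */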
2128 int
2129 init_port_dcb_config(portid_t pid,
2130                      enum dcb_mode_enable dcb_mode,
2131                      enum rte_eth_nb_tcs num_tcs,
2132                      uint8_t pfc_en)
2133 {
2134         struct rte_eth_conf port_conf;
2135         struct rte_port *rte_port;
2136         int retval;
2137         uint16_t i;
2138
2139         rte_port = &ports[pid];
2140
2141         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2142         /* Enter DCB configuration status */
2143         dcb_config = 1;
2144
2145         /* Set configuration of DCB in VT mode and DCB in non-VT mode */
2146         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2147         if (retval < 0)
2148                 return retval;
2149         port_conf.rxmode.hw_vlan_filter = 1;
2150
2151         /**
2152          * Write the configuration into the device.
2153          * Set the numbers of RX & TX queues to 0, so
2154          * the RX & TX queues will not be set up.
2155          */
2156         (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2157
2158         rte_eth_dev_info_get(pid, &rte_port->dev_info);
2159
2160         /* If dev_info.vmdq_pool_base is greater than 0,
2161          * the queue IDs of the VMDq pools start after the PF queues.
2162          */
2163         if (dcb_mode == DCB_VT_ENABLED &&
2164             rte_port->dev_info.vmdq_pool_base > 0) {
2165                 printf("VMDQ_DCB multi-queue mode is nonsensical"
2166                         " for port %d.\n", pid);
2167                 return -1;
2168         }
2169
2170         /* Assume the ports in testpmd have the same DCB capability
2171          * and the same number of rxq and txq in DCB mode.
2172          */
2173         if (dcb_mode == DCB_VT_ENABLED) {
2174                 if (rte_port->dev_info.max_vfs > 0) {
2175                         nb_rxq = rte_port->dev_info.nb_rx_queues;
2176                         nb_txq = rte_port->dev_info.nb_tx_queues;
2177                 } else {
2178                         nb_rxq = rte_port->dev_info.max_rx_queues;
2179                         nb_txq = rte_port->dev_info.max_tx_queues;
2180                 }
2181         } else {
2182                 /* if VT is disabled, use all PF queues */
2183                 if (rte_port->dev_info.vmdq_pool_base == 0) {
2184                         nb_rxq = rte_port->dev_info.max_rx_queues;
2185                         nb_txq = rte_port->dev_info.max_tx_queues;
2186                 } else {
2187                         nb_rxq = (queueid_t)num_tcs;
2188                         nb_txq = (queueid_t)num_tcs;
2189
2190                 }
2191         }
2192         rx_free_thresh = 64;
2193
2194         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2195
2196         rxtx_port_config(rte_port);
2197         /* VLAN filter */
2198         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2199         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2200                 rx_vft_set(pid, vlan_tags[i], 1);
2201
2202         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2203         map_port_queue_stats_mapping_registers(pid, rte_port);
2204
2205         rte_port->dcb_flag = 1;
2206
2207         return 0;
2208 }
2209
2210 static void
2211 init_port(void)
2212 {
2213         /* Configuration of Ethernet ports. */
2214         ports = rte_zmalloc("testpmd: ports",
2215                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2216                             RTE_CACHE_LINE_SIZE);
2217         if (ports == NULL) {
2218                 rte_exit(EXIT_FAILURE,
2219                                 "rte_zmalloc(%d struct rte_port) failed\n",
2220                                 RTE_MAX_ETHPORTS);
2221         }
2222 }
2223
2224 static void
2225 force_quit(void)
2226 {
2227         pmd_test_exit();
2228         prompt_exit();
2229 }
2230
2231 static void
2232 signal_handler(int signum)
2233 {
2234         if (signum == SIGINT || signum == SIGTERM) {
2235                 printf("\nSignal %d received, preparing to exit...\n",
2236                                 signum);
2237 #ifdef RTE_LIBRTE_PDUMP
2238                 /* uninitialize packet capture framework */
2239                 rte_pdump_uninit();
2240 #endif
2241 #ifdef RTE_LIBRTE_LATENCY_STATS
2242                 rte_latencystats_uninit();
2243 #endif
2244                 force_quit();
2245                 /* exit with the expected status */
2246                 signal(signum, SIG_DFL);
2247                 kill(getpid(), signum);
2248         }
2249 }
2250
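/*
 * Entry point: initialize EAL (and optionally the pdump framework), allocate
 * the port array, parse the testpmd-specific arguments, configure and start
 * all ports, then either drop into the interactive prompt or start forwarding
 * until the user presses enter. A typical invocation looks roughly like this
 * (the options shown are illustrative):
 *   ./testpmd -l 0-3 -n 4 -- -i --rxq=2 --txq=2
 */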
2251 int
2252 main(int argc, char** argv)
2253 {
2254         int  diag;
2255         uint8_t port_id;
2256
2257         signal(SIGINT, signal_handler);
2258         signal(SIGTERM, signal_handler);
2259
2260         diag = rte_eal_init(argc, argv);
2261         if (diag < 0)
2262                 rte_panic("Cannot init EAL\n");
2263
2264 #ifdef RTE_LIBRTE_PDUMP
2265         /* initialize packet capture framework */
2266         rte_pdump_init(NULL);
2267 #endif
2268
2269         nb_ports = (portid_t) rte_eth_dev_count();
2270         if (nb_ports == 0)
2271                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2272
2273         /* allocate port structures, and init them */
2274         init_port();
2275
2276         set_def_fwd_config();
2277         if (nb_lcores == 0)
2278                 rte_panic("Empty set of forwarding logical cores - check the "
2279                           "core mask supplied in the command parameters\n");
2280
2281         /* Bitrate/latency stats disabled by default */
2282 #ifdef RTE_LIBRTE_BITRATE
2283         bitrate_enabled = 0;
2284 #endif
2285 #ifdef RTE_LIBRTE_LATENCY_STATS
2286         latencystats_enabled = 0;
2287 #endif
2288
2289         argc -= diag;
2290         argv += diag;
2291         if (argc > 1)
2292                 launch_args_parse(argc, argv);
2293
2294         if (!nb_rxq && !nb_txq)
2295                 printf("Warning: Either rx or tx queues should be non-zero\n");
2296
2297         if (nb_rxq > 1 && nb_rxq > nb_txq)
2298                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2299                        "but nb_txq=%d will prevent it from being fully tested.\n",
2300                        nb_rxq, nb_txq);
2301
2302         init_config();
2303         if (start_port(RTE_PORT_ALL) != 0)
2304                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2305
2306         /* set all ports to promiscuous mode by default */
2307         RTE_ETH_FOREACH_DEV(port_id)
2308                 rte_eth_promiscuous_enable(port_id);
2309
2310         /* Init metrics library */
2311         rte_metrics_init(rte_socket_id());
2312
2313 #ifdef RTE_LIBRTE_LATENCY_STATS
2314         if (latencystats_enabled != 0) {
2315                 int ret = rte_latencystats_init(1, NULL);
2316                 if (ret)
2317                         printf("Warning: latencystats init()"
2318                                 " returned error %d\n", ret);
2319                 printf("Latencystats running on lcore %d\n",
2320                         latencystats_lcore_id);
2321         }
2322 #endif
2323
2324         /* Setup bitrate stats */
2325 #ifdef RTE_LIBRTE_BITRATE
2326         if (bitrate_enabled != 0) {
2327                 bitrate_data = rte_stats_bitrate_create();
2328                 if (bitrate_data == NULL)
2329                         rte_exit(EXIT_FAILURE,
2330                                 "Could not allocate bitrate data.\n");
2331                 rte_stats_bitrate_reg(bitrate_data);
2332         }
2333 #endif
2334
2335 #ifdef RTE_LIBRTE_CMDLINE
2336         if (strlen(cmdline_filename) != 0)
2337                 cmdline_read_from_file(cmdline_filename);
2338
2339         if (interactive == 1) {
2340                 if (auto_start) {
2341                         printf("Start automatic packet forwarding\n");
2342                         start_packet_forwarding(0);
2343                 }
2344                 prompt();
2345                 pmd_test_exit();
2346         } else
2347 #endif
2348         {
2349                 char c;
2350                 int rc;
2351
2352                 printf("No interactive command line requested, starting packet forwarding\n");
2353                 start_packet_forwarding(0);
2354                 printf("Press enter to exit\n");
2355                 rc = read(0, &c, 1);
2356                 pmd_test_exit();
2357                 if (rc < 0)
2358                         return 1;
2359         }
2360
2361         return 0;
2362 }