deb_dpdk.git: app/test-pmd/testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
80 #endif
81
82 #include "testpmd.h"
83
84 uint16_t verbose_level = 0; /**< Silent by default. */
85
86 /* Use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
89
90 /*
91  * NUMA support configuration.
92  * When set, the NUMA support attempts to dispatch the allocation of the
93  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
94  * probed ports among the CPU sockets 0 and 1.
95  * Otherwise, all memory is allocated from CPU socket 0.
96  */
97 uint8_t numa_support = 0; /**< No numa support by default */
98
99 /*
100  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
101  * not configured.
102  */
103 uint8_t socket_num = UMA_NO_CONFIG;
104
105 /*
106  * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
107  */
108 uint8_t mp_anon = 0;
109
110 /*
111  * Record the Ethernet address of peer target ports to which packets are
112  * forwarded.
113  * Must be instantiated with the Ethernet addresses of peer traffic generator
114  * ports.
115  */
116 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
117 portid_t nb_peer_eth_addrs = 0;
118
119 /*
120  * Probed Target Environment.
121  */
122 struct rte_port *ports;        /**< For all probed ethernet ports. */
123 portid_t nb_ports;             /**< Number of probed ethernet ports. */
124 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
125 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
126
127 /*
128  * Test Forwarding Configuration.
129  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
130  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
131  */
132 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
133 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
134 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
135 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
136
137 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
138 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
139
140 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
141 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
142
143 /*
144  * Forwarding engines.
145  */
146 struct fwd_engine * fwd_engines[] = {
147         &io_fwd_engine,
148         &mac_fwd_engine,
149         &mac_swap_engine,
150         &flow_gen_engine,
151         &rx_only_engine,
152         &tx_only_engine,
153         &csum_fwd_engine,
154         &icmp_echo_engine,
155 #ifdef RTE_LIBRTE_IEEE1588
156         &ieee1588_fwd_engine,
157 #endif
158         NULL,
159 };
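
/*
 * Illustrative sketch, not part of the original file: how an engine can be
 * looked up by name in the NULL-terminated fwd_engines[] table above. The
 * "set fwd <mode>" command performs a scan of this kind; the helper name
 * below is hypothetical.
 */
static struct fwd_engine *
lookup_fwd_engine_sketch(const char *mode_name)
{
        unsigned int i;

        for (i = 0; fwd_engines[i] != NULL; i++) {
                if (strcmp(fwd_engines[i]->fwd_mode_name, mode_name) == 0)
                        return fwd_engines[i]; /* e.g. "io", "mac", "csum" */
        }
        return NULL; /* unknown forwarding mode */
}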
160
161 struct fwd_config cur_fwd_config;
162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint32_t retry_enabled;
164 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
165 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
166
167 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
168 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
169                                       * specified on command-line. */
170
171 /*
172  * Configuration of packet segments used by the "txonly" processing engine.
173  */
174 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
175 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
176         TXONLY_DEF_PACKET_LEN,
177 };
178 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
179
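/*
 * Illustrative sketch (assumption, not in the original file): the total
 * TXONLY packet length is kept equal to the sum of the per-segment lengths
 * configured with "set txpkts <len0[,len1]*>". The helper below is
 * hypothetical and only restates that relationship.
 */
static inline uint16_t
txonly_total_pkt_len_sketch(void)
{
        uint32_t total = 0;
        uint8_t i;

        for (i = 0; i < tx_pkt_nb_segs; i++)
                total += tx_pkt_seg_lengths[i];
        return (uint16_t) total; /* matches tx_pkt_length after "set txpkts" */
}
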
180 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
181 /**< Split policy for packets to TX. */
182
183 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
184 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
185
186 /* Whether the current configuration is in DCB mode; 0 means it is not */
187 uint8_t dcb_config = 0;
188
189 /* Whether DCB is in testing status */
190 uint8_t dcb_test = 0;
191
192 /*
193  * Configurable number of RX/TX queues.
194  */
195 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
196 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
197
198 /*
199  * Configurable number of RX/TX ring descriptors.
200  */
201 #define RTE_TEST_RX_DESC_DEFAULT 128
202 #define RTE_TEST_TX_DESC_DEFAULT 512
203 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
204 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
205
206 #define RTE_PMD_PARAM_UNSET -1
207 /*
208  * Configurable values of RX and TX ring threshold registers.
209  */
210
211 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
214
215 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
216 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
218
219 /*
220  * Configurable value of RX free threshold.
221  */
222 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
223
224 /*
225  * Configurable value of RX drop enable.
226  */
227 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
228
229 /*
230  * Configurable value of TX free threshold.
231  */
232 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
233
234 /*
235  * Configurable value of TX RS bit threshold.
236  */
237 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
238
239 /*
240  * Configurable value of TX queue flags.
241  */
242 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
243
244 /*
245  * Receive Side Scaling (RSS) configuration.
246  */
247 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
248
249 /*
250  * Port topology configuration
251  */
252 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
253
254 /*
255  * Avoid flushing all the RX streams before starting forwarding.
256  */
257 uint8_t no_flush_rx = 0; /* flush by default */
258
259 /*
260  * Avoid checking the link status when starting/stopping a port.
261  */
262 uint8_t no_link_check = 0; /* check by default */
263
264 /*
265  * NIC bypass mode configuration options.
266  */
267 #ifdef RTE_NIC_BYPASS
268
269 /* The NIC bypass watchdog timeout. */
270 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271
272 #endif
273
274 /*
275  * Ethernet device configuration.
276  */
277 struct rte_eth_rxmode rx_mode = {
278         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
279         .split_hdr_size = 0,
280         .header_split   = 0, /**< Header Split disabled. */
281         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
282         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
283         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
284         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
285         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
286         .hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
287 };
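
/*
 * Illustrative sketch (assumption, not in the original file): runtime
 * commands that change RX settings only need to patch this template before
 * the ports are (re)configured. For example, enabling jumbo frames could
 * look like the hypothetical helper below; the new values take effect the
 * next time rte_eth_dev_configure() is called for a port.
 */
static inline void
enable_jumbo_frames_sketch(uint32_t max_rx_pkt_len)
{
        rx_mode.jumbo_frame = 1;
        rx_mode.max_rx_pkt_len = max_rx_pkt_len;
}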
288
289 struct rte_fdir_conf fdir_conf = {
290         .mode = RTE_FDIR_MODE_NONE,
291         .pballoc = RTE_FDIR_PBALLOC_64K,
292         .status = RTE_FDIR_REPORT_STATUS,
293         .mask = {
294                 .vlan_tci_mask = 0x0,
295                 .ipv4_mask     = {
296                         .src_ip = 0xFFFFFFFF,
297                         .dst_ip = 0xFFFFFFFF,
298                 },
299                 .ipv6_mask     = {
300                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
301                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
302                 },
303                 .src_port_mask = 0xFFFF,
304                 .dst_port_mask = 0xFFFF,
305                 .mac_addr_byte_mask = 0xFF,
306                 .tunnel_type_mask = 1,
307                 .tunnel_id_mask = 0xFFFFFFFF,
308         },
309         .drop_queue = 127,
310 };
311
312 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
313
314 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
315 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
316
317 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
318 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
319
320 uint16_t nb_tx_queue_stats_mappings = 0;
321 uint16_t nb_rx_queue_stats_mappings = 0;
322
323 unsigned max_socket = 0;
324
325 /* Forward function declarations */
326 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
327 static void check_all_ports_link_status(uint32_t port_mask);
328
329 /*
330  * Check if all the ports are started.
331  * If yes, return positive value. If not, return zero.
332  */
333 static int all_ports_started(void);
334
335 /*
336  * Find next enabled port
337  */
338 portid_t
339 find_next_port(portid_t p, struct rte_port *ports, int size)
340 {
341         if (ports == NULL)
342                 rte_exit(-EINVAL, "failed to find a next port id\n");
343
344         while ((p < size) && (ports[p].enabled == 0))
345                 p++;
346         return p;
347 }
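
/*
 * Illustrative sketch, not part of the original file: the FOREACH_PORT()
 * macro used throughout this file (declared in testpmd.h) presumably
 * expands to a loop of roughly this shape, which makes the
 * find_next_port() contract explicit.
 */
static void
iterate_enabled_ports_sketch(void)
{
        portid_t p;

        for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS);
             p < RTE_MAX_ETHPORTS;
             p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
                printf("port %u is enabled\n", (unsigned int) p);
}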
348
349 /*
350  * Setup default configuration.
351  */
352 static void
353 set_default_fwd_lcores_config(void)
354 {
355         unsigned int i;
356         unsigned int nb_lc;
357         unsigned int sock_num;
358
359         nb_lc = 0;
360         for (i = 0; i < RTE_MAX_LCORE; i++) {
361                 sock_num = rte_lcore_to_socket_id(i) + 1;
362                 if (sock_num > max_socket) {
363                         if (sock_num > RTE_MAX_NUMA_NODES)
364                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
365                         max_socket = sock_num;
366                 }
367                 if (!rte_lcore_is_enabled(i))
368                         continue;
369                 if (i == rte_get_master_lcore())
370                         continue;
371                 fwd_lcores_cpuids[nb_lc++] = i;
372         }
373         nb_lcores = (lcoreid_t) nb_lc;
374         nb_cfg_lcores = nb_lcores;
375         nb_fwd_lcores = 1;
376 }
377
378 static void
379 set_def_peer_eth_addrs(void)
380 {
381         portid_t i;
382
383         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
384                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
385                 peer_eth_addrs[i].addr_bytes[5] = i;
386         }
387 }
388
389 static void
390 set_default_fwd_ports_config(void)
391 {
392         portid_t pt_id;
393
394         for (pt_id = 0; pt_id < nb_ports; pt_id++)
395                 fwd_ports_ids[pt_id] = pt_id;
396
397         nb_cfg_ports = nb_ports;
398         nb_fwd_ports = nb_ports;
399 }
400
401 void
402 set_def_fwd_config(void)
403 {
404         set_default_fwd_lcores_config();
405         set_def_peer_eth_addrs();
406         set_default_fwd_ports_config();
407 }
408
409 /*
410  * Configuration initialisation done once at init time.
411  */
412 static void
413 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
414                  unsigned int socket_id)
415 {
416         char pool_name[RTE_MEMPOOL_NAMESIZE];
417         struct rte_mempool *rte_mp = NULL;
418         uint32_t mb_size;
419
420         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
421         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
422
423         RTE_LOG(INFO, USER1,
424                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
425                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
426
427 #ifdef RTE_LIBRTE_PMD_XENVIRT
428         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
429                 (unsigned) mb_mempool_cache,
430                 sizeof(struct rte_pktmbuf_pool_private),
431                 rte_pktmbuf_pool_init, NULL,
432                 rte_pktmbuf_init, NULL,
433                 socket_id, 0);
434 #endif
435
436         /* if the former XEN allocation failed, fall back to normal allocation */
437         if (rte_mp == NULL) {
438                 if (mp_anon != 0) {
439                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
440                                 mb_size, (unsigned) mb_mempool_cache,
441                                 sizeof(struct rte_pktmbuf_pool_private),
442                                 socket_id, 0);
443                         if (rte_mp == NULL)
444                                 goto err;
445
446                         if (rte_mempool_populate_anon(rte_mp) == 0) {
447                                 rte_mempool_free(rte_mp);
448                                 rte_mp = NULL;
449                                 goto err;
450                         }
451                         rte_pktmbuf_pool_init(rte_mp, NULL);
452                         rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
453                 } else {
454                         /* wrapper to rte_mempool_create() */
455                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
456                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
457                 }
458         }
459
460 err:
461         if (rte_mp == NULL) {
462                 rte_exit(EXIT_FAILURE,
463                         "Creation of mbuf pool for socket %u failed: %s\n",
464                         socket_id, rte_strerror(rte_errno));
465         } else if (verbose_level > 0) {
466                 rte_mempool_dump(stdout, rte_mp);
467         }
468 }
469
470 /*
471  * Check whether the given socket id is valid in NUMA mode;
472  * return 0 if valid, -1 otherwise
473  */
474 static int
475 check_socket_id(const unsigned int socket_id)
476 {
477         static int warning_once = 0;
478
479         if (socket_id >= max_socket) {
480                 if (!warning_once && numa_support)
481                         printf("Warning: NUMA should be configured manually by"
482                                " using --port-numa-config and"
483                                " --ring-numa-config parameters along with"
484                                " --numa.\n");
485                 warning_once = 1;
486                 return -1;
487         }
488         return 0;
489 }
490
491 static void
492 init_config(void)
493 {
494         portid_t pid;
495         struct rte_port *port;
496         struct rte_mempool *mbp;
497         unsigned int nb_mbuf_per_pool;
498         lcoreid_t  lc_id;
499         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
500
501         memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
502         /* Configuration of logical cores. */
503         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
504                                 sizeof(struct fwd_lcore *) * nb_lcores,
505                                 RTE_CACHE_LINE_SIZE);
506         if (fwd_lcores == NULL) {
507                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
508                                                         "failed\n", nb_lcores);
509         }
510         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
511                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
512                                                sizeof(struct fwd_lcore),
513                                                RTE_CACHE_LINE_SIZE);
514                 if (fwd_lcores[lc_id] == NULL) {
515                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
516                                                                 "failed\n");
517                 }
518                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
519         }
520
521         FOREACH_PORT(pid, ports) {
522                 port = &ports[pid];
523                 rte_eth_dev_info_get(pid, &port->dev_info);
524
525                 if (numa_support) {
526                         if (port_numa[pid] != NUMA_NO_CONFIG)
527                                 port_per_socket[port_numa[pid]]++;
528                         else {
529                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
530
531                                 /* if socket_id is invalid, set to 0 */
532                                 if (check_socket_id(socket_id) < 0)
533                                         socket_id = 0;
534                                 port_per_socket[socket_id]++;
535                         }
536                 }
537
538                 /* set flag to initialize port/queue */
539                 port->need_reconfig = 1;
540                 port->need_reconfig_queues = 1;
541         }
542
543         /*
544          * Create pools of mbuf.
545          * If NUMA support is disabled, create a single pool of mbuf in
546          * socket 0 memory by default.
547          * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
548          *
549          * Use the maximum possible values of nb_rxd and nb_txd here, so
550          * that nb_rxd and nb_txd can be re-configured at run time.
551          */
552         if (param_total_num_mbufs)
553                 nb_mbuf_per_pool = param_total_num_mbufs;
554         else {
555                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
556                         (nb_lcores * mb_mempool_cache) +
557                         RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
558                 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
559         }
560
561         if (numa_support) {
562                 uint8_t i;
563
564                 for (i = 0; i < max_socket; i++)
565                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
566         } else {
567                 if (socket_num == UMA_NO_CONFIG)
568                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
569                 else
570                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
571                                                  socket_num);
572         }
573
574         init_port_config();
575
576         /*
577          * Record which mbuf pool each logical core should use, if needed.
578          */
579         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
580                 mbp = mbuf_pool_find(
581                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
582
583                 if (mbp == NULL)
584                         mbp = mbuf_pool_find(0);
585                 fwd_lcores[lc_id]->mbp = mbp;
586         }
587
588         /* Configuration of packet forwarding streams. */
589         if (init_fwd_streams() < 0)
590                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
591
592         fwd_config_setup();
593 }
594
595
596 void
597 reconfig(portid_t new_port_id, unsigned socket_id)
598 {
599         struct rte_port *port;
600
601         /* Reconfiguration of Ethernet ports. */
602         port = &ports[new_port_id];
603         rte_eth_dev_info_get(new_port_id, &port->dev_info);
604
605         /* set flag to initialize port/queue */
606         port->need_reconfig = 1;
607         port->need_reconfig_queues = 1;
608         port->socket_id = socket_id;
609
610         init_port_config();
611 }
612
613
614 int
615 init_fwd_streams(void)
616 {
617         portid_t pid;
618         struct rte_port *port;
619         streamid_t sm_id, nb_fwd_streams_new;
620         queueid_t q;
621
622         /* set socket id according to numa or not */
623         FOREACH_PORT(pid, ports) {
624                 port = &ports[pid];
625                 if (nb_rxq > port->dev_info.max_rx_queues) {
626                         printf("Fail: nb_rxq(%d) is greater than "
627                                 "max_rx_queues(%d)\n", nb_rxq,
628                                 port->dev_info.max_rx_queues);
629                         return -1;
630                 }
631                 if (nb_txq > port->dev_info.max_tx_queues) {
632                         printf("Fail: nb_txq(%d) is greater than "
633                                 "max_tx_queues(%d)\n", nb_txq,
634                                 port->dev_info.max_tx_queues);
635                         return -1;
636                 }
637                 if (numa_support) {
638                         if (port_numa[pid] != NUMA_NO_CONFIG)
639                                 port->socket_id = port_numa[pid];
640                         else {
641                                 port->socket_id = rte_eth_dev_socket_id(pid);
642
643                                 /* if socket_id is invalid, set to 0 */
644                                 if (check_socket_id(port->socket_id) < 0)
645                                         port->socket_id = 0;
646                         }
647                 }
648                 else {
649                         if (socket_num == UMA_NO_CONFIG)
650                                 port->socket_id = 0;
651                         else
652                                 port->socket_id = socket_num;
653                 }
654         }
655
656         q = RTE_MAX(nb_rxq, nb_txq);
657         if (q == 0) {
658                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
659                 return -1;
660         }
661         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
662         if (nb_fwd_streams_new == nb_fwd_streams)
663                 return 0;
664         /* clear the old */
665         if (fwd_streams != NULL) {
666                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
667                         if (fwd_streams[sm_id] == NULL)
668                                 continue;
669                         rte_free(fwd_streams[sm_id]);
670                         fwd_streams[sm_id] = NULL;
671                 }
672                 rte_free(fwd_streams);
673                 fwd_streams = NULL;
674         }
675
676         /* init new */
677         nb_fwd_streams = nb_fwd_streams_new;
678         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
679                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
680         if (fwd_streams == NULL)
681                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
682                                                 "failed\n", nb_fwd_streams);
683
684         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
685                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
686                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
687                 if (fwd_streams[sm_id] == NULL)
688                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
689                                                                 " failed\n");
690         }
691
692         return 0;
693 }
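
/*
 * Illustrative sketch, not part of the original file: init_fwd_streams()
 * only allocates the stream array; the RX/TX port and queue of each stream
 * are filled in later by fwd_config_setup(). A hypothetical helper to
 * inspect the resulting mapping could look like this.
 */
static void
dump_fwd_streams_sketch(void)
{
        streamid_t sm_id;

        for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++)
                printf("stream %u: RX P%u/Q%u -> TX P%u/Q%u\n",
                       (unsigned int) sm_id,
                       (unsigned int) fwd_streams[sm_id]->rx_port,
                       (unsigned int) fwd_streams[sm_id]->rx_queue,
                       (unsigned int) fwd_streams[sm_id]->tx_port,
                       (unsigned int) fwd_streams[sm_id]->tx_queue);
}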
694
695 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
696 static void
697 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
698 {
699         unsigned int total_burst;
700         unsigned int nb_burst;
701         unsigned int burst_stats[3];
702         uint16_t pktnb_stats[3];
703         uint16_t nb_pkt;
704         int burst_percent[3];
705
706         /*
707          * First compute the total number of packet bursts and the
708          * two highest numbers of bursts of the same number of packets.
709          */
710         total_burst = 0;
711         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
712         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
713         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
714                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
715                 if (nb_burst == 0)
716                         continue;
717                 total_burst += nb_burst;
718                 if (nb_burst > burst_stats[0]) {
719                         burst_stats[1] = burst_stats[0];
720                         pktnb_stats[1] = pktnb_stats[0];
721                         burst_stats[0] = nb_burst;
722                         pktnb_stats[0] = nb_pkt;
723                 }
724         }
725         if (total_burst == 0)
726                 return;
727         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
728         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
729                burst_percent[0], (int) pktnb_stats[0]);
730         if (burst_stats[0] == total_burst) {
731                 printf("]\n");
732                 return;
733         }
734         if (burst_stats[0] + burst_stats[1] == total_burst) {
735                 printf(" + %d%% of %d pkts]\n",
736                        100 - burst_percent[0], pktnb_stats[1]);
737                 return;
738         }
739         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
740         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
741         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
742                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
743                 return;
744         }
745         printf(" + %d%% of %d pkts + %d%% of others]\n",
746                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
747 }
748 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
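
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Illustrative sketch (assumption, not in the original file): the
 * forwarding engines feed the histogram displayed above by bumping one
 * bucket per burst, indexed by the number of packets that the burst
 * returned. A defensive version of that recording step could look like
 * the hypothetical helper below.
 */
static inline void
record_rx_burst_sketch(struct fwd_stream *fs, uint16_t nb_rx)
{
        if (nb_rx < RTE_DIM(fs->rx_burst_stats.pkt_burst_spread))
                fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */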
749
750 static void
751 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
752 {
753         struct rte_port *port;
754         uint8_t i;
755
756         static const char *fwd_stats_border = "----------------------";
757
758         port = &ports[port_id];
759         printf("\n  %s Forward statistics for port %-2d %s\n",
760                fwd_stats_border, port_id, fwd_stats_border);
761
762         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
763                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
764                        "%-"PRIu64"\n",
765                        stats->ipackets, stats->imissed,
766                        (uint64_t) (stats->ipackets + stats->imissed));
767
768                 if (cur_fwd_eng == &csum_fwd_engine)
769                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
770                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
771                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
772                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
773                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
774                 }
775
776                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
777                        "%-"PRIu64"\n",
778                        stats->opackets, port->tx_dropped,
779                        (uint64_t) (stats->opackets + port->tx_dropped));
780         }
781         else {
782                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
783                        "%14"PRIu64"\n",
784                        stats->ipackets, stats->imissed,
785                        (uint64_t) (stats->ipackets + stats->imissed));
786
787                 if (cur_fwd_eng == &csum_fwd_engine)
788                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
789                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
790                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
791                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
792                         printf("  RX-nombufs:             %14"PRIu64"\n",
793                                stats->rx_nombuf);
794                 }
795
796                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
797                        "%14"PRIu64"\n",
798                        stats->opackets, port->tx_dropped,
799                        (uint64_t) (stats->opackets + port->tx_dropped));
800         }
801
802 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
803         if (port->rx_stream)
804                 pkt_burst_stats_display("RX",
805                         &port->rx_stream->rx_burst_stats);
806         if (port->tx_stream)
807                 pkt_burst_stats_display("TX",
808                         &port->tx_stream->tx_burst_stats);
809 #endif
810
811         if (port->rx_queue_stats_mapping_enabled) {
812                 printf("\n");
813                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
814                         printf("  Stats reg %2d RX-packets:%14"PRIu64
815                                "     RX-errors:%14"PRIu64
816                                "    RX-bytes:%14"PRIu64"\n",
817                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
818                 }
819                 printf("\n");
820         }
821         if (port->tx_queue_stats_mapping_enabled) {
822                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
823                         printf("  Stats reg %2d TX-packets:%14"PRIu64
824                                "                                 TX-bytes:%14"PRIu64"\n",
825                                i, stats->q_opackets[i], stats->q_obytes[i]);
826                 }
827         }
828
829         printf("  %s--------------------------------%s\n",
830                fwd_stats_border, fwd_stats_border);
831 }
832
833 static void
834 fwd_stream_stats_display(streamid_t stream_id)
835 {
836         struct fwd_stream *fs;
837         static const char *fwd_top_stats_border = "-------";
838
839         fs = fwd_streams[stream_id];
840         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
841             (fs->fwd_dropped == 0))
842                 return;
843         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
844                "TX Port=%2d/Queue=%2d %s\n",
845                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
846                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
847         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
848                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
849
850         /* if checksum mode */
851         if (cur_fwd_eng == &csum_fwd_engine) {
852                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
853                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
854         }
855
856 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
857         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
858         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
859 #endif
860 }
861
862 static void
863 flush_fwd_rx_queues(void)
864 {
865         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
866         portid_t  rxp;
867         portid_t port_id;
868         queueid_t rxq;
869         uint16_t  nb_rx;
870         uint16_t  i;
871         uint8_t   j;
872         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
873         uint64_t timer_period;
874
875         /* convert to number of cycles */
876         timer_period = rte_get_timer_hz(); /* 1 second timeout */
877
878         for (j = 0; j < 2; j++) {
879                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
880                         for (rxq = 0; rxq < nb_rxq; rxq++) {
881                                 port_id = fwd_ports_ids[rxp];
882                                 /**
883                                 * testpmd can get stuck in the do-while loop below
884                                 * if rte_eth_rx_burst() always returns a nonzero
885                                 * number of packets, so a timer is added to exit
886                                 * this loop after the 1-second timer expires.
887                                 */
888                                 prev_tsc = rte_rdtsc();
889                                 do {
890                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
891                                                 pkts_burst, MAX_PKT_BURST);
892                                         for (i = 0; i < nb_rx; i++)
893                                                 rte_pktmbuf_free(pkts_burst[i]);
894
895                                         cur_tsc = rte_rdtsc();
896                                         diff_tsc = cur_tsc - prev_tsc;
897                                         timer_tsc += diff_tsc;
898                                 } while ((nb_rx > 0) &&
899                                         (timer_tsc < timer_period));
900                                 timer_tsc = 0;
901                         }
902                 }
903                 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
904         }
905 }
906
907 static void
908 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
909 {
910         struct fwd_stream **fsm;
911         streamid_t nb_fs;
912         streamid_t sm_id;
913
914         fsm = &fwd_streams[fc->stream_idx];
915         nb_fs = fc->stream_nb;
916         do {
917                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
918                         (*pkt_fwd)(fsm[sm_id]);
919         } while (! fc->stopped);
920 }
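
/*
 * Illustrative sketch, not part of the original engines: a packet_fwd_t
 * callback such as the one provided by io_fwd_engine essentially receives
 * one burst on the stream's RX port/queue and retransmits it on the TX
 * port/queue, dropping (and freeing) whatever could not be sent. The real
 * engines may additionally record per-burst statistics and retry
 * transmission when retry_enabled is set.
 */
static void
pkt_burst_io_sketch(struct fwd_stream *fs)
{
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
        uint16_t nb_rx;
        uint16_t nb_tx;
        uint16_t i;

        nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
                                 pkts_burst, nb_pkt_per_burst);
        if (nb_rx == 0)
                return;
        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
                                 pkts_burst, nb_rx);
        fs->fwd_dropped += (unsigned int) (nb_rx - nb_tx);
        for (i = nb_tx; i < nb_rx; i++)
                rte_pktmbuf_free(pkts_burst[i]);
}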
921
922 static int
923 start_pkt_forward_on_core(void *fwd_arg)
924 {
925         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
926                              cur_fwd_config.fwd_eng->packet_fwd);
927         return 0;
928 }
929
930 /*
931  * Run the TXONLY packet forwarding engine to send a single burst of packets.
932  * Used to start communication flows in network loopback test configurations.
933  */
934 static int
935 run_one_txonly_burst_on_core(void *fwd_arg)
936 {
937         struct fwd_lcore *fwd_lc;
938         struct fwd_lcore tmp_lcore;
939
940         fwd_lc = (struct fwd_lcore *) fwd_arg;
941         tmp_lcore = *fwd_lc;
942         tmp_lcore.stopped = 1;
943         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
944         return 0;
945 }
946
947 /*
948  * Launch packet forwarding:
949  *     - Setup per-port forwarding context.
950  *     - launch logical cores with their forwarding configuration.
951  */
952 static void
953 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
954 {
955         port_fwd_begin_t port_fwd_begin;
956         unsigned int i;
957         unsigned int lc_id;
958         int diag;
959
960         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
961         if (port_fwd_begin != NULL) {
962                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
963                         (*port_fwd_begin)(fwd_ports_ids[i]);
964         }
965         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
966                 lc_id = fwd_lcores_cpuids[i];
967                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
968                         fwd_lcores[i]->stopped = 0;
969                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
970                                                      fwd_lcores[i], lc_id);
971                         if (diag != 0)
972                                 printf("launch lcore %u failed - diag=%d\n",
973                                        lc_id, diag);
974                 }
975         }
976 }
977
978 /*
979  * Launch packet forwarding configuration.
980  */
981 void
982 start_packet_forwarding(int with_tx_first)
983 {
984         port_fwd_begin_t port_fwd_begin;
985         port_fwd_end_t  port_fwd_end;
986         struct rte_port *port;
987         unsigned int i;
988         portid_t   pt_id;
989         streamid_t sm_id;
990
991         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
992                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
993
994         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
995                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
996
997         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
998                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
999                 (!nb_rxq || !nb_txq))
1000                 rte_exit(EXIT_FAILURE,
1001                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
1002                         cur_fwd_eng->fwd_mode_name);
1003
1004         if (all_ports_started() == 0) {
1005                 printf("Not all ports were started\n");
1006                 return;
1007         }
1008         if (test_done == 0) {
1009                 printf("Packet forwarding already started\n");
1010                 return;
1011         }
1012
1013         if (init_fwd_streams() < 0) {
1014                 printf("Fail from init_fwd_streams()\n");
1015                 return;
1016         }
1017
1018         if(dcb_test) {
1019                 for (i = 0; i < nb_fwd_ports; i++) {
1020                         pt_id = fwd_ports_ids[i];
1021                         port = &ports[pt_id];
1022                         if (!port->dcb_flag) {
1023                                 printf("In DCB mode, all forwarding ports must "
1024                                        "be configured in this mode.\n");
1025                                 return;
1026                         }
1027                 }
1028                 if (nb_fwd_lcores == 1) {
1029                         printf("In DCB mode, the number of forwarding cores "
1030                                "should be larger than 1.\n");
1031                         return;
1032                 }
1033         }
1034         test_done = 0;
1035
1036         if(!no_flush_rx)
1037                 flush_fwd_rx_queues();
1038
1039         fwd_config_setup();
1040         pkt_fwd_config_display(&cur_fwd_config);
1041         rxtx_config_display();
1042
1043         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1044                 pt_id = fwd_ports_ids[i];
1045                 port = &ports[pt_id];
1046                 rte_eth_stats_get(pt_id, &port->stats);
1047                 port->tx_dropped = 0;
1048
1049                 map_port_queue_stats_mapping_registers(pt_id, port);
1050         }
1051         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1052                 fwd_streams[sm_id]->rx_packets = 0;
1053                 fwd_streams[sm_id]->tx_packets = 0;
1054                 fwd_streams[sm_id]->fwd_dropped = 0;
1055                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1056                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1057
1058 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1059                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1060                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1061                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1062                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1063 #endif
1064 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1065                 fwd_streams[sm_id]->core_cycles = 0;
1066 #endif
1067         }
1068         if (with_tx_first) {
1069                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1070                 if (port_fwd_begin != NULL) {
1071                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1072                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1073                 }
1074                 while (with_tx_first--) {
1075                         launch_packet_forwarding(
1076                                         run_one_txonly_burst_on_core);
1077                         rte_eal_mp_wait_lcore();
1078                 }
1079                 port_fwd_end = tx_only_engine.port_fwd_end;
1080                 if (port_fwd_end != NULL) {
1081                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1082                                 (*port_fwd_end)(fwd_ports_ids[i]);
1083                 }
1084         }
1085         launch_packet_forwarding(start_pkt_forward_on_core);
1086 }
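
/*
 * Illustrative sketch, not part of the original file: in interactive mode
 * the "start" and "start tx_first" commands end up in
 * start_packet_forwarding() above (assumption based on the with_tx_first
 * handling). A minimal non-interactive caller could drive the same entry
 * points as follows; the helper name and the 10-second run time are
 * arbitrary.
 */
static void
run_fwd_briefly_sketch(void)
{
        start_packet_forwarding(0 /* with_tx_first */);
        rte_delay_ms(10 * 1000); /* let traffic run for ~10 seconds */
        stop_packet_forwarding(); /* prints the accumulated statistics */
}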
1087
1088 void
1089 stop_packet_forwarding(void)
1090 {
1091         struct rte_eth_stats stats;
1092         struct rte_port *port;
1093         port_fwd_end_t  port_fwd_end;
1094         int i;
1095         portid_t   pt_id;
1096         streamid_t sm_id;
1097         lcoreid_t  lc_id;
1098         uint64_t total_recv;
1099         uint64_t total_xmit;
1100         uint64_t total_rx_dropped;
1101         uint64_t total_tx_dropped;
1102         uint64_t total_rx_nombuf;
1103         uint64_t tx_dropped;
1104         uint64_t rx_bad_ip_csum;
1105         uint64_t rx_bad_l4_csum;
1106 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1107         uint64_t fwd_cycles;
1108 #endif
1109         static const char *acc_stats_border = "+++++++++++++++";
1110
1111         if (test_done) {
1112                 printf("Packet forwarding not started\n");
1113                 return;
1114         }
1115         printf("Telling cores to stop...");
1116         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1117                 fwd_lcores[lc_id]->stopped = 1;
1118         printf("\nWaiting for lcores to finish...\n");
1119         rte_eal_mp_wait_lcore();
1120         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1121         if (port_fwd_end != NULL) {
1122                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1123                         pt_id = fwd_ports_ids[i];
1124                         (*port_fwd_end)(pt_id);
1125                 }
1126         }
1127 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1128         fwd_cycles = 0;
1129 #endif
1130         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1131                 if (cur_fwd_config.nb_fwd_streams >
1132                     cur_fwd_config.nb_fwd_ports) {
1133                         fwd_stream_stats_display(sm_id);
1134                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1135                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1136                 } else {
1137                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1138                                 fwd_streams[sm_id];
1139                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1140                                 fwd_streams[sm_id];
1141                 }
1142                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1143                 tx_dropped = (uint64_t) (tx_dropped +
1144                                          fwd_streams[sm_id]->fwd_dropped);
1145                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1146
1147                 rx_bad_ip_csum =
1148                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1149                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1150                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1151                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1152                                                         rx_bad_ip_csum;
1153
1154                 rx_bad_l4_csum =
1155                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1156                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1157                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1158                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1159                                                         rx_bad_l4_csum;
1160
1161 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1162                 fwd_cycles = (uint64_t) (fwd_cycles +
1163                                          fwd_streams[sm_id]->core_cycles);
1164 #endif
1165         }
1166         total_recv = 0;
1167         total_xmit = 0;
1168         total_rx_dropped = 0;
1169         total_tx_dropped = 0;
1170         total_rx_nombuf  = 0;
1171         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1172                 pt_id = fwd_ports_ids[i];
1173
1174                 port = &ports[pt_id];
1175                 rte_eth_stats_get(pt_id, &stats);
1176                 stats.ipackets -= port->stats.ipackets;
1177                 port->stats.ipackets = 0;
1178                 stats.opackets -= port->stats.opackets;
1179                 port->stats.opackets = 0;
1180                 stats.ibytes   -= port->stats.ibytes;
1181                 port->stats.ibytes = 0;
1182                 stats.obytes   -= port->stats.obytes;
1183                 port->stats.obytes = 0;
1184                 stats.imissed  -= port->stats.imissed;
1185                 port->stats.imissed = 0;
1186                 stats.oerrors  -= port->stats.oerrors;
1187                 port->stats.oerrors = 0;
1188                 stats.rx_nombuf -= port->stats.rx_nombuf;
1189                 port->stats.rx_nombuf = 0;
1190
1191                 total_recv += stats.ipackets;
1192                 total_xmit += stats.opackets;
1193                 total_rx_dropped += stats.imissed;
1194                 total_tx_dropped += port->tx_dropped;
1195                 total_rx_nombuf  += stats.rx_nombuf;
1196
1197                 fwd_port_stats_display(pt_id, &stats);
1198         }
1199         printf("\n  %s Accumulated forward statistics for all ports"
1200                "%s\n",
1201                acc_stats_border, acc_stats_border);
1202         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1203                "%-"PRIu64"\n"
1204                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1205                "%-"PRIu64"\n",
1206                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1207                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1208         if (total_rx_nombuf > 0)
1209                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1210         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1211                "%s\n",
1212                acc_stats_border, acc_stats_border);
1213 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1214         if (total_recv > 0)
1215                 printf("\n  CPU cycles/packet=%u (total cycles="
1216                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1217                        (unsigned int)(fwd_cycles / total_recv),
1218                        fwd_cycles, total_recv);
1219 #endif
1220         printf("\nDone.\n");
1221         test_done = 1;
1222 }
1223
1224 void
1225 dev_set_link_up(portid_t pid)
1226 {
1227         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1228                 printf("\nSet link up fail.\n");
1229 }
1230
1231 void
1232 dev_set_link_down(portid_t pid)
1233 {
1234         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1235                 printf("\nSet link down fail.\n");
1236 }
1237
1238 static int
1239 all_ports_started(void)
1240 {
1241         portid_t pi;
1242         struct rte_port *port;
1243
1244         FOREACH_PORT(pi, ports) {
1245                 port = &ports[pi];
1246                 /* Check if there is a port which is not started */
1247                 if ((port->port_status != RTE_PORT_STARTED) &&
1248                         (port->slave_flag == 0))
1249                         return 0;
1250         }
1251
1252         /* All ports have been started */
1253         return 1;
1254 }
1255
1256 int
1257 all_ports_stopped(void)
1258 {
1259         portid_t pi;
1260         struct rte_port *port;
1261
1262         FOREACH_PORT(pi, ports) {
1263                 port = &ports[pi];
1264                 if ((port->port_status != RTE_PORT_STOPPED) &&
1265                         (port->slave_flag == 0))
1266                         return 0;
1267         }
1268
1269         return 1;
1270 }
1271
1272 int
1273 port_is_started(portid_t port_id)
1274 {
1275         if (port_id_is_invalid(port_id, ENABLED_WARN))
1276                 return 0;
1277
1278         if (ports[port_id].port_status != RTE_PORT_STARTED)
1279                 return 0;
1280
1281         return 1;
1282 }
1283
1284 static int
1285 port_is_closed(portid_t port_id)
1286 {
1287         if (port_id_is_invalid(port_id, ENABLED_WARN))
1288                 return 0;
1289
1290         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1291                 return 0;
1292
1293         return 1;
1294 }
1295
1296 int
1297 start_port(portid_t pid)
1298 {
1299         int diag, need_check_link_status = -1;
1300         portid_t pi;
1301         queueid_t qi;
1302         struct rte_port *port;
1303         struct ether_addr mac_addr;
1304
1305         if (port_id_is_invalid(pid, ENABLED_WARN))
1306                 return 0;
1307
1308         if(dcb_config)
1309                 dcb_test = 1;
1310         FOREACH_PORT(pi, ports) {
1311                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1312                         continue;
1313
1314                 need_check_link_status = 0;
1315                 port = &ports[pi];
1316                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1317                                                  RTE_PORT_HANDLING) == 0) {
1318                         printf("Port %d is not stopped\n", pi);
1319                         continue;
1320                 }
1321
1322                 if (port->need_reconfig > 0) {
1323                         port->need_reconfig = 0;
1324
1325                         printf("Configuring Port %d (socket %u)\n", pi,
1326                                         port->socket_id);
1327                         /* configure port */
1328                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1329                                                 &(port->dev_conf));
1330                         if (diag != 0) {
1331                                 if (rte_atomic16_cmpset(&(port->port_status),
1332                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1333                                         printf("Port %d can not be set back "
1334                                                         "to stopped\n", pi);
1335                                 printf("Fail to configure port %d\n", pi);
1336                                 /* try to reconfigure port next time */
1337                                 port->need_reconfig = 1;
1338                                 return -1;
1339                         }
1340                 }
1341                 if (port->need_reconfig_queues > 0) {
1342                         port->need_reconfig_queues = 0;
1343                         /* setup tx queues */
1344                         for (qi = 0; qi < nb_txq; qi++) {
1345                                 if ((numa_support) &&
1346                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1347                                         diag = rte_eth_tx_queue_setup(pi, qi,
1348                                                 nb_txd,txring_numa[pi],
1349                                                 &(port->tx_conf));
1350                                 else
1351                                         diag = rte_eth_tx_queue_setup(pi, qi,
1352                                                 nb_txd,port->socket_id,
1353                                                 &(port->tx_conf));
1354
1355                                 if (diag == 0)
1356                                         continue;
1357
1358                                 /* Fail to setup tx queue, return */
1359                                 if (rte_atomic16_cmpset(&(port->port_status),
1360                                                         RTE_PORT_HANDLING,
1361                                                         RTE_PORT_STOPPED) == 0)
1362                                         printf("Port %d can not be set back "
1363                                                         "to stopped\n", pi);
1364                                 printf("Fail to configure port %d tx queues\n", pi);
1365                                 /* try to reconfigure queues next time */
1366                                 port->need_reconfig_queues = 1;
1367                                 return -1;
1368                         }
1369                         /* setup rx queues */
1370                         for (qi = 0; qi < nb_rxq; qi++) {
1371                                 if ((numa_support) &&
1372                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1373                                         struct rte_mempool * mp =
1374                                                 mbuf_pool_find(rxring_numa[pi]);
1375                                         if (mp == NULL) {
1376                                                 printf("Failed to setup RX queue: "
1377                                                         "No mempool allocation"
1378                                                         " on the socket %d\n",
1379                                                         rxring_numa[pi]);
1380                                                 return -1;
1381                                         }
1382
1383                                         diag = rte_eth_rx_queue_setup(pi, qi,
1384                                              nb_rxd,rxring_numa[pi],
1385                                              &(port->rx_conf),mp);
1386                                 } else {
1387                                         struct rte_mempool *mp =
1388                                                 mbuf_pool_find(port->socket_id);
1389                                         if (mp == NULL) {
1390                                                 printf("Failed to setup RX queue: "
1391                                                         "No mempool allocation"
1392                                                         " on the socket %d\n",
1393                                                         port->socket_id);
1394                                                 return -1;
1395                                         }
1396                                         diag = rte_eth_rx_queue_setup(pi, qi,
1397                                              nb_rxd, port->socket_id,
1398                                              &(port->rx_conf), mp);
1399                                 }
1400                                 if (diag == 0)
1401                                         continue;
1402
1403                                 /* Failed to set up rx queue, return */
1404                                 if (rte_atomic16_cmpset(&(port->port_status),
1405                                                         RTE_PORT_HANDLING,
1406                                                         RTE_PORT_STOPPED) == 0)
1407                                         printf("Port %d cannot be set back "
1408                                                         "to stopped\n", pi);
1409                                 printf("Failed to configure port %d rx queues\n", pi);
1410                                 /* try to reconfigure queues next time */
1411                                 port->need_reconfig_queues = 1;
1412                                 return -1;
1413                         }
1414                 }
1415                 /* start port */
1416                 if (rte_eth_dev_start(pi) < 0) {
1417                         printf("Failed to start port %d\n", pi);
1418
1419                         /* Failed to start port, roll the status back */
1420                         if (rte_atomic16_cmpset(&(port->port_status),
1421                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1422                                 printf("Port %d cannot be set back to "
1423                                                         "stopped\n", pi);
1424                         continue;
1425                 }
1426
1427                 if (rte_atomic16_cmpset(&(port->port_status),
1428                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1429                         printf("Port %d cannot be set to started\n", pi);
1430
1431                 rte_eth_macaddr_get(pi, &mac_addr);
1432                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1433                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1434                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1435                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1436
1437                 /* at least one port started, link status needs to be checked */
1438                 need_check_link_status = 1;
1439         }
1440
1441         if (need_check_link_status == 1 && !no_link_check)
1442                 check_all_ports_link_status(RTE_PORT_ALL);
1443         else if (need_check_link_status == 0)
1444                 printf("Please stop the ports first\n");
1445
1446         printf("Done\n");
1447         return 0;
1448 }
1449
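/*
 * Stop the port matching pid, or every port when pid is RTE_PORT_ALL.
 * Ports still used for forwarding or acting as bonding slaves are
 * skipped; for the others the state is moved from STARTED to HANDLING,
 * rte_eth_dev_stop() is called and the port is marked STOPPED.
 */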
1450 void
1451 stop_port(portid_t pid)
1452 {
1453         portid_t pi;
1454         struct rte_port *port;
1455         int need_check_link_status = 0;
1456
1457         if (dcb_test) {
1458                 dcb_test = 0;
1459                 dcb_config = 0;
1460         }
1461
1462         if (port_id_is_invalid(pid, ENABLED_WARN))
1463                 return;
1464
1465         printf("Stopping ports...\n");
1466
1467         FOREACH_PORT(pi, ports) {
1468                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1469                         continue;
1470
1471                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1472                         printf("Please remove port %d from forwarding configuration.\n", pi);
1473                         continue;
1474                 }
1475
1476                 if (port_is_bonding_slave(pi)) {
1477                         printf("Please remove port %d from bonded device.\n", pi);
1478                         continue;
1479                 }
1480
1481                 port = &ports[pi];
1482                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1483                                                 RTE_PORT_HANDLING) == 0)
1484                         continue;
1485
1486                 rte_eth_dev_stop(pi);
1487
1488                 if (rte_atomic16_cmpset(&(port->port_status),
1489                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1490                         printf("Port %d cannot be set to stopped\n", pi);
1491                 need_check_link_status = 1;
1492         }
1493         if (need_check_link_status && !no_link_check)
1494                 check_all_ports_link_status(RTE_PORT_ALL);
1495
1496         printf("Done\n");
1497 }
1498
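/*
 * Close the port matching pid, or every port when pid is RTE_PORT_ALL.
 * Only ports that are stopped, not forwarding and not bonding slaves
 * are closed with rte_eth_dev_close(); already closed ports are skipped.
 */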
1499 void
1500 close_port(portid_t pid)
1501 {
1502         portid_t pi;
1503         struct rte_port *port;
1504
1505         if (port_id_is_invalid(pid, ENABLED_WARN))
1506                 return;
1507
1508         printf("Closing ports...\n");
1509
1510         FOREACH_PORT(pi, ports) {
1511                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1512                         continue;
1513
1514                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1515                         printf("Please remove port %d from forwarding configuration.\n", pi);
1516                         continue;
1517                 }
1518
1519                 if (port_is_bonding_slave(pi)) {
1520                         printf("Please remove port %d from bonded device.\n", pi);
1521                         continue;
1522                 }
1523
1524                 port = &ports[pi];
1525                 if (rte_atomic16_cmpset(&(port->port_status),
1526                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1527                         printf("Port %d is already closed\n", pi);
1528                         continue;
1529                 }
1530
1531                 if (rte_atomic16_cmpset(&(port->port_status),
1532                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1533                         printf("Port %d is not stopped\n", pi);
1534                         continue;
1535                 }
1536
1537                 rte_eth_dev_close(pi);
1538
1539                 if (rte_atomic16_cmpset(&(port->port_status),
1540                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1541                         printf("Port %d cannot be set to closed\n", pi);
1542         }
1543
1544         printf("Done\n");
1545 }
1546
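/*
 * Hot-plug a new port described by the device identifier string:
 * attach the device, enable the port, reconfigure it on its NUMA
 * socket, enable promiscuous mode and leave it in the STOPPED state.
 */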
1547 void
1548 attach_port(char *identifier)
1549 {
1550         portid_t pi = 0;
1551         unsigned int socket_id;
1552
1553         printf("Attaching a new port...\n");
1554
1555         if (identifier == NULL) {
1556                 printf("Invalid parameter specified\n");
1557                 return;
1558         }
1559
1560         if (rte_eth_dev_attach(identifier, &pi))
1561                 return;
1562
1563         ports[pi].enabled = 1;
1564         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1565         /* if socket_id is invalid, set to 0 */
1566         if (check_socket_id(socket_id) < 0)
1567                 socket_id = 0;
1568         reconfig(pi, socket_id);
1569         rte_eth_promiscuous_enable(pi);
1570
1571         nb_ports = rte_eth_dev_count();
1572
1573         ports[pi].port_status = RTE_PORT_STOPPED;
1574
1575         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1576         printf("Done\n");
1577 }
1578
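/*
 * Hot-unplug a port: the port must be closed first; on success the
 * port is disabled and the total port count is refreshed.
 */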
1579 void
1580 detach_port(uint8_t port_id)
1581 {
1582         char name[RTE_ETH_NAME_MAX_LEN];
1583
1584         printf("Detaching a port...\n");
1585
1586         if (!port_is_closed(port_id)) {
1587                 printf("Please close port first\n");
1588                 return;
1589         }
1590
1591         if (rte_eth_dev_detach(port_id, name))
1592                 return;
1593
1594         ports[port_id].enabled = 0;
1595         nb_ports = rte_eth_dev_count();
1596
1597         printf("Port '%s' is detached. Total number of ports is now %d\n",
1598                         name, nb_ports);
1599         printf("Done\n");
1600         return;
1601 }
1602
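/*
 * Application exit path: stop packet forwarding if it is still
 * running, then stop and close every enabled port with link status
 * checks disabled.
 */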
1603 void
1604 pmd_test_exit(void)
1605 {
1606         portid_t pt_id;
1607
1608         if (test_done == 0)
1609                 stop_packet_forwarding();
1610
1611         if (ports != NULL) {
1612                 no_link_check = 1;
1613                 FOREACH_PORT(pt_id, ports) {
1614                         printf("\nShutting down port %d...\n", pt_id);
1615                         fflush(stdout);
1616                         stop_port(pt_id);
1617                         close_port(pt_id);
1618                 }
1619         }
1620         printf("\nBye...\n");
1621 }
1622
1623 typedef void (*cmd_func_t)(void);
1624 struct pmd_test_command {
1625         const char *cmd_name;
1626         cmd_func_t cmd_func;
1627 };
1628
1629 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1630
1631 /* Check the link status of all ports for up to 9 s, and finally print it */
1632 static void
1633 check_all_ports_link_status(uint32_t port_mask)
1634 {
1635 #define CHECK_INTERVAL 100 /* 100ms */
1636 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1637         uint8_t portid, count, all_ports_up, print_flag = 0;
1638         struct rte_eth_link link;
1639
1640         printf("Checking link statuses...\n");
1641         fflush(stdout);
1642         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1643                 all_ports_up = 1;
1644                 FOREACH_PORT(portid, ports) {
1645                         if ((port_mask & (1 << portid)) == 0)
1646                                 continue;
1647                         memset(&link, 0, sizeof(link));
1648                         rte_eth_link_get_nowait(portid, &link);
1649                         /* print link status if flag set */
1650                         if (print_flag == 1) {
1651                                 if (link.link_status)
1652                                         printf("Port %d Link Up - speed %u "
1653                                                 "Mbps - %s\n", (uint8_t)portid,
1654                                                 (unsigned)link.link_speed,
1655                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1656                                                 ("full-duplex") : ("half-duplex"));
1657                                 else
1658                                         printf("Port %d Link Down\n",
1659                                                 (uint8_t)portid);
1660                                 continue;
1661                         }
1662                         /* clear all_ports_up flag if any link down */
1663                         if (link.link_status == ETH_LINK_DOWN) {
1664                                 all_ports_up = 0;
1665                                 break;
1666                         }
1667                 }
1668                 /* after finally printing all link status, get out */
1669                 if (print_flag == 1)
1670                         break;
1671
1672                 if (all_ports_up == 0) {
1673                         fflush(stdout);
1674                         rte_delay_ms(CHECK_INTERVAL);
1675                 }
1676
1677                 /* set the print_flag if all ports up or timeout */
1678                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1679                         print_flag = 1;
1680                 }
1681         }
1682 }
1683
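/*
 * Apply the user-provided TX queue to statistics counter mappings for
 * this port to the device. Returns 0 on success or the error reported
 * by rte_eth_dev_set_tx_queue_stats_mapping().
 */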
1684 static int
1685 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1686 {
1687         uint16_t i;
1688         int diag;
1689         uint8_t mapping_found = 0;
1690
1691         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1692                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1693                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1694                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1695                                         tx_queue_stats_mappings[i].queue_id,
1696                                         tx_queue_stats_mappings[i].stats_counter_id);
1697                         if (diag != 0)
1698                                 return diag;
1699                         mapping_found = 1;
1700                 }
1701         }
1702         if (mapping_found)
1703                 port->tx_queue_stats_mapping_enabled = 1;
1704         return 0;
1705 }
1706
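/*
 * Apply the user-provided RX queue to statistics counter mappings for
 * this port to the device. Returns 0 on success or the error reported
 * by rte_eth_dev_set_rx_queue_stats_mapping().
 */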
1707 static int
1708 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1709 {
1710         uint16_t i;
1711         int diag;
1712         uint8_t mapping_found = 0;
1713
1714         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1715                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1716                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1717                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1718                                         rx_queue_stats_mappings[i].queue_id,
1719                                         rx_queue_stats_mappings[i].stats_counter_id);
1720                         if (diag != 0)
1721                                 return diag;
1722                         mapping_found = 1;
1723                 }
1724         }
1725         if (mapping_found)
1726                 port->rx_queue_stats_mapping_enabled = 1;
1727         return 0;
1728 }
1729
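/*
 * Program both the TX and RX queue statistics mappings of a port.
 * -ENOTSUP simply disables the feature; any other error is fatal.
 */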
1730 static void
1731 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1732 {
1733         int diag = 0;
1734
1735         diag = set_tx_queue_stats_mapping_registers(pi, port);
1736         if (diag != 0) {
1737                 if (diag == -ENOTSUP) {
1738                         port->tx_queue_stats_mapping_enabled = 0;
1739                         printf("TX queue stats mapping not supported for port id=%d\n", pi);
1740                 }
1741                 else
1742                         rte_exit(EXIT_FAILURE,
1743                                         "set_tx_queue_stats_mapping_registers "
1744                                         "failed for port id=%d diag=%d\n",
1745                                         pi, diag);
1746         }
1747
1748         diag = set_rx_queue_stats_mapping_registers(pi, port);
1749         if (diag != 0) {
1750                 if (diag == -ENOTSUP) {
1751                         port->rx_queue_stats_mapping_enabled = 0;
1752                         printf("RX queue stats mapping not supported for port id=%d\n", pi);
1753                 }
1754                 else
1755                         rte_exit(EXIT_FAILURE,
1756                                         "set_rx_queue_stats_mapping_registers "
1757                                         "failed for port id=%d diag=%d\n",
1758                                         pi, diag);
1759         }
1760 }
1761
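/*
 * Start from the PMD default RX/TX queue configuration and override
 * only the thresholds and flags explicitly set on the command line.
 */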
1762 static void
1763 rxtx_port_config(struct rte_port *port)
1764 {
1765         port->rx_conf = port->dev_info.default_rxconf;
1766         port->tx_conf = port->dev_info.default_txconf;
1767
1768         /* Check if any RX/TX parameters have been passed */
1769         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1770                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1771
1772         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1773                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1774
1775         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1776                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1777
1778         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1779                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1780
1781         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1782                 port->rx_conf.rx_drop_en = rx_drop_en;
1783
1784         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1785                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1786
1787         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1788                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1789
1790         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1791                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1792
1793         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1794                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1795
1796         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1797                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1798
1799         if (txq_flags != RTE_PMD_PARAM_UNSET)
1800                 port->tx_conf.txq_flags = txq_flags;
1801 }
1802
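/*
 * Set the default configuration of every enabled port: RX mode, flow
 * director, RSS (when more than one RX queue is used), RX/TX queue
 * parameters and the queue statistics mappings.
 */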
1803 void
1804 init_port_config(void)
1805 {
1806         portid_t pid;
1807         struct rte_port *port;
1808
1809         FOREACH_PORT(pid, ports) {
1810                 port = &ports[pid];
1811                 port->dev_conf.rxmode = rx_mode;
1812                 port->dev_conf.fdir_conf = fdir_conf;
1813                 if (nb_rxq > 1) {
1814                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1815                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1816                 } else {
1817                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1818                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1819                 }
1820
1821                 if (port->dcb_flag == 0) {
1822                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1823                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1824                         else
1825                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1826                 }
1827
1828                 rxtx_port_config(port);
1829
1830                 rte_eth_macaddr_get(pid, &port->eth_addr);
1831
1832                 map_port_queue_stats_mapping_registers(pid, port);
1833 #ifdef RTE_NIC_BYPASS
1834                 rte_eth_dev_bypass_init(pid);
1835 #endif
1836         }
1837 }
1838
1839 void set_port_slave_flag(portid_t slave_pid)
1840 {
1841         struct rte_port *port;
1842
1843         port = &ports[slave_pid];
1844         port->slave_flag = 1;
1845 }
1846
1847 void clear_port_slave_flag(portid_t slave_pid)
1848 {
1849         struct rte_port *port;
1850
1851         port = &ports[slave_pid];
1852         port->slave_flag = 0;
1853 }
1854
1855 uint8_t port_is_bonding_slave(portid_t slave_pid)
1856 {
1857         struct rte_port *port;
1858
1859         port = &ports[slave_pid];
1860         return port->slave_flag;
1861 }
1862
1863 const uint16_t vlan_tags[] = {
1864                 0,  1,  2,  3,  4,  5,  6,  7,
1865                 8,  9, 10, 11, 12, 13, 14, 15,
1866                 16, 17, 18, 19, 20, 21, 22, 23,
1867                 24, 25, 26, 27, 28, 29, 30, 31
1868 };
1869
1870 static int
1871 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1872                  enum dcb_mode_enable dcb_mode,
1873                  enum rte_eth_nb_tcs num_tcs,
1874                  uint8_t pfc_en)
1875 {
1876         uint8_t i;
1877
1878         /*
1879          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1880          * given above, and the number of traffic classes available for use.
1881          */
1882         if (dcb_mode == DCB_VT_ENABLED) {
1883                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1884                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1885                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1886                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1887
1888                 /* VMDQ+DCB RX and TX configurations */
1889                 vmdq_rx_conf->enable_default_pool = 0;
1890                 vmdq_rx_conf->default_pool = 0;
1891                 vmdq_rx_conf->nb_queue_pools =
1892                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1893                 vmdq_tx_conf->nb_queue_pools =
1894                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1895
1896                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1897                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1898                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1899                         vmdq_rx_conf->pool_map[i].pools =
1900                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1901                 }
1902                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1903                         vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
1904                         vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
1905                 }
1906
1907                 /* set DCB mode of RX and TX of multiple queues */
1908                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1909                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1910         } else {
1911                 struct rte_eth_dcb_rx_conf *rx_conf =
1912                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1913                 struct rte_eth_dcb_tx_conf *tx_conf =
1914                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1915
1916                 rx_conf->nb_tcs = num_tcs;
1917                 tx_conf->nb_tcs = num_tcs;
1918
1919                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1920                         rx_conf->dcb_tc[i] = i % num_tcs;
1921                         tx_conf->dcb_tc[i] = i % num_tcs;
1922                 }
1923                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1924                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1925                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1926         }
1927
1928         if (pfc_en)
1929                 eth_conf->dcb_capability_en =
1930                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1931         else
1932                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1933
1934         return 0;
1935 }
1936
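/*
 * Reconfigure a port for DCB testing: build the DCB (optionally with
 * VMDq) configuration, enable VLAN filtering on the test VLAN tags and
 * adjust the number of RX/TX queues to the DCB setup.
 */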
1937 int
1938 init_port_dcb_config(portid_t pid,
1939                      enum dcb_mode_enable dcb_mode,
1940                      enum rte_eth_nb_tcs num_tcs,
1941                      uint8_t pfc_en)
1942 {
1943         struct rte_eth_conf port_conf;
1944         struct rte_port *rte_port;
1945         int retval;
1946         uint16_t i;
1947
1948         rte_port = &ports[pid];
1949
1950         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1951         /* Enter DCB configuration status */
1952         dcb_config = 1;
1953
1954         /* set configuration of DCB in VT mode and DCB in non-VT mode */
1955         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1956         if (retval < 0)
1957                 return retval;
1958         port_conf.rxmode.hw_vlan_filter = 1;
1959
1960         /**
1961          * Write the configuration into the device.
1962          * Set the numbers of RX & TX queues to 0, so
1963          * the RX & TX queues will not be set up.
1964          */
1965         (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
1966
1967         rte_eth_dev_info_get(pid, &rte_port->dev_info);
1968
1969         /* If dev_info.vmdq_pool_base is greater than 0,
1970          * the queue ids of the VMDq pools start after the PF queues.
1971          */
1972         if (dcb_mode == DCB_VT_ENABLED &&
1973             rte_port->dev_info.vmdq_pool_base > 0) {
1974                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1975                         " for port %d.\n", pid);
1976                 return -1;
1977         }
1978
1979         /* Assume the ports in testpmd have the same DCB capability
1980          * and the same number of rxq and txq in DCB mode.
1981          */
1982         if (dcb_mode == DCB_VT_ENABLED) {
1983                 if (rte_port->dev_info.max_vfs > 0) {
1984                         nb_rxq = rte_port->dev_info.nb_rx_queues;
1985                         nb_txq = rte_port->dev_info.nb_tx_queues;
1986                 } else {
1987                         nb_rxq = rte_port->dev_info.max_rx_queues;
1988                         nb_txq = rte_port->dev_info.max_tx_queues;
1989                 }
1990         } else {
1991                 /* if VT is disabled, use all PF queues */
1992                 if (rte_port->dev_info.vmdq_pool_base == 0) {
1993                         nb_rxq = rte_port->dev_info.max_rx_queues;
1994                         nb_txq = rte_port->dev_info.max_tx_queues;
1995                 } else {
1996                         nb_rxq = (queueid_t)num_tcs;
1997                         nb_txq = (queueid_t)num_tcs;
1998
1999                 }
2000         }
2001         rx_free_thresh = 64;
2002
2003         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2004
2005         rxtx_port_config(rte_port);
2006         /* VLAN filter */
2007         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2008         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2009                 rx_vft_set(pid, vlan_tags[i], 1);
2010
2011         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2012         map_port_queue_stats_mapping_registers(pid, rte_port);
2013
2014         rte_port->dcb_flag = 1;
2015
2016         return 0;
2017 }
2018
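/* Allocate the array of port structures and enable the probed ports. */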
2019 static void
2020 init_port(void)
2021 {
2022         portid_t pid;
2023
2024         /* Configuration of Ethernet ports. */
2025         ports = rte_zmalloc("testpmd: ports",
2026                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2027                             RTE_CACHE_LINE_SIZE);
2028         if (ports == NULL) {
2029                 rte_exit(EXIT_FAILURE,
2030                                 "rte_zmalloc(%d struct rte_port) failed\n",
2031                                 RTE_MAX_ETHPORTS);
2032         }
2033
2034         /* enable allocated ports */
2035         for (pid = 0; pid < nb_ports; pid++)
2036                 ports[pid].enabled = 1;
2037 }
2038
2039 static void
2040 force_quit(void)
2041 {
2042         pmd_test_exit();
2043         prompt_exit();
2044 }
2045
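/*
 * On SIGINT/SIGTERM, shut the application down cleanly, then re-raise
 * the signal with the default handler so the expected exit status is
 * reported.
 */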
2046 static void
2047 signal_handler(int signum)
2048 {
2049         if (signum == SIGINT || signum == SIGTERM) {
2050                 printf("\nSignal %d received, preparing to exit...\n",
2051                                 signum);
2052 #ifdef RTE_LIBRTE_PDUMP
2053                 /* uninitialize packet capture framework */
2054                 rte_pdump_uninit();
2055 #endif
2056                 force_quit();
2057                 /* exit with the expected status */
2058                 signal(signum, SIG_DFL);
2059                 kill(getpid(), signum);
2060         }
2061 }
2062
2063 int
2064 main(int argc, char** argv)
2065 {
2066         int  diag;
2067         uint8_t port_id;
2068
2069         signal(SIGINT, signal_handler);
2070         signal(SIGTERM, signal_handler);
2071
2072         diag = rte_eal_init(argc, argv);
2073         if (diag < 0)
2074                 rte_panic("Cannot init EAL\n");
2075
2076 #ifdef RTE_LIBRTE_PDUMP
2077         /* initialize packet capture framework */
2078         rte_pdump_init(NULL);
2079 #endif
2080
2081         nb_ports = (portid_t) rte_eth_dev_count();
2082         if (nb_ports == 0)
2083                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2084
2085         /* allocate port structures, and init them */
2086         init_port();
2087
2088         set_def_fwd_config();
2089         if (nb_lcores == 0)
2090                 rte_panic("Empty set of forwarding logical cores - check the "
2091                           "core mask supplied in the command parameters\n");
2092
2093         argc -= diag;
2094         argv += diag;
2095         if (argc > 1)
2096                 launch_args_parse(argc, argv);
2097
2098         if (!nb_rxq && !nb_txq)
2099                 printf("Warning: Either the rx or tx queue count should be non-zero\n");
2100
2101         if (nb_rxq > 1 && nb_rxq > nb_txq)
2102                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2103                        "but nb_txq=%d will prevent it from being fully tested.\n",
2104                        nb_rxq, nb_txq);
2105
2106         init_config();
2107         if (start_port(RTE_PORT_ALL) != 0)
2108                 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2109
2110         /* set all ports to promiscuous mode by default */
2111         FOREACH_PORT(port_id, ports)
2112                 rte_eth_promiscuous_enable(port_id);
2113
2114 #ifdef RTE_LIBRTE_CMDLINE
2115         if (interactive == 1) {
2116                 if (auto_start) {
2117                         printf("Start automatic packet forwarding\n");
2118                         start_packet_forwarding(0);
2119                 }
2120                 prompt();
2121                 pmd_test_exit();
2122         } else
2123 #endif
2124         {
2125                 char c;
2126                 int rc;
2127
2128                 printf("No command-line core given, starting packet forwarding\n");
2129                 start_packet_forwarding(0);
2130                 printf("Press enter to exit\n");
2131                 rc = read(0, &c, 1);
2132                 pmd_test_exit();
2133                 if (rc < 0)
2134                         return 1;
2135         }
2136
2137         return 0;
2138 }