Imported Upstream version 16.04
[deb_dpdk.git] / app / test-pmd / testpmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_byteorder.h>
53 #include <rte_log.h>
54 #include <rte_debug.h>
55 #include <rte_cycles.h>
56 #include <rte_memory.h>
57 #include <rte_memcpy.h>
58 #include <rte_memzone.h>
59 #include <rte_launch.h>
60 #include <rte_eal.h>
61 #include <rte_per_lcore.h>
62 #include <rte_lcore.h>
63 #include <rte_atomic.h>
64 #include <rte_branch_prediction.h>
65 #include <rte_ring.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
68 #include <rte_mbuf.h>
69 #include <rte_interrupts.h>
70 #include <rte_pci.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
73 #include <rte_dev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
77 #endif
78
79 #include "testpmd.h"
80 #include "mempool_osdep.h"
81
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 * NULL-terminated table of the engines selectable with "set fwd <mode>".
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_retry_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
159
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 * RTE_PMD_PARAM_UNSET means "use the PMD default".
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoids to flush all the RX streams before starts forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoids to check link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
269
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

/*
 * Flow Director configuration: disabled by default, with fully-open
 * masks (match every bit of the relevant fields) when enabled.
 */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Highest CPU-socket id seen during lcore probing, plus one. */
unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
330
331 /*
332  * Find next enabled port
333  */
334 portid_t
335 find_next_port(portid_t p, struct rte_port *ports, int size)
336 {
337         if (ports == NULL)
338                 rte_exit(-EINVAL, "failed to find a next port id\n");
339
340         while ((p < size) && (ports[p].enabled == 0))
341                 p++;
342         return p;
343 }
344
345 /*
346  * Setup default configuration.
347  */
348 static void
349 set_default_fwd_lcores_config(void)
350 {
351         unsigned int i;
352         unsigned int nb_lc;
353         unsigned int sock_num;
354
355         nb_lc = 0;
356         for (i = 0; i < RTE_MAX_LCORE; i++) {
357                 sock_num = rte_lcore_to_socket_id(i) + 1;
358                 if (sock_num > max_socket) {
359                         if (sock_num > RTE_MAX_NUMA_NODES)
360                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
361                         max_socket = sock_num;
362                 }
363                 if (!rte_lcore_is_enabled(i))
364                         continue;
365                 if (i == rte_get_master_lcore())
366                         continue;
367                 fwd_lcores_cpuids[nb_lc++] = i;
368         }
369         nb_lcores = (lcoreid_t) nb_lc;
370         nb_cfg_lcores = nb_lcores;
371         nb_fwd_lcores = 1;
372 }
373
374 static void
375 set_def_peer_eth_addrs(void)
376 {
377         portid_t i;
378
379         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
380                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
381                 peer_eth_addrs[i].addr_bytes[5] = i;
382         }
383 }
384
385 static void
386 set_default_fwd_ports_config(void)
387 {
388         portid_t pt_id;
389
390         for (pt_id = 0; pt_id < nb_ports; pt_id++)
391                 fwd_ports_ids[pt_id] = pt_id;
392
393         nb_cfg_ports = nb_ports;
394         nb_fwd_ports = nb_ports;
395 }
396
/*
 * Reset the whole forwarding configuration to its defaults:
 * lcores, peer Ethernet addresses and port list.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
404
405 /*
406  * Configuration initialisation done once at init time.
407  */
/*
 * Create the mbuf pool backing one CPU socket.
 *
 * mbuf_seg_size: data room of each mbuf (per-segment payload capacity).
 * nb_mbuf:       number of mbufs to reserve in the pool.
 * socket_id:     CPU socket whose memory backs the pool.
 *
 * Allocation strategies are tried in order: Xen grant-table allocator
 * (only when RTE_LIBRTE_PMD_XENVIRT is compiled in), anonymous mapped
 * memory (when the --mp-anon option set mp_anon), and finally the
 * standard rte_pktmbuf_pool_create() path.  Exits the application when
 * every strategy fails.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Full element size: mbuf header followed by its data room. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	/* Derive a per-socket unique pool name. */
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0)
			rte_mp = mempool_anon_create(pool_name, nb_mbuf,
					mb_size, (unsigned) mb_mempool_cache,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socket_id, 0);
		else
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u "
						"failed\n", socket_id);
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
450
451 /*
452  * Check given socket id is valid or not with NUMA mode,
453  * if valid, return 0, else return -1
454  */
455 static int
456 check_socket_id(const unsigned int socket_id)
457 {
458         static int warning_once = 0;
459
460         if (socket_id >= max_socket) {
461                 if (!warning_once && numa_support)
462                         printf("Warning: NUMA should be configured manually by"
463                                " using --port-numa-config and"
464                                " --ring-numa-config parameters along with"
465                                " --numa.\n");
466                 warning_once = 1;
467                 return -1;
468         }
469         return 0;
470 }
471
/*
 * One-time initialisation of the testpmd configuration:
 * per-lcore forwarding contexts, mbuf pools (one per CPU socket in NUMA
 * mode, a single pool otherwise), per-port reconfiguration flags and the
 * forwarding streams.  Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case demand for one port: full RX and TX rings plus
		 * the per-lcore mempool caches and one burst in flight. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Count the ports attached to each socket; explicit
			 * --port-numa-config wins over the probed socket. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		for (i = 0; i < max_socket; i++) {
			/*
			 * NOTE(review): port_per_socket[] is tallied above
			 * but never read here; every socket gets a pool
			 * sized for RTE_MAX_ETHPORTS ports regardless of how
			 * many ports actually sit on it.  Confirm whether
			 * per-socket sizing was intended before changing.
			 */
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's own
		 * socket has none. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
584
585
586 void
587 reconfig(portid_t new_port_id, unsigned socket_id)
588 {
589         struct rte_port *port;
590
591         /* Reconfiguration of Ethernet ports. */
592         port = &ports[new_port_id];
593         rte_eth_dev_info_get(new_port_id, &port->dev_info);
594
595         /* set flag to initialize port/queue */
596         port->need_reconfig = 1;
597         port->need_reconfig_queues = 1;
598         port->socket_id = socket_id;
599
600         init_port_config();
601 }
602
603
/*
 * (Re)allocate the forwarding streams.
 *
 * First validates nb_rxq/nb_txq against every port's limits and assigns
 * each port a socket id (NUMA-aware or forced by --socket-num), then
 * sizes the stream table as nb_ports * max(nb_rxq, nb_txq).  When the
 * required count changed, the old streams are freed and a fresh table is
 * zero-allocated.
 *
 * Returns 0 on success, -1 when a queue count exceeds a port's
 * capabilities or no queues are configured.  Exits on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* Explicit --port-numa-config wins over the
			 * socket probed from the device. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	/* Nothing to do when the stream count is unchanged. */
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
684
685 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
686 static void
687 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
688 {
689         unsigned int total_burst;
690         unsigned int nb_burst;
691         unsigned int burst_stats[3];
692         uint16_t pktnb_stats[3];
693         uint16_t nb_pkt;
694         int burst_percent[3];
695
696         /*
697          * First compute the total number of packet bursts and the
698          * two highest numbers of bursts of the same number of packets.
699          */
700         total_burst = 0;
701         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
702         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
703         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
704                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
705                 if (nb_burst == 0)
706                         continue;
707                 total_burst += nb_burst;
708                 if (nb_burst > burst_stats[0]) {
709                         burst_stats[1] = burst_stats[0];
710                         pktnb_stats[1] = pktnb_stats[0];
711                         burst_stats[0] = nb_burst;
712                         pktnb_stats[0] = nb_pkt;
713                 }
714         }
715         if (total_burst == 0)
716                 return;
717         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
718         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
719                burst_percent[0], (int) pktnb_stats[0]);
720         if (burst_stats[0] == total_burst) {
721                 printf("]\n");
722                 return;
723         }
724         if (burst_stats[0] + burst_stats[1] == total_burst) {
725                 printf(" + %d%% of %d pkts]\n",
726                        100 - burst_percent[0], pktnb_stats[1]);
727                 return;
728         }
729         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
730         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
731         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
732                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
733                 return;
734         }
735         printf(" + %d%% of %d pkts + %d%% of others]\n",
736                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
737 }
738 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
739
/*
 * Print the forwarding statistics accumulated for one port.
 *
 * Two layouts are used: a compact one when neither RX nor TX queue
 * stats-register mapping is enabled, a wide column-aligned one
 * otherwise.  Checksum-engine error counters, RX errors/no-mbuf counts,
 * optional burst-spread statistics and per-stats-register queue counters
 * are appended when applicable.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters only make sense for the csum engine. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wide layout used when queue-stats mapping is enabled. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-stats-register queue counters, when mapping is enabled. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
822
823 static void
824 fwd_stream_stats_display(streamid_t stream_id)
825 {
826         struct fwd_stream *fs;
827         static const char *fwd_top_stats_border = "-------";
828
829         fs = fwd_streams[stream_id];
830         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
831             (fs->fwd_dropped == 0))
832                 return;
833         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
834                "TX Port=%2d/Queue=%2d %s\n",
835                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
836                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
837         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
838                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
839
840         /* if checksum mode */
841         if (cur_fwd_eng == &csum_fwd_engine) {
842                printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
843                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
844         }
845
846 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
847         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
848         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
849 #endif
850 }
851
852 static void
853 flush_fwd_rx_queues(void)
854 {
855         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
856         portid_t  rxp;
857         portid_t port_id;
858         queueid_t rxq;
859         uint16_t  nb_rx;
860         uint16_t  i;
861         uint8_t   j;
862
863         for (j = 0; j < 2; j++) {
864                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
865                         for (rxq = 0; rxq < nb_rxq; rxq++) {
866                                 port_id = fwd_ports_ids[rxp];
867                                 do {
868                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
869                                                 pkts_burst, MAX_PKT_BURST);
870                                         for (i = 0; i < nb_rx; i++)
871                                                 rte_pktmbuf_free(pkts_burst[i]);
872                                 } while (nb_rx > 0);
873                         }
874                 }
875                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
876         }
877 }
878
879 static void
880 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
881 {
882         struct fwd_stream **fsm;
883         streamid_t nb_fs;
884         streamid_t sm_id;
885
886         fsm = &fwd_streams[fc->stream_idx];
887         nb_fs = fc->stream_nb;
888         do {
889                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
890                         (*pkt_fwd)(fsm[sm_id]);
891         } while (! fc->stopped);
892 }
893
894 static int
895 start_pkt_forward_on_core(void *fwd_arg)
896 {
897         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
898                              cur_fwd_config.fwd_eng->packet_fwd);
899         return 0;
900 }
901
902 /*
903  * Run the TXONLY packet forwarding engine to send a single burst of packets.
904  * Used to start communication flows in network loopback test configurations.
905  */
906 static int
907 run_one_txonly_burst_on_core(void *fwd_arg)
908 {
909         struct fwd_lcore *fwd_lc;
910         struct fwd_lcore tmp_lcore;
911
912         fwd_lc = (struct fwd_lcore *) fwd_arg;
913         tmp_lcore = *fwd_lc;
914         tmp_lcore.stopped = 1;
915         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
916         return 0;
917 }
918
919 /*
920  * Launch packet forwarding:
921  *     - Setup per-port forwarding context.
922  *     - launch logical cores with their forwarding configuration.
923  */
924 static void
925 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
926 {
927         port_fwd_begin_t port_fwd_begin;
928         unsigned int i;
929         unsigned int lc_id;
930         int diag;
931
932         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
933         if (port_fwd_begin != NULL) {
934                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
935                         (*port_fwd_begin)(fwd_ports_ids[i]);
936         }
937         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
938                 lc_id = fwd_lcores_cpuids[i];
939                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
940                         fwd_lcores[i]->stopped = 0;
941                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
942                                                      fwd_lcores[i], lc_id);
943                         if (diag != 0)
944                                 printf("launch lcore %u failed - diag=%d\n",
945                                        lc_id, diag);
946                 }
947         }
948 }
949
950 /*
951  * Launch packet forwarding configuration.
952  */
953 void
954 start_packet_forwarding(int with_tx_first)
955 {
956         port_fwd_begin_t port_fwd_begin;
957         port_fwd_end_t  port_fwd_end;
958         struct rte_port *port;
959         unsigned int i;
960         portid_t   pt_id;
961         streamid_t sm_id;
962
963         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
964                 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
965
966         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
967                 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
968
969         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
970                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
971                 (!nb_rxq || !nb_txq))
972                 rte_exit(EXIT_FAILURE,
973                         "Either rxq or txq are 0, cannot use %s fwd mode\n",
974                         cur_fwd_eng->fwd_mode_name);
975
976         if (all_ports_started() == 0) {
977                 printf("Not all ports were started\n");
978                 return;
979         }
980         if (test_done == 0) {
981                 printf("Packet forwarding already started\n");
982                 return;
983         }
984         if(dcb_test) {
985                 for (i = 0; i < nb_fwd_ports; i++) {
986                         pt_id = fwd_ports_ids[i];
987                         port = &ports[pt_id];
988                         if (!port->dcb_flag) {
989                                 printf("In DCB mode, all forwarding ports must "
990                                        "be configured in this mode.\n");
991                                 return;
992                         }
993                 }
994                 if (nb_fwd_lcores == 1) {
995                         printf("In DCB mode,the nb forwarding cores "
996                                "should be larger than 1.\n");
997                         return;
998                 }
999         }
1000         test_done = 0;
1001
1002         if(!no_flush_rx)
1003                 flush_fwd_rx_queues();
1004
1005         fwd_config_setup();
1006         rxtx_config_display();
1007
1008         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1009                 pt_id = fwd_ports_ids[i];
1010                 port = &ports[pt_id];
1011                 rte_eth_stats_get(pt_id, &port->stats);
1012                 port->tx_dropped = 0;
1013
1014                 map_port_queue_stats_mapping_registers(pt_id, port);
1015         }
1016         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1017                 fwd_streams[sm_id]->rx_packets = 0;
1018                 fwd_streams[sm_id]->tx_packets = 0;
1019                 fwd_streams[sm_id]->fwd_dropped = 0;
1020                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1021                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1022
1023 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1024                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1025                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1026                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1027                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1028 #endif
1029 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1030                 fwd_streams[sm_id]->core_cycles = 0;
1031 #endif
1032         }
1033         if (with_tx_first) {
1034                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1035                 if (port_fwd_begin != NULL) {
1036                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1037                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1038                 }
1039                 launch_packet_forwarding(run_one_txonly_burst_on_core);
1040                 rte_eal_mp_wait_lcore();
1041                 port_fwd_end = tx_only_engine.port_fwd_end;
1042                 if (port_fwd_end != NULL) {
1043                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1044                                 (*port_fwd_end)(fwd_ports_ids[i]);
1045                 }
1046         }
1047         launch_packet_forwarding(start_pkt_forward_on_core);
1048 }
1049
/*
 * Stop packet forwarding: signal all forwarding lcores to stop, wait
 * for them, run the engine's per-port end callback, fold per-stream
 * counters into per-port totals, then print per-port and accumulated
 * statistics for the run (HW stats minus the baseline snapshot taken
 * in start_packet_forwarding()).
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done set means forwarding was never started. */
	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* Raise the stop flag polled by run_pkt_fwd_on_lcore(). */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/* With more streams than ports, show per-stream stats and
		 * detach the stream pointers from the ports; otherwise keep
		 * a 1:1 stream<->port association for later display. */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		/* Accumulate this stream's drop count into its TX port. */
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		/* Accumulate bad IP checksum count into the RX port. */
		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		/* Accumulate bad L4 checksum count into the RX port. */
		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Subtract the baseline snapshot so only traffic from this
		 * run is reported; reset the snapshot afterwards. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	/* Mark forwarding as stopped. */
	test_done = 1;
}
1189
1190 void
1191 dev_set_link_up(portid_t pid)
1192 {
1193         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1194                 printf("\nSet link up fail.\n");
1195 }
1196
1197 void
1198 dev_set_link_down(portid_t pid)
1199 {
1200         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1201                 printf("\nSet link down fail.\n");
1202 }
1203
1204 static int
1205 all_ports_started(void)
1206 {
1207         portid_t pi;
1208         struct rte_port *port;
1209
1210         FOREACH_PORT(pi, ports) {
1211                 port = &ports[pi];
1212                 /* Check if there is a port which is not started */
1213                 if ((port->port_status != RTE_PORT_STARTED) &&
1214                         (port->slave_flag == 0))
1215                         return 0;
1216         }
1217
1218         /* No port is not started */
1219         return 1;
1220 }
1221
1222 int
1223 all_ports_stopped(void)
1224 {
1225         portid_t pi;
1226         struct rte_port *port;
1227
1228         FOREACH_PORT(pi, ports) {
1229                 port = &ports[pi];
1230                 if ((port->port_status != RTE_PORT_STOPPED) &&
1231                         (port->slave_flag == 0))
1232                         return 0;
1233         }
1234
1235         return 1;
1236 }
1237
1238 int
1239 port_is_started(portid_t port_id)
1240 {
1241         if (port_id_is_invalid(port_id, ENABLED_WARN))
1242                 return 0;
1243
1244         if (ports[port_id].port_status != RTE_PORT_STARTED)
1245                 return 0;
1246
1247         return 1;
1248 }
1249
1250 static int
1251 port_is_closed(portid_t port_id)
1252 {
1253         if (port_id_is_invalid(port_id, ENABLED_WARN))
1254                 return 0;
1255
1256         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1257                 return 0;
1258
1259         return 1;
1260 }
1261
/*
 * Start the given port (or all ports when pid == RTE_PORT_ALL):
 * reconfigure the device and its TX/RX queues if flagged for it, start
 * the device, and report the port MAC address.  Port state transitions
 * are guarded with compare-and-set on port_status
 * (STOPPED -> HANDLING -> STARTED, rolled back to STOPPED on failure).
 *
 * Returns 0 on success, -1 on a configuration failure (the failing
 * port keeps its need_reconfig* flag so a retry reconfigures it).
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return -1;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return -1;
	}

	if(dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: only a STOPPED port may be handled. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				/* Roll the state back so the port can be
				 * retried later. */
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honour a per-port TX ring NUMA override
				 * when NUMA support is enabled. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				/* RX queues additionally need an mbuf pool on
				 * the right NUMA socket. */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							"on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				}
				else
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf),
					     mbuf_pool_find(port->socket_id));

				if (diag == 0)
					continue;


				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* need_check_link_status: -1 = no port matched pid,
	 * 0 = matched but none started, 1 = at least one started. */
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1419
1420 void
1421 stop_port(portid_t pid)
1422 {
1423         portid_t pi;
1424         struct rte_port *port;
1425         int need_check_link_status = 0;
1426
1427         if (test_done == 0) {
1428                 printf("Please stop forwarding first\n");
1429                 return;
1430         }
1431         if (dcb_test) {
1432                 dcb_test = 0;
1433                 dcb_config = 0;
1434         }
1435
1436         if (port_id_is_invalid(pid, ENABLED_WARN))
1437                 return;
1438
1439         printf("Stopping ports...\n");
1440
1441         FOREACH_PORT(pi, ports) {
1442                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1443                         continue;
1444
1445                 port = &ports[pi];
1446                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1447                                                 RTE_PORT_HANDLING) == 0)
1448                         continue;
1449
1450                 rte_eth_dev_stop(pi);
1451
1452                 if (rte_atomic16_cmpset(&(port->port_status),
1453                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1454                         printf("Port %d can not be set into stopped\n", pi);
1455                 need_check_link_status = 1;
1456         }
1457         if (need_check_link_status && !no_link_check)
1458                 check_all_ports_link_status(RTE_PORT_ALL);
1459
1460         printf("Done\n");
1461 }
1462
1463 void
1464 close_port(portid_t pid)
1465 {
1466         portid_t pi;
1467         struct rte_port *port;
1468
1469         if (test_done == 0) {
1470                 printf("Please stop forwarding first\n");
1471                 return;
1472         }
1473
1474         if (port_id_is_invalid(pid, ENABLED_WARN))
1475                 return;
1476
1477         printf("Closing ports...\n");
1478
1479         FOREACH_PORT(pi, ports) {
1480                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1481                         continue;
1482
1483                 port = &ports[pi];
1484                 if (rte_atomic16_cmpset(&(port->port_status),
1485                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1486                         printf("Port %d is already closed\n", pi);
1487                         continue;
1488                 }
1489
1490                 if (rte_atomic16_cmpset(&(port->port_status),
1491                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1492                         printf("Port %d is now not stopped\n", pi);
1493                         continue;
1494                 }
1495
1496                 rte_eth_dev_close(pi);
1497
1498                 if (rte_atomic16_cmpset(&(port->port_status),
1499                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1500                         printf("Port %d can not be set into stopped\n", pi);
1501         }
1502
1503         printf("Done\n");
1504 }
1505
/*
 * Hot-plug a new port identified by a device string (PCI address or
 * virtual device name).  On success the port is enabled, reconfigured
 * on its NUMA socket, put in promiscuous mode, and the forwarding-port
 * bookkeeping (fwd_ports_ids, nb_ports, nb_cfg_ports, nb_fwd_ports) is
 * rebuilt to include it.
 */
void
attach_port(char *identifier)
{
	portid_t i, j, pi = 0;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (test_done == 0) {
		printf("Please stop forwarding first\n");
		return;
	}

	/* rte_eth_dev_attach() fills pi with the new port id. */
	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	reconfig(pi, rte_eth_dev_socket_id(pi));
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	/* Rebuild the forwarding-port id table from all enabled ports. */
	/* set_default_fwd_ports_config(); */
	memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
	i = 0;
	FOREACH_PORT(j, ports) {
		fwd_ports_ids[i] = j;
		i++;
	}
	nb_cfg_ports = nb_ports;
	nb_fwd_ports++;

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
1547
1548 void
1549 detach_port(uint8_t port_id)
1550 {
1551         portid_t i, pi = 0;
1552         char name[RTE_ETH_NAME_MAX_LEN];
1553
1554         printf("Detaching a port...\n");
1555
1556         if (!port_is_closed(port_id)) {
1557                 printf("Please close port first\n");
1558                 return;
1559         }
1560
1561         if (rte_eth_dev_detach(port_id, name))
1562                 return;
1563
1564         ports[port_id].enabled = 0;
1565         nb_ports = rte_eth_dev_count();
1566
1567         /* set_default_fwd_ports_config(); */
1568         memset(fwd_ports_ids, 0, sizeof(fwd_ports_ids));
1569         i = 0;
1570         FOREACH_PORT(pi, ports) {
1571                 fwd_ports_ids[i] = pi;
1572                 i++;
1573         }
1574         nb_cfg_ports = nb_ports;
1575         nb_fwd_ports--;
1576
1577         printf("Port '%s' is detached. Now total ports is %d\n",
1578                         name, nb_ports);
1579         printf("Done\n");
1580         return;
1581 }
1582
1583 void
1584 pmd_test_exit(void)
1585 {
1586         portid_t pt_id;
1587
1588         if (test_done == 0)
1589                 stop_packet_forwarding();
1590
1591         if (ports != NULL) {
1592                 no_link_check = 1;
1593                 FOREACH_PORT(pt_id, ports) {
1594                         printf("\nShutting down port %d...\n", pt_id);
1595                         fflush(stdout);
1596                         stop_port(pt_id);
1597                         close_port(pt_id);
1598                 }
1599         }
1600         printf("\nBye...\n");
1601 }
1602
/* Signature of a parameter-less test-menu command handler. */
typedef void (*cmd_func_t)(void);
/* One entry of the PMD test command table: keyword plus its handler. */
struct pmd_test_command {
	const char *cmd_name;	/* command keyword */
	cmd_func_t cmd_func;	/* handler invoked for this command */
};

/* Number of entries in the pmd_test_menu[] table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1610
1611 /* Check the link status of all ports in up to 9s, and print them finally */
1612 static void
1613 check_all_ports_link_status(uint32_t port_mask)
1614 {
1615 #define CHECK_INTERVAL 100 /* 100ms */
1616 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1617         uint8_t portid, count, all_ports_up, print_flag = 0;
1618         struct rte_eth_link link;
1619
1620         printf("Checking link statuses...\n");
1621         fflush(stdout);
1622         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1623                 all_ports_up = 1;
1624                 FOREACH_PORT(portid, ports) {
1625                         if ((port_mask & (1 << portid)) == 0)
1626                                 continue;
1627                         memset(&link, 0, sizeof(link));
1628                         rte_eth_link_get_nowait(portid, &link);
1629                         /* print link status if flag set */
1630                         if (print_flag == 1) {
1631                                 if (link.link_status)
1632                                         printf("Port %d Link Up - speed %u "
1633                                                 "Mbps - %s\n", (uint8_t)portid,
1634                                                 (unsigned)link.link_speed,
1635                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1636                                         ("full-duplex") : ("half-duplex\n"));
1637                                 else
1638                                         printf("Port %d Link Down\n",
1639                                                 (uint8_t)portid);
1640                                 continue;
1641                         }
1642                         /* clear all_ports_up flag if any link down */
1643                         if (link.link_status == ETH_LINK_DOWN) {
1644                                 all_ports_up = 0;
1645                                 break;
1646                         }
1647                 }
1648                 /* after finally printing all link status, get out */
1649                 if (print_flag == 1)
1650                         break;
1651
1652                 if (all_ports_up == 0) {
1653                         fflush(stdout);
1654                         rte_delay_ms(CHECK_INTERVAL);
1655                 }
1656
1657                 /* set the print_flag if all ports up or timeout */
1658                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1659                         print_flag = 1;
1660                 }
1661         }
1662 }
1663
1664 static int
1665 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1666 {
1667         uint16_t i;
1668         int diag;
1669         uint8_t mapping_found = 0;
1670
1671         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1672                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1673                                 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1674                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1675                                         tx_queue_stats_mappings[i].queue_id,
1676                                         tx_queue_stats_mappings[i].stats_counter_id);
1677                         if (diag != 0)
1678                                 return diag;
1679                         mapping_found = 1;
1680                 }
1681         }
1682         if (mapping_found)
1683                 port->tx_queue_stats_mapping_enabled = 1;
1684         return 0;
1685 }
1686
1687 static int
1688 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1689 {
1690         uint16_t i;
1691         int diag;
1692         uint8_t mapping_found = 0;
1693
1694         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1695                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1696                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1697                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1698                                         rx_queue_stats_mappings[i].queue_id,
1699                                         rx_queue_stats_mappings[i].stats_counter_id);
1700                         if (diag != 0)
1701                                 return diag;
1702                         mapping_found = 1;
1703                 }
1704         }
1705         if (mapping_found)
1706                 port->rx_queue_stats_mapping_enabled = 1;
1707         return 0;
1708 }
1709
1710 static void
1711 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1712 {
1713         int diag = 0;
1714
1715         diag = set_tx_queue_stats_mapping_registers(pi, port);
1716         if (diag != 0) {
1717                 if (diag == -ENOTSUP) {
1718                         port->tx_queue_stats_mapping_enabled = 0;
1719                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1720                 }
1721                 else
1722                         rte_exit(EXIT_FAILURE,
1723                                         "set_tx_queue_stats_mapping_registers "
1724                                         "failed for port id=%d diag=%d\n",
1725                                         pi, diag);
1726         }
1727
1728         diag = set_rx_queue_stats_mapping_registers(pi, port);
1729         if (diag != 0) {
1730                 if (diag == -ENOTSUP) {
1731                         port->rx_queue_stats_mapping_enabled = 0;
1732                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1733                 }
1734                 else
1735                         rte_exit(EXIT_FAILURE,
1736                                         "set_rx_queue_stats_mapping_registers "
1737                                         "failed for port id=%d diag=%d\n",
1738                                         pi, diag);
1739         }
1740 }
1741
1742 static void
1743 rxtx_port_config(struct rte_port *port)
1744 {
1745         port->rx_conf = port->dev_info.default_rxconf;
1746         port->tx_conf = port->dev_info.default_txconf;
1747
1748         /* Check if any RX/TX parameters have been passed */
1749         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1750                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1751
1752         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1753                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1754
1755         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1756                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1757
1758         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1759                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1760
1761         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1762                 port->rx_conf.rx_drop_en = rx_drop_en;
1763
1764         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1765                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1766
1767         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1768                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1769
1770         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1771                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1772
1773         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1774                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1775
1776         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1777                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1778
1779         if (txq_flags != RTE_PMD_PARAM_UNSET)
1780                 port->tx_conf.txq_flags = txq_flags;
1781 }
1782
1783 void
1784 init_port_config(void)
1785 {
1786         portid_t pid;
1787         struct rte_port *port;
1788
1789         FOREACH_PORT(pid, ports) {
1790                 port = &ports[pid];
1791                 port->dev_conf.rxmode = rx_mode;
1792                 port->dev_conf.fdir_conf = fdir_conf;
1793                 if (nb_rxq > 1) {
1794                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1795                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1796                 } else {
1797                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1798                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1799                 }
1800
1801                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1802                         if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1803                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1804                         else
1805                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1806                 }
1807
1808                 if (port->dev_info.max_vfs != 0) {
1809                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1810                                 port->dev_conf.rxmode.mq_mode =
1811                                         ETH_MQ_RX_VMDQ_RSS;
1812                         else
1813                                 port->dev_conf.rxmode.mq_mode =
1814                                         ETH_MQ_RX_NONE;
1815
1816                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1817                 }
1818
1819                 rxtx_port_config(port);
1820
1821                 rte_eth_macaddr_get(pid, &port->eth_addr);
1822
1823                 map_port_queue_stats_mapping_registers(pid, port);
1824 #ifdef RTE_NIC_BYPASS
1825                 rte_eth_dev_bypass_init(pid);
1826 #endif
1827         }
1828 }
1829
1830 void set_port_slave_flag(portid_t slave_pid)
1831 {
1832         struct rte_port *port;
1833
1834         port = &ports[slave_pid];
1835         port->slave_flag = 1;
1836 }
1837
1838 void clear_port_slave_flag(portid_t slave_pid)
1839 {
1840         struct rte_port *port;
1841
1842         port = &ports[slave_pid];
1843         port->slave_flag = 0;
1844 }
1845
/* VLAN IDs used to populate the VMDQ+DCB pool map in get_eth_dcb_conf(). */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1852
/*
 * Fill *eth_conf with a DCB configuration.
 *
 * DCB_VT_ENABLED builds a VMDQ+DCB setup: the pool count follows num_tcs
 * (4 TCs -> 32 pools, otherwise 16) and the pool map is derived from the
 * vlan_tags[] table above. Any other mode builds plain DCB with RSS on RX.
 * pfc_en additionally advertises priority flow control support.
 *
 * Always returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One pool-map entry per pool: VLAN i routed to pool i mod npools. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Identity map user priority -> traffic class. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Identity map traffic classes. */
		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		/* DCB combined with RSS spreading inside each traffic class. */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
1919
/*
 * Switch port pid into DCB mode (optionally with virtualization and/or
 * priority flow control), reconfiguring the global queue counts and the
 * port's rte_eth_conf accordingly.
 *
 * NOTE(review): this mutates globals (nb_rxq, nb_txq, rx_free_thresh,
 * dcb_config) on the assumption that all testpmd ports share the same DCB
 * capability — see the comment below.
 *
 * Returns 0 on success, a negative value on error.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_eth_dev_info_get(pid, &dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		nb_rxq = dev_info.max_rx_queues;
		nb_txq = dev_info.max_tx_queues;
	} else {
		/*if vt is disabled, use all pf queues */
		if (dev_info.vmdq_pool_base == 0) {
			nb_rxq = dev_info.max_rx_queues;
			nb_txq = dev_info.max_tx_queues;
		} else {
			/* Only the PF queue range is usable: one queue per TC. */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	/* Accept every VLAN id used by the VMDQ+DCB pool map. */
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
1987
1988 static void
1989 init_port(void)
1990 {
1991         portid_t pid;
1992
1993         /* Configuration of Ethernet ports. */
1994         ports = rte_zmalloc("testpmd: ports",
1995                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
1996                             RTE_CACHE_LINE_SIZE);
1997         if (ports == NULL) {
1998                 rte_exit(EXIT_FAILURE,
1999                                 "rte_zmalloc(%d struct rte_port) failed\n",
2000                                 RTE_MAX_ETHPORTS);
2001         }
2002
2003         /* enabled allocated ports */
2004         for (pid = 0; pid < nb_ports; pid++)
2005                 ports[pid].enabled = 1;
2006 }
2007
/* Shut down all ports and terminate the interactive prompt. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2014
/*
 * SIGINT/SIGTERM handler: clean up, then re-raise the signal with the
 * default disposition so the process exits with the expected status.
 */
static void
signal_handler(int signum)
{
	if (signum != SIGINT && signum != SIGTERM)
		return;

	printf("\nSignal %d received, preparing to exit...\n",
			signum);
	force_quit();
	/* exit with the expected status */
	signal(signum, SIG_DFL);
	kill(getpid(), signum);
}
2027
/*
 * testpmd entry point: initialize the EAL, allocate and configure the
 * ports, parse the application arguments, start all ports in promiscuous
 * mode, then either run the interactive command prompt or forward packets
 * until the user presses enter.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Skip the EAL arguments consumed above; the rest are testpmd's. */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		/* Non-interactive mode: forward until the user hits enter. */
		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}