1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdarg.h>
35 #include <stdio.h>
36 #include <stdlib.h>
37 #include <signal.h>
38 #include <string.h>
39 #include <time.h>
40 #include <fcntl.h>
41 #include <sys/types.h>
42 #include <errno.h>
43
44 #include <sys/queue.h>
45 #include <sys/stat.h>
46
47 #include <stdint.h>
48 #include <unistd.h>
49 #include <inttypes.h>
50
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
54 #include <rte_log.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
61 #include <rte_eal.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_ring.h>
67 #include <rte_mempool.h>
68 #include <rte_malloc.h>
69 #include <rte_mbuf.h>
70 #include <rte_interrupts.h>
71 #include <rte_pci.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
74 #include <rte_dev.h>
75 #include <rte_string_fns.h>
76 #ifdef RTE_LIBRTE_PMD_XENVIRT
77 #include <rte_eth_xenvirt.h>
78 #endif
79 #ifdef RTE_LIBRTE_PDUMP
80 #include <rte_pdump.h>
81 #endif
82
83 #include "testpmd.h"
84
85 uint16_t verbose_level = 0; /**< Silent by default. */
86
87 /* Use the master core for the command line? */
88 uint8_t interactive = 0;
89 uint8_t auto_start = 0;
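/* Both flags are normally set from the testpmd command line (the
 * -i/--interactive and -a/--auto-start options parsed in parameters.c). */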
90
91 /*
92  * NUMA support configuration.
93  * When set, the NUMA support attempts to dispatch the allocation of the
94  * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95  *   probed ports among the detected CPU sockets.
96  * Otherwise, all memory is allocated from CPU socket 0.
97  */
98 uint8_t numa_support = 0; /**< No numa support by default */
99
100 /*
101  * In UMA mode, all memory is allocated from socket 0 if --socket-num is
102  * not configured.
103  */
104 uint8_t socket_num = UMA_NO_CONFIG;
105
106 /*
107  * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
108  */
109 uint8_t mp_anon = 0;
110
111 /*
112  * Record the Ethernet address of peer target ports to which packets are
113  * forwarded.
114  * Must be instantiated with the Ethernet addresses of the peer traffic generator
115  * ports.
116  */
117 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
118 portid_t nb_peer_eth_addrs = 0;
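/* The peer addresses are usually filled in from the --eth-peer and
 * --eth-peers-configfile command-line options; when left unset they default
 * to the locally administered addresses set up in set_def_peer_eth_addrs(). */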
119
120 /*
121  * Probed Target Environment.
122  */
123 struct rte_port *ports;        /**< For all probed ethernet ports. */
124 portid_t nb_ports;             /**< Number of probed ethernet ports. */
125 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
126 lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
127
128 /*
129  * Test Forwarding Configuration.
130  *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
131  *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
132  */
133 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
134 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
135 portid_t  nb_cfg_ports;  /**< Number of configured ports. */
136 portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
137
138 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
139 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
140
141 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
142 streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
143
144 /*
145  * Forwarding engines.
146  */
147 struct fwd_engine * fwd_engines[] = {
148         &io_fwd_engine,
149         &mac_fwd_engine,
150         &mac_swap_engine,
151         &flow_gen_engine,
152         &rx_only_engine,
153         &tx_only_engine,
154         &csum_fwd_engine,
155         &icmp_echo_engine,
156 #ifdef RTE_LIBRTE_IEEE1588
157         &ieee1588_fwd_engine,
158 #endif
159         NULL,
160 };
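/* The NULL-terminated list above is what the forwarding-mode selection code
 * iterates over; in the interactive CLI the modes are normally chosen with
 * "set fwd <mode>" (io, mac, macswap, flowgen, rxonly, txonly, csum,
 * icmpecho and, when compiled in, ieee1588). */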
161
162 struct fwd_config cur_fwd_config;
163 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
164 uint32_t retry_enabled;
165 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
166 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
167
168 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
169 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
170                                       * specified on command-line. */
171
172 /*
173  * Configuration of packet segments used by the "txonly" processing engine.
174  */
175 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
176 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
177         TXONLY_DEF_PACKET_LEN,
178 };
179 uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
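/* The TXONLY segment layout can be changed at run time, typically with the
 * "set txpkts <len0[,len1]*>" command, which rewrites tx_pkt_seg_lengths[],
 * tx_pkt_nb_segs and tx_pkt_length. */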
180
181 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
182 /**< Split policy for packets to TX. */
183
184 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
185 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
186
187 /* Whether the current configuration is in DCB mode; 0 means it is not. */
188 uint8_t dcb_config = 0;
189
190 /* Whether DCB is in test mode */
191 uint8_t dcb_test = 0;
192
193 /*
194  * Configurable number of RX/TX queues.
195  */
196 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
197 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
198
199 /*
200  * Configurable number of RX/TX ring descriptors.
201  */
202 #define RTE_TEST_RX_DESC_DEFAULT 128
203 #define RTE_TEST_TX_DESC_DEFAULT 512
204 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
205 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
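/* Both descriptor counts can normally be overridden from the command line
 * (--rxd=N / --txd=N) or changed per port before the ports are started. */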
206
207 #define RTE_PMD_PARAM_UNSET -1
208 /*
209  * Configurable values of RX and TX ring threshold registers.
210  */
211
212 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
215
216 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
218 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
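/* A value of RTE_PMD_PARAM_UNSET means "keep the driver default": when a
 * port is configured, testpmd typically seeds its rx_conf/tx_conf from the
 * defaults reported by rte_eth_dev_info_get() and only overrides the fields
 * that were explicitly set here. */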
219
220 /*
221  * Configurable value of RX free threshold.
222  */
223 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
224
225 /*
226  * Configurable value of RX drop enable.
227  */
228 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
229
230 /*
231  * Configurable value of TX free threshold.
232  */
233 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
234
235 /*
236  * Configurable value of TX RS bit threshold.
237  */
238 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
239
240 /*
241  * Configurable value of TX queue flags.
242  */
243 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
244
245 /*
246  * Receive Side Scaling (RSS) configuration.
247  */
248 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
249
250 /*
251  * Port topology configuration
252  */
253 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
254
255 /*
256  * Avoid flushing all the RX streams before forwarding starts.
257  */
258 uint8_t no_flush_rx = 0; /* flush by default */
259
260 /*
261  * Avoid checking the link status when starting/stopping a port.
262  */
263 uint8_t no_link_check = 0; /* check by default */
264
265 /*
266  * NIC bypass mode configuration options.
267  */
268 #ifdef RTE_NIC_BYPASS
269
270 /* The NIC bypass watchdog timeout. */
271 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
272
273 #endif
274
275 /* default period is 1 second */
276 static uint64_t timer_period = 1;
277
278 /*
279  * Ethernet device configuration.
280  */
281 struct rte_eth_rxmode rx_mode = {
282         .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
283         .split_hdr_size = 0,
284         .header_split   = 0, /**< Header Split disabled. */
285         .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
286         .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
287         .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
288         .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
289         .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
290         .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
291 };
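/* With these defaults only VLAN filtering and VLAN stripping are enabled;
 * header split, IP checksum offload, jumbo frames and CRC stripping start
 * disabled and can be reconfigured per port before the port is started. */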
292
293 struct rte_fdir_conf fdir_conf = {
294         .mode = RTE_FDIR_MODE_NONE,
295         .pballoc = RTE_FDIR_PBALLOC_64K,
296         .status = RTE_FDIR_REPORT_STATUS,
297         .mask = {
298                 .vlan_tci_mask = 0x0,
299                 .ipv4_mask     = {
300                         .src_ip = 0xFFFFFFFF,
301                         .dst_ip = 0xFFFFFFFF,
302                 },
303                 .ipv6_mask     = {
304                         .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
305                         .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
306                 },
307                 .src_port_mask = 0xFFFF,
308                 .dst_port_mask = 0xFFFF,
309                 .mac_addr_byte_mask = 0xFF,
310                 .tunnel_type_mask = 1,
311                 .tunnel_id_mask = 0xFFFFFFFF,
312         },
313         .drop_queue = 127,
314 };
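/* Flow Director starts disabled (RTE_FDIR_MODE_NONE); the masks above ask
 * for exact matching on every supported field, and drop_queue is the RX
 * queue used by "drop" filters if flow director is enabled later. */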
315
316 volatile int test_done = 1; /* 1 if packet forwarding is stopped, 0 while it runs. */
317
318 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
319 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
320
321 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
322 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
323
324 uint16_t nb_tx_queue_stats_mappings = 0;
325 uint16_t nb_rx_queue_stats_mappings = 0;
326
327 unsigned max_socket = 0;
328
329 /* Forward function declarations */
330 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
331 static void check_all_ports_link_status(uint32_t port_mask);
332
333 /*
334  * Check if all the ports are started.
335  * If yes, return positive value. If not, return zero.
336  */
337 static int all_ports_started(void);
338
339 /*
340  * Find next enabled port
341  */
342 portid_t
343 find_next_port(portid_t p, struct rte_port *ports, int size)
344 {
345         if (ports == NULL)
346                 rte_exit(-EINVAL, "failed to find a next port id\n");
347
348         while ((p < size) && (ports[p].enabled == 0))
349                 p++;
350         return p;
351 }
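/* find_next_port() is the helper behind the FOREACH_PORT() iteration macro
 * (see testpmd.h): it skips every port whose 'enabled' flag is cleared. */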
352
353 /*
354  * Setup default configuration.
355  */
356 static void
357 set_default_fwd_lcores_config(void)
358 {
359         unsigned int i;
360         unsigned int nb_lc;
361         unsigned int sock_num;
362
363         nb_lc = 0;
364         for (i = 0; i < RTE_MAX_LCORE; i++) {
365                 sock_num = rte_lcore_to_socket_id(i) + 1;
366                 if (sock_num > max_socket) {
367                         if (sock_num > RTE_MAX_NUMA_NODES)
368                                 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
369                         max_socket = sock_num;
370                 }
371                 if (!rte_lcore_is_enabled(i))
372                         continue;
373                 if (i == rte_get_master_lcore())
374                         continue;
375                 fwd_lcores_cpuids[nb_lc++] = i;
376         }
377         nb_lcores = (lcoreid_t) nb_lc;
378         nb_cfg_lcores = nb_lcores;
379         nb_fwd_lcores = 1;
380 }
381
382 static void
383 set_def_peer_eth_addrs(void)
384 {
385         portid_t i;
386
387         for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
388                 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
389                 peer_eth_addrs[i].addr_bytes[5] = i;
390         }
391 }
392
393 static void
394 set_default_fwd_ports_config(void)
395 {
396         portid_t pt_id;
397
398         for (pt_id = 0; pt_id < nb_ports; pt_id++)
399                 fwd_ports_ids[pt_id] = pt_id;
400
401         nb_cfg_ports = nb_ports;
402         nb_fwd_ports = nb_ports;
403 }
404
405 void
406 set_def_fwd_config(void)
407 {
408         set_default_fwd_lcores_config();
409         set_def_peer_eth_addrs();
410         set_default_fwd_ports_config();
411 }
412
413 /*
414  * Configuration initialisation done once at init time.
415  */
416 static void
417 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
418                  unsigned int socket_id)
419 {
420         char pool_name[RTE_MEMPOOL_NAMESIZE];
421         struct rte_mempool *rte_mp = NULL;
422         uint32_t mb_size;
423
424         mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
425         mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
426
427         RTE_LOG(INFO, USER1,
428                 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
429                 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
430
431 #ifdef RTE_LIBRTE_PMD_XENVIRT
432         rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
433                 (unsigned) mb_mempool_cache,
434                 sizeof(struct rte_pktmbuf_pool_private),
435                 rte_pktmbuf_pool_init, NULL,
436                 rte_pktmbuf_init, NULL,
437                 socket_id, 0);
438 #endif
439
440         /* if the XEN allocation above failed, fall back to normal allocation */
441         if (rte_mp == NULL) {
442                 if (mp_anon != 0) {
443                         rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
444                                 mb_size, (unsigned) mb_mempool_cache,
445                                 sizeof(struct rte_pktmbuf_pool_private),
446                                 socket_id, 0);
447
448                         if (rte_mempool_populate_anon(rte_mp) == 0) {
449                                 rte_mempool_free(rte_mp);
450                                 rte_mp = NULL;
451                         } else {
452                                 rte_pktmbuf_pool_init(rte_mp, NULL);
453                                 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
                            }
454                 } else {
455                         /* wrapper to rte_mempool_create() */
456                         rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
457                                 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
458                 }
459         }
460
461         if (rte_mp == NULL) {
462                 rte_exit(EXIT_FAILURE,
463                         "Creation of mbuf pool for socket %u failed: %s\n",
464                         socket_id, rte_strerror(rte_errno));
465         } else if (verbose_level > 0) {
466                 rte_mempool_dump(stdout, rte_mp);
467         }
468 }
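/* Pool creation tries up to three strategies: a XEN gntalloc pool when
 * RTE_LIBRTE_PMD_XENVIRT is compiled in, an anonymously mapped pool when
 * mp_anon is set (normally via the --mp-anon option), and otherwise the
 * plain rte_pktmbuf_pool_create() helper. */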
469
470 /*
471  * Check whether the given socket id is valid in NUMA mode.
472  * Return 0 if valid, -1 otherwise.
473  */
474 static int
475 check_socket_id(const unsigned int socket_id)
476 {
477         static int warning_once = 0;
478
479         if (socket_id >= max_socket) {
480                 if (!warning_once && numa_support)
481                         printf("Warning: NUMA should be configured manually by"
482                                " using --port-numa-config and"
483                                " --ring-numa-config parameters along with"
484                                " --numa.\n");
485                 warning_once = 1;
486                 return -1;
487         }
488         return 0;
489 }
490
491 static void
492 init_config(void)
493 {
494         portid_t pid;
495         struct rte_port *port;
496         struct rte_mempool *mbp;
497         unsigned int nb_mbuf_per_pool;
498         lcoreid_t  lc_id;
499         uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
500
501         memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
502         /* Configuration of logical cores. */
503         fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
504                                 sizeof(struct fwd_lcore *) * nb_lcores,
505                                 RTE_CACHE_LINE_SIZE);
506         if (fwd_lcores == NULL) {
507                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
508                                                         "failed\n", nb_lcores);
509         }
510         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
511                 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
512                                                sizeof(struct fwd_lcore),
513                                                RTE_CACHE_LINE_SIZE);
514                 if (fwd_lcores[lc_id] == NULL) {
515                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
516                                                                 "failed\n");
517                 }
518                 fwd_lcores[lc_id]->cpuid_idx = lc_id;
519         }
520
521         /*
522          * Create mbuf pools.
523          * If NUMA support is disabled, create a single mbuf pool in
524          * socket 0 memory by default.
525          * Otherwise, create one mbuf pool per detected CPU socket.
526          *
527          * Size the pools for the maximum numbers of RX/TX descriptors so
528          * that nb_rxd and nb_txd can still be reconfigured at run time.
529          */
530         if (param_total_num_mbufs)
531                 nb_mbuf_per_pool = param_total_num_mbufs;
532         else {
533                 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
534                                 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
535
536                 if (!numa_support)
537                         nb_mbuf_per_pool =
538                                 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
539         }
540
541         if (!numa_support) {
542                 if (socket_num == UMA_NO_CONFIG)
543                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
544                 else
545                         mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
546                                                  socket_num);
547         }
548
549         FOREACH_PORT(pid, ports) {
550                 port = &ports[pid];
551                 rte_eth_dev_info_get(pid, &port->dev_info);
552
553                 if (numa_support) {
554                         if (port_numa[pid] != NUMA_NO_CONFIG)
555                                 port_per_socket[port_numa[pid]]++;
556                         else {
557                                 uint32_t socket_id = rte_eth_dev_socket_id(pid);
558
559                                 /* if socket_id is invalid, set to 0 */
560                                 if (check_socket_id(socket_id) < 0)
561                                         socket_id = 0;
562                                 port_per_socket[socket_id]++;
563                         }
564                 }
565
566                 /* set flag to initialize port/queue */
567                 port->need_reconfig = 1;
568                 port->need_reconfig_queues = 1;
569         }
570
571         if (numa_support) {
572                 uint8_t i;
573                 unsigned int nb_mbuf;
574
575                 if (param_total_num_mbufs)
576                         nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
577
578                 for (i = 0; i < max_socket; i++) {
579                         nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
580                         if (nb_mbuf)
581                                 mbuf_pool_create(mbuf_data_size,
582                                                 nb_mbuf,i);
583                 }
584         }
585         init_port_config();
586
587         /*
588          * Record which mbuf pool each logical core should use, if needed.
589          */
590         for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
591                 mbp = mbuf_pool_find(
592                         rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
593
594                 if (mbp == NULL)
595                         mbp = mbuf_pool_find(0);
596                 fwd_lcores[lc_id]->mbp = mbp;
597         }
598
599         /* Configuration of packet forwarding streams. */
600         if (init_fwd_streams() < 0)
601                 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
602
603         fwd_config_setup();
604 }
605
606
607 void
608 reconfig(portid_t new_port_id, unsigned socket_id)
609 {
610         struct rte_port *port;
611
612         /* Reconfiguration of Ethernet ports. */
613         port = &ports[new_port_id];
614         rte_eth_dev_info_get(new_port_id, &port->dev_info);
615
616         /* set flag to initialize port/queue */
617         port->need_reconfig = 1;
618         port->need_reconfig_queues = 1;
619         port->socket_id = socket_id;
620
621         init_port_config();
622 }
623
624
625 int
626 init_fwd_streams(void)
627 {
628         portid_t pid;
629         struct rte_port *port;
630         streamid_t sm_id, nb_fwd_streams_new;
631         queueid_t q;
632
633         /* set each port's socket id according to whether NUMA is enabled */
634         FOREACH_PORT(pid, ports) {
635                 port = &ports[pid];
636                 if (nb_rxq > port->dev_info.max_rx_queues) {
637                         printf("Fail: nb_rxq(%d) is greater than "
638                                 "max_rx_queues(%d)\n", nb_rxq,
639                                 port->dev_info.max_rx_queues);
640                         return -1;
641                 }
642                 if (nb_txq > port->dev_info.max_tx_queues) {
643                         printf("Fail: nb_txq(%d) is greater than "
644                                 "max_tx_queues(%d)\n", nb_txq,
645                                 port->dev_info.max_tx_queues);
646                         return -1;
647                 }
648                 if (numa_support) {
649                         if (port_numa[pid] != NUMA_NO_CONFIG)
650                                 port->socket_id = port_numa[pid];
651                         else {
652                                 port->socket_id = rte_eth_dev_socket_id(pid);
653
654                                 /* if socket_id is invalid, set to 0 */
655                                 if (check_socket_id(port->socket_id) < 0)
656                                         port->socket_id = 0;
657                         }
658                 }
659                 else {
660                         if (socket_num == UMA_NO_CONFIG)
661                                 port->socket_id = 0;
662                         else
663                                 port->socket_id = socket_num;
664                 }
665         }
666
667         q = RTE_MAX(nb_rxq, nb_txq);
668         if (q == 0) {
669                 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
670                 return -1;
671         }
672         nb_fwd_streams_new = (streamid_t)(nb_ports * q);
673         if (nb_fwd_streams_new == nb_fwd_streams)
674                 return 0;
675         /* clear the old */
676         if (fwd_streams != NULL) {
677                 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
678                         if (fwd_streams[sm_id] == NULL)
679                                 continue;
680                         rte_free(fwd_streams[sm_id]);
681                         fwd_streams[sm_id] = NULL;
682                 }
683                 rte_free(fwd_streams);
684                 fwd_streams = NULL;
685         }
686
687         /* init new */
688         nb_fwd_streams = nb_fwd_streams_new;
689         fwd_streams = rte_zmalloc("testpmd: fwd_streams",
690                 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
691         if (fwd_streams == NULL)
692                 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
693                                                 "failed\n", nb_fwd_streams);
694
695         for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
696                 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
697                                 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
698                 if (fwd_streams[sm_id] == NULL)
699                         rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
700                                                                 " failed\n");
701         }
702
703         return 0;
704 }
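/* One forwarding stream is allocated per (port, queue) pair, i.e.
 * nb_ports * max(nb_rxq, nb_txq) entries; the array is freed and rebuilt
 * whenever the configured queue counts change. */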
705
706 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
707 static void
708 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
709 {
710         unsigned int total_burst;
711         unsigned int nb_burst;
712         unsigned int burst_stats[3];
713         uint16_t pktnb_stats[3];
714         uint16_t nb_pkt;
715         int burst_percent[3];
716
717         /*
718          * First compute the total number of packet bursts and the
719          * two highest numbers of bursts of the same number of packets.
720          */
721         total_burst = 0;
722         burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
723         pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
724         for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
725                 nb_burst = pbs->pkt_burst_spread[nb_pkt];
726                 if (nb_burst == 0)
727                         continue;
728                 total_burst += nb_burst;
729                 if (nb_burst > burst_stats[0]) {
730                         burst_stats[1] = burst_stats[0];
731                         pktnb_stats[1] = pktnb_stats[0];
732                         burst_stats[0] = nb_burst;
733                         pktnb_stats[0] = nb_pkt;
734                 }
735         }
736         if (total_burst == 0)
737                 return;
738         burst_percent[0] = (burst_stats[0] * 100) / total_burst;
739         printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
740                burst_percent[0], (int) pktnb_stats[0]);
741         if (burst_stats[0] == total_burst) {
742                 printf("]\n");
743                 return;
744         }
745         if (burst_stats[0] + burst_stats[1] == total_burst) {
746                 printf(" + %d%% of %d pkts]\n",
747                        100 - burst_percent[0], pktnb_stats[1]);
748                 return;
749         }
750         burst_percent[1] = (burst_stats[1] * 100) / total_burst;
751         burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
752         if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
753                 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
754                 return;
755         }
756         printf(" + %d%% of %d pkts + %d%% of others]\n",
757                burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
758 }
759 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
760
761 static void
762 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
763 {
764         struct rte_port *port;
765         uint8_t i;
766
767         static const char *fwd_stats_border = "----------------------";
768
769         port = &ports[port_id];
770         printf("\n  %s Forward statistics for port %-2d %s\n",
771                fwd_stats_border, port_id, fwd_stats_border);
772
773         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
774                 printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
775                        "%-"PRIu64"\n",
776                        stats->ipackets, stats->imissed,
777                        (uint64_t) (stats->ipackets + stats->imissed));
778
779                 if (cur_fwd_eng == &csum_fwd_engine)
780                         printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
781                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
782                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
783                         printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
784                         printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
785                 }
786
787                 printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
788                        "%-"PRIu64"\n",
789                        stats->opackets, port->tx_dropped,
790                        (uint64_t) (stats->opackets + port->tx_dropped));
791         }
792         else {
793                 printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
794                        "%14"PRIu64"\n",
795                        stats->ipackets, stats->imissed,
796                        (uint64_t) (stats->ipackets + stats->imissed));
797
798                 if (cur_fwd_eng == &csum_fwd_engine)
799                         printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
800                                port->rx_bad_ip_csum, port->rx_bad_l4_csum);
801                 if ((stats->ierrors + stats->rx_nombuf) > 0) {
802                         printf("  RX-error:%"PRIu64"\n", stats->ierrors);
803                         printf("  RX-nombufs:             %14"PRIu64"\n",
804                                stats->rx_nombuf);
805                 }
806
807                 printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
808                        "%14"PRIu64"\n",
809                        stats->opackets, port->tx_dropped,
810                        (uint64_t) (stats->opackets + port->tx_dropped));
811         }
812
813 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
814         if (port->rx_stream)
815                 pkt_burst_stats_display("RX",
816                         &port->rx_stream->rx_burst_stats);
817         if (port->tx_stream)
818                 pkt_burst_stats_display("TX",
819                         &port->tx_stream->tx_burst_stats);
820 #endif
821
822         if (port->rx_queue_stats_mapping_enabled) {
823                 printf("\n");
824                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
825                         printf("  Stats reg %2d RX-packets:%14"PRIu64
826                                "     RX-errors:%14"PRIu64
827                                "    RX-bytes:%14"PRIu64"\n",
828                                i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
829                 }
830                 printf("\n");
831         }
832         if (port->tx_queue_stats_mapping_enabled) {
833                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
834                         printf("  Stats reg %2d TX-packets:%14"PRIu64
835                                "                                 TX-bytes:%14"PRIu64"\n",
836                                i, stats->q_opackets[i], stats->q_obytes[i]);
837                 }
838         }
839
840         printf("  %s--------------------------------%s\n",
841                fwd_stats_border, fwd_stats_border);
842 }
843
844 static void
845 fwd_stream_stats_display(streamid_t stream_id)
846 {
847         struct fwd_stream *fs;
848         static const char *fwd_top_stats_border = "-------";
849
850         fs = fwd_streams[stream_id];
851         if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
852             (fs->fwd_dropped == 0))
853                 return;
854         printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
855                "TX Port=%2d/Queue=%2d %s\n",
856                fwd_top_stats_border, fs->rx_port, fs->rx_queue,
857                fs->tx_port, fs->tx_queue, fwd_top_stats_border);
858         printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
859                fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
860
861         /* if checksum mode */
862         if (cur_fwd_eng == &csum_fwd_engine) {
863                printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
864                         "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
865         }
866
867 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
868         pkt_burst_stats_display("RX", &fs->rx_burst_stats);
869         pkt_burst_stats_display("TX", &fs->tx_burst_stats);
870 #endif
871 }
872
873 static void
874 flush_fwd_rx_queues(void)
875 {
876         struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
877         portid_t  rxp;
878         portid_t port_id;
879         queueid_t rxq;
880         uint16_t  nb_rx;
881         uint16_t  i;
882         uint8_t   j;
883         uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0, timer_cycles;
884
885         /* convert the timer period to TSC cycles; keep the global period in seconds */
886         timer_cycles = timer_period * rte_get_timer_hz();
887
888         for (j = 0; j < 2; j++) {
889                 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
890                         for (rxq = 0; rxq < nb_rxq; rxq++) {
891                                 port_id = fwd_ports_ids[rxp];
892                                 /*
893                                  * testpmd can get stuck in the do-while loop
894                                  * below if rte_eth_rx_burst() keeps returning
895                                  * packets, so a timer is used to exit the loop
896                                  * after the 1-second period expires.
897                                  */
898                                 prev_tsc = rte_rdtsc();
899                                 do {
900                                         nb_rx = rte_eth_rx_burst(port_id, rxq,
901                                                 pkts_burst, MAX_PKT_BURST);
902                                         for (i = 0; i < nb_rx; i++)
903                                                 rte_pktmbuf_free(pkts_burst[i]);
904
905                                         cur_tsc = rte_rdtsc();
906                                         diff_tsc = cur_tsc - prev_tsc;
907                                         timer_tsc += diff_tsc;
908                                 } while ((nb_rx > 0) &&
909                                         (timer_tsc < timer_cycles));
910                                 timer_tsc = 0;
911                         }
912                 }
913                 rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
914         }
915 }
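/* Draining the RX queues before a run keeps packets left over from a
 * previous forwarding session out of the new statistics; the flush is
 * skipped when no_flush_rx is set (normally via --no-flush-rx). */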
916
917 static void
918 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
919 {
920         struct fwd_stream **fsm;
921         streamid_t nb_fs;
922         streamid_t sm_id;
923
924         fsm = &fwd_streams[fc->stream_idx];
925         nb_fs = fc->stream_nb;
926         do {
927                 for (sm_id = 0; sm_id < nb_fs; sm_id++)
928                         (*pkt_fwd)(fsm[sm_id]);
929         } while (! fc->stopped);
930 }
931
932 static int
933 start_pkt_forward_on_core(void *fwd_arg)
934 {
935         run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
936                              cur_fwd_config.fwd_eng->packet_fwd);
937         return 0;
938 }
939
940 /*
941  * Run the TXONLY packet forwarding engine to send a single burst of packets.
942  * Used to start communication flows in network loopback test configurations.
943  */
944 static int
945 run_one_txonly_burst_on_core(void *fwd_arg)
946 {
947         struct fwd_lcore *fwd_lc;
948         struct fwd_lcore tmp_lcore;
949
950         fwd_lc = (struct fwd_lcore *) fwd_arg;
951         tmp_lcore = *fwd_lc;
952         tmp_lcore.stopped = 1;
953         run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
954         return 0;
955 }
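/* The lcore context is copied and its 'stopped' flag is pre-set so that
 * run_pkt_fwd_on_lcore() performs exactly one pass over the streams, i.e.
 * a single TX-only burst per stream. */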
956
957 /*
958  * Launch packet forwarding:
959  *     - Setup per-port forwarding context.
960  *     - launch logical cores with their forwarding configuration.
961  */
962 static void
963 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
964 {
965         port_fwd_begin_t port_fwd_begin;
966         unsigned int i;
967         unsigned int lc_id;
968         int diag;
969
970         port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
971         if (port_fwd_begin != NULL) {
972                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
973                         (*port_fwd_begin)(fwd_ports_ids[i]);
974         }
975         for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
976                 lc_id = fwd_lcores_cpuids[i];
977                 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
978                         fwd_lcores[i]->stopped = 0;
979                         diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
980                                                      fwd_lcores[i], lc_id);
981                         if (diag != 0)
982                                 printf("launch lcore %u failed - diag=%d\n",
983                                        lc_id, diag);
984                 }
985         }
986 }
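/* In interactive mode the lcore running the command line is skipped by the
 * (interactive == 0) || (lc_id != rte_lcore_id()) test above, so it stays
 * available for the testpmd prompt while the other cores forward packets. */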
987
988 /*
989  * Launch packet forwarding configuration.
990  */
991 void
992 start_packet_forwarding(int with_tx_first)
993 {
994         port_fwd_begin_t port_fwd_begin;
995         port_fwd_end_t  port_fwd_end;
996         struct rte_port *port;
997         unsigned int i;
998         portid_t   pt_id;
999         streamid_t sm_id;
1000
1001         if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1002                 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1003
1004         if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1005                 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1006
1007         if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1008                 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1009                 (!nb_rxq || !nb_txq))
1010                 rte_exit(EXIT_FAILURE,
1011                         "Either rxq or txq is 0, cannot use %s fwd mode\n",
1012                         cur_fwd_eng->fwd_mode_name);
1013
1014         if (all_ports_started() == 0) {
1015                 printf("Not all ports were started\n");
1016                 return;
1017         }
1018         if (test_done == 0) {
1019                 printf("Packet forwarding already started\n");
1020                 return;
1021         }
1022
1023         if (init_fwd_streams() < 0) {
1024                 printf("Fail from init_fwd_streams()\n");
1025                 return;
1026         }
1027
1028         if(dcb_test) {
1029                 for (i = 0; i < nb_fwd_ports; i++) {
1030                         pt_id = fwd_ports_ids[i];
1031                         port = &ports[pt_id];
1032                         if (!port->dcb_flag) {
1033                                 printf("In DCB mode, all forwarding ports must "
1034                                        "be configured in this mode.\n");
1035                                 return;
1036                         }
1037                 }
1038                 if (nb_fwd_lcores == 1) {
1039                         printf("In DCB mode, the number of forwarding cores "
1040                                "must be larger than 1.\n");
1041                         return;
1042                 }
1043         }
1044         test_done = 0;
1045
1046         if(!no_flush_rx)
1047                 flush_fwd_rx_queues();
1048
1049         fwd_config_setup();
1050         pkt_fwd_config_display(&cur_fwd_config);
1051         rxtx_config_display();
1052
1053         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1054                 pt_id = fwd_ports_ids[i];
1055                 port = &ports[pt_id];
1056                 rte_eth_stats_get(pt_id, &port->stats);
1057                 port->tx_dropped = 0;
1058
1059                 map_port_queue_stats_mapping_registers(pt_id, port);
1060         }
1061         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1062                 fwd_streams[sm_id]->rx_packets = 0;
1063                 fwd_streams[sm_id]->tx_packets = 0;
1064                 fwd_streams[sm_id]->fwd_dropped = 0;
1065                 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1066                 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1067
1068 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1069                 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1070                        sizeof(fwd_streams[sm_id]->rx_burst_stats));
1071                 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1072                        sizeof(fwd_streams[sm_id]->tx_burst_stats));
1073 #endif
1074 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1075                 fwd_streams[sm_id]->core_cycles = 0;
1076 #endif
1077         }
1078         if (with_tx_first) {
1079                 port_fwd_begin = tx_only_engine.port_fwd_begin;
1080                 if (port_fwd_begin != NULL) {
1081                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1082                                 (*port_fwd_begin)(fwd_ports_ids[i]);
1083                 }
1084                 while (with_tx_first--) {
1085                         launch_packet_forwarding(
1086                                         run_one_txonly_burst_on_core);
1087                         rte_eal_mp_wait_lcore();
1088                 }
1089                 port_fwd_end = tx_only_engine.port_fwd_end;
1090                 if (port_fwd_end != NULL) {
1091                         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1092                                 (*port_fwd_end)(fwd_ports_ids[i]);
1093                 }
1094         }
1095         launch_packet_forwarding(start_pkt_forward_on_core);
1096 }
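/* with_tx_first is the number of initial TX-only bursts (normally requested
 * with the "start tx_first [n]" command); each burst is launched on every
 * forwarding core and waited for with rte_eal_mp_wait_lcore() before the
 * configured forwarding engine itself is started. */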
1097
1098 void
1099 stop_packet_forwarding(void)
1100 {
1101         struct rte_eth_stats stats;
1102         struct rte_port *port;
1103         port_fwd_end_t  port_fwd_end;
1104         int i;
1105         portid_t   pt_id;
1106         streamid_t sm_id;
1107         lcoreid_t  lc_id;
1108         uint64_t total_recv;
1109         uint64_t total_xmit;
1110         uint64_t total_rx_dropped;
1111         uint64_t total_tx_dropped;
1112         uint64_t total_rx_nombuf;
1113         uint64_t tx_dropped;
1114         uint64_t rx_bad_ip_csum;
1115         uint64_t rx_bad_l4_csum;
1116 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1117         uint64_t fwd_cycles;
1118 #endif
1119         static const char *acc_stats_border = "+++++++++++++++";
1120
1121         if (test_done) {
1122                 printf("Packet forwarding not started\n");
1123                 return;
1124         }
1125         printf("Telling cores to stop...");
1126         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1127                 fwd_lcores[lc_id]->stopped = 1;
1128         printf("\nWaiting for lcores to finish...\n");
1129         rte_eal_mp_wait_lcore();
1130         port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1131         if (port_fwd_end != NULL) {
1132                 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1133                         pt_id = fwd_ports_ids[i];
1134                         (*port_fwd_end)(pt_id);
1135                 }
1136         }
1137 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1138         fwd_cycles = 0;
1139 #endif
1140         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1141                 if (cur_fwd_config.nb_fwd_streams >
1142                     cur_fwd_config.nb_fwd_ports) {
1143                         fwd_stream_stats_display(sm_id);
1144                         ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1145                         ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1146                 } else {
1147                         ports[fwd_streams[sm_id]->tx_port].tx_stream =
1148                                 fwd_streams[sm_id];
1149                         ports[fwd_streams[sm_id]->rx_port].rx_stream =
1150                                 fwd_streams[sm_id];
1151                 }
1152                 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1153                 tx_dropped = (uint64_t) (tx_dropped +
1154                                          fwd_streams[sm_id]->fwd_dropped);
1155                 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1156
1157                 rx_bad_ip_csum =
1158                         ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1159                 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1160                                          fwd_streams[sm_id]->rx_bad_ip_csum);
1161                 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1162                                                         rx_bad_ip_csum;
1163
1164                 rx_bad_l4_csum =
1165                         ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1166                 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1167                                          fwd_streams[sm_id]->rx_bad_l4_csum);
1168                 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1169                                                         rx_bad_l4_csum;
1170
1171 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1172                 fwd_cycles = (uint64_t) (fwd_cycles +
1173                                          fwd_streams[sm_id]->core_cycles);
1174 #endif
1175         }
1176         total_recv = 0;
1177         total_xmit = 0;
1178         total_rx_dropped = 0;
1179         total_tx_dropped = 0;
1180         total_rx_nombuf  = 0;
1181         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1182                 pt_id = fwd_ports_ids[i];
1183
1184                 port = &ports[pt_id];
1185                 rte_eth_stats_get(pt_id, &stats);
1186                 stats.ipackets -= port->stats.ipackets;
1187                 port->stats.ipackets = 0;
1188                 stats.opackets -= port->stats.opackets;
1189                 port->stats.opackets = 0;
1190                 stats.ibytes   -= port->stats.ibytes;
1191                 port->stats.ibytes = 0;
1192                 stats.obytes   -= port->stats.obytes;
1193                 port->stats.obytes = 0;
1194                 stats.imissed  -= port->stats.imissed;
1195                 port->stats.imissed = 0;
1196                 stats.oerrors  -= port->stats.oerrors;
1197                 port->stats.oerrors = 0;
1198                 stats.rx_nombuf -= port->stats.rx_nombuf;
1199                 port->stats.rx_nombuf = 0;
1200
1201                 total_recv += stats.ipackets;
1202                 total_xmit += stats.opackets;
1203                 total_rx_dropped += stats.imissed;
1204                 total_tx_dropped += port->tx_dropped;
1205                 total_rx_nombuf  += stats.rx_nombuf;
1206
1207                 fwd_port_stats_display(pt_id, &stats);
1208         }
1209         printf("\n  %s Accumulated forward statistics for all ports"
1210                "%s\n",
1211                acc_stats_border, acc_stats_border);
1212         printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1213                "%-"PRIu64"\n"
1214                "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1215                "%-"PRIu64"\n",
1216                total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1217                total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1218         if (total_rx_nombuf > 0)
1219                 printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1220         printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
1221                "%s\n",
1222                acc_stats_border, acc_stats_border);
1223 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1224         if (total_recv > 0)
1225                 printf("\n  CPU cycles/packet=%u (total cycles="
1226                        "%"PRIu64" / total RX packets=%"PRIu64")\n",
1227                        (unsigned int)(fwd_cycles / total_recv),
1228                        fwd_cycles, total_recv);
1229 #endif
1230         printf("\nDone.\n");
1231         test_done = 1;
1232 }
1233
1234 void
1235 dev_set_link_up(portid_t pid)
1236 {
1237         if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1238                 printf("\nFailed to set the link up.\n");
1239 }
1240
1241 void
1242 dev_set_link_down(portid_t pid)
1243 {
1244         if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1245                 printf("\nFailed to set the link down.\n");
1246 }
1247
1248 static int
1249 all_ports_started(void)
1250 {
1251         portid_t pi;
1252         struct rte_port *port;
1253
1254         FOREACH_PORT(pi, ports) {
1255                 port = &ports[pi];
1256                 /* Check if there is a port which is not started */
1257                 if ((port->port_status != RTE_PORT_STARTED) &&
1258                         (port->slave_flag == 0))
1259                         return 0;
1260         }
1261
1262         /* All ports are started */
1263         return 1;
1264 }
1265
1266 int
1267 all_ports_stopped(void)
1268 {
1269         portid_t pi;
1270         struct rte_port *port;
1271
1272         FOREACH_PORT(pi, ports) {
1273                 port = &ports[pi];
1274                 if ((port->port_status != RTE_PORT_STOPPED) &&
1275                         (port->slave_flag == 0))
1276                         return 0;
1277         }
1278
1279         return 1;
1280 }
1281
1282 int
1283 port_is_started(portid_t port_id)
1284 {
1285         if (port_id_is_invalid(port_id, ENABLED_WARN))
1286                 return 0;
1287
1288         if (ports[port_id].port_status != RTE_PORT_STARTED)
1289                 return 0;
1290
1291         return 1;
1292 }
1293
1294 static int
1295 port_is_closed(portid_t port_id)
1296 {
1297         if (port_id_is_invalid(port_id, ENABLED_WARN))
1298                 return 0;
1299
1300         if (ports[port_id].port_status != RTE_PORT_CLOSED)
1301                 return 0;
1302
1303         return 1;
1304 }
1305
1306 int
1307 start_port(portid_t pid)
1308 {
1309         int diag, need_check_link_status = -1;
1310         portid_t pi;
1311         queueid_t qi;
1312         struct rte_port *port;
1313         struct ether_addr mac_addr;
1314
1315         if (port_id_is_invalid(pid, ENABLED_WARN))
1316                 return 0;
1317
1318         if(dcb_config)
1319                 dcb_test = 1;
1320         FOREACH_PORT(pi, ports) {
1321                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1322                         continue;
1323
1324                 need_check_link_status = 0;
1325                 port = &ports[pi];
1326                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1327                                                  RTE_PORT_HANDLING) == 0) {
1328                         printf("Port %d is not stopped, skipping it\n", pi);
1329                         continue;
1330                 }
1331
1332                 if (port->need_reconfig > 0) {
1333                         port->need_reconfig = 0;
1334
1335                         printf("Configuring Port %d (socket %u)\n", pi,
1336                                         port->socket_id);
1337                         /* configure port */
1338                         diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1339                                                 &(port->dev_conf));
1340                         if (diag != 0) {
1341                                 if (rte_atomic16_cmpset(&(port->port_status),
1342                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1343                                         printf("Port %d can not be set back "
1344                                                         "to stopped\n", pi);
1345                                 printf("Fail to configure port %d\n", pi);
1346                                 /* try to reconfigure port next time */
1347                                 port->need_reconfig = 1;
1348                                 return -1;
1349                         }
1350                 }
1351                 if (port->need_reconfig_queues > 0) {
1352                         port->need_reconfig_queues = 0;
1353                         /* setup tx queues */
1354                         for (qi = 0; qi < nb_txq; qi++) {
1355                                 if ((numa_support) &&
1356                                         (txring_numa[pi] != NUMA_NO_CONFIG))
1357                                         diag = rte_eth_tx_queue_setup(pi, qi,
1358                                                 nb_txd,txring_numa[pi],
1359                                                 &(port->tx_conf));
1360                                 else
1361                                         diag = rte_eth_tx_queue_setup(pi, qi,
1362                                                 nb_txd,port->socket_id,
1363                                                 &(port->tx_conf));
1364
1365                                 if (diag == 0)
1366                                         continue;
1367
1368                                 /* Fail to setup tx queue, return */
1369                                 if (rte_atomic16_cmpset(&(port->port_status),
1370                                                         RTE_PORT_HANDLING,
1371                                                         RTE_PORT_STOPPED) == 0)
1372                                         printf("Port %d can not be set back "
1373                                                         "to stopped\n", pi);
1374                                 printf("Fail to configure port %d tx queues\n", pi);
1375                                 /* try to reconfigure queues next time */
1376                                 port->need_reconfig_queues = 1;
1377                                 return -1;
1378                         }
1379                         /* setup rx queues */
1380                         for (qi = 0; qi < nb_rxq; qi++) {
1381                                 if ((numa_support) &&
1382                                         (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1383                                         struct rte_mempool * mp =
1384                                                 mbuf_pool_find(rxring_numa[pi]);
1385                                         if (mp == NULL) {
1386                                                 printf("Failed to setup RX queue:"
1387                                                         " No mempool allocated"
1388                                                         " on socket %d\n",
1389                                                         rxring_numa[pi]);
1390                                                 return -1;
1391                                         }
1392
1393                                         diag = rte_eth_rx_queue_setup(pi, qi,
1394                                              nb_rxd,rxring_numa[pi],
1395                                              &(port->rx_conf),mp);
1396                                 } else {
1397                                         struct rte_mempool *mp =
1398                                                 mbuf_pool_find(port->socket_id);
1399                                         if (mp == NULL) {
1400                                                 printf("Failed to setup RX queue:"
1401                                                         " No mempool allocated"
1402                                                         " on socket %d\n",
1403                                                         port->socket_id);
1404                                                 return -1;
1405                                         }
1406                                         diag = rte_eth_rx_queue_setup(pi, qi,
1407                                              nb_rxd, port->socket_id,
1408                                              &(port->rx_conf), mp);
1409                                 }
1410                                 if (diag == 0)
1411                                         continue;
1412
1413                                 /* Failed to set up an RX queue; revert port state and return */
1414                                 if (rte_atomic16_cmpset(&(port->port_status),
1415                                                         RTE_PORT_HANDLING,
1416                                                         RTE_PORT_STOPPED) == 0)
1417                                         printf("Port %d cannot be set back "
1418                                                         "to stopped\n", pi);
1419                                 printf("Failed to configure port %d RX queues\n", pi);
1420                                 /* try to reconfigure queues next time */
1421                                 port->need_reconfig_queues = 1;
1422                                 return -1;
1423                         }
1424                 }
1425                 /* start port */
1426                 if (rte_eth_dev_start(pi) < 0) {
1427                         printf("Failed to start port %d\n", pi);
1428
1429                         /* Port failed to start; revert port state and try the next one */
1430                         if (rte_atomic16_cmpset(&(port->port_status),
1431                                 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1432                                 printf("Port %d cannot be set back to "
1433                                                         "stopped\n", pi);
1434                         continue;
1435                 }
1436
1437                 if (rte_atomic16_cmpset(&(port->port_status),
1438                         RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1439                         printf("Port %d cannot be set to started\n", pi);
1440
1441                 rte_eth_macaddr_get(pi, &mac_addr);
1442                 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1443                                 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1444                                 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1445                                 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1446
1447                 /* at least one port started, need to check link status */
1448                 need_check_link_status = 1;
1449         }
1450
1451         if (need_check_link_status == 1 && !no_link_check)
1452                 check_all_ports_link_status(RTE_PORT_ALL);
1453         else if (need_check_link_status == 0)
1454                 printf("Please stop the ports first\n");
1455
1456         printf("Done\n");
1457         return 0;
1458 }
1459
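/*
 * Stop one port, or every port when pid == RTE_PORT_ALL.  Ports that are
 * still part of the forwarding configuration or that act as bonding slaves
 * are skipped.  The port state moves STARTED -> HANDLING while
 * rte_eth_dev_stop() runs, then to STOPPED.  In interactive mode this is
 * typically reached through the "port stop <port_id|all>" command.
 */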
1460 void
1461 stop_port(portid_t pid)
1462 {
1463         portid_t pi;
1464         struct rte_port *port;
1465         int need_check_link_status = 0;
1466
1467         if (dcb_test) {
1468                 dcb_test = 0;
1469                 dcb_config = 0;
1470         }
1471
1472         if (port_id_is_invalid(pid, ENABLED_WARN))
1473                 return;
1474
1475         printf("Stopping ports...\n");
1476
1477         FOREACH_PORT(pi, ports) {
1478                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1479                         continue;
1480
1481                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1482                         printf("Please remove port %d from forwarding configuration.\n", pi);
1483                         continue;
1484                 }
1485
1486                 if (port_is_bonding_slave(pi)) {
1487                         printf("Please remove port %d from bonded device.\n", pi);
1488                         continue;
1489                 }
1490
1491                 port = &ports[pi];
1492                 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1493                                                 RTE_PORT_HANDLING) == 0)
1494                         continue;
1495
1496                 rte_eth_dev_stop(pi);
1497
1498                 if (rte_atomic16_cmpset(&(port->port_status),
1499                         RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1500                         printf("Port %d cannot be set to stopped\n", pi);
1501                 need_check_link_status = 1;
1502         }
1503         if (need_check_link_status && !no_link_check)
1504                 check_all_ports_link_status(RTE_PORT_ALL);
1505
1506         printf("Done\n");
1507 }
1508
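/*
 * Close one port, or every port when pid == RTE_PORT_ALL.  A port has to be
 * stopped before it can be closed: the state moves STOPPED -> HANDLING,
 * rte_eth_dev_close() releases the device, and the state ends up CLOSED.
 * Ports that are already closed are reported and skipped.
 */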
1509 void
1510 close_port(portid_t pid)
1511 {
1512         portid_t pi;
1513         struct rte_port *port;
1514
1515         if (port_id_is_invalid(pid, ENABLED_WARN))
1516                 return;
1517
1518         printf("Closing ports...\n");
1519
1520         FOREACH_PORT(pi, ports) {
1521                 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1522                         continue;
1523
1524                 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1525                         printf("Please remove port %d from forwarding configuration.\n", pi);
1526                         continue;
1527                 }
1528
1529                 if (port_is_bonding_slave(pi)) {
1530                         printf("Please remove port %d from bonded device.\n", pi);
1531                         continue;
1532                 }
1533
1534                 port = &ports[pi];
1535                 if (rte_atomic16_cmpset(&(port->port_status),
1536                         RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1537                         printf("Port %d is already closed\n", pi);
1538                         continue;
1539                 }
1540
1541                 if (rte_atomic16_cmpset(&(port->port_status),
1542                         RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1543                         printf("Port %d is not stopped\n", pi);
1544                         continue;
1545                 }
1546
1547                 rte_eth_dev_close(pi);
1548
1549                 if (rte_atomic16_cmpset(&(port->port_status),
1550                         RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1551                         printf("Port %d cannot be set to closed\n", pi);
1552         }
1553
1554         printf("Done\n");
1555 }
1556
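/*
 * Hot-plug a new device through rte_eth_dev_attach().  The identifier is
 * either a PCI address or a virtual device string; the examples
 * "0000:02:00.0" and "eth_pcap0,iface=eth0" are illustrative only, the
 * exact syntax depends on the EAL and the PMDs compiled in.  The new port
 * is reconfigured for its NUMA socket and left in the STOPPED state.
 */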
1557 void
1558 attach_port(char *identifier)
1559 {
1560         portid_t pi = 0;
1561         unsigned int socket_id;
1562
1563         printf("Attaching a new port...\n");
1564
1565         if (identifier == NULL) {
1566                 printf("Invalid parameter specified\n");
1567                 return;
1568         }
1569
1570         if (rte_eth_dev_attach(identifier, &pi))
1571                 return;
1572
1573         ports[pi].enabled = 1;
1574         socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1575         /* if socket_id is invalid, set to 0 */
1576         if (check_socket_id(socket_id) < 0)
1577                 socket_id = 0;
1578         reconfig(pi, socket_id);
1579         rte_eth_promiscuous_enable(pi);
1580
1581         nb_ports = rte_eth_dev_count();
1582
1583         ports[pi].port_status = RTE_PORT_STOPPED;
1584
1585         printf("Port %d is attached. Total number of ports is now %d\n", pi, nb_ports);
1586         printf("Done\n");
1587 }
1588
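/*
 * Hot-unplug a port through rte_eth_dev_detach() and mark its slot as
 * disabled so that FOREACH_PORT skips it.  The port must have been closed
 * first; otherwise the request is rejected with a message.
 */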
1589 void
1590 detach_port(uint8_t port_id)
1591 {
1592         char name[RTE_ETH_NAME_MAX_LEN];
1593
1594         printf("Detaching a port...\n");
1595
1596         if (!port_is_closed(port_id)) {
1597                 printf("Please close port first\n");
1598                 return;
1599         }
1600
1601         if (rte_eth_dev_detach(port_id, name))
1602                 return;
1603
1604         ports[port_id].enabled = 0;
1605         nb_ports = rte_eth_dev_count();
1606
1607         printf("Port '%s' is detached. Total number of ports is now %d\n",
1608                         name, nb_ports);
1609         printf("Done\n");
1610         return;
1611 }
1612
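/*
 * Orderly shutdown used by the signal handler below and, typically, by the
 * interactive "quit" command: stop packet forwarding if it is still
 * running, then stop and close every known port.  Link-status polling is
 * disabled first (no_link_check = 1) so the shutdown does not wait for
 * links to settle.
 */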
1613 void
1614 pmd_test_exit(void)
1615 {
1616         portid_t pt_id;
1617
1618         if (test_done == 0)
1619                 stop_packet_forwarding();
1620
1621         if (ports != NULL) {
1622                 no_link_check = 1;
1623                 FOREACH_PORT(pt_id, ports) {
1624                         printf("\nShutting down port %d...\n", pt_id);
1625                         fflush(stdout);
1626                         stop_port(pt_id);
1627                         close_port(pt_id);
1628                 }
1629         }
1630         printf("\nBye...\n");
1631 }
1632
1633 typedef void (*cmd_func_t)(void);
1634 struct pmd_test_command {
1635         const char *cmd_name;
1636         cmd_func_t cmd_func;
1637 };
1638
1639 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1640
1641 /* Check the link status of all ports for up to 9 s, then print it for each port */
1642 static void
1643 check_all_ports_link_status(uint32_t port_mask)
1644 {
1645 #define CHECK_INTERVAL 100 /* 100ms */
1646 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1647         uint8_t portid, count, all_ports_up, print_flag = 0;
1648         struct rte_eth_link link;
1649
1650         printf("Checking link statuses...\n");
1651         fflush(stdout);
1652         for (count = 0; count <= MAX_CHECK_TIME; count++) {
1653                 all_ports_up = 1;
1654                 FOREACH_PORT(portid, ports) {
1655                         if ((port_mask & (1 << portid)) == 0)
1656                                 continue;
1657                         memset(&link, 0, sizeof(link));
1658                         rte_eth_link_get_nowait(portid, &link);
1659                         /* print link status if flag set */
1660                         if (print_flag == 1) {
1661                                 if (link.link_status)
1662                                         printf("Port %d Link Up - speed %u "
1663                                                 "Mbps - %s\n", (uint8_t)portid,
1664                                                 (unsigned)link.link_speed,
1665                                                 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1666                                                 ("full-duplex") : ("half-duplex"));
1667                                 else
1668                                         printf("Port %d Link Down\n",
1669                                                 (uint8_t)portid);
1670                                 continue;
1671                         }
1672                         /* clear all_ports_up flag if any link down */
1673                         if (link.link_status == ETH_LINK_DOWN) {
1674                                 all_ports_up = 0;
1675                                 break;
1676                         }
1677                 }
1678                 /* after finally printing all link status, get out */
1679                 if (print_flag == 1)
1680                         break;
1681
1682                 if (all_ports_up == 0) {
1683                         fflush(stdout);
1684                         rte_delay_ms(CHECK_INTERVAL);
1685                 }
1686
1687                 /* set the print_flag if all ports up or timeout */
1688                 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1689                         print_flag = 1;
1690                 }
1691         }
1692 }
1693
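/*
 * Program the TX queue -> statistics counter mappings requested on the
 * testpmd command line (typically supplied with the --tx-queue-stats-mapping
 * option) for one port.  Mapping support is optional in the PMD; a negative
 * return value is passed back to the caller.  The RX counterpart below does
 * the same for the RX mappings.
 */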
1694 static int
1695 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1696 {
1697         uint16_t i;
1698         int diag;
1699         uint8_t mapping_found = 0;
1700
1701         for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1702                 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1703                                 (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
1704                         diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1705                                         tx_queue_stats_mappings[i].queue_id,
1706                                         tx_queue_stats_mappings[i].stats_counter_id);
1707                         if (diag != 0)
1708                                 return diag;
1709                         mapping_found = 1;
1710                 }
1711         }
1712         if (mapping_found)
1713                 port->tx_queue_stats_mapping_enabled = 1;
1714         return 0;
1715 }
1716
1717 static int
1718 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1719 {
1720         uint16_t i;
1721         int diag;
1722         uint8_t mapping_found = 0;
1723
1724         for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1725                 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1726                                 (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
1727                         diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1728                                         rx_queue_stats_mappings[i].queue_id,
1729                                         rx_queue_stats_mappings[i].stats_counter_id);
1730                         if (diag != 0)
1731                                 return diag;
1732                         mapping_found = 1;
1733                 }
1734         }
1735         if (mapping_found)
1736                 port->rx_queue_stats_mapping_enabled = 1;
1737         return 0;
1738 }
1739
1740 static void
1741 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1742 {
1743         int diag = 0;
1744
1745         diag = set_tx_queue_stats_mapping_registers(pi, port);
1746         if (diag != 0) {
1747                 if (diag == -ENOTSUP) {
1748                         port->tx_queue_stats_mapping_enabled = 0;
1749                         printf("TX queue stats mapping not supported port id=%d\n", pi);
1750                 }
1751                 else
1752                         rte_exit(EXIT_FAILURE,
1753                                         "set_tx_queue_stats_mapping_registers "
1754                                         "failed for port id=%d diag=%d\n",
1755                                         pi, diag);
1756         }
1757
1758         diag = set_rx_queue_stats_mapping_registers(pi, port);
1759         if (diag != 0) {
1760                 if (diag == -ENOTSUP) {
1761                         port->rx_queue_stats_mapping_enabled = 0;
1762                         printf("RX queue stats mapping not supported port id=%d\n", pi);
1763                 }
1764                 else
1765                         rte_exit(EXIT_FAILURE,
1766                                         "set_rx_queue_stats_mapping_registers "
1767                                         "failed for port id=%d diag=%d\n",
1768                                         pi, diag);
1769         }
1770 }
1771
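/*
 * Seed the per-port RX/TX queue configuration from the driver defaults in
 * dev_info, then apply any thresholds or flags overridden on the testpmd
 * command line; RTE_PMD_PARAM_UNSET marks values that were not supplied.
 */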
1772 static void
1773 rxtx_port_config(struct rte_port *port)
1774 {
1775         port->rx_conf = port->dev_info.default_rxconf;
1776         port->tx_conf = port->dev_info.default_txconf;
1777
1778         /* Check if any RX/TX parameters have been passed */
1779         if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1780                 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1781
1782         if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1783                 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1784
1785         if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1786                 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1787
1788         if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1789                 port->rx_conf.rx_free_thresh = rx_free_thresh;
1790
1791         if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1792                 port->rx_conf.rx_drop_en = rx_drop_en;
1793
1794         if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1795                 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1796
1797         if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1798                 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1799
1800         if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1801                 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1802
1803         if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1804                 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1805
1806         if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1807                 port->tx_conf.tx_free_thresh = tx_free_thresh;
1808
1809         if (txq_flags != RTE_PMD_PARAM_UNSET)
1810                 port->tx_conf.txq_flags = txq_flags;
1811 }
1812
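/*
 * Build the default rte_eth_conf for every enabled port: RSS is enabled
 * only when more than one RX queue is configured, VMDq+RSS is selected
 * when the device exposes VFs, the RX/TX queue settings are taken from
 * rxtx_port_config() and the queue-stats mappings above are programmed.
 */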
1813 void
1814 init_port_config(void)
1815 {
1816         portid_t pid;
1817         struct rte_port *port;
1818
1819         FOREACH_PORT(pid, ports) {
1820                 port = &ports[pid];
1821                 port->dev_conf.rxmode = rx_mode;
1822                 port->dev_conf.fdir_conf = fdir_conf;
1823                 if (nb_rxq > 1) {
1824                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1825                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1826                 } else {
1827                         port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1828                         port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1829                 }
1830
1831                 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1832                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1833                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1834                         else
1835                                 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1836                 }
1837
1838                 if (port->dev_info.max_vfs != 0) {
1839                         if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1840                                 port->dev_conf.rxmode.mq_mode =
1841                                         ETH_MQ_RX_VMDQ_RSS;
1842                         else
1843                                 port->dev_conf.rxmode.mq_mode =
1844                                         ETH_MQ_RX_NONE;
1845
1846                         port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1847                 }
1848
1849                 rxtx_port_config(port);
1850
1851                 rte_eth_macaddr_get(pid, &port->eth_addr);
1852
1853                 map_port_queue_stats_mapping_registers(pid, port);
1854 #ifdef RTE_NIC_BYPASS
1855                 rte_eth_dev_bypass_init(pid);
1856 #endif
1857         }
1858 }
1859
1860 void set_port_slave_flag(portid_t slave_pid)
1861 {
1862         struct rte_port *port;
1863
1864         port = &ports[slave_pid];
1865         port->slave_flag = 1;
1866 }
1867
1868 void clear_port_slave_flag(portid_t slave_pid)
1869 {
1870         struct rte_port *port;
1871
1872         port = &ports[slave_pid];
1873         port->slave_flag = 0;
1874 }
1875
1876 uint8_t port_is_bonding_slave(portid_t slave_pid)
1877 {
1878         struct rte_port *port;
1879
1880         port = &ports[slave_pid];
1881         return port->slave_flag;
1882 }
1883
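/* VLAN IDs used to populate the VMDq+DCB pool map in get_eth_dcb_conf(). */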
1884 const uint16_t vlan_tags[] = {
1885                 0,  1,  2,  3,  4,  5,  6,  7,
1886                 8,  9, 10, 11,  12, 13, 14, 15,
1887                 16, 17, 18, 19, 20, 21, 22, 23,
1888                 24, 25, 26, 27, 28, 29, 30, 31
1889 };
1890
1891 static int
1892 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1893                  enum dcb_mode_enable dcb_mode,
1894                  enum rte_eth_nb_tcs num_tcs,
1895                  uint8_t pfc_en)
1896 {
1897         uint8_t i;
1898
1899         /*
1900          * Builds up the correct configuration for dcb+vt based on the vlan tags array
1901          * given above, and the number of traffic classes available for use.
1902          */
1903         if (dcb_mode == DCB_VT_ENABLED) {
1904                 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1905                                 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1906                 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1907                                 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1908
1909                 /* VMDQ+DCB RX and TX configurations */
1910                 vmdq_rx_conf->enable_default_pool = 0;
1911                 vmdq_rx_conf->default_pool = 0;
1912                 vmdq_rx_conf->nb_queue_pools =
1913                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1914                 vmdq_tx_conf->nb_queue_pools =
1915                         (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1916
1917                 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1918                 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1919                         vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1920                         vmdq_rx_conf->pool_map[i].pools =
1921                                 1 << (i % vmdq_rx_conf->nb_queue_pools);
1922                 }
1923                 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1924                         vmdq_rx_conf->dcb_tc[i] = i;
1925                         vmdq_tx_conf->dcb_tc[i] = i;
1926                 }
1927
1928                 /* set DCB mode of RX and TX of multiple queues */
1929                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1930                 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1931         } else {
1932                 struct rte_eth_dcb_rx_conf *rx_conf =
1933                                 &eth_conf->rx_adv_conf.dcb_rx_conf;
1934                 struct rte_eth_dcb_tx_conf *tx_conf =
1935                                 &eth_conf->tx_adv_conf.dcb_tx_conf;
1936
1937                 rx_conf->nb_tcs = num_tcs;
1938                 tx_conf->nb_tcs = num_tcs;
1939
1940                 for (i = 0; i < num_tcs; i++) {
1941                         rx_conf->dcb_tc[i] = i;
1942                         tx_conf->dcb_tc[i] = i;
1943                 }
1944                 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1945                 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1946                 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1947         }
1948
1949         if (pfc_en)
1950                 eth_conf->dcb_capability_en =
1951                                 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1952         else
1953                 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
1954
1955         return 0;
1956 }
1957
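/*
 * Prepare one port for DCB (optionally DCB+VT) mode: derive nb_rxq/nb_txq
 * from the device capabilities, build the DCB configuration with
 * get_eth_dcb_conf(), enable VLAN filtering and add all vlan_tags entries
 * to the filter table.  Only the software copy of the configuration in
 * ports[pid] is updated here; the device itself is reconfigured the next
 * time the port is started (see start_port() above).
 */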
1958 int
1959 init_port_dcb_config(portid_t pid,
1960                      enum dcb_mode_enable dcb_mode,
1961                      enum rte_eth_nb_tcs num_tcs,
1962                      uint8_t pfc_en)
1963 {
1964         struct rte_eth_conf port_conf;
1965         struct rte_eth_dev_info dev_info;
1966         struct rte_port *rte_port;
1967         int retval;
1968         uint16_t i;
1969
1970         rte_eth_dev_info_get(pid, &dev_info);
1971
1972         /* If dev_info.vmdq_pool_base is greater than 0,
1973          * the queue IDs of the VMDq pools start after the PF queues.
1974          */
1975         if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1976                 printf("VMDQ_DCB multi-queue mode is nonsensical"
1977                         " for port %d.\n", pid);
1978                 return -1;
1979         }
1980
1981         /* Assume the ports in testpmd have the same DCB capability
1982          * and the same number of RX and TX queues in DCB mode
1983          */
1984         if (dcb_mode == DCB_VT_ENABLED) {
1985                 nb_rxq = dev_info.max_rx_queues;
1986                 nb_txq = dev_info.max_tx_queues;
1987         } else {
1988                 /* if VT is disabled, use all PF queues */
1989                 if (dev_info.vmdq_pool_base == 0) {
1990                         nb_rxq = dev_info.max_rx_queues;
1991                         nb_txq = dev_info.max_tx_queues;
1992                 } else {
1993                         nb_rxq = (queueid_t)num_tcs;
1994                         nb_txq = (queueid_t)num_tcs;
1995
1996                 }
1997         }
1998         rx_free_thresh = 64;
1999
2000         memset(&port_conf, 0, sizeof(struct rte_eth_conf));
2001         /* Enter DCB configuration status */
2002         dcb_config = 1;
2003
2004         /* set configuration of DCB in VT mode and DCB in non-VT mode */
2005         retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
2006         if (retval < 0)
2007                 return retval;
2008
2009         rte_port = &ports[pid];
2010         memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2011
2012         rxtx_port_config(rte_port);
2013         /* VLAN filter */
2014         rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2015         for (i = 0; i < RTE_DIM(vlan_tags); i++)
2016                 rx_vft_set(pid, vlan_tags[i], 1);
2017
2018         rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2019         map_port_queue_stats_mapping_registers(pid, rte_port);
2020
2021         rte_port->dcb_flag = 1;
2022
2023         return 0;
2024 }
2025
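/*
 * Allocate the global ports[] array (one slot per possible port id, up to
 * RTE_MAX_ETHPORTS) from the DPDK heap and mark the probed ports as
 * enabled.
 */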
2026 static void
2027 init_port(void)
2028 {
2029         portid_t pid;
2030
2031         /* Configuration of Ethernet ports. */
2032         ports = rte_zmalloc("testpmd: ports",
2033                             sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2034                             RTE_CACHE_LINE_SIZE);
2035         if (ports == NULL) {
2036                 rte_exit(EXIT_FAILURE,
2037                                 "rte_zmalloc(%d struct rte_port) failed\n",
2038                                 RTE_MAX_ETHPORTS);
2039         }
2040
2041         /* enable allocated ports */
2042         for (pid = 0; pid < nb_ports; pid++)
2043                 ports[pid].enabled = 1;
2044 }
2045
2046 static void
2047 force_quit(void)
2048 {
2049         pmd_test_exit();
2050         prompt_exit();
2051 }
2052
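/*
 * SIGINT/SIGTERM handler: tear down the packet-capture framework and all
 * ports, then re-raise the signal with the default disposition so the
 * process exits with the conventional status for that signal.
 */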
2053 static void
2054 signal_handler(int signum)
2055 {
2056         if (signum == SIGINT || signum == SIGTERM) {
2057                 printf("\nSignal %d received, preparing to exit...\n",
2058                                 signum);
2059 #ifdef RTE_LIBRTE_PDUMP
2060                 /* uninitialize packet capture framework */
2061                 rte_pdump_uninit();
2062 #endif
2063                 force_quit();
2064                 /* exit with the expected status */
2065                 signal(signum, SIG_DFL);
2066                 kill(getpid(), signum);
2067         }
2068 }
2069
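/*
 * Start-up sequence: initialize the EAL (and, when enabled, the pdump
 * framework), allocate the port array, parse the testpmd arguments that
 * follow the EAL ones, configure and start all ports in promiscuous mode,
 * and finally either hand control to the interactive prompt or start
 * forwarding immediately and wait for a key press.
 */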
2070 int
2071 main(int argc, char** argv)
2072 {
2073         int  diag;
2074         uint8_t port_id;
2075
2076         signal(SIGINT, signal_handler);
2077         signal(SIGTERM, signal_handler);
2078
2079         diag = rte_eal_init(argc, argv);
2080         if (diag < 0)
2081                 rte_panic("Cannot init EAL\n");
2082
2083 #ifdef RTE_LIBRTE_PDUMP
2084         /* initialize packet capture framework */
2085         rte_pdump_init(NULL);
2086 #endif
2087
2088         nb_ports = (portid_t) rte_eth_dev_count();
2089         if (nb_ports == 0)
2090                 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2091
2092         /* allocate port structures, and init them */
2093         init_port();
2094
2095         set_def_fwd_config();
2096         if (nb_lcores == 0)
2097                 rte_panic("Empty set of forwarding logical cores - check the "
2098                           "core mask supplied in the command parameters\n");
2099
2100         argc -= diag;
2101         argv += diag;
2102         if (argc > 1)
2103                 launch_args_parse(argc, argv);
2104
2105         if (!nb_rxq && !nb_txq)
2106                 printf("Warning: either the number of RX or TX queues should be non-zero\n");
2107
2108         if (nb_rxq > 1 && nb_rxq > nb_txq)
2109                 printf("Warning: nb_rxq=%d enables RSS configuration, "
2110                        "but nb_txq=%d will prevent it from being fully tested.\n",
2111                        nb_rxq, nb_txq);
2112
2113         init_config();
2114         if (start_port(RTE_PORT_ALL) != 0)
2115                 rte_exit(EXIT_FAILURE, "Failed to start ports\n");
2116
2117         /* set all ports to promiscuous mode by default */
2118         FOREACH_PORT(port_id, ports)
2119                 rte_eth_promiscuous_enable(port_id);
2120
2121 #ifdef RTE_LIBRTE_CMDLINE
2122         if (interactive == 1) {
2123                 if (auto_start) {
2124                         printf("Start automatic packet forwarding\n");
2125                         start_packet_forwarding(0);
2126                 }
2127                 prompt();
2128         } else
2129 #endif
2130         {
2131                 char c;
2132                 int rc;
2133
2134                 printf("No interactive command line requested, starting packet forwarding\n");
2135                 start_packet_forwarding(0);
2136                 printf("Press enter to exit\n");
2137                 rc = read(0, &c, 1);
2138                 pmd_test_exit();
2139                 if (rc < 0)
2140                         return 1;
2141         }
2142
2143         return 0;
2144 }