69fa04be4c1f0c55ea14568c635e50f7d9b69420
[deb_dpdk.git] / app / test-pmd / config.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 /*   BSD LICENSE
34  *
35  *   Copyright 2013-2014 6WIND S.A.
36  *
37  *   Redistribution and use in source and binary forms, with or without
38  *   modification, are permitted provided that the following conditions
39  *   are met:
40  *
41  *     * Redistributions of source code must retain the above copyright
42  *       notice, this list of conditions and the following disclaimer.
43  *     * Redistributions in binary form must reproduce the above copyright
44  *       notice, this list of conditions and the following disclaimer in
45  *       the documentation and/or other materials provided with the
46  *       distribution.
47  *     * Neither the name of 6WIND S.A. nor the names of its
48  *       contributors may be used to endorse or promote products derived
49  *       from this software without specific prior written permission.
50  *
51  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62  */
63
64 #include <stdarg.h>
65 #include <errno.h>
66 #include <stdio.h>
67 #include <string.h>
69 #include <stdint.h>
70 #include <inttypes.h>
71
72 #include <sys/queue.h>
73
74 #include <rte_common.h>
75 #include <rte_byteorder.h>
76 #include <rte_debug.h>
77 #include <rte_log.h>
78 #include <rte_memory.h>
79 #include <rte_memcpy.h>
80 #include <rte_memzone.h>
81 #include <rte_launch.h>
82 #include <rte_eal.h>
83 #include <rte_per_lcore.h>
84 #include <rte_lcore.h>
85 #include <rte_atomic.h>
86 #include <rte_branch_prediction.h>
87 #include <rte_mempool.h>
88 #include <rte_mbuf.h>
89 #include <rte_interrupts.h>
90 #include <rte_pci.h>
91 #include <rte_ether.h>
92 #include <rte_ethdev.h>
93 #include <rte_string_fns.h>
94 #include <rte_cycles.h>
95
96 #include "testpmd.h"
97
98 static char *flowtype_to_str(uint16_t flow_type);
99
100 static const struct {
101         enum tx_pkt_split split;
102         const char *name;
103 } tx_split_name[] = {
104         {
105                 .split = TX_PKT_SPLIT_OFF,
106                 .name = "off",
107         },
108         {
109                 .split = TX_PKT_SPLIT_ON,
110                 .name = "on",
111         },
112         {
113                 .split = TX_PKT_SPLIT_RND,
114                 .name = "rand",
115         },
116 };
117
118 struct rss_type_info {
119         char str[32];
120         uint64_t rss_type;
121 };
122
123 static const struct rss_type_info rss_type_table[] = {
124         { "ipv4", ETH_RSS_IPV4 },
125         { "ipv4-frag", ETH_RSS_FRAG_IPV4 },
126         { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
127         { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
128         { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
129         { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
130         { "ipv6", ETH_RSS_IPV6 },
131         { "ipv6-frag", ETH_RSS_FRAG_IPV6 },
132         { "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
133         { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
134         { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
135         { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
136         { "l2-payload", ETH_RSS_L2_PAYLOAD },
137         { "ipv6-ex", ETH_RSS_IPV6_EX },
138         { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
139         { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
140         { "port", ETH_RSS_PORT },
141         { "vxlan", ETH_RSS_VXLAN },
142         { "geneve", ETH_RSS_GENEVE },
143         { "nvgre", ETH_RSS_NVGRE },
144
145 };
146
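/*
 * Helper used by the display routines below: format an Ethernet address
 * into a string and print it right after the given prefix string.
 */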
147 static void
148 print_ethaddr(const char *name, struct ether_addr *eth_addr)
149 {
150         char buf[ETHER_ADDR_FMT_SIZE];
151         ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
152         printf("%s%s", name, buf);
153 }
154
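/*
 * Show the basic statistics of a port (rte_eth_stats_get()) and, using the
 * static per-port snapshots kept below, the Rx/Tx packet rate observed since
 * the previous invocation (packets delta divided by TSC cycles delta).
 */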
155 void
156 nic_stats_display(portid_t port_id)
157 {
158         static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
159         static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
160         static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
161         uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
162         uint64_t mpps_rx, mpps_tx;
163         struct rte_eth_stats stats;
164         struct rte_port *port = &ports[port_id];
165         uint8_t i;
166         portid_t pid;
167
168         static const char *nic_stats_border = "########################";
169
170         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
171                 printf("Valid port range is [0");
172                 FOREACH_PORT(pid, ports)
173                         printf(", %d", pid);
174                 printf("]\n");
175                 return;
176         }
177         rte_eth_stats_get(port_id, &stats);
178         printf("\n  %s NIC statistics for port %-2d %s\n",
179                nic_stats_border, port_id, nic_stats_border);
180
181         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
182                 printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
183                        "%-"PRIu64"\n",
184                        stats.ipackets, stats.imissed, stats.ibytes);
185                 printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
186                 printf("  RX-nombuf:  %-10"PRIu64"\n",
187                        stats.rx_nombuf);
188                 printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
189                        "%-"PRIu64"\n",
190                        stats.opackets, stats.oerrors, stats.obytes);
191         }
192         else {
193                 printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
194                        "    RX-bytes: %10"PRIu64"\n",
195                        stats.ipackets, stats.ierrors, stats.ibytes);
196                 printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
197                 printf("  RX-nombuf:               %10"PRIu64"\n",
198                        stats.rx_nombuf);
199                 printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
200                        "    TX-bytes: %10"PRIu64"\n",
201                        stats.opackets, stats.oerrors, stats.obytes);
202         }
203
204         if (port->rx_queue_stats_mapping_enabled) {
205                 printf("\n");
206                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
207                         printf("  Stats reg %2d RX-packets: %10"PRIu64
208                                "    RX-errors: %10"PRIu64
209                                "    RX-bytes: %10"PRIu64"\n",
210                                i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
211                 }
212         }
213         if (port->tx_queue_stats_mapping_enabled) {
214                 printf("\n");
215                 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
216                         printf("  Stats reg %2d TX-packets: %10"PRIu64
217                                "                             TX-bytes: %10"PRIu64"\n",
218                                i, stats.q_opackets[i], stats.q_obytes[i]);
219                 }
220         }
221
222         diff_cycles = prev_cycles[port_id];
223         prev_cycles[port_id] = rte_rdtsc();
224         if (diff_cycles > 0)
225                 diff_cycles = prev_cycles[port_id] - diff_cycles;
226
227         diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
228                 (stats.ipackets - prev_pkts_rx[port_id]) : 0;
229         diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
230                 (stats.opackets - prev_pkts_tx[port_id]) : 0;
231         prev_pkts_rx[port_id] = stats.ipackets;
232         prev_pkts_tx[port_id] = stats.opackets;
233         mpps_rx = diff_cycles > 0 ?
234                 diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
235         mpps_tx = diff_cycles > 0 ?
236                 diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
237         printf("\n  Throughput (since last show)\n");
238         printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
239                         mpps_rx, mpps_tx);
240
241         printf("  %s############################%s\n",
242                nic_stats_border, nic_stats_border);
243 }
244
245 void
246 nic_stats_clear(portid_t port_id)
247 {
248         portid_t pid;
249
250         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
251                 printf("Valid port range is [0");
252                 FOREACH_PORT(pid, ports)
253                         printf(", %d", pid);
254                 printf("]\n");
255                 return;
256         }
257         rte_eth_stats_reset(port_id);
258         printf("\n  NIC statistics for port %d cleared\n", port_id);
259 }
260
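/*
 * Show the extended statistics of a port.  Retrieval follows the usual
 * two-step xstats pattern: query the number of entries first, allocate the
 * name and value arrays, then fetch both tables.  In short:
 *
 *     cnt = rte_eth_xstats_get_names(port_id, NULL, 0);
 *     names = malloc(cnt * sizeof(*names));
 *     rte_eth_xstats_get_names(port_id, names, cnt);
 *     values = malloc(cnt * sizeof(*values));
 *     rte_eth_xstats_get(port_id, values, cnt);
 */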
261 void
262 nic_xstats_display(portid_t port_id)
263 {
264         struct rte_eth_xstat *xstats;
265         int cnt_xstats, idx_xstat;
266         struct rte_eth_xstat_name *xstats_names;
267
268         printf("###### NIC extended statistics for port %-2d\n", port_id);
269         if (!rte_eth_dev_is_valid_port(port_id)) {
270                 printf("Error: Invalid port number %i\n", port_id);
271                 return;
272         }
273
274         /* Get count */
275         cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
276         if (cnt_xstats  < 0) {
277                 printf("Error: Cannot get count of xstats\n");
278                 return;
279         }
280
281         /* Get id-name lookup table */
282         xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
283         if (xstats_names == NULL) {
284                 printf("Cannot allocate memory for xstats lookup\n");
285                 return;
286         }
287         if (cnt_xstats != rte_eth_xstats_get_names(
288                         port_id, xstats_names, cnt_xstats)) {
289                 printf("Error: Cannot get xstats lookup\n");
290                 free(xstats_names);
291                 return;
292         }
293
294         /* Get stats themselves */
295         xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
296         if (xstats == NULL) {
297                 printf("Cannot allocate memory for xstats\n");
298                 free(xstats_names);
299                 return;
300         }
301         if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
302                 printf("Error: Unable to get xstats\n");
303                 free(xstats_names);
304                 free(xstats);
305                 return;
306         }
307
308         /* Display xstats */
309         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++)
310                 printf("%s: %"PRIu64"\n",
311                         xstats_names[idx_xstat].name,
312                         xstats[idx_xstat].value);
313         free(xstats_names);
314         free(xstats);
315 }
316
317 void
318 nic_xstats_clear(portid_t port_id)
319 {
320         rte_eth_xstats_reset(port_id);
321 }
322
323 void
324 nic_stats_mapping_display(portid_t port_id)
325 {
326         struct rte_port *port = &ports[port_id];
327         uint16_t i;
328         portid_t pid;
329
330         static const char *nic_stats_mapping_border = "########################";
331
332         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
333                 printf("Valid port range is [0");
334                 FOREACH_PORT(pid, ports)
335                         printf(", %d", pid);
336                 printf("]\n");
337                 return;
338         }
339
340         if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
341                 printf("Port id %d - either does not support queue statistic mapping or"
342                        " no queue statistic mapping is set\n", port_id);
343                 return;
344         }
345
346         printf("\n  %s NIC statistics mapping for port %-2d %s\n",
347                nic_stats_mapping_border, port_id, nic_stats_mapping_border);
348
349         if (port->rx_queue_stats_mapping_enabled) {
350                 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
351                         if (rx_queue_stats_mappings[i].port_id == port_id) {
352                                 printf("  RX-queue %2d mapped to Stats Reg %2d\n",
353                                        rx_queue_stats_mappings[i].queue_id,
354                                        rx_queue_stats_mappings[i].stats_counter_id);
355                         }
356                 }
357                 printf("\n");
358         }
359
360
361         if (port->tx_queue_stats_mapping_enabled) {
362                 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
363                         if (tx_queue_stats_mappings[i].port_id == port_id) {
364                                 printf("  TX-queue %2d mapped to Stats Reg %2d\n",
365                                        tx_queue_stats_mappings[i].queue_id,
366                                        tx_queue_stats_mappings[i].stats_counter_id);
367                         }
368                 }
369         }
370
371         printf("  %s####################################%s\n",
372                nic_stats_mapping_border, nic_stats_mapping_border);
373 }
374
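/*
 * Dump the configuration of one RX queue as reported by
 * rte_eth_rx_queue_info_get(): mempool, thresholds, drop/deferred-start
 * flags, scattered RX state and ring size.
 */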
375 void
376 rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
377 {
378         struct rte_eth_rxq_info qinfo;
379         int32_t rc;
380         static const char *info_border = "*********************";
381
382         rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
383         if (rc != 0) {
384                 printf("Failed to retrieve information for port: %hhu, "
385                         "RX queue: %hu\nerror desc: %s(%d)\n",
386                         port_id, queue_id, strerror(-rc), rc);
387                 return;
388         }
389
390         printf("\n%s Infos for port %-2u, RX queue %-2u %s",
391                info_border, port_id, queue_id, info_border);
392
393         printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
394         printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
395         printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
396         printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
397         printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
398         printf("\nRX drop packets: %s",
399                 (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
400         printf("\nRX deferred start: %s",
401                 (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
402         printf("\nRX scattered packets: %s",
403                 (qinfo.scattered_rx != 0) ? "on" : "off");
404         printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
405         printf("\n");
406 }
407
408 void
409 tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
410 {
411         struct rte_eth_txq_info qinfo;
412         int32_t rc;
413         static const char *info_border = "*********************";
414
415         rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
416         if (rc != 0) {
417                 printf("Failed to retrieve information for port: %hhu, "
418                         "TX queue: %hu\nerror desc: %s(%d)\n",
419                         port_id, queue_id, strerror(-rc), rc);
420                 return;
421         }
422
423         printf("\n%s Infos for port %-2u, TX queue %-2u %s",
424                info_border, port_id, queue_id, info_border);
425
426         printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
427         printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
428         printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
429         printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
430         printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
431         printf("\nTX flags: %#x", qinfo.conf.txq_flags);
432         printf("\nTX deferred start: %s",
433                 (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
434         printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
435         printf("\n");
436 }
437
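/*
 * Show the static information of a port: MAC address, socket, link status,
 * promiscuous/allmulticast state, MAC address limits, VLAN offload state,
 * supported RSS flow types and RX/TX descriptor limits.
 */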
438 void
439 port_infos_display(portid_t port_id)
440 {
441         struct rte_port *port;
442         struct ether_addr mac_addr;
443         struct rte_eth_link link;
444         struct rte_eth_dev_info dev_info;
445         int vlan_offload;
446         struct rte_mempool * mp;
447         static const char *info_border = "*********************";
448         portid_t pid;
449
450         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
451                 printf("Valid port range is [0");
452                 FOREACH_PORT(pid, ports)
453                         printf(", %d", pid);
454                 printf("]\n");
455                 return;
456         }
457         port = &ports[port_id];
458         rte_eth_link_get_nowait(port_id, &link);
459         printf("\n%s Infos for port %-2d %s\n",
460                info_border, port_id, info_border);
461         rte_eth_macaddr_get(port_id, &mac_addr);
462         print_ethaddr("MAC address: ", &mac_addr);
463         printf("\nConnect to socket: %u", port->socket_id);
464
465         if (port_numa[port_id] != NUMA_NO_CONFIG) {
466                 mp = mbuf_pool_find(port_numa[port_id]);
467                 if (mp)
468                         printf("\nmemory allocation on the socket: %d",
469                                                         port_numa[port_id]);
470         } else
471                 printf("\nmemory allocation on the socket: %u",port->socket_id);
472
473         printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
474         printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
475         printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
476                ("full-duplex") : ("half-duplex"));
477         printf("Promiscuous mode: %s\n",
478                rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
479         printf("Allmulticast mode: %s\n",
480                rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
481         printf("Maximum number of MAC addresses: %u\n",
482                (unsigned int)(port->dev_info.max_mac_addrs));
483         printf("Maximum number of MAC addresses of hash filtering: %u\n",
484                (unsigned int)(port->dev_info.max_hash_mac_addrs));
485
486         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
487         if (vlan_offload >= 0){
488                 printf("VLAN offload: \n");
489                 if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
490                         printf("  strip on \n");
491                 else
492                         printf("  strip off \n");
493
494                 if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
495                         printf("  filter on \n");
496                 else
497                         printf("  filter off \n");
498
499                 if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
500                         printf("  qinq(extend) on \n");
501                 else
502                         printf("  qinq(extend) off \n");
503         }
504
505         memset(&dev_info, 0, sizeof(dev_info));
506         rte_eth_dev_info_get(port_id, &dev_info);
507         if (dev_info.hash_key_size > 0)
508                 printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
509         if (dev_info.reta_size > 0)
510                 printf("Redirection table size: %u\n", dev_info.reta_size);
511         if (!dev_info.flow_type_rss_offloads)
512                 printf("No flow type is supported.\n");
513         else {
514                 uint16_t i;
515                 char *p;
516
517                 printf("Supported flow types:\n");
518                 for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
519                                                                 i++) {
520                         if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
521                                 continue;
522                         p = flowtype_to_str(i);
523                         printf("  %s\n", (p ? p : "unknown"));
524                 }
525         }
526
527         printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
528         printf("Max possible number of RXDs per queue: %hu\n",
529                 dev_info.rx_desc_lim.nb_max);
530         printf("Min possible number of RXDs per queue: %hu\n",
531                 dev_info.rx_desc_lim.nb_min);
532         printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);
533
534         printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
535         printf("Max possible number of TXDs per queue: %hu\n",
536                 dev_info.tx_desc_lim.nb_max);
537         printf("Min possible number of TXDs per queue: %hu\n",
538                 dev_info.tx_desc_lim.nb_min);
539         printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
540 }
541
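/*
 * Return 0 when port_id identifies an enabled port (or is RTE_PORT_ALL),
 * 1 otherwise.  With ENABLED_WARN an error message is printed as well.
 */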
542 int
543 port_id_is_invalid(portid_t port_id, enum print_warning warning)
544 {
545         if (port_id == (portid_t)RTE_PORT_ALL)
546                 return 0;
547
548         if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled)
549                 return 0;
550
551         if (warning == ENABLED_WARN)
552                 printf("Invalid port %d\n", port_id);
553
554         return 1;
555 }
556
557 static int
558 vlan_id_is_invalid(uint16_t vlan_id)
559 {
560         if (vlan_id < 4096)
561                 return 0;
562         printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
563         return 1;
564 }
565
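/*
 * PCI register access helpers.  Offsets are validated against BAR 0 of the
 * port's PCI device; the actual reads and writes go through the
 * port_id_pci_reg_read()/port_id_pci_reg_write() helpers of testpmd.h.
 */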
566 static int
567 port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
568 {
569         uint64_t pci_len;
570
571         if (reg_off & 0x3) {
572                 printf("Port register offset 0x%X not aligned on a 4-byte "
573                        "boundary\n",
574                        (unsigned)reg_off);
575                 return 1;
576         }
577         pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
578         if (reg_off >= pci_len) {
579                 printf("Port %d: register offset %u (0x%X) out of port PCI "
580                        "resource (length=%"PRIu64")\n",
581                        port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
582                 return 1;
583         }
584         return 0;
585 }
586
587 static int
588 reg_bit_pos_is_invalid(uint8_t bit_pos)
589 {
590         if (bit_pos <= 31)
591                 return 0;
592         printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
593         return 1;
594 }
595
596 #define display_port_and_reg_off(port_id, reg_off) \
597         printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
598
599 static inline void
600 display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
601 {
602         display_port_and_reg_off(port_id, (unsigned)reg_off);
603         printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
604 }
605
606 void
607 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
608 {
609         uint32_t reg_v;
610
611
612         if (port_id_is_invalid(port_id, ENABLED_WARN))
613                 return;
614         if (port_reg_off_is_invalid(port_id, reg_off))
615                 return;
616         if (reg_bit_pos_is_invalid(bit_x))
617                 return;
618         reg_v = port_id_pci_reg_read(port_id, reg_off);
619         display_port_and_reg_off(port_id, (unsigned)reg_off);
620         printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
621 }
622
623 void
624 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
625                            uint8_t bit1_pos, uint8_t bit2_pos)
626 {
627         uint32_t reg_v;
628         uint8_t  l_bit;
629         uint8_t  h_bit;
630
631         if (port_id_is_invalid(port_id, ENABLED_WARN))
632                 return;
633         if (port_reg_off_is_invalid(port_id, reg_off))
634                 return;
635         if (reg_bit_pos_is_invalid(bit1_pos))
636                 return;
637         if (reg_bit_pos_is_invalid(bit2_pos))
638                 return;
639         if (bit1_pos > bit2_pos)
640                 l_bit = bit2_pos, h_bit = bit1_pos;
641         else
642                 l_bit = bit1_pos, h_bit = bit2_pos;
643
644         reg_v = port_id_pci_reg_read(port_id, reg_off);
645         reg_v >>= l_bit;
646         if (h_bit < 31)
647                 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
648         display_port_and_reg_off(port_id, (unsigned)reg_off);
649         printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
650                ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
651 }
652
653 void
654 port_reg_display(portid_t port_id, uint32_t reg_off)
655 {
656         uint32_t reg_v;
657
658         if (port_id_is_invalid(port_id, ENABLED_WARN))
659                 return;
660         if (port_reg_off_is_invalid(port_id, reg_off))
661                 return;
662         reg_v = port_id_pci_reg_read(port_id, reg_off);
663         display_port_reg_value(port_id, reg_off, reg_v);
664 }
665
666 void
667 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
668                  uint8_t bit_v)
669 {
670         uint32_t reg_v;
671
672         if (port_id_is_invalid(port_id, ENABLED_WARN))
673                 return;
674         if (port_reg_off_is_invalid(port_id, reg_off))
675                 return;
676         if (reg_bit_pos_is_invalid(bit_pos))
677                 return;
678         if (bit_v > 1) {
679                 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
680                 return;
681         }
682         reg_v = port_id_pci_reg_read(port_id, reg_off);
683         if (bit_v == 0)
684                 reg_v &= ~(1 << bit_pos);
685         else
686                 reg_v |= (1 << bit_pos);
687         port_id_pci_reg_write(port_id, reg_off, reg_v);
688         display_port_reg_value(port_id, reg_off, reg_v);
689 }
690
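/*
 * Write 'value' into the register bit field [bit1_pos, bit2_pos] (given in
 * either order).  For example, setting bits [4, 6] builds the mask
 * max_v = (1 << 3) - 1 = 0x7, clears the field with reg_v &= ~(0x7 << 4)
 * and then ORs in (value << 4).
 */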
691 void
692 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
693                        uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
694 {
695         uint32_t max_v;
696         uint32_t reg_v;
697         uint8_t  l_bit;
698         uint8_t  h_bit;
699
700         if (port_id_is_invalid(port_id, ENABLED_WARN))
701                 return;
702         if (port_reg_off_is_invalid(port_id, reg_off))
703                 return;
704         if (reg_bit_pos_is_invalid(bit1_pos))
705                 return;
706         if (reg_bit_pos_is_invalid(bit2_pos))
707                 return;
708         if (bit1_pos > bit2_pos)
709                 l_bit = bit2_pos, h_bit = bit1_pos;
710         else
711                 l_bit = bit1_pos, h_bit = bit2_pos;
712
713         if ((h_bit - l_bit) < 31)
714                 max_v = (1 << (h_bit - l_bit + 1)) - 1;
715         else
716                 max_v = 0xFFFFFFFF;
717
718         if (value > max_v) {
719                 printf("Invalid value %u (0x%x); must be <= %u (0x%x)\n",
720                                 (unsigned)value, (unsigned)value,
721                                 (unsigned)max_v, (unsigned)max_v);
722                 return;
723         }
724         reg_v = port_id_pci_reg_read(port_id, reg_off);
725         reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
726         reg_v |= (value << l_bit); /* Set changed bits */
727         port_id_pci_reg_write(port_id, reg_off, reg_v);
728         display_port_reg_value(port_id, reg_off, reg_v);
729 }
730
731 void
732 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
733 {
734         if (port_id_is_invalid(port_id, ENABLED_WARN))
735                 return;
736         if (port_reg_off_is_invalid(port_id, reg_off))
737                 return;
738         port_id_pci_reg_write(port_id, reg_off, reg_v);
739         display_port_reg_value(port_id, reg_off, reg_v);
740 }
741
742 void
743 port_mtu_set(portid_t port_id, uint16_t mtu)
744 {
745         int diag;
746
747         if (port_id_is_invalid(port_id, ENABLED_WARN))
748                 return;
749         diag = rte_eth_dev_set_mtu(port_id, mtu);
750         if (diag == 0)
751                 return;
752         printf("Set MTU failed. diag=%d\n", diag);
753 }
754
755 /*
756  * RX/TX ring descriptors display functions.
757  */
758 int
759 rx_queue_id_is_invalid(queueid_t rxq_id)
760 {
761         if (rxq_id < nb_rxq)
762                 return 0;
763         printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
764         return 1;
765 }
766
767 int
768 tx_queue_id_is_invalid(queueid_t txq_id)
769 {
770         if (txq_id < nb_txq)
771                 return 0;
772         printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
773         return 1;
774 }
775
776 static int
777 rx_desc_id_is_invalid(uint16_t rxdesc_id)
778 {
779         if (rxdesc_id < nb_rxd)
780                 return 0;
781         printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
782                rxdesc_id, nb_rxd);
783         return 1;
784 }
785
786 static int
787 tx_desc_id_is_invalid(uint16_t txdesc_id)
788 {
789         if (txdesc_id < nb_txd)
790                 return 0;
791         printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
792                txdesc_id, nb_txd);
793         return 1;
794 }
795
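/*
 * Rebuild the memzone name under which the PMD is expected to have
 * allocated the queue's descriptor ring and look it up, so the raw
 * descriptors can be dumped by the functions below.
 */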
796 static const struct rte_memzone *
797 ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
798 {
799         char mz_name[RTE_MEMZONE_NAMESIZE];
800         const struct rte_memzone *mz;
801
802         snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
803                  ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
804         mz = rte_memzone_lookup(mz_name);
805         if (mz == NULL)
806                 printf("%s ring memory zone of (port %d, queue %d) not "
807                        "found (zone name = %s)\n",
808                        ring_name, port_id, q_id, mz_name);
809         return mz;
810 }
811
812 union igb_ring_dword {
813         uint64_t dword;
814         struct {
815 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
816                 uint32_t lo;
817                 uint32_t hi;
818 #else
819                 uint32_t hi;
820                 uint32_t lo;
821 #endif
822         } words;
823 };
824
825 struct igb_ring_desc_32_bytes {
826         union igb_ring_dword lo_dword;
827         union igb_ring_dword hi_dword;
828         union igb_ring_dword resv1;
829         union igb_ring_dword resv2;
830 };
831
832 struct igb_ring_desc_16_bytes {
833         union igb_ring_dword lo_dword;
834         union igb_ring_dword hi_dword;
835 };
836
837 static void
838 ring_rxd_display_dword(union igb_ring_dword dword)
839 {
840         printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
841                                         (unsigned)dword.words.hi);
842 }
843
844 static void
845 ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
846 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
847                            uint8_t port_id,
848 #else
849                            __rte_unused uint8_t port_id,
850 #endif
851                            uint16_t desc_id)
852 {
853         struct igb_ring_desc_16_bytes *ring =
854                 (struct igb_ring_desc_16_bytes *)ring_mz->addr;
855 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
856         struct rte_eth_dev_info dev_info;
857
858         memset(&dev_info, 0, sizeof(dev_info));
859         rte_eth_dev_info_get(port_id, &dev_info);
860         if (strstr(dev_info.driver_name, "i40e") != NULL) {
861                 /* 32 bytes RX descriptor, i40e only */
862                 struct igb_ring_desc_32_bytes *ring =
863                         (struct igb_ring_desc_32_bytes *)ring_mz->addr;
864                 ring[desc_id].lo_dword.dword =
865                         rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
866                 ring_rxd_display_dword(ring[desc_id].lo_dword);
867                 ring[desc_id].hi_dword.dword =
868                         rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
869                 ring_rxd_display_dword(ring[desc_id].hi_dword);
870                 ring[desc_id].resv1.dword =
871                         rte_le_to_cpu_64(ring[desc_id].resv1.dword);
872                 ring_rxd_display_dword(ring[desc_id].resv1);
873                 ring[desc_id].resv2.dword =
874                         rte_le_to_cpu_64(ring[desc_id].resv2.dword);
875                 ring_rxd_display_dword(ring[desc_id].resv2);
876
877                 return;
878         }
879 #endif
880         /* 16 bytes RX descriptor */
881         ring[desc_id].lo_dword.dword =
882                 rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
883         ring_rxd_display_dword(ring[desc_id].lo_dword);
884         ring[desc_id].hi_dword.dword =
885                 rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
886         ring_rxd_display_dword(ring[desc_id].hi_dword);
887 }
888
889 static void
890 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
891 {
892         struct igb_ring_desc_16_bytes *ring;
893         struct igb_ring_desc_16_bytes txd;
894
895         ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
896         txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
897         txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
898         printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
899                         (unsigned)txd.lo_dword.words.lo,
900                         (unsigned)txd.lo_dword.words.hi,
901                         (unsigned)txd.hi_dword.words.lo,
902                         (unsigned)txd.hi_dword.words.hi);
903 }
904
905 void
906 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
907 {
908         const struct rte_memzone *rx_mz;
909
910         if (port_id_is_invalid(port_id, ENABLED_WARN))
911                 return;
912         if (rx_queue_id_is_invalid(rxq_id))
913                 return;
914         if (rx_desc_id_is_invalid(rxd_id))
915                 return;
916         rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
917         if (rx_mz == NULL)
918                 return;
919         ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
920 }
921
922 void
923 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
924 {
925         const struct rte_memzone *tx_mz;
926
927         if (port_id_is_invalid(port_id, ENABLED_WARN))
928                 return;
929         if (tx_queue_id_is_invalid(txq_id))
930                 return;
931         if (tx_desc_id_is_invalid(txd_id))
932                 return;
933         tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
934         if (tx_mz == NULL)
935                 return;
936         ring_tx_descriptor_display(tx_mz, txd_id);
937 }
938
939 void
940 fwd_lcores_config_display(void)
941 {
942         lcoreid_t lc_id;
943
944         printf("List of forwarding lcores:");
945         for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
946                 printf(" %2u", fwd_lcores_cpuids[lc_id]);
947         printf("\n");
948 }
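
/*
 * Summarize the current forwarding and RX/TX configuration: forwarding
 * engine, burst size, number of cores/ports/queues/descriptors and the
 * threshold values taken from port 0.
 */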
949 void
950 rxtx_config_display(void)
951 {
952         printf("  %s packet forwarding%s - CRC stripping %s - "
953                "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
954                retry_enabled == 0 ? "" : " with retry",
955                rx_mode.hw_strip_crc ? "enabled" : "disabled",
956                nb_pkt_per_burst);
957
958         if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
959                 printf("  packet len=%u - nb packet segments=%d\n",
960                                 (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
961
962         struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
963         struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;
964
965         printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
966                nb_fwd_lcores, nb_fwd_ports);
967         printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
968                nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
969         printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
970                rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
971                rx_conf->rx_thresh.wthresh);
972         printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
973                nb_txq, nb_txd, tx_conf->tx_free_thresh);
974         printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
975                tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
976                tx_conf->tx_thresh.wthresh);
977         printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
978                tx_conf->tx_rs_thresh, tx_conf->txq_flags);
979 }
980
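/*
 * Query the RSS redirection table of a port and print every entry whose bit
 * is set in reta_conf[].mask.  Entry i lives in group i / RTE_RETA_GROUP_SIZE
 * at position i % RTE_RETA_GROUP_SIZE.
 */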
981 void
982 port_rss_reta_info(portid_t port_id,
983                    struct rte_eth_rss_reta_entry64 *reta_conf,
984                    uint16_t nb_entries)
985 {
986         uint16_t i, idx, shift;
987         int ret;
988
989         if (port_id_is_invalid(port_id, ENABLED_WARN))
990                 return;
991
992         ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
993         if (ret != 0) {
994                 printf("Failed to get RSS RETA info, return code = %d\n", ret);
995                 return;
996         }
997
998         for (i = 0; i < nb_entries; i++) {
999                 idx = i / RTE_RETA_GROUP_SIZE;
1000                 shift = i % RTE_RETA_GROUP_SIZE;
1001                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1002                         continue;
1003                 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1004                                         i, reta_conf[idx].reta[shift]);
1005         }
1006 }
1007
1008 /*
1009  * Displays the RSS hash functions of a port and, optionally, the RSS hash
1010  * key of the port.
1011  */
1012 void
1013 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
1014 {
1015         struct rte_eth_rss_conf rss_conf;
1016         uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1017         uint64_t rss_hf;
1018         uint8_t i;
1019         int diag;
1020         struct rte_eth_dev_info dev_info;
1021         uint8_t hash_key_size;
1022
1023         if (port_id_is_invalid(port_id, ENABLED_WARN))
1024                 return;
1025
1026         memset(&dev_info, 0, sizeof(dev_info));
1027         rte_eth_dev_info_get(port_id, &dev_info);
1028         if (dev_info.hash_key_size > 0 &&
1029                         dev_info.hash_key_size <= sizeof(rss_key))
1030                 hash_key_size = dev_info.hash_key_size;
1031         else {
1032                 printf("dev_info did not provide a valid hash key size\n");
1033                 return;
1034         }
1035
1036         rss_conf.rss_hf = 0;
1037         for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1038                 if (!strcmp(rss_info, rss_type_table[i].str))
1039                         rss_conf.rss_hf = rss_type_table[i].rss_type;
1040         }
1041
1042         /* Get RSS hash key if asked to display it */
1043         rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1044         rss_conf.rss_key_len = hash_key_size;
1045         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1046         if (diag != 0) {
1047                 switch (diag) {
1048                 case -ENODEV:
1049                         printf("port index %d invalid\n", port_id);
1050                         break;
1051                 case -ENOTSUP:
1052                         printf("operation not supported by device\n");
1053                         break;
1054                 default:
1055                         printf("operation failed - diag=%d\n", diag);
1056                         break;
1057                 }
1058                 return;
1059         }
1060         rss_hf = rss_conf.rss_hf;
1061         if (rss_hf == 0) {
1062                 printf("RSS disabled\n");
1063                 return;
1064         }
1065         printf("RSS functions:\n ");
1066         for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1067                 if (rss_hf & rss_type_table[i].rss_type)
1068                         printf("%s ", rss_type_table[i].str);
1069         }
1070         printf("\n");
1071         if (!show_rss_key)
1072                 return;
1073         printf("RSS key:\n");
1074         for (i = 0; i < hash_key_size; i++)
1075                 printf("%02X", rss_key[i]);
1076         printf("\n");
1077 }
1078
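/*
 * Update the RSS hash key (and hash functions) of a port.  The current
 * configuration is read first, and the new key is only programmed with
 * rte_eth_dev_rss_hash_update() when that read succeeds.
 */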
1079 void
1080 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1081                          uint hash_key_len)
1082 {
1083         struct rte_eth_rss_conf rss_conf;
1084         int diag;
1085         unsigned int i;
1086
1087         rss_conf.rss_key = NULL;
1088         rss_conf.rss_key_len = hash_key_len;
1089         rss_conf.rss_hf = 0;
1090         for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1091                 if (!strcmp(rss_type_table[i].str, rss_type))
1092                         rss_conf.rss_hf = rss_type_table[i].rss_type;
1093         }
1094         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1095         if (diag == 0) {
1096                 rss_conf.rss_key = hash_key;
1097                 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1098         }
1099         if (diag == 0)
1100                 return;
1101
1102         switch (diag) {
1103         case -ENODEV:
1104                 printf("port index %d invalid\n", port_id);
1105                 break;
1106         case -ENOTSUP:
1107                 printf("operation not supported by device\n");
1108                 break;
1109         default:
1110                 printf("operation failed - diag=%d\n", diag);
1111                 break;
1112         }
1113 }
1114
1115 /*
1116  * Setup forwarding configuration for each logical core.
1117  */
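/*
 * Streams are spread as evenly as possible.  For example, with 10 forwarding
 * streams and 4 forwarding lcores, nb_fs_per_lcore is 2 and nb_extra is 2:
 * lcores 0-1 get 2 streams each and lcores 2-3 get 3 streams each.
 */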
1118 static void
1119 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1120 {
1121         streamid_t nb_fs_per_lcore;
1122         streamid_t nb_fs;
1123         streamid_t sm_id;
1124         lcoreid_t  nb_extra;
1125         lcoreid_t  nb_fc;
1126         lcoreid_t  nb_lc;
1127         lcoreid_t  lc_id;
1128
1129         nb_fs = cfg->nb_fwd_streams;
1130         nb_fc = cfg->nb_fwd_lcores;
1131         if (nb_fs <= nb_fc) {
1132                 nb_fs_per_lcore = 1;
1133                 nb_extra = 0;
1134         } else {
1135                 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
1136                 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
1137         }
1138
1139         nb_lc = (lcoreid_t) (nb_fc - nb_extra);
1140         sm_id = 0;
1141         for (lc_id = 0; lc_id < nb_lc; lc_id++) {
1142                 fwd_lcores[lc_id]->stream_idx = sm_id;
1143                 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
1144                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1145         }
1146
1147         /*
1148          * Assign extra remaining streams, if any.
1149          */
1150         nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
1151         for (lc_id = 0; lc_id < nb_extra; lc_id++) {
1152                 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
1153                 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
1154                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1155         }
1156 }
1157
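/*
 * Map an RX port to its TX peer according to the configured topology:
 * paired  0<->1, 2<->3, ... (an odd last port pairs with itself),
 * chained 0->1->2->...->0,
 * loop    each port sends back on itself.
 */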
1158 static portid_t
1159 fwd_topology_tx_port_get(portid_t rxp)
1160 {
1161         static int warning_once = 1;
1162
1163         RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
1164
1165         switch (port_topology) {
1166         default:
1167         case PORT_TOPOLOGY_PAIRED:
1168                 if ((rxp & 0x1) == 0) {
1169                         if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
1170                                 return rxp + 1;
1171                         if (warning_once) {
1172                                 printf("\nWarning! port-topology=paired"
1173                                        " and an odd number of forwarding"
1174                                        " ports; the last port will pair"
1175                                        " with itself.\n\n");
1176                                 warning_once = 0;
1177                         }
1178                         return rxp;
1179                 }
1180                 return rxp - 1;
1181         case PORT_TOPOLOGY_CHAINED:
1182                 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
1183         case PORT_TOPOLOGY_LOOP:
1184                 return rxp;
1185         }
1186 }
1187
1188 static void
1189 simple_fwd_config_setup(void)
1190 {
1191         portid_t i;
1192         portid_t j;
1193         portid_t inc = 2;
1194
1195         if (port_topology == PORT_TOPOLOGY_CHAINED ||
1196             port_topology == PORT_TOPOLOGY_LOOP) {
1197                 inc = 1;
1198         } else if (nb_fwd_ports % 2) {
1199                 printf("\nWarning! Cannot handle an odd number of ports "
1200                        "with the current port topology. Configuration "
1201                        "must be changed to have an even number of ports, "
1202                        "or relaunch application with "
1203                        "--port-topology=chained\n\n");
1204         }
1205
1206         cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
1207         cur_fwd_config.nb_fwd_streams =
1208                 (streamid_t) cur_fwd_config.nb_fwd_ports;
1209
1210         /* reinitialize forwarding streams */
1211         init_fwd_streams();
1212
1213         /*
1214          * In the simple forwarding test, the number of forwarding cores
1215          * must be lower or equal to the number of forwarding ports.
1216          */
1217         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1218         if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
1219                 cur_fwd_config.nb_fwd_lcores =
1220                         (lcoreid_t) cur_fwd_config.nb_fwd_ports;
1221         setup_fwd_config_of_each_lcore(&cur_fwd_config);
1222
1223         for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
1224                 if (port_topology != PORT_TOPOLOGY_LOOP)
1225                         j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
1226                 else
1227                         j = i;
1228                 fwd_streams[i]->rx_port   = fwd_ports_ids[i];
1229                 fwd_streams[i]->rx_queue  = 0;
1230                 fwd_streams[i]->tx_port   = fwd_ports_ids[j];
1231                 fwd_streams[i]->tx_queue  = 0;
1232                 fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
1233                 fwd_streams[i]->retry_enabled = retry_enabled;
1234
1235                 if (port_topology == PORT_TOPOLOGY_PAIRED) {
1236                         fwd_streams[j]->rx_port   = fwd_ports_ids[j];
1237                         fwd_streams[j]->rx_queue  = 0;
1238                         fwd_streams[j]->tx_port   = fwd_ports_ids[i];
1239                         fwd_streams[j]->tx_queue  = 0;
1240                         fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port;
1241                         fwd_streams[j]->retry_enabled = retry_enabled;
1242                 }
1243         }
1244 }
1245
1246 /**
1247  * For the RSS forwarding test, all streams are distributed over the lcores.
1248  * Each stream is composed of an RX queue to poll on an RX port for input
1249  * packets, associated with a TX queue of a TX port to send forwarded packets to.
1250  */
1251 static void
1252 rss_fwd_config_setup(void)
1253 {
1254         portid_t   rxp;
1255         portid_t   txp;
1256         queueid_t  rxq;
1257         queueid_t  nb_q;
1258         streamid_t  sm_id;
1259
1260         nb_q = nb_rxq;
1261         if (nb_q > nb_txq)
1262                 nb_q = nb_txq;
1263         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1264         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1265         cur_fwd_config.nb_fwd_streams =
1266                 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1267
1268         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1269                 cur_fwd_config.nb_fwd_lcores =
1270                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
1271
1272         /* reinitialize forwarding streams */
1273         init_fwd_streams();
1274
1275         setup_fwd_config_of_each_lcore(&cur_fwd_config);
1276         rxp = 0; rxq = 0;
1277         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1278                 struct fwd_stream *fs;
1279
1280                 fs = fwd_streams[sm_id];
1281                 txp = fwd_topology_tx_port_get(rxp);
1282                 fs->rx_port = fwd_ports_ids[rxp];
1283                 fs->rx_queue = rxq;
1284                 fs->tx_port = fwd_ports_ids[txp];
1285                 fs->tx_queue = rxq;
1286                 fs->peer_addr = fs->tx_port;
1287                 fs->retry_enabled = retry_enabled;
1288                 rxq = (queueid_t) (rxq + 1);
1289                 if (rxq < nb_q)
1290                         continue;
1291                 /*
1292                  * rxq == nb_q
1293                  * Restart from RX queue 0 on next RX port
1294                  */
1295                 rxq = 0;
1296                 rxp++;
1297         }
1298 }
1299
1300 /**
1301  * For the DCB forwarding test, each core is assigned to a traffic class.
1302  *
1303  * Each core is assigned a set of streams, each stream being composed of
1304  * an RX queue to poll on an RX port for input packets, associated with
1305  * a TX queue of a TX port to send forwarded packets to. All RX and
1306  * TX queues of a stream map to the same traffic class.
1307  * If VMDQ and DCB co-exist, the queues of a traffic class on the different
1308  * pools share the same core.
1309  */
1310 static void
1311 dcb_fwd_config_setup(void)
1312 {
1313         struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
1314         portid_t txp, rxp = 0;
1315         queueid_t txq, rxq = 0;
1316         lcoreid_t  lc_id;
1317         uint16_t nb_rx_queue, nb_tx_queue;
1318         uint16_t i, j, k, sm_id = 0;
1319         uint8_t tc = 0;
1320
1321         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1322         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1323         cur_fwd_config.nb_fwd_streams =
1324                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
1325
1326         /* reinitialize forwarding streams */
1327         init_fwd_streams();
1328         sm_id = 0;
1329         txp = 1;
1330         /* get the dcb info on the first RX and TX ports */
1331         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
1332         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
1333
1334         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1335                 fwd_lcores[lc_id]->stream_nb = 0;
1336                 fwd_lcores[lc_id]->stream_idx = sm_id;
1337                 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
1338                         /* if nb_queue is zero, this tc is not enabled
1339                          * on the POOL
1340                          */
1341                         if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
1342                                 break;
1343                         k = fwd_lcores[lc_id]->stream_nb +
1344                                 fwd_lcores[lc_id]->stream_idx;
1345                         rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
1346                         txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
1347                         nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
1348                         nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
1349                         for (j = 0; j < nb_rx_queue; j++) {
1350                                 struct fwd_stream *fs;
1351
1352                                 fs = fwd_streams[k + j];
1353                                 fs->rx_port = fwd_ports_ids[rxp];
1354                                 fs->rx_queue = rxq + j;
1355                                 fs->tx_port = fwd_ports_ids[txp];
1356                                 fs->tx_queue = txq + j % nb_tx_queue;
1357                                 fs->peer_addr = fs->tx_port;
1358                                 fs->retry_enabled = retry_enabled;
1359                         }
1360                         fwd_lcores[lc_id]->stream_nb +=
1361                                 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
1362                 }
1363                 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
1364
1365                 tc++;
1366                 if (tc < rxp_dcb_info.nb_tcs)
1367                         continue;
1368                 /* Restart from TC 0 on next RX port */
1369                 tc = 0;
1370                 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
1371                         rxp = (portid_t)
1372                                 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
1373                 else
1374                         rxp++;
1375                 if (rxp >= nb_fwd_ports)
1376                         return;
1377                 /* get the dcb information on next RX and TX ports */
1378                 if ((rxp & 0x1) == 0)
1379                         txp = (portid_t) (rxp + 1);
1380                 else
1381                         txp = (portid_t) (rxp - 1);
1382                 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
1383                 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
1384         }
1385 }
1386
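/*
 * ICMP echo mode: one stream per (port, RX queue) pair; replies are sent
 * back on the same port and queue they were received on.
 */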
1387 static void
1388 icmp_echo_config_setup(void)
1389 {
1390         portid_t  rxp;
1391         queueid_t rxq;
1392         lcoreid_t lc_id;
1393         uint16_t  sm_id;
1394
1395         if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
1396                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
1397                         (nb_txq * nb_fwd_ports);
1398         else
1399                 cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1400         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1401         cur_fwd_config.nb_fwd_streams =
1402                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
1403         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1404                 cur_fwd_config.nb_fwd_lcores =
1405                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
1406         if (verbose_level > 0) {
1407                 printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
1408                        __func__,
1409                        cur_fwd_config.nb_fwd_lcores,
1410                        cur_fwd_config.nb_fwd_ports,
1411                        cur_fwd_config.nb_fwd_streams);
1412         }
1413
1414         /* reinitialize forwarding streams */
1415         init_fwd_streams();
1416         setup_fwd_config_of_each_lcore(&cur_fwd_config);
1417         rxp = 0; rxq = 0;
1418         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
1419                 if (verbose_level > 0)
1420                         printf("  core=%d: \n", lc_id);
1421                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
1422                         struct fwd_stream *fs;
1423                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1424                         fs->rx_port = fwd_ports_ids[rxp];
1425                         fs->rx_queue = rxq;
1426                         fs->tx_port = fs->rx_port;
1427                         fs->tx_queue = rxq;
1428                         fs->peer_addr = fs->tx_port;
1429                         fs->retry_enabled = retry_enabled;
1430                         if (verbose_level > 0)
1431                                 printf("  stream=%d port=%d rxq=%d txq=%d\n",
1432                                        sm_id, fs->rx_port, fs->rx_queue,
1433                                        fs->tx_queue);
1434                         rxq = (queueid_t) (rxq + 1);
1435                         if (rxq == nb_rxq) {
1436                                 rxq = 0;
1437                                 rxp = (portid_t) (rxp + 1);
1438                         }
1439                 }
1440         }
1441 }
1442
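/*
 * Select the stream setup routine matching the current forwarding mode:
 * "icmpecho" gets its dedicated setup; otherwise, when both nb_rxq and
 * nb_txq are greater than 1, use the DCB setup if DCB is configured or
 * the RSS setup if not; in all remaining cases fall back to the simple
 * one-queue-per-port setup.
 */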
1443 void
1444 fwd_config_setup(void)
1445 {
1446         cur_fwd_config.fwd_eng = cur_fwd_eng;
1447         if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
1448                 icmp_echo_config_setup();
1449                 return;
1450         }
1451         if ((nb_rxq > 1) && (nb_txq > 1)) {
1452                 if (dcb_config)
1453                         dcb_fwd_config_setup();
1454                 else
1455                         rss_fwd_config_setup();
1456         }
1457         else
1458                 simple_fwd_config_setup();
1459 }
1460
1461 void
1462 pkt_fwd_config_display(struct fwd_config *cfg)
1463 {
1464         struct fwd_stream *fs;
1465         lcoreid_t  lc_id;
1466         streamid_t sm_id;
1467
1468         printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
1469                 "NUMA support %s, MP over anonymous pages %s\n",
1470                 cfg->fwd_eng->fwd_mode_name,
1471                 retry_enabled == 0 ? "" : " with retry",
1472                 cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
1473                 numa_support == 1 ? "enabled" : "disabled",
1474                 mp_anon != 0 ? "enabled" : "disabled");
1475
1476         if (retry_enabled)
1477                 printf("TX retry num: %u, delay between TX retries: %uus\n",
1478                         burst_tx_retry_num, burst_tx_delay_time);
1479         for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
1480                 printf("Logical Core %u (socket %u) forwards packets on "
1481                        "%d streams:",
1482                        fwd_lcores_cpuids[lc_id],
1483                        rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
1484                        fwd_lcores[lc_id]->stream_nb);
1485                 for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
1486                         fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
1487                         printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
1488                                "P=%d/Q=%d (socket %u) ",
1489                                fs->rx_port, fs->rx_queue,
1490                                ports[fs->rx_port].socket_id,
1491                                fs->tx_port, fs->tx_queue,
1492                                ports[fs->tx_port].socket_id);
1493                         print_ethaddr("peer=",
1494                                       &peer_eth_addrs[fs->peer_addr]);
1495                 }
1496                 printf("\n");
1497         }
1498         printf("\n");
1499 }
1500
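/*
 * Record the list of forwarding lcores. The list is walked twice: the
 * first pass only validates every entry (the lcore must be enabled and
 * must not be the master lcore), so that fwd_lcores_cpuids is updated
 * in the second pass only if the whole list is acceptable.
 */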
1501 int
1502 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
1503 {
1504         unsigned int i;
1505         unsigned int lcore_cpuid;
1506         int record_now;
1507
1508         record_now = 0;
1509  again:
1510         for (i = 0; i < nb_lc; i++) {
1511                 lcore_cpuid = lcorelist[i];
1512                 if (! rte_lcore_is_enabled(lcore_cpuid)) {
1513                         printf("lcore %u not enabled\n", lcore_cpuid);
1514                         return -1;
1515                 }
1516                 if (lcore_cpuid == rte_get_master_lcore()) {
1517                         printf("lcore %u cannot be used for packet "
1518                                "forwarding: it is the master lcore, "
1519                                "reserved for command line parsing only\n",
1520                                lcore_cpuid);
1521                         return -1;
1522                 }
1523                 if (record_now)
1524                         fwd_lcores_cpuids[i] = lcore_cpuid;
1525         }
1526         if (record_now == 0) {
1527                 record_now = 1;
1528                 goto again;
1529         }
1530         nb_cfg_lcores = (lcoreid_t) nb_lc;
1531         if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
1532                 printf("previous number of forwarding cores %u - changed to "
1533                        "number of configured cores %u\n",
1534                        (unsigned int) nb_fwd_lcores, nb_lc);
1535                 nb_fwd_lcores = (lcoreid_t) nb_lc;
1536         }
1537
1538         return 0;
1539 }
1540
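/*
 * Convert a 64-bit core mask into an lcore list and hand it to
 * set_fwd_lcores_list(). For example, a mask of 0x6 selects lcores
 * 1 and 2 (bit positions give the lcore ids).
 */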
1541 int
1542 set_fwd_lcores_mask(uint64_t lcoremask)
1543 {
1544         unsigned int lcorelist[64];
1545         unsigned int nb_lc;
1546         unsigned int i;
1547
1548         if (lcoremask == 0) {
1549                 printf("Invalid NULL mask of cores\n");
1550                 return -1;
1551         }
1552         nb_lc = 0;
1553         for (i = 0; i < 64; i++) {
1554                 if (! ((uint64_t)(1ULL << i) & lcoremask))
1555                         continue;
1556                 lcorelist[nb_lc++] = i;
1557         }
1558         return set_fwd_lcores_list(lcorelist, nb_lc);
1559 }
1560
1561 void
1562 set_fwd_lcores_number(uint16_t nb_lc)
1563 {
1564         if (nb_lc > nb_cfg_lcores) {
1565                 printf("nb fwd cores %u > %u (max. number of configured "
1566                        "lcores) - ignored\n",
1567                        (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
1568                 return;
1569         }
1570         nb_fwd_lcores = (lcoreid_t) nb_lc;
1571         printf("Number of forwarding cores set to %u\n",
1572                (unsigned int) nb_fwd_lcores);
1573 }
1574
1575 void
1576 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
1577 {
1578         unsigned int i;
1579         portid_t port_id;
1580         int record_now;
1581
1582         record_now = 0;
1583  again:
1584         for (i = 0; i < nb_pt; i++) {
1585                 port_id = (portid_t) portlist[i];
1586                 if (port_id_is_invalid(port_id, ENABLED_WARN))
1587                         return;
1588                 if (record_now)
1589                         fwd_ports_ids[i] = port_id;
1590         }
1591         if (record_now == 0) {
1592                 record_now = 1;
1593                 goto again;
1594         }
1595         nb_cfg_ports = (portid_t) nb_pt;
1596         if (nb_fwd_ports != (portid_t) nb_pt) {
1597                 printf("previous number of forwarding ports %u - changed to "
1598                        "number of configured ports %u\n",
1599                        (unsigned int) nb_fwd_ports, nb_pt);
1600                 nb_fwd_ports = (portid_t) nb_pt;
1601         }
1602 }
1603
1604 void
1605 set_fwd_ports_mask(uint64_t portmask)
1606 {
1607         unsigned int portlist[64];
1608         unsigned int nb_pt;
1609         unsigned int i;
1610
1611         if (portmask == 0) {
1612                 printf("Invalid NULL mask of ports\n");
1613                 return;
1614         }
1615         nb_pt = 0;
1616         for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) {
1617                 if (! ((uint64_t)(1ULL << i) & portmask))
1618                         continue;
1619                 portlist[nb_pt++] = i;
1620         }
1621         set_fwd_ports_list(portlist, nb_pt);
1622 }
1623
1624 void
1625 set_fwd_ports_number(uint16_t nb_pt)
1626 {
1627         if (nb_pt > nb_cfg_ports) {
1628                 printf("nb fwd ports %u > %u (number of configured "
1629                        "ports) - ignored\n",
1630                        (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
1631                 return;
1632         }
1633         nb_fwd_ports = (portid_t) nb_pt;
1634         printf("Number of forwarding ports set to %u\n",
1635                (unsigned int) nb_fwd_ports);
1636 }
1637
1638 int
1639 port_is_forwarding(portid_t port_id)
1640 {
1641         unsigned int i;
1642
1643         if (port_id_is_invalid(port_id, ENABLED_WARN))
1644                 return -1;
1645
1646         for (i = 0; i < nb_fwd_ports; i++) {
1647                 if (fwd_ports_ids[i] == port_id)
1648                         return 1;
1649         }
1650
1651         return 0;
1652 }
1653
1654 void
1655 set_nb_pkt_per_burst(uint16_t nb)
1656 {
1657         if (nb > MAX_PKT_BURST) {
1658                 printf("nb pkt per burst: %u > %u (maximum number of "
1659                        "packets per burst) - ignored\n",
1660                        (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
1661                 return;
1662         }
1663         nb_pkt_per_burst = nb;
1664         printf("Number of packets per burst set to %u\n",
1665                (unsigned int) nb_pkt_per_burst);
1666 }
1667
1668 static const char *
1669 tx_split_get_name(enum tx_pkt_split split)
1670 {
1671         uint32_t i;
1672
1673         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
1674                 if (tx_split_name[i].split == split)
1675                         return tx_split_name[i].name;
1676         }
1677         return NULL;
1678 }
1679
1680 void
1681 set_tx_pkt_split(const char *name)
1682 {
1683         uint32_t i;
1684
1685         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
1686                 if (strcmp(tx_split_name[i].name, name) == 0) {
1687                         tx_pkt_split = tx_split_name[i].split;
1688                         return;
1689                 }
1690         }
1691         printf("unknown value: \"%s\"\n", name);
1692 }
1693
1694 void
1695 show_tx_pkt_segments(void)
1696 {
1697         uint32_t i, n;
1698         const char *split;
1699
1700         n = tx_pkt_nb_segs;
1701         split = tx_split_get_name(tx_pkt_split);
1702
1703         printf("Number of segments: %u\n", n);
1704         printf("Segment sizes: ");
1705         for (i = 0; i != n - 1; i++)
1706                 printf("%hu,", tx_pkt_seg_lengths[i]);
1707         printf("%hu\n", tx_pkt_seg_lengths[i]);
1708         printf("Split packet: %s\n", split);
1709 }
1710
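/*
 * Configure the segment layout of transmitted packets. Each segment
 * length must fit in an mbuf data buffer and the total length must be
 * at least that of an empty UDP/IPv4 packet (Ethernet header + 20 + 8
 * bytes). For example, assuming the default mbuf data size, segment
 * lengths {64, 64, 128} describe a 256-byte packet split in 3 segments.
 */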
1711 void
1712 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
1713 {
1714         uint16_t tx_pkt_len;
1715         unsigned i;
1716
1717         if (nb_segs >= (unsigned) nb_txd) {
1718                 printf("nb segments per TX packet=%u >= nb_txd=%u - ignored\n",
1719                        nb_segs, (unsigned int) nb_txd);
1720                 return;
1721         }
1722
1723         /*
1724          * Check that each segment length does not exceed the mbuf data
1725          * size.
1726          * Check also that the total packet length is at least the size of
1727          * an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
1728          */
1729         tx_pkt_len = 0;
1730         for (i = 0; i < nb_segs; i++) {
1731                 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
1732                         printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
1733                                i, seg_lengths[i], (unsigned) mbuf_data_size);
1734                         return;
1735                 }
1736                 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
1737         }
1738         if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
1739                 printf("total packet length=%u < %d - give up\n",
1740                                 (unsigned) tx_pkt_len,
1741                                 (int)(sizeof(struct ether_hdr) + 20 + 8));
1742                 return;
1743         }
1744
1745         for (i = 0; i < nb_segs; i++)
1746                 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
1747
1748         tx_pkt_length  = tx_pkt_len;
1749         tx_pkt_nb_segs = (uint8_t) nb_segs;
1750 }
1751
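/*
 * Build, on first call, a '|'-separated string of all forwarding engine
 * names (e.g. "io|mac|..." depending on the engines compiled in) and
 * cache it in a static buffer for later calls.
 */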
1752 char*
1753 list_pkt_forwarding_modes(void)
1754 {
1755         static char fwd_modes[128] = "";
1756         const char *separator = "|";
1757         struct fwd_engine *fwd_eng;
1758         unsigned i = 0;
1759
1760         if (strlen(fwd_modes) == 0) {
1761                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
1762                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
1763                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
1764                         strncat(fwd_modes, separator,
1765                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
1766                 }
1767                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
1768         }
1769
1770         return fwd_modes;
1771 }
1772
1773 char*
1774 list_pkt_forwarding_retry_modes(void)
1775 {
1776         static char fwd_modes[128] = "";
1777         const char *separator = "|";
1778         struct fwd_engine *fwd_eng;
1779         unsigned i = 0;
1780
1781         if (strlen(fwd_modes) == 0) {
1782                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
1783                         if (fwd_eng == &rx_only_engine)
1784                                 continue;
1785                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
1786                                         sizeof(fwd_modes) -
1787                                         strlen(fwd_modes) - 1);
1788                         strncat(fwd_modes, separator,
1789                                         sizeof(fwd_modes) -
1790                                         strlen(fwd_modes) - 1);
1791                 }
1792                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
1793         }
1794
1795         return fwd_modes;
1796 }
1797
1798 void
1799 set_pkt_forwarding_mode(const char *fwd_mode_name)
1800 {
1801         struct fwd_engine *fwd_eng;
1802         unsigned i;
1803
1804         i = 0;
1805         while ((fwd_eng = fwd_engines[i]) != NULL) {
1806                 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
1807                         printf("Set %s packet forwarding mode%s\n",
1808                                fwd_mode_name,
1809                                retry_enabled == 0 ? "" : " with retry");
1810                         cur_fwd_eng = fwd_eng;
1811                         return;
1812                 }
1813                 i++;
1814         }
1815         printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
1816 }
1817
1818 void
1819 set_verbose_level(uint16_t vb_level)
1820 {
1821         printf("Change verbose level from %u to %u\n",
1822                (unsigned int) verbose_level, (unsigned int) vb_level);
1823         verbose_level = vb_level;
1824 }
1825
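/*
 * The VLAN offload setters below follow the same read-modify-write
 * pattern: read the current offload flags with
 * rte_eth_dev_get_vlan_offload(), set or clear the relevant
 * ETH_VLAN_*_OFFLOAD bit, then write the flags back with
 * rte_eth_dev_set_vlan_offload().
 */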
1826 void
1827 vlan_extend_set(portid_t port_id, int on)
1828 {
1829         int diag;
1830         int vlan_offload;
1831
1832         if (port_id_is_invalid(port_id, ENABLED_WARN))
1833                 return;
1834
1835         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1836
1837         if (on)
1838                 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
1839         else
1840                 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
1841
1842         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1843         if (diag < 0)
1844                 printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
1845                "diag=%d\n", port_id, on, diag);
1846 }
1847
1848 void
1849 rx_vlan_strip_set(portid_t port_id, int on)
1850 {
1851         int diag;
1852         int vlan_offload;
1853
1854         if (port_id_is_invalid(port_id, ENABLED_WARN))
1855                 return;
1856
1857         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1858
1859         if (on)
1860                 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
1861         else
1862                 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
1863
1864         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1865         if (diag < 0)
1866                 printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
1867                "diag=%d\n", port_id, on, diag);
1868 }
1869
1870 void
1871 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
1872 {
1873         int diag;
1874
1875         if (port_id_is_invalid(port_id, ENABLED_WARN))
1876                 return;
1877
1878         diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
1879         if (diag < 0)
1880                 printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
1881                "diag=%d\n", port_id, queue_id, on, diag);
1882 }
1883
1884 void
1885 rx_vlan_filter_set(portid_t port_id, int on)
1886 {
1887         int diag;
1888         int vlan_offload;
1889
1890         if (port_id_is_invalid(port_id, ENABLED_WARN))
1891                 return;
1892
1893         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1894
1895         if (on)
1896                 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
1897         else
1898                 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
1899
1900         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
1901         if (diag < 0)
1902                 printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
1903                "diag=%d\n", port_id, on, diag);
1904 }
1905
1906 int
1907 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
1908 {
1909         int diag;
1910
1911         if (port_id_is_invalid(port_id, ENABLED_WARN))
1912                 return 1;
1913         if (vlan_id_is_invalid(vlan_id))
1914                 return 1;
1915         diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
1916         if (diag == 0)
1917                 return 0;
1918         printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
1919                "diag=%d\n",
1920                port_id, vlan_id, on, diag);
1921         return -1;
1922 }
1923
1924 void
1925 rx_vlan_all_filter_set(portid_t port_id, int on)
1926 {
1927         uint16_t vlan_id;
1928
1929         if (port_id_is_invalid(port_id, ENABLED_WARN))
1930                 return;
1931         for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
1932                 if (rx_vft_set(port_id, vlan_id, on))
1933                         break;
1934         }
1935 }
1936
1937 void
1938 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
1939 {
1940         int diag;
1941
1942         if (port_id_is_invalid(port_id, ENABLED_WARN))
1943                 return;
1944
1945         diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
1946         if (diag == 0)
1947                 return;
1948
1949         printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
1950                "diag=%d\n",
1951                port_id, vlan_type, tp_id, diag);
1952 }
1953
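/*
 * Request insertion of a single VLAN tag on transmitted packets. This
 * is refused while the QinQ (VLAN extend) offload is enabled; otherwise
 * any previous VLAN/QinQ insertion setting is reset before recording
 * the new VLAN id in the per-port TX offload flags.
 */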
1954 void
1955 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
1956 {
1957         int vlan_offload;
1958         if (port_id_is_invalid(port_id, ENABLED_WARN))
1959                 return;
1960         if (vlan_id_is_invalid(vlan_id))
1961                 return;
1962
1963         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1964         if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
1965                 printf("Error: single VLAN cannot be set while QinQ is enabled.\n");
1966                 return;
1967         }
1968
1969         tx_vlan_reset(port_id);
1970         ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
1971         ports[port_id].tx_vlan_id = vlan_id;
1972 }
1973
1974 void
1975 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
1976 {
1977         int vlan_offload;
1978         if (port_id_is_invalid(port_id, ENABLED_WARN))
1979                 return;
1980         if (vlan_id_is_invalid(vlan_id))
1981                 return;
1982         if (vlan_id_is_invalid(vlan_id_outer))
1983                 return;
1984
1985         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
1986         if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
1987                 printf("Error: QinQ must be enabled before setting QinQ insertion.\n");
1988                 return;
1989         }
1990
1991         tx_vlan_reset(port_id);
1992         ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
1993         ports[port_id].tx_vlan_id = vlan_id;
1994         ports[port_id].tx_vlan_id_outer = vlan_id_outer;
1995 }
1996
1997 void
1998 tx_vlan_reset(portid_t port_id)
1999 {
2000         if (port_id_is_invalid(port_id, ENABLED_WARN))
2001                 return;
2002         ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
2003                                 TESTPMD_TX_OFFLOAD_INSERT_QINQ);
2004         ports[port_id].tx_vlan_id = 0;
2005         ports[port_id].tx_vlan_id_outer = 0;
2006 }
2007
2008 void
2009 tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
2010 {
2011         if (port_id_is_invalid(port_id, ENABLED_WARN))
2012                 return;
2013
2014         rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
2015 }
2016
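/*
 * Map an RX or TX queue of a port to one of the
 * RTE_ETHDEV_QUEUE_STAT_CNTRS per-queue statistics counters. An
 * existing mapping for the same (port, queue) is updated in place;
 * otherwise a new entry is appended to the relevant mapping table.
 */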
2017 void
2018 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
2019 {
2020         uint16_t i;
2021         uint8_t existing_mapping_found = 0;
2022
2023         if (port_id_is_invalid(port_id, ENABLED_WARN))
2024                 return;
2025
2026         if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
2027                 return;
2028
2029         if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
2030                 printf("map_value not in required range 0..%d\n",
2031                                 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
2032                 return;
2033         }
2034
2035         if (!is_rx) { /*then tx*/
2036                 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2037                         if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2038                             (tx_queue_stats_mappings[i].queue_id == queue_id)) {
2039                                 tx_queue_stats_mappings[i].stats_counter_id = map_value;
2040                                 existing_mapping_found = 1;
2041                                 break;
2042                         }
2043                 }
2044                 if (!existing_mapping_found) { /* A new additional mapping... */
2045                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
2046                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
2047                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
2048                         nb_tx_queue_stats_mappings++;
2049                 }
2050         }
2051         else { /*rx*/
2052                 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2053                         if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2054                             (rx_queue_stats_mappings[i].queue_id == queue_id)) {
2055                                 rx_queue_stats_mappings[i].stats_counter_id = map_value;
2056                                 existing_mapping_found = 1;
2057                                 break;
2058                         }
2059                 }
2060                 if (!existing_mapping_found) { /* A new additional mapping... */
2061                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
2062                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
2063                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
2064                         nb_rx_queue_stats_mappings++;
2065                 }
2066         }
2067 }
2068
2069 static inline void
2070 print_fdir_mask(struct rte_eth_fdir_masks *mask)
2071 {
2072         printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));
2073
2074         if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2075                 printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
2076                         " tunnel_id: 0x%08x",
2077                         mask->mac_addr_byte_mask, mask->tunnel_type_mask,
2078                         rte_be_to_cpu_32(mask->tunnel_id_mask));
2079         else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2080                 printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
2081                         rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
2082                         rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));
2083
2084                 printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
2085                         rte_be_to_cpu_16(mask->src_port_mask),
2086                         rte_be_to_cpu_16(mask->dst_port_mask));
2087
2088                 printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2089                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
2090                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
2091                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
2092                         rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));
2093
2094                 printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
2095                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
2096                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
2097                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
2098                         rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
2099         }
2100
2101         printf("\n");
2102 }
2103
2104 static inline void
2105 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2106 {
2107         struct rte_eth_flex_payload_cfg *cfg;
2108         uint32_t i, j;
2109
2110         for (i = 0; i < flex_conf->nb_payloads; i++) {
2111                 cfg = &flex_conf->flex_set[i];
2112                 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
2113                         printf("\n    RAW:  ");
2114                 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
2115                         printf("\n    L2_PAYLOAD:  ");
2116                 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
2117                         printf("\n    L3_PAYLOAD:  ");
2118                 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
2119                         printf("\n    L4_PAYLOAD:  ");
2120                 else
2121                         printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
2122                 for (j = 0; j < num; j++)
2123                         printf("  %-5u", cfg->src_offset[j]);
2124         }
2125         printf("\n");
2126 }
2127
2128 static char *
2129 flowtype_to_str(uint16_t flow_type)
2130 {
2131         struct flow_type_info {
2132                 char str[32];
2133                 uint16_t ftype;
2134         };
2135
2136         uint8_t i;
2137         static struct flow_type_info flowtype_str_table[] = {
2138                 {"raw", RTE_ETH_FLOW_RAW},
2139                 {"ipv4", RTE_ETH_FLOW_IPV4},
2140                 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
2141                 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
2142                 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
2143                 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
2144                 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
2145                 {"ipv6", RTE_ETH_FLOW_IPV6},
2146                 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
2147                 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
2148                 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
2149                 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
2150                 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
2151                 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
2152                 {"port", RTE_ETH_FLOW_PORT},
2153                 {"vxlan", RTE_ETH_FLOW_VXLAN},
2154                 {"geneve", RTE_ETH_FLOW_GENEVE},
2155                 {"nvgre", RTE_ETH_FLOW_NVGRE},
2156         };
2157
2158         for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
2159                 if (flowtype_str_table[i].ftype == flow_type)
2160                         return flowtype_str_table[i].str;
2161         }
2162
2163         return NULL;
2164 }
2165
2166 static inline void
2167 print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2168 {
2169         struct rte_eth_fdir_flex_mask *mask;
2170         uint32_t i, j;
2171         char *p;
2172
2173         for (i = 0; i < flex_conf->nb_flexmasks; i++) {
2174                 mask = &flex_conf->flex_mask[i];
2175                 p = flowtype_to_str(mask->flow_type);
2176                 printf("\n    %s:\t", p ? p : "unknown");
2177                 for (j = 0; j < num; j++)
2178                         printf(" %02x", mask->mask[j]);
2179         }
2180         printf("\n");
2181 }
2182
2183 static inline void
2184 print_fdir_flow_type(uint32_t flow_types_mask)
2185 {
2186         int i;
2187         char *p;
2188
2189         for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
2190                 if (!(flow_types_mask & (1 << i)))
2191                         continue;
2192                 p = flowtype_to_str(i);
2193                 if (p)
2194                         printf(" %s", p);
2195                 else
2196                         printf(" unknown");
2197         }
2198         printf("\n");
2199 }
2200
2201 void
2202 fdir_get_infos(portid_t port_id)
2203 {
2204         struct rte_eth_fdir_stats fdir_stat;
2205         struct rte_eth_fdir_info fdir_info;
2206         int ret;
2207
2208         static const char *fdir_stats_border = "########################";
2209
2210         if (port_id_is_invalid(port_id, ENABLED_WARN))
2211                 return;
2212         ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
2213         if (ret < 0) {
2214                 printf("\n FDIR is not supported on port %-2d\n",
2215                         port_id);
2216                 return;
2217         }
2218
2219         memset(&fdir_info, 0, sizeof(fdir_info));
2220         rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
2221                                RTE_ETH_FILTER_INFO, &fdir_info);
2222         memset(&fdir_stat, 0, sizeof(fdir_stat));
2223         rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
2224                                RTE_ETH_FILTER_STATS, &fdir_stat);
2225         printf("\n  %s FDIR infos for port %-2d     %s\n",
2226                fdir_stats_border, port_id, fdir_stats_border);
2227         printf("  MODE: ");
2228         if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
2229                 printf("  PERFECT\n");
2230         else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
2231                 printf("  PERFECT-MAC-VLAN\n");
2232         else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
2233                 printf("  PERFECT-TUNNEL\n");
2234         else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
2235                 printf("  SIGNATURE\n");
2236         else
2237                 printf("  DISABLED\n");
2238         if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
2239                 && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
2240                 printf("  SUPPORTED FLOW TYPE: ");
2241                 print_fdir_flow_type(fdir_info.flow_types_mask[0]);
2242         }
2243         printf("  FLEX PAYLOAD INFO:\n");
2244         printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
2245                "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
2246                "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
2247                 fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
2248                 fdir_info.flex_payload_unit,
2249                 fdir_info.max_flex_payload_segment_num,
2250                 fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
2251         printf("  MASK: ");
2252         print_fdir_mask(&fdir_info.mask);
2253         if (fdir_info.flex_conf.nb_payloads > 0) {
2254                 printf("  FLEX PAYLOAD SRC OFFSET:");
2255                 print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
2256         }
2257         if (fdir_info.flex_conf.nb_flexmasks > 0) {
2258                 printf("  FLEX MASK CFG:");
2259                 print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
2260         }
2261         printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
2262                fdir_stat.guarant_cnt, fdir_stat.best_cnt);
2263         printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
2264                fdir_info.guarant_spc, fdir_info.best_spc);
2265         printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
2266                "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
2267                "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
2268                "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
2269                fdir_stat.collision, fdir_stat.free,
2270                fdir_stat.maxhash, fdir_stat.maxlen,
2271                fdir_stat.add, fdir_stat.remove,
2272                fdir_stat.f_add, fdir_stat.f_remove);
2273         printf("  %s############################%s\n",
2274                fdir_stats_border, fdir_stats_border);
2275 }
2276
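/*
 * Record a flow director flex mask in the port configuration: reuse the
 * entry matching the flow type if one already exists, otherwise append
 * a new entry while the flex mask table still has room.
 */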
2277 void
2278 fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
2279 {
2280         struct rte_port *port;
2281         struct rte_eth_fdir_flex_conf *flex_conf;
2282         int i, idx = 0;
2283
2284         port = &ports[port_id];
2285         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
2286         for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
2287                 if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
2288                         idx = i;
2289                         break;
2290                 }
2291         }
2292         if (i >= RTE_ETH_FLOW_MAX) {
2293                 if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
2294                         idx = flex_conf->nb_flexmasks;
2295                         flex_conf->nb_flexmasks++;
2296                 } else {
2297                         printf("The flex mask table is full. Cannot set flex"
2298                                 " mask for flow_type(%u).\n", cfg->flow_type);
2299                         return;
2300                 }
2301         }
2302         (void)rte_memcpy(&flex_conf->flex_mask[idx],
2303                          cfg,
2304                          sizeof(struct rte_eth_fdir_flex_mask));
2305 }
2306
2307 void
2308 fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
2309 {
2310         struct rte_port *port;
2311         struct rte_eth_fdir_flex_conf *flex_conf;
2312         int i, idx = 0;
2313
2314         port = &ports[port_id];
2315         flex_conf = &port->dev_conf.fdir_conf.flex_conf;
2316         for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
2317                 if (cfg->type == flex_conf->flex_set[i].type) {
2318                         idx = i;
2319                         break;
2320                 }
2321         }
2322         if (i >= RTE_ETH_PAYLOAD_MAX) {
2323                 if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
2324                         idx = flex_conf->nb_payloads;
2325                         flex_conf->nb_payloads++;
2326                 } else {
2327                         printf("The flex payload table is full. Cannot set"
2328                                 " flex payload for type(%u).\n", cfg->type);
2329                         return;
2330                 }
2331         }
2332         (void)rte_memcpy(&flex_conf->flex_set[idx],
2333                          cfg,
2334                          sizeof(struct rte_eth_flex_payload_cfg));
2335
2336 }
2337
2338 void
2339 set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
2340 {
2341         int diag;
2342
2343         if (port_id_is_invalid(port_id, ENABLED_WARN))
2344                 return;
2345         if (is_rx)
2346                 diag = rte_eth_dev_set_vf_rx(port_id, vf, on);
2347         else
2348                 diag = rte_eth_dev_set_vf_tx(port_id, vf, on);
2349         if (diag == 0)
2350                 return;
2351         if (is_rx)
2352                 printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
2353                         "diag=%d\n", port_id, diag);
2354         else
2355                 printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
2356                         "diag=%d\n", port_id, diag);
2357
2358 }
2359
2360 void
2361 set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
2362 {
2363         int diag;
2364
2365         if (port_id_is_invalid(port_id, ENABLED_WARN))
2366                 return;
2367         if (vlan_id_is_invalid(vlan_id))
2368                 return;
2369         diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
2370         if (diag == 0)
2371                 return;
2372         printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
2373                "diag=%d\n", port_id, diag);
2374 }
2375
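/*
 * Limit the TX rate of a single queue. The rate is compared against the
 * current link speed (both presumably expressed in Mbit/s) and rejected
 * if it exceeds it before being passed to rte_eth_set_queue_rate_limit().
 */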
2376 int
2377 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
2378 {
2379         int diag;
2380         struct rte_eth_link link;
2381
2382         if (port_id_is_invalid(port_id, ENABLED_WARN))
2383                 return 1;
2384         rte_eth_link_get_nowait(port_id, &link);
2385         if (rate > link.link_speed) {
2386                 printf("Invalid rate value %u: bigger than link speed %u\n",
2387                         rate, link.link_speed);
2388                 return 1;
2389         }
2390         diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
2391         if (diag == 0)
2392                 return diag;
2393         printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
2394                 port_id, diag);
2395         return diag;
2396 }
2397
2398 int
2399 set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
2400 {
2401         int diag;
2402         struct rte_eth_link link;
2403
2404         if (q_msk == 0)
2405                 return 0;
2406
2407         if (port_id_is_invalid(port_id, ENABLED_WARN))
2408                 return 1;
2409         rte_eth_link_get_nowait(port_id, &link);
2410         if (rate > link.link_speed) {
2411                 printf("Invalid rate value %u: bigger than link speed %u\n",
2412                         rate, link.link_speed);
2413                 return 1;
2414         }
2415         diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
2416         if (diag == 0)
2417                 return diag;
2418         printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
2419                 port_id, diag);
2420         return diag;
2421 }
2422
2423 /*
2424  * Functions to manage the set of filtered Multicast MAC addresses.
2425  *
2426  * A pool of filtered multicast MAC addresses is associated with each port.
2427  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
2428  * The address of the pool and the number of valid multicast MAC addresses
2429  * recorded in the pool are stored in the fields "mc_addr_pool" and
2430  * "mc_addr_nb" of the "rte_port" data structure.
2431  *
2432  * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes
2433  * to be supplied a contiguous array of multicast MAC addresses.
2434  * To comply with this constraint, the set of multicast addresses recorded
2435  * into the pool are systematically compacted at the beginning of the pool.
2436  * Hence, when a multicast address is removed from the pool, all following
2437  * addresses, if any, are copied back to keep the set contiguous.
2438  */
2439 #define MCAST_POOL_INC 32
2440
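/*
 * Grow the multicast address pool by one entry. The pool is reallocated
 * only when the current count is a multiple of MCAST_POOL_INC: e.g.
 * going from 32 to 33 recorded addresses triggers a realloc to 64
 * entries, while going from 33 to 34 only increments the counter.
 */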
2441 static int
2442 mcast_addr_pool_extend(struct rte_port *port)
2443 {
2444         struct ether_addr *mc_pool;
2445         size_t mc_pool_size;
2446
2447         /*
2448          * If a free entry is available at the end of the pool, just
2449          * increment the number of recorded multicast addresses.
2450          */
2451         if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
2452                 port->mc_addr_nb++;
2453                 return 0;
2454         }
2455
2456         /*
2457          * [re]allocate a pool with MCAST_POOL_INC more entries.
2458          * The previous test guarantees that port->mc_addr_nb is a multiple
2459          * of MCAST_POOL_INC.
2460          */
2461         mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
2462                                                     MCAST_POOL_INC);
2463         mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
2464                                                 mc_pool_size);
2465         if (mc_pool == NULL) {
2466                 printf("allocation of pool of %u multicast addresses failed\n",
2467                        port->mc_addr_nb + MCAST_POOL_INC);
2468                 return -ENOMEM;
2469         }
2470
2471         port->mc_addr_pool = mc_pool;
2472         port->mc_addr_nb++;
2473         return 0;
2474
2475 }
2476
2477 static void
2478 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
2479 {
2480         port->mc_addr_nb--;
2481         if (addr_idx == port->mc_addr_nb) {
2482                 /* No need to recompact the set of multicast addresses. */
2483                 if (port->mc_addr_nb == 0) {
2484                         /* free the pool of multicast addresses. */
2485                         free(port->mc_addr_pool);
2486                         port->mc_addr_pool = NULL;
2487                 }
2488                 return;
2489         }
2490         memmove(&port->mc_addr_pool[addr_idx],
2491                 &port->mc_addr_pool[addr_idx + 1],
2492                 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
2493 }
2494
2495 static void
2496 eth_port_multicast_addr_list_set(uint8_t port_id)
2497 {
2498         struct rte_port *port;
2499         int diag;
2500
2501         port = &ports[port_id];
2502         diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
2503                                             port->mc_addr_nb);
2504         if (diag == 0)
2505                 return;
2506         printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
2507                port_id, port->mc_addr_nb, -diag);
2508 }
2509
2510 void
2511 mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
2512 {
2513         struct rte_port *port;
2514         uint32_t i;
2515
2516         if (port_id_is_invalid(port_id, ENABLED_WARN))
2517                 return;
2518
2519         port = &ports[port_id];
2520
2521         /*
2522          * Check that the added multicast MAC address is not already recorded
2523          * in the pool of multicast addresses.
2524          */
2525         for (i = 0; i < port->mc_addr_nb; i++) {
2526                 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
2527                         printf("multicast address already filtered by port\n");
2528                         return;
2529                 }
2530         }
2531
2532         if (mcast_addr_pool_extend(port) != 0)
2533                 return;
2534         ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
2535         eth_port_multicast_addr_list_set(port_id);
2536 }
2537
2538 void
2539 mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
2540 {
2541         struct rte_port *port;
2542         uint32_t i;
2543
2544         if (port_id_is_invalid(port_id, ENABLED_WARN))
2545                 return;
2546
2547         port = &ports[port_id];
2548
2549         /*
2550          * Search the pool of multicast MAC addresses for the removed address.
2551          */
2552         for (i = 0; i < port->mc_addr_nb; i++) {
2553                 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
2554                         break;
2555         }
2556         if (i == port->mc_addr_nb) {
2557                 printf("multicast address not filtered by port %d\n", port_id);
2558                 return;
2559         }
2560
2561         mcast_addr_pool_remove(port, i);
2562         eth_port_multicast_addr_list_set(port_id);
2563 }
2564
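/*
 * Display the DCB configuration of a port as reported by
 * rte_eth_dev_get_dcb_info(): for each traffic class, its priority
 * mapping, bandwidth percentage and the base/number of its RX and TX
 * queues (only the queues of VMDq pool 0 are shown).
 */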
2565 void
2566 port_dcb_info_display(uint8_t port_id)
2567 {
2568         struct rte_eth_dcb_info dcb_info;
2569         uint16_t i;
2570         int ret;
2571         static const char *border = "================";
2572
2573         if (port_id_is_invalid(port_id, ENABLED_WARN))
2574                 return;
2575
2576         ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
2577         if (ret) {
2578                 printf("\n Failed to get dcb infos on port %-2d\n",
2579                         port_id);
2580                 return;
2581         }
2582         printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
2583         printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
2584         printf("\n  TC :        ");
2585         for (i = 0; i < dcb_info.nb_tcs; i++)
2586                 printf("\t%4d", i);
2587         printf("\n  Priority :  ");
2588         for (i = 0; i < dcb_info.nb_tcs; i++)
2589                 printf("\t%4d", dcb_info.prio_tc[i]);
2590         printf("\n  BW percent :");
2591         for (i = 0; i < dcb_info.nb_tcs; i++)
2592                 printf("\t%4d%%", dcb_info.tc_bws[i]);
2593         printf("\n  RXQ base :  ");
2594         for (i = 0; i < dcb_info.nb_tcs; i++)
2595                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
2596         printf("\n  RXQ number :");
2597         for (i = 0; i < dcb_info.nb_tcs; i++)
2598                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
2599         printf("\n  TXQ base :  ");
2600         for (i = 0; i < dcb_info.nb_tcs; i++)
2601                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
2602         printf("\n  TXQ number :");
2603         for (i = 0; i < dcb_info.nb_tcs; i++)
2604                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
2605         printf("\n");
2606 }