New upstream version 17.11.3
[deb_dpdk.git] / app / test-pmd / config.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   Copyright 2013-2014 6WIND S.A.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <stdarg.h>
36 #include <errno.h>
37 #include <stdio.h>
38 #include <string.h>
39 #include <stdint.h>
40 #include <inttypes.h>
41
42 #include <sys/queue.h>
43 #include <sys/types.h>
44 #include <sys/stat.h>
45 #include <fcntl.h>
46 #include <unistd.h>
47
48 #include <rte_common.h>
49 #include <rte_byteorder.h>
50 #include <rte_debug.h>
51 #include <rte_log.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_eal.h>
57 #include <rte_per_lcore.h>
58 #include <rte_lcore.h>
59 #include <rte_atomic.h>
60 #include <rte_branch_prediction.h>
61 #include <rte_mempool.h>
62 #include <rte_mbuf.h>
63 #include <rte_interrupts.h>
64 #include <rte_pci.h>
65 #include <rte_ether.h>
66 #include <rte_ethdev.h>
67 #include <rte_string_fns.h>
68 #include <rte_cycles.h>
69 #include <rte_flow.h>
70 #include <rte_errno.h>
71 #ifdef RTE_LIBRTE_IXGBE_PMD
72 #include <rte_pmd_ixgbe.h>
73 #endif
74 #ifdef RTE_LIBRTE_I40E_PMD
75 #include <rte_pmd_i40e.h>
76 #endif
77 #ifdef RTE_LIBRTE_BNXT_PMD
78 #include <rte_pmd_bnxt.h>
79 #endif
80 #include <rte_gro.h>
81
82 #include "testpmd.h"
83
84 static char *flowtype_to_str(uint16_t flow_type);
85
/* Human-readable names of the Tx packet-split policies; entries are
 * looked up by matching .split against the configured tx_pkt_split mode. */
static const struct {
	enum tx_pkt_split split;	/* packet split policy value */
	const char *name;		/* keyword shown/accepted on the CLI */
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};
103
/* Association between an RSS hash-type keyword as typed on the testpmd
 * command line and its ETH_RSS_* flag value. */
struct rss_type_info {
	char str[32];		/* CLI keyword (NUL-terminated) */
	uint64_t rss_type;	/* corresponding ETH_RSS_* bit */
};

/* Lookup table of every RSS hash type testpmd can display or configure. */
static const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },

};
132
133 static void
134 print_ethaddr(const char *name, struct ether_addr *eth_addr)
135 {
136         char buf[ETHER_ADDR_FMT_SIZE];
137         ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
138         printf("%s%s", name, buf);
139 }
140
/*
 * Display the basic (struct rte_eth_stats) counters of @port_id.
 *
 * When Rx/Tx queue-stats mapping is enabled on the port, the per-stats-
 * register counters are printed as well.  The function also reports an
 * approximate Rx/Tx packets-per-second rate computed from the packet and
 * TSC deltas since the previous call for the same port.
 */
void
nic_stats_display(portid_t port_id)
{
	/* Snapshots from the previous invocation, indexed by port id; used
	 * to derive the "since last show" throughput printed below. */
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	/* Two layouts: a compact one when no queue-stats mapping is active,
	 * and a wider column-aligned one (matching the per-register lines
	 * printed further down) when it is. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf:  %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes:  "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}
	else {
		printf("  RX-packets:              %10"PRIu64"    RX-errors: %10"PRIu64
		       "    RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-errors:  %10"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf:               %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets:              %10"PRIu64"    TX-errors: %10"PRIu64
		       "    TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	/* Per-stats-register Rx counters, when mapping is configured. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       "    RX-errors: %10"PRIu64
			       "    RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	/* Per-stats-register Tx counters, when mapping is configured. */
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       "                             TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	/* On the first call for a port prev_cycles[] is 0, so diff_cycles
	 * stays 0 and the rates below are reported as 0 instead of being
	 * computed from a bogus baseline. */
	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	/* pkts/s = delta-packets * TSC-frequency / delta-cycles */
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
			mpps_rx, mpps_tx);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
226
227 void
228 nic_stats_clear(portid_t port_id)
229 {
230         if (port_id_is_invalid(port_id, ENABLED_WARN)) {
231                 print_valid_ports();
232                 return;
233         }
234         rte_eth_stats_reset(port_id);
235         printf("\n  NIC statistics for port %d cleared\n", port_id);
236 }
237
238 void
239 nic_xstats_display(portid_t port_id)
240 {
241         struct rte_eth_xstat *xstats;
242         int cnt_xstats, idx_xstat;
243         struct rte_eth_xstat_name *xstats_names;
244
245         printf("###### NIC extended statistics for port %-2d\n", port_id);
246         if (!rte_eth_dev_is_valid_port(port_id)) {
247                 printf("Error: Invalid port number %i\n", port_id);
248                 return;
249         }
250
251         /* Get count */
252         cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
253         if (cnt_xstats  < 0) {
254                 printf("Error: Cannot get count of xstats\n");
255                 return;
256         }
257
258         /* Get id-name lookup table */
259         xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
260         if (xstats_names == NULL) {
261                 printf("Cannot allocate memory for xstats lookup\n");
262                 return;
263         }
264         if (cnt_xstats != rte_eth_xstats_get_names(
265                         port_id, xstats_names, cnt_xstats)) {
266                 printf("Error: Cannot get xstats lookup\n");
267                 free(xstats_names);
268                 return;
269         }
270
271         /* Get stats themselves */
272         xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
273         if (xstats == NULL) {
274                 printf("Cannot allocate memory for xstats\n");
275                 free(xstats_names);
276                 return;
277         }
278         if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
279                 printf("Error: Unable to get xstats\n");
280                 free(xstats_names);
281                 free(xstats);
282                 return;
283         }
284
285         /* Display xstats */
286         for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) {
287                 if (xstats_hide_zero && !xstats[idx_xstat].value)
288                         continue;
289                 printf("%s: %"PRIu64"\n",
290                         xstats_names[idx_xstat].name,
291                         xstats[idx_xstat].value);
292         }
293         free(xstats_names);
294         free(xstats);
295 }
296
297 void
298 nic_xstats_clear(portid_t port_id)
299 {
300         rte_eth_xstats_reset(port_id);
301 }
302
/*
 * Show which Rx/Tx queues of @port_id are mapped to which hardware
 * statistics registers, as configured via the *_queue_stats_mappings
 * tables.  Prints a notice and returns when no mapping is active.
 */
void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	/* The global mapping tables hold entries for all ports; print only
	 * the rows that belong to this port. */
	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}


	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}
350
/*
 * Display the configuration of Rx queue @queue_id on @port_id
 * (thresholds, drop/deferred-start flags, descriptor count, mempool),
 * as reported by rte_eth_rx_queue_info_get().
 */
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		/* rc is a negative errno value; strerror wants it positive */
		printf("Failed to retrieve information for port: %u, "
			"RX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
		(qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
		(qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
		(qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}
383
/*
 * Display the configuration of Tx queue @queue_id on @port_id
 * (thresholds, txq flags, deferred-start flag, descriptor count),
 * as reported by rte_eth_tx_queue_info_get().
 */
void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		/* rc is a negative errno value; strerror wants it positive */
		printf("Failed to retrieve information for port: %u, "
			"TX queue: %hu\nerror desc: %s(%d)\n",
			port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX flags: %#x", qinfo.conf.txq_flags);
	printf("\nTX deferred start: %s",
		(qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\n");
}
413
/*
 * Display general information about @port_id: MAC address, driver name,
 * NUMA socket, link status/speed/duplex, MTU, promiscuous/allmulticast
 * modes, MAC address limits, VLAN offload state, RSS capabilities,
 * supported flow types and Rx/Tx queue/descriptor limits.
 */
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";
	uint16_t mtu;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		print_valid_ports();
		return;
	}
	port = &ports[port_id];
	/* nowait: report the last known link state, do not block */
	rte_eth_link_get_nowait(port_id, &link);
	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nDriver name: %s", dev_info.driver_name);
	printf("\nConnect to socket: %u", port->socket_id);

	/* Report the mbuf-allocation socket: the explicitly configured one
	 * (when a pool exists there) or the port's own socket otherwise. */
	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
							port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u",port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));

	/* rte_eth_dev_get_mtu() returns 0 on success */
	if (!rte_eth_dev_get_mtu(port_id, &mtu))
		printf("MTU: %u\n", mtu);

	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	/* Negative return means VLAN offload info is not available */
	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0){
		printf("VLAN offload: \n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on \n");
		else
			printf("  strip off \n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on \n");
		else
			printf("  filter off \n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on \n");
		else
			printf("  qinq(extend) off \n");
	}

	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		/* Walk every bit of the 64-bit RSS-offload mask; each set
		 * bit position is an RTE_ETH_FLOW_* id. */
		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1;
		     i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			if (p)
				printf("  %s\n", p);
			else
				printf("  user defined %d\n", i);
		}
	}

	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
		dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
		dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}
522
/*
 * List the Rx/Tx offload features supported by @port_id and, for each
 * one, whether it is currently enabled.  Rx offloads are read from the
 * device's rxmode configuration; Tx offloads from testpmd's per-port
 * tx_ol_flags / TSO segment-size settings.
 */
void
port_offload_cap_display(portid_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	static const char *info_border = "************";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	dev = &rte_eth_devices[port_id];
	rte_eth_dev_info_get(port_id, &dev_info);

	printf("\n%s Port %d supported offload features: %s\n",
		info_border, port_id, info_border);

	/* ---- Rx offloads: enabled state read from dev_conf.rxmode ---- */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) {
		printf("VLAN stripped:                 ");
		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) {
		printf("Double VLANs stripped:         ");
		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		printf("RX IPv4 checksum:              ");
		if (dev->data->dev_conf.rxmode.hw_ip_checksum)
			printf("on\n");
		else
			printf("off\n");
	}

	/* NOTE: UDP/TCP Rx checksum state is reported from the single
	 * hw_ip_checksum flag — rxmode has no per-L4-protocol flag here. */
	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) {
		printf("RX UDP checksum:               ");
		if (dev->data->dev_conf.rxmode.hw_ip_checksum)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) {
		printf("RX TCP checksum:               ");
		if (dev->data->dev_conf.rxmode.hw_ip_checksum)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)
		printf("RX Outer IPv4 checksum:        on");

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) {
		printf("Large receive offload:         ");
		if (dev->data->dev_conf.rxmode.enable_lro)
			printf("on\n");
		else
			printf("off\n");
	}

	/* ---- Tx offloads: enabled state read from testpmd's settings ---- */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) {
		printf("VLAN insert:                   ");
		if (ports[port_id].tx_ol_flags &
		    TESTPMD_TX_OFFLOAD_INSERT_VLAN)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) {
		printf("HW timestamp:                  ");
		if (dev->data->dev_conf.rxmode.hw_timestamp)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) {
		printf("Double VLANs insert:           ");
		if (ports[port_id].tx_ol_flags &
		    TESTPMD_TX_OFFLOAD_INSERT_QINQ)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) {
		printf("TX IPv4 checksum:              ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) {
		printf("TX UDP checksum:               ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) {
		printf("TX TCP checksum:               ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) {
		printf("TX SCTP checksum:              ");
		if (ports[port_id].tx_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) {
		printf("TX Outer IPv4 checksum:        ");
		if (ports[port_id].tx_ol_flags &
		    TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
			printf("on\n");
		else
			printf("off\n");
	}

	/* TSO features are considered "on" when a segment size is set */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) {
		printf("TX TCP segmentation:           ");
		if (ports[port_id].tso_segsz != 0)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) {
		printf("TX UDP segmentation:           ");
		if (ports[port_id].tso_segsz != 0)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) {
		printf("TSO for VXLAN tunnel packet:   ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) {
		printf("TSO for GRE tunnel packet:     ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) {
		printf("TSO for IPIP tunnel packet:    ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}

	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) {
		printf("TSO for GENEVE tunnel packet:  ");
		if (ports[port_id].tunnel_tso_segsz)
			printf("on\n");
		else
			printf("off\n");
	}

}
706
707 int
708 port_id_is_invalid(portid_t port_id, enum print_warning warning)
709 {
710         uint16_t pid;
711
712         if (port_id == (portid_t)RTE_PORT_ALL)
713                 return 0;
714
715         RTE_ETH_FOREACH_DEV(pid)
716                 if (port_id == pid)
717                         return 0;
718
719         if (warning == ENABLED_WARN)
720                 printf("Invalid port %d\n", port_id);
721
722         return 1;
723 }
724
725 void print_valid_ports(void)
726 {
727         portid_t pid;
728
729         printf("The valid ports array is [");
730         RTE_ETH_FOREACH_DEV(pid) {
731                 printf(" %d", pid);
732         }
733         printf(" ]\n");
734 }
735
/* Return 1 (with a message) when @vlan_id is out of the 12-bit VLAN id
 * range, 0 when it is valid. */
static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id >= 4096) {
		printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
		return 1;
	}
	return 0;
}
744
/*
 * Check that @reg_off is 4-byte aligned and falls inside BAR 0 of the
 * port's PCI device.  Returns 1 (printing a diagnostic) when invalid,
 * 0 otherwise.
 */
static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	/* NOTE(review): assumes the port is PCI-backed (pci_dev non-NULL);
	 * would dereference NULL for a non-PCI vdev port — confirm callers
	 * only reach this for physical devices. */
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
		return 1;
	}
	return 0;
}
765
/* Return 1 (with a message) when @bit_pos does not fit a 32-bit register
 * (i.e. is greater than 31), 0 when it is valid. */
static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos > 31) {
		printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
		return 1;
	}
	return 0;
}
774
/* Print the "port X PCI register at offset Y: " prefix shared by the
 * register-display helpers below (no trailing newline). */
#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
777
/* Print a 32-bit register value in both hexadecimal and decimal form,
 * prefixed with the port/offset banner. */
static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}
784
785 void
786 port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
787 {
788         uint32_t reg_v;
789
790
791         if (port_id_is_invalid(port_id, ENABLED_WARN))
792                 return;
793         if (port_reg_off_is_invalid(port_id, reg_off))
794                 return;
795         if (reg_bit_pos_is_invalid(bit_x))
796                 return;
797         reg_v = port_id_pci_reg_read(port_id, reg_off);
798         display_port_and_reg_off(port_id, (unsigned)reg_off);
799         printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
800 }
801
802 void
803 port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
804                            uint8_t bit1_pos, uint8_t bit2_pos)
805 {
806         uint32_t reg_v;
807         uint8_t  l_bit;
808         uint8_t  h_bit;
809
810         if (port_id_is_invalid(port_id, ENABLED_WARN))
811                 return;
812         if (port_reg_off_is_invalid(port_id, reg_off))
813                 return;
814         if (reg_bit_pos_is_invalid(bit1_pos))
815                 return;
816         if (reg_bit_pos_is_invalid(bit2_pos))
817                 return;
818         if (bit1_pos > bit2_pos)
819                 l_bit = bit2_pos, h_bit = bit1_pos;
820         else
821                 l_bit = bit1_pos, h_bit = bit2_pos;
822
823         reg_v = port_id_pci_reg_read(port_id, reg_off);
824         reg_v >>= l_bit;
825         if (h_bit < 31)
826                 reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
827         display_port_and_reg_off(port_id, (unsigned)reg_off);
828         printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
829                ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
830 }
831
832 void
833 port_reg_display(portid_t port_id, uint32_t reg_off)
834 {
835         uint32_t reg_v;
836
837         if (port_id_is_invalid(port_id, ENABLED_WARN))
838                 return;
839         if (port_reg_off_is_invalid(port_id, reg_off))
840                 return;
841         reg_v = port_id_pci_reg_read(port_id, reg_off);
842         display_port_reg_value(port_id, reg_off, reg_v);
843 }
844
845 void
846 port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
847                  uint8_t bit_v)
848 {
849         uint32_t reg_v;
850
851         if (port_id_is_invalid(port_id, ENABLED_WARN))
852                 return;
853         if (port_reg_off_is_invalid(port_id, reg_off))
854                 return;
855         if (reg_bit_pos_is_invalid(bit_pos))
856                 return;
857         if (bit_v > 1) {
858                 printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
859                 return;
860         }
861         reg_v = port_id_pci_reg_read(port_id, reg_off);
862         if (bit_v == 0)
863                 reg_v &= ~(1 << bit_pos);
864         else
865                 reg_v |= (1 << bit_pos);
866         port_id_pci_reg_write(port_id, reg_off, reg_v);
867         display_port_reg_value(port_id, reg_off, reg_v);
868 }
869
870 void
871 port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
872                        uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
873 {
874         uint32_t max_v;
875         uint32_t reg_v;
876         uint8_t  l_bit;
877         uint8_t  h_bit;
878
879         if (port_id_is_invalid(port_id, ENABLED_WARN))
880                 return;
881         if (port_reg_off_is_invalid(port_id, reg_off))
882                 return;
883         if (reg_bit_pos_is_invalid(bit1_pos))
884                 return;
885         if (reg_bit_pos_is_invalid(bit2_pos))
886                 return;
887         if (bit1_pos > bit2_pos)
888                 l_bit = bit2_pos, h_bit = bit1_pos;
889         else
890                 l_bit = bit1_pos, h_bit = bit2_pos;
891
892         if ((h_bit - l_bit) < 31)
893                 max_v = (1 << (h_bit - l_bit + 1)) - 1;
894         else
895                 max_v = 0xFFFFFFFF;
896
897         if (value > max_v) {
898                 printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
899                                 (unsigned)value, (unsigned)value,
900                                 (unsigned)max_v, (unsigned)max_v);
901                 return;
902         }
903         reg_v = port_id_pci_reg_read(port_id, reg_off);
904         reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
905         reg_v |= (value << l_bit); /* Set changed bits */
906         port_id_pci_reg_write(port_id, reg_off, reg_v);
907         display_port_reg_value(port_id, reg_off, reg_v);
908 }
909
910 void
911 port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
912 {
913         if (port_id_is_invalid(port_id, ENABLED_WARN))
914                 return;
915         if (port_reg_off_is_invalid(port_id, reg_off))
916                 return;
917         port_id_pci_reg_write(port_id, reg_off, reg_v);
918         display_port_reg_value(port_id, reg_off, reg_v);
919 }
920
921 void
922 port_mtu_set(portid_t port_id, uint16_t mtu)
923 {
924         int diag;
925
926         if (port_id_is_invalid(port_id, ENABLED_WARN))
927                 return;
928         diag = rte_eth_dev_set_mtu(port_id, mtu);
929         if (diag == 0)
930                 return;
931         printf("Set MTU failed. diag=%d\n", diag);
932 }
933
934 /* Generic flow management functions. */
935
/** Generate flow_item[] entry. */
/* Expands to a designated initializer indexed by the item type. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow pattern items. */
/*
 * Indexed by RTE_FLOW_ITEM_TYPE_*; .size is the byte size of the item's
 * spec/last/mask structure (0 when the item carries no data). Unknown
 * item types have a NULL .name, which the users of this table check.
 */
static const struct {
	const char *name;
	size_t size;
} flow_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PF, 0),
	MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
	MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
};
974
975 /** Compute storage space needed by item specification. */
976 static void
977 flow_item_spec_size(const struct rte_flow_item *item,
978                     size_t *size, size_t *pad)
979 {
980         if (!item->spec) {
981                 *size = 0;
982                 goto empty;
983         }
984         switch (item->type) {
985                 union {
986                         const struct rte_flow_item_raw *raw;
987                 } spec;
988
989         case RTE_FLOW_ITEM_TYPE_RAW:
990                 spec.raw = item->spec;
991                 *size = offsetof(struct rte_flow_item_raw, pattern) +
992                         spec.raw->length * sizeof(*spec.raw->pattern);
993                 break;
994         default:
995                 *size = flow_item[item->type].size;
996                 break;
997         }
998 empty:
999         *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
1000 }
1001
/** Generate flow_action[] entry. */
/* Expands to a designated initializer indexed by the action type. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}

/** Information about known flow actions. */
/*
 * Indexed by RTE_FLOW_ACTION_TYPE_*; .size is the byte size of the
 * action's configuration structure (0 when it takes none). Unknown
 * action types have a NULL .name, which the users of this table check.
 */
static const struct {
	const char *name;
	size_t size;
} flow_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, 0),
	MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
};
1027
1028 /** Compute storage space needed by action configuration. */
1029 static void
1030 flow_action_conf_size(const struct rte_flow_action *action,
1031                       size_t *size, size_t *pad)
1032 {
1033         if (!action->conf) {
1034                 *size = 0;
1035                 goto empty;
1036         }
1037         switch (action->type) {
1038                 union {
1039                         const struct rte_flow_action_rss *rss;
1040                 } conf;
1041
1042         case RTE_FLOW_ACTION_TYPE_RSS:
1043                 conf.rss = action->conf;
1044                 *size = offsetof(struct rte_flow_action_rss, queue) +
1045                         conf.rss->num * sizeof(*conf.rss->queue);
1046                 break;
1047         default:
1048                 *size = flow_action[action->type].size;
1049                 break;
1050         }
1051 empty:
1052         *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
1053 }
1054
/** Generate a port_flow entry from attributes/pattern/actions. */
/*
 * Two-pass builder: the first pass (pf == NULL) only measures the space
 * needed for the item/action arrays (off1) and their attached
 * spec/last/mask/conf data (off2). A single buffer is then allocated
 * and the code jumps back to "store" for a second pass that performs
 * the actual copies into pf->data[]. Returns NULL with rte_errno set
 * on unknown item/action types (ENOTSUP) or allocation failure.
 */
static struct port_flow *
port_flow_new(const struct rte_flow_attr *attr,
	      const struct rte_flow_item *pattern,
	      const struct rte_flow_action *actions)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *action;
	struct port_flow *pf = NULL;
	size_t tmp;
	size_t pad;
	size_t off1 = 0;	/* running offset of items/actions */
	size_t off2 = 0;	/* running offset of spec/last/mask/conf data */
	int err = ENOTSUP;

store:
	item = pattern;
	if (pf)
		pf->pattern = (void *)&pf->data[off1];
	/* Measure (first pass) or copy (second pass) the pattern items. */
	do {
		struct rte_flow_item *dst = NULL;

		if ((unsigned int)item->type >= RTE_DIM(flow_item) ||
		    !flow_item[item->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, item, sizeof(*item));
		off1 += sizeof(*item);
		flow_item_spec_size(item, &tmp, &pad);
		if (item->spec) {
			if (pf)
				dst->spec = memcpy(pf->data + off2,
						   item->spec, tmp);
			off2 += tmp + pad;
		}
		if (item->last) {
			if (pf)
				dst->last = memcpy(pf->data + off2,
						   item->last, tmp);
			off2 += tmp + pad;
		}
		if (item->mask) {
			if (pf)
				dst->mask = memcpy(pf->data + off2,
						   item->mask, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	action = actions;
	if (pf)
		pf->actions = (void *)&pf->data[off1];
	/* Measure (first pass) or copy (second pass) the actions. */
	do {
		struct rte_flow_action *dst = NULL;

		if ((unsigned int)action->type >= RTE_DIM(flow_action) ||
		    !flow_action[action->type].name)
			goto notsup;
		if (pf)
			dst = memcpy(pf->data + off1, action, sizeof(*action));
		off1 += sizeof(*action);
		flow_action_conf_size(action, &tmp, &pad);
		if (action->conf) {
			if (pf)
				dst->conf = memcpy(pf->data + off2,
						   action->conf, tmp);
			off2 += tmp + pad;
		}
		off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
	} while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
	if (pf != NULL)
		return pf;	/* second pass done */
	/* First pass done: allocate, then rebase offsets and copy. */
	off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
	tmp = RTE_ALIGN_CEIL(offsetof(struct port_flow, data), sizeof(double));
	pf = calloc(1, tmp + off1 + off2);
	if (pf == NULL)
		err = errno;
	else {
		*pf = (const struct port_flow){
			.size = tmp + off1 + off2,
			.attr = *attr,
		};
		tmp -= offsetof(struct port_flow, data);
		off2 = tmp + off1;	/* data area starts after the arrays */
		off1 = tmp;
		goto store;
	}
notsup:
	rte_errno = err;
	return NULL;
}
1147
1148 /** Print a message out of a flow error. */
1149 static int
1150 port_flow_complain(struct rte_flow_error *error)
1151 {
1152         static const char *const errstrlist[] = {
1153                 [RTE_FLOW_ERROR_TYPE_NONE] = "no error",
1154                 [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified",
1155                 [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)",
1156                 [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field",
1157                 [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field",
1158                 [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field",
1159                 [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field",
1160                 [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure",
1161                 [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length",
1162                 [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item",
1163                 [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions",
1164                 [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action",
1165         };
1166         const char *errstr;
1167         char buf[32];
1168         int err = rte_errno;
1169
1170         if ((unsigned int)error->type >= RTE_DIM(errstrlist) ||
1171             !errstrlist[error->type])
1172                 errstr = "unknown type";
1173         else
1174                 errstr = errstrlist[error->type];
1175         printf("Caught error type %d (%s): %s%s\n",
1176                error->type, errstr,
1177                error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ",
1178                                         error->cause), buf) : "",
1179                error->message ? error->message : "(no stated reason)");
1180         return -err;
1181 }
1182
1183 /** Validate flow rule. */
1184 int
1185 port_flow_validate(portid_t port_id,
1186                    const struct rte_flow_attr *attr,
1187                    const struct rte_flow_item *pattern,
1188                    const struct rte_flow_action *actions)
1189 {
1190         struct rte_flow_error error;
1191
1192         /* Poisoning to make sure PMDs update it in case of error. */
1193         memset(&error, 0x11, sizeof(error));
1194         if (rte_flow_validate(port_id, attr, pattern, actions, &error))
1195                 return port_flow_complain(&error);
1196         printf("Flow rule validated\n");
1197         return 0;
1198 }
1199
1200 /** Create flow rule. */
1201 int
1202 port_flow_create(portid_t port_id,
1203                  const struct rte_flow_attr *attr,
1204                  const struct rte_flow_item *pattern,
1205                  const struct rte_flow_action *actions)
1206 {
1207         struct rte_flow *flow;
1208         struct rte_port *port;
1209         struct port_flow *pf;
1210         uint32_t id;
1211         struct rte_flow_error error;
1212
1213         /* Poisoning to make sure PMDs update it in case of error. */
1214         memset(&error, 0x22, sizeof(error));
1215         flow = rte_flow_create(port_id, attr, pattern, actions, &error);
1216         if (!flow)
1217                 return port_flow_complain(&error);
1218         port = &ports[port_id];
1219         if (port->flow_list) {
1220                 if (port->flow_list->id == UINT32_MAX) {
1221                         printf("Highest rule ID is already assigned, delete"
1222                                " it first");
1223                         rte_flow_destroy(port_id, flow, NULL);
1224                         return -ENOMEM;
1225                 }
1226                 id = port->flow_list->id + 1;
1227         } else
1228                 id = 0;
1229         pf = port_flow_new(attr, pattern, actions);
1230         if (!pf) {
1231                 int err = rte_errno;
1232
1233                 printf("Cannot allocate flow: %s\n", rte_strerror(err));
1234                 rte_flow_destroy(port_id, flow, NULL);
1235                 return -err;
1236         }
1237         pf->next = port->flow_list;
1238         pf->id = id;
1239         pf->flow = flow;
1240         port->flow_list = pf;
1241         printf("Flow rule #%u created\n", pf->id);
1242         return 0;
1243 }
1244
/** Destroy a number of flow rules. */
/*
 * Remove from port_id every rule whose ID appears in rule[0..n-1].
 * The list is walked with a pointer-to-pointer cursor so a matching
 * entry can be unlinked without tracking a "previous" node. PMD-level
 * destruction failures are reported but do not stop the scan; the last
 * such error code (or 0) is returned.
 */
int
port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule)
{
	struct rte_port *port;
	struct port_flow **tmp;
	uint32_t c = 0;	/* NOTE(review): incremented but never read */
	int ret = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	tmp = &port->flow_list;
	while (*tmp) {
		uint32_t i;

		for (i = 0; i != n; ++i) {
			struct rte_flow_error error;
			struct port_flow *pf = *tmp;

			if (rule[i] != pf->id)
				continue;
			/*
			 * Poisoning to make sure PMDs update it in case
			 * of error.
			 */
			memset(&error, 0x33, sizeof(error));
			if (rte_flow_destroy(port_id, pf->flow, &error)) {
				ret = port_flow_complain(&error);
				continue;
			}
			printf("Flow rule #%u destroyed\n", pf->id);
			/* Unlink and release the local copy. */
			*tmp = pf->next;
			free(pf);
			break;
		}
		/* Advance only when the current node was not removed. */
		if (i == n)
			tmp = &(*tmp)->next;
		++c;
	}
	return ret;
}
1288
1289 /** Remove all flow rules. */
1290 int
1291 port_flow_flush(portid_t port_id)
1292 {
1293         struct rte_flow_error error;
1294         struct rte_port *port;
1295         int ret = 0;
1296
1297         /* Poisoning to make sure PMDs update it in case of error. */
1298         memset(&error, 0x44, sizeof(error));
1299         if (rte_flow_flush(port_id, &error)) {
1300                 ret = port_flow_complain(&error);
1301                 if (port_id_is_invalid(port_id, DISABLED_WARN) ||
1302                     port_id == (portid_t)RTE_PORT_ALL)
1303                         return ret;
1304         }
1305         port = &ports[port_id];
1306         while (port->flow_list) {
1307                 struct port_flow *pf = port->flow_list->next;
1308
1309                 free(port->flow_list);
1310                 port->flow_list = pf;
1311         }
1312         return ret;
1313 }
1314
/** Query a flow rule. */
/*
 * Look up rule ID "rule" on port_id and query the given action type.
 * Only RTE_FLOW_ACTION_TYPE_COUNT is currently supported; its counters
 * are printed on success. Returns 0 or a negative errno value.
 */
int
port_flow_query(portid_t port_id, uint32_t rule,
		enum rte_flow_action_type action)
{
	struct rte_flow_error error;
	struct rte_port *port;
	struct port_flow *pf;
	const char *name;
	union {
		struct rte_flow_query_count count;
	} query;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return -EINVAL;
	port = &ports[port_id];
	/* Find the local copy of the rule. */
	for (pf = port->flow_list; pf; pf = pf->next)
		if (pf->id == rule)
			break;
	if (!pf) {
		printf("Flow rule #%u not found\n", rule);
		return -ENOENT;
	}
	/* Resolve a printable action name, guarding against holes. */
	if ((unsigned int)action >= RTE_DIM(flow_action) ||
	    !flow_action[action].name)
		name = "unknown";
	else
		name = flow_action[action].name;
	/* Reject action types this function cannot query. */
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		break;
	default:
		printf("Cannot query action type %d (%s)\n", action, name);
		return -ENOTSUP;
	}
	/* Poisoning to make sure PMDs update it in case of error. */
	memset(&error, 0x55, sizeof(error));
	memset(&query, 0, sizeof(query));
	if (rte_flow_query(port_id, pf->flow, action, &query, &error))
		return port_flow_complain(&error);
	/* Display the result according to the action queried. */
	switch (action) {
	case RTE_FLOW_ACTION_TYPE_COUNT:
		printf("%s:\n"
		       " hits_set: %u\n"
		       " bytes_set: %u\n"
		       " hits: %" PRIu64 "\n"
		       " bytes: %" PRIu64 "\n",
		       name,
		       query.count.hits_set,
		       query.count.bytes_set,
		       query.count.hits,
		       query.count.bytes);
		break;
	default:
		printf("Cannot display result for action type %d (%s)\n",
		       action, name);
		break;
	}
	return 0;
}
1376
/** List flow rules. */
/*
 * Print the rules of port_id sorted by group, priority, then ID. When
 * n != 0, only rules belonging to one of group[0..n-1] are shown.
 * Sorting uses an insertion sort into a temporary list chained through
 * each entry's "tmp" pointer; the main flow list itself is untouched.
 */
void
port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n])
{
	struct rte_port *port;
	struct port_flow *pf;
	struct port_flow *list = NULL;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN) ||
	    port_id == (portid_t)RTE_PORT_ALL)
		return;
	port = &ports[port_id];
	if (!port->flow_list)
		return;
	/* Sort flows by group, priority and ID. */
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		struct port_flow **tmp;

		if (n) {
			/* Filter out unwanted groups. */
			for (i = 0; i != n; ++i)
				if (pf->attr.group == group[i])
					break;
			if (i == n)
				continue;
		}
		/* Walk to the insertion point that keeps ordering. */
		tmp = &list;
		while (*tmp &&
		       (pf->attr.group > (*tmp)->attr.group ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority > (*tmp)->attr.priority) ||
			(pf->attr.group == (*tmp)->attr.group &&
			 pf->attr.priority == (*tmp)->attr.priority &&
			 pf->id > (*tmp)->id)))
			tmp = &(*tmp)->tmp;
		pf->tmp = *tmp;
		*tmp = pf;
	}
	printf("ID\tGroup\tPrio\tAttr\tRule\n");
	for (pf = list; pf != NULL; pf = pf->tmp) {
		const struct rte_flow_item *item = pf->pattern;
		const struct rte_flow_action *action = pf->actions;

		printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c\t",
		       pf->id,
		       pf->attr.group,
		       pf->attr.priority,
		       pf->attr.ingress ? 'i' : '-',
		       pf->attr.egress ? 'e' : '-');
		/* Pattern item names, skipping VOID placeholders. */
		while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
				printf("%s ", flow_item[item->type].name);
			++item;
		}
		printf("=>");
		/* Action names, skipping VOID placeholders. */
		while (action->type != RTE_FLOW_ACTION_TYPE_END) {
			if (action->type != RTE_FLOW_ACTION_TYPE_VOID)
				printf(" %s", flow_action[action->type].name);
			++action;
		}
		printf("\n");
	}
}
1441
1442 /** Restrict ingress traffic to the defined flow rules. */
1443 int
1444 port_flow_isolate(portid_t port_id, int set)
1445 {
1446         struct rte_flow_error error;
1447
1448         /* Poisoning to make sure PMDs update it in case of error. */
1449         memset(&error, 0x66, sizeof(error));
1450         if (rte_flow_isolate(port_id, set, &error))
1451                 return port_flow_complain(&error);
1452         printf("Ingress traffic on port %u is %s to the defined flow rules\n",
1453                port_id,
1454                set ? "now restricted" : "not restricted anymore");
1455         return 0;
1456 }
1457
1458 /*
1459  * RX/TX ring descriptors display functions.
1460  */
1461 int
1462 rx_queue_id_is_invalid(queueid_t rxq_id)
1463 {
1464         if (rxq_id < nb_rxq)
1465                 return 0;
1466         printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
1467         return 1;
1468 }
1469
1470 int
1471 tx_queue_id_is_invalid(queueid_t txq_id)
1472 {
1473         if (txq_id < nb_txq)
1474                 return 0;
1475         printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq);
1476         return 1;
1477 }
1478
1479 static int
1480 rx_desc_id_is_invalid(uint16_t rxdesc_id)
1481 {
1482         if (rxdesc_id < nb_rxd)
1483                 return 0;
1484         printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
1485                rxdesc_id, nb_rxd);
1486         return 1;
1487 }
1488
1489 static int
1490 tx_desc_id_is_invalid(uint16_t txdesc_id)
1491 {
1492         if (txdesc_id < nb_txd)
1493                 return 0;
1494         printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
1495                txdesc_id, nb_txd);
1496         return 1;
1497 }
1498
1499 static const struct rte_memzone *
1500 ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id)
1501 {
1502         char mz_name[RTE_MEMZONE_NAMESIZE];
1503         const struct rte_memzone *mz;
1504
1505         snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
1506                  ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
1507         mz = rte_memzone_lookup(mz_name);
1508         if (mz == NULL)
1509                 printf("%s ring memory zoneof (port %d, queue %d) not"
1510                        "found (zone name = %s\n",
1511                        ring_name, port_id, q_id, mz_name);
1512         return mz;
1513 }
1514
/*
 * 64-bit descriptor word overlaid with its two 32-bit halves so they
 * can be displayed separately.
 *
 * NOTE(review): the "lo"/"hi" member order looks inverted ("lo" first
 * under RTE_BIG_ENDIAN, "hi" first otherwise) — confirm against the
 * output expected from ring_rxd_display_dword() and
 * ring_tx_descriptor_display().
 */
union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t lo;
		uint32_t hi;
#else
		uint32_t hi;
		uint32_t lo;
#endif
	} words;
};

/* 32-byte RX descriptor layout (used by i40e). */
struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

/* 16-byte descriptor layout (other drivers). */
struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};
1539
1540 static void
1541 ring_rxd_display_dword(union igb_ring_dword dword)
1542 {
1543         printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
1544                                         (unsigned)dword.words.hi);
1545 }
1546
/*
 * Dump one RX descriptor from the given ring memzone. i40e devices use
 * 32-byte RX descriptors (unless RTE_LIBRTE_I40E_16BYTE_RX_DESC is
 * defined); everything else is displayed as a 16-byte descriptor.
 * Note: the descriptor words are converted in place from little-endian
 * to CPU order before printing.
 */
static void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   portid_t port_id,
#else
			   __rte_unused portid_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	/* Driver-name check: only i40e uses 32-byte RX descriptors. */
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}
1591
1592 static void
1593 ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
1594 {
1595         struct igb_ring_desc_16_bytes *ring;
1596         struct igb_ring_desc_16_bytes txd;
1597
1598         ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
1599         txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
1600         txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
1601         printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
1602                         (unsigned)txd.lo_dword.words.lo,
1603                         (unsigned)txd.lo_dword.words.hi,
1604                         (unsigned)txd.hi_dword.words.lo,
1605                         (unsigned)txd.hi_dword.words.hi);
1606 }
1607
1608 void
1609 rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
1610 {
1611         const struct rte_memzone *rx_mz;
1612
1613         if (port_id_is_invalid(port_id, ENABLED_WARN))
1614                 return;
1615         if (rx_queue_id_is_invalid(rxq_id))
1616                 return;
1617         if (rx_desc_id_is_invalid(rxd_id))
1618                 return;
1619         rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
1620         if (rx_mz == NULL)
1621                 return;
1622         ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
1623 }
1624
1625 void
1626 tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
1627 {
1628         const struct rte_memzone *tx_mz;
1629
1630         if (port_id_is_invalid(port_id, ENABLED_WARN))
1631                 return;
1632         if (tx_queue_id_is_invalid(txq_id))
1633                 return;
1634         if (tx_desc_id_is_invalid(txd_id))
1635                 return;
1636         tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
1637         if (tx_mz == NULL)
1638                 return;
1639         ring_tx_descriptor_display(tx_mz, txd_id);
1640 }
1641
1642 void
1643 fwd_lcores_config_display(void)
1644 {
1645         lcoreid_t lc_id;
1646
1647         printf("List of forwarding lcores:");
1648         for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
1649                 printf(" %2u", fwd_lcores_cpuids[lc_id]);
1650         printf("\n");
1651 }
/*
 * Display the current RX/TX forwarding configuration: forwarding engine,
 * burst size, core/port counts, and the per-port RX/TX queue settings
 * taken from each port's cached rx_conf/tx_conf.
 */
void
rxtx_config_display(void)
{
	portid_t pid;

	printf("  %s packet forwarding%s packets/burst=%d\n",
	       cur_fwd_eng->fwd_mode_name,
	       retry_enabled == 0 ? "" : " with retry",
	       nb_pkt_per_burst);

	/* Packet length/segments only apply to generator engines. */
	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
				(unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);

	RTE_ETH_FOREACH_DEV(pid) {
		/* Settings cached in the local ports[] array, not queried
		 * from the device. */
		struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf;
		struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf;

		printf("  port %d:\n", (unsigned int)pid);
		printf("  CRC stripping %s\n",
				ports[pid].dev_conf.rxmode.hw_strip_crc ?
				"enabled" : "disabled");
		printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
				nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
		printf("  RX threshold registers: pthresh=%d hthresh=%d "
		       " wthresh=%d\n",
				rx_conf->rx_thresh.pthresh,
				rx_conf->rx_thresh.hthresh,
				rx_conf->rx_thresh.wthresh);
		printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
				nb_txq, nb_txd, tx_conf->tx_free_thresh);
		printf("  TX threshold registers: pthresh=%d hthresh=%d "
		       " wthresh=%d\n",
				tx_conf->tx_thresh.pthresh,
				tx_conf->tx_thresh.hthresh,
				tx_conf->tx_thresh.wthresh);
		printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
				tx_conf->tx_rs_thresh, tx_conf->txq_flags);
	}
}
1695
1696 void
1697 port_rss_reta_info(portid_t port_id,
1698                    struct rte_eth_rss_reta_entry64 *reta_conf,
1699                    uint16_t nb_entries)
1700 {
1701         uint16_t i, idx, shift;
1702         int ret;
1703
1704         if (port_id_is_invalid(port_id, ENABLED_WARN))
1705                 return;
1706
1707         ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
1708         if (ret != 0) {
1709                 printf("Failed to get RSS RETA info, return code = %d\n", ret);
1710                 return;
1711         }
1712
1713         for (i = 0; i < nb_entries; i++) {
1714                 idx = i / RTE_RETA_GROUP_SIZE;
1715                 shift = i % RTE_RETA_GROUP_SIZE;
1716                 if (!(reta_conf[idx].mask & (1ULL << shift)))
1717                         continue;
1718                 printf("RSS RETA configuration: hash index=%u, queue=%u\n",
1719                                         i, reta_conf[idx].reta[shift]);
1720         }
1721 }
1722
1723 /*
1724  * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
1725  * key of the port.
1726  */
1727 void
1728 port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
1729 {
1730         struct rte_eth_rss_conf rss_conf;
1731         uint8_t rss_key[RSS_HASH_KEY_LENGTH];
1732         uint64_t rss_hf;
1733         uint8_t i;
1734         int diag;
1735         struct rte_eth_dev_info dev_info;
1736         uint8_t hash_key_size;
1737
1738         if (port_id_is_invalid(port_id, ENABLED_WARN))
1739                 return;
1740
1741         memset(&dev_info, 0, sizeof(dev_info));
1742         rte_eth_dev_info_get(port_id, &dev_info);
1743         if (dev_info.hash_key_size > 0 &&
1744                         dev_info.hash_key_size <= sizeof(rss_key))
1745                 hash_key_size = dev_info.hash_key_size;
1746         else {
1747                 printf("dev_info did not provide a valid hash key size\n");
1748                 return;
1749         }
1750
1751         rss_conf.rss_hf = 0;
1752         for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1753                 if (!strcmp(rss_info, rss_type_table[i].str))
1754                         rss_conf.rss_hf = rss_type_table[i].rss_type;
1755         }
1756
1757         /* Get RSS hash key if asked to display it */
1758         rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
1759         rss_conf.rss_key_len = hash_key_size;
1760         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1761         if (diag != 0) {
1762                 switch (diag) {
1763                 case -ENODEV:
1764                         printf("port index %d invalid\n", port_id);
1765                         break;
1766                 case -ENOTSUP:
1767                         printf("operation not supported by device\n");
1768                         break;
1769                 default:
1770                         printf("operation failed - diag=%d\n", diag);
1771                         break;
1772                 }
1773                 return;
1774         }
1775         rss_hf = rss_conf.rss_hf;
1776         if (rss_hf == 0) {
1777                 printf("RSS disabled\n");
1778                 return;
1779         }
1780         printf("RSS functions:\n ");
1781         for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1782                 if (rss_hf & rss_type_table[i].rss_type)
1783                         printf("%s ", rss_type_table[i].str);
1784         }
1785         printf("\n");
1786         if (!show_rss_key)
1787                 return;
1788         printf("RSS key:\n");
1789         for (i = 0; i < hash_key_size; i++)
1790                 printf("%02X", rss_key[i]);
1791         printf("\n");
1792 }
1793
1794 void
1795 port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
1796                          uint hash_key_len)
1797 {
1798         struct rte_eth_rss_conf rss_conf;
1799         int diag;
1800         unsigned int i;
1801
1802         rss_conf.rss_key = NULL;
1803         rss_conf.rss_key_len = hash_key_len;
1804         rss_conf.rss_hf = 0;
1805         for (i = 0; i < RTE_DIM(rss_type_table); i++) {
1806                 if (!strcmp(rss_type_table[i].str, rss_type))
1807                         rss_conf.rss_hf = rss_type_table[i].rss_type;
1808         }
1809         diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
1810         if (diag == 0) {
1811                 rss_conf.rss_key = hash_key;
1812                 diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
1813         }
1814         if (diag == 0)
1815                 return;
1816
1817         switch (diag) {
1818         case -ENODEV:
1819                 printf("port index %d invalid\n", port_id);
1820                 break;
1821         case -ENOTSUP:
1822                 printf("operation not supported by device\n");
1823                 break;
1824         default:
1825                 printf("operation failed - diag=%d\n", diag);
1826                 break;
1827         }
1828 }
1829
1830 /*
1831  * Setup forwarding configuration for each logical core.
1832  */
1833 static void
1834 setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
1835 {
1836         streamid_t nb_fs_per_lcore;
1837         streamid_t nb_fs;
1838         streamid_t sm_id;
1839         lcoreid_t  nb_extra;
1840         lcoreid_t  nb_fc;
1841         lcoreid_t  nb_lc;
1842         lcoreid_t  lc_id;
1843
1844         nb_fs = cfg->nb_fwd_streams;
1845         nb_fc = cfg->nb_fwd_lcores;
1846         if (nb_fs <= nb_fc) {
1847                 nb_fs_per_lcore = 1;
1848                 nb_extra = 0;
1849         } else {
1850                 nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
1851                 nb_extra = (lcoreid_t) (nb_fs % nb_fc);
1852         }
1853
1854         nb_lc = (lcoreid_t) (nb_fc - nb_extra);
1855         sm_id = 0;
1856         for (lc_id = 0; lc_id < nb_lc; lc_id++) {
1857                 fwd_lcores[lc_id]->stream_idx = sm_id;
1858                 fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
1859                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1860         }
1861
1862         /*
1863          * Assign extra remaining streams, if any.
1864          */
1865         nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
1866         for (lc_id = 0; lc_id < nb_extra; lc_id++) {
1867                 fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
1868                 fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
1869                 sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
1870         }
1871 }
1872
1873 static portid_t
1874 fwd_topology_tx_port_get(portid_t rxp)
1875 {
1876         static int warning_once = 1;
1877
1878         RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports);
1879
1880         switch (port_topology) {
1881         default:
1882         case PORT_TOPOLOGY_PAIRED:
1883                 if ((rxp & 0x1) == 0) {
1884                         if (rxp + 1 < cur_fwd_config.nb_fwd_ports)
1885                                 return rxp + 1;
1886                         if (warning_once) {
1887                                 printf("\nWarning! port-topology=paired"
1888                                        " and odd forward ports number,"
1889                                        " the last port will pair with"
1890                                        " itself.\n\n");
1891                                 warning_once = 0;
1892                         }
1893                         return rxp;
1894                 }
1895                 return rxp - 1;
1896         case PORT_TOPOLOGY_CHAINED:
1897                 return (rxp + 1) % cur_fwd_config.nb_fwd_ports;
1898         case PORT_TOPOLOGY_LOOP:
1899                 return rxp;
1900         }
1901 }
1902
/*
 * Set up one forwarding stream per port for the "simple" (non-RSS,
 * non-DCB) case. In paired topology ports are handled two at a time and
 * each stream of a pair is configured in the same iteration; in chained
 * or loop topology every port gets its own iteration.
 */
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;

	if (port_topology == PORT_TOPOLOGY_CHAINED ||
	    port_topology == PORT_TOPOLOGY_LOOP) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		/* j is the peer port: next port in the ring, or i itself
		 * in loop topology. */
		if (port_topology != PORT_TOPOLOGY_LOOP)
			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		else
			j = i;
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;

		/* In paired mode also configure the reverse stream j->i
		 * now, since the loop advances by two ports. */
		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port;
			fwd_streams[j]->retry_enabled = retry_enabled;
		}
	}
}
1960
1961 /**
1962  * For the RSS forwarding test all streams distributed over lcores. Each stream
1963  * being composed of a RX queue to poll on a RX port for input messages,
1964  * associated with a TX queue of a TX port where to send forwarded packets.
1965  */
1966 static void
1967 rss_fwd_config_setup(void)
1968 {
1969         portid_t   rxp;
1970         portid_t   txp;
1971         queueid_t  rxq;
1972         queueid_t  nb_q;
1973         streamid_t  sm_id;
1974
1975         nb_q = nb_rxq;
1976         if (nb_q > nb_txq)
1977                 nb_q = nb_txq;
1978         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
1979         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
1980         cur_fwd_config.nb_fwd_streams =
1981                 (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
1982
1983         if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
1984                 cur_fwd_config.nb_fwd_lcores =
1985                         (lcoreid_t)cur_fwd_config.nb_fwd_streams;
1986
1987         /* reinitialize forwarding streams */
1988         init_fwd_streams();
1989
1990         setup_fwd_config_of_each_lcore(&cur_fwd_config);
1991         rxp = 0; rxq = 0;
1992         for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1993                 struct fwd_stream *fs;
1994
1995                 fs = fwd_streams[sm_id];
1996                 txp = fwd_topology_tx_port_get(rxp);
1997                 fs->rx_port = fwd_ports_ids[rxp];
1998                 fs->rx_queue = rxq;
1999                 fs->tx_port = fwd_ports_ids[txp];
2000                 fs->tx_queue = rxq;
2001                 fs->peer_addr = fs->tx_port;
2002                 fs->retry_enabled = retry_enabled;
2003                 rxq = (queueid_t) (rxq + 1);
2004                 if (rxq < nb_q)
2005                         continue;
2006                 /*
2007                  * rxq == nb_q
2008                  * Restart from RX queue 0 on next RX port
2009                  */
2010                 rxq = 0;
2011                 rxp++;
2012         }
2013 }
2014
2015 /**
2016  * For the DCB forwarding test, each core is assigned on each traffic class.
2017  *
2018  * Each core is assigned a multi-stream, each stream being composed of
2019  * a RX queue to poll on a RX port for input messages, associated with
2020  * a TX queue of a TX port where to send forwarded packets. All RX and
2021  * TX queues are mapping to the same traffic class.
2022  * If VMDQ and DCB co-exist, each traffic class on different POOLs share
2023  * the same core
2024  */
2025 static void
2026 dcb_fwd_config_setup(void)
2027 {
2028         struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
2029         portid_t txp, rxp = 0;
2030         queueid_t txq, rxq = 0;
2031         lcoreid_t  lc_id;
2032         uint16_t nb_rx_queue, nb_tx_queue;
2033         uint16_t i, j, k, sm_id = 0;
2034         uint8_t tc = 0;
2035
2036         cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
2037         cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
2038         cur_fwd_config.nb_fwd_streams =
2039                 (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
2040
2041         /* reinitialize forwarding streams */
2042         init_fwd_streams();
2043         sm_id = 0;
2044         txp = 1;
2045         /* get the dcb info on the first RX and TX ports */
2046         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2047         (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2048
2049         for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
2050                 fwd_lcores[lc_id]->stream_nb = 0;
2051                 fwd_lcores[lc_id]->stream_idx = sm_id;
2052                 for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
2053                         /* if the nb_queue is zero, means this tc is
2054                          * not enabled on the POOL
2055                          */
2056                         if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
2057                                 break;
2058                         k = fwd_lcores[lc_id]->stream_nb +
2059                                 fwd_lcores[lc_id]->stream_idx;
2060                         rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
2061                         txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
2062                         nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2063                         nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
2064                         for (j = 0; j < nb_rx_queue; j++) {
2065                                 struct fwd_stream *fs;
2066
2067                                 fs = fwd_streams[k + j];
2068                                 fs->rx_port = fwd_ports_ids[rxp];
2069                                 fs->rx_queue = rxq + j;
2070                                 fs->tx_port = fwd_ports_ids[txp];
2071                                 fs->tx_queue = txq + j % nb_tx_queue;
2072                                 fs->peer_addr = fs->tx_port;
2073                                 fs->retry_enabled = retry_enabled;
2074                         }
2075                         fwd_lcores[lc_id]->stream_nb +=
2076                                 rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
2077                 }
2078                 sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);
2079
2080                 tc++;
2081                 if (tc < rxp_dcb_info.nb_tcs)
2082                         continue;
2083                 /* Restart from TC 0 on next RX port */
2084                 tc = 0;
2085                 if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
2086                         rxp = (portid_t)
2087                                 (rxp + ((nb_ports >> 1) / nb_fwd_ports));
2088                 else
2089                         rxp++;
2090                 if (rxp >= nb_fwd_ports)
2091                         return;
2092                 /* get the dcb information on next RX and TX ports */
2093                 if ((rxp & 0x1) == 0)
2094                         txp = (portid_t) (rxp + 1);
2095                 else
2096                         txp = (portid_t) (rxp - 1);
2097                 rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
2098                 rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
2099         }
2100 }
2101
/*
 * Set up forwarding streams for the icmpecho engine: streams answer on the
 * same port/queue they receive on (tx_port == rx_port, tx_queue == rx_queue),
 * iterating over all RX queues of all forwarding ports.
 */
static void
icmp_echo_config_setup(void)
{
	portid_t  rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t  sm_id;

	/* No more lcores than there are TX queues in total. */
	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf("  core=%d: \n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			/* Echo back on the receiving port and queue. */
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			fs->retry_enabled = retry_enabled;
			if (verbose_level > 0)
				printf("  stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			/* Advance to the next port once all its RX queues
			 * are consumed. */
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}
2157
2158 void
2159 fwd_config_setup(void)
2160 {
2161         cur_fwd_config.fwd_eng = cur_fwd_eng;
2162         if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
2163                 icmp_echo_config_setup();
2164                 return;
2165         }
2166         if ((nb_rxq > 1) && (nb_txq > 1)){
2167                 if (dcb_config)
2168                         dcb_fwd_config_setup();
2169                 else
2170                         rss_fwd_config_setup();
2171         }
2172         else
2173                 simple_fwd_config_setup();
2174 }
2175
/*
 * Display a forwarding configuration: global engine settings, then for
 * each forwarding lcore the list of streams it runs (RX port/queue ->
 * TX port/queue plus the peer MAC address).
 */
void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP over anonymous pages %s\n",
		cfg->fwd_eng->fwd_mode_name,
		retry_enabled == 0 ? "" : " with retry",
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_anon != 0 ? "enabled" : "disabled");

	if (retry_enabled)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		/* Streams of an lcore are contiguous in fwd_streams[]
		 * starting at stream_idx. */
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}
2215
2216 int
2217 set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
2218 {
2219         unsigned int i;
2220         unsigned int lcore_cpuid;
2221         int record_now;
2222
2223         record_now = 0;
2224  again:
2225         for (i = 0; i < nb_lc; i++) {
2226                 lcore_cpuid = lcorelist[i];
2227                 if (! rte_lcore_is_enabled(lcore_cpuid)) {
2228                         printf("lcore %u not enabled\n", lcore_cpuid);
2229                         return -1;
2230                 }
2231                 if (lcore_cpuid == rte_get_master_lcore()) {
2232                         printf("lcore %u cannot be masked on for running "
2233                                "packet forwarding, which is the master lcore "
2234                                "and reserved for command line parsing only\n",
2235                                lcore_cpuid);
2236                         return -1;
2237                 }
2238                 if (record_now)
2239                         fwd_lcores_cpuids[i] = lcore_cpuid;
2240         }
2241         if (record_now == 0) {
2242                 record_now = 1;
2243                 goto again;
2244         }
2245         nb_cfg_lcores = (lcoreid_t) nb_lc;
2246         if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
2247                 printf("previous number of forwarding cores %u - changed to "
2248                        "number of configured cores %u\n",
2249                        (unsigned int) nb_fwd_lcores, nb_lc);
2250                 nb_fwd_lcores = (lcoreid_t) nb_lc;
2251         }
2252
2253         return 0;
2254 }
2255
/*
 * Build an lcore-id list from a 64-bit mask (bit i selects lcore i) and
 * apply it via set_fwd_lcores_list(). Returns -1 on an empty mask or an
 * invalid resulting list, 0 otherwise.
 */
int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int cpuids[64];
	unsigned int count;
	unsigned int bit;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	count = 0;
	for (bit = 0; bit < 64; bit++)
		if (lcoremask & (1ULL << bit))
			cpuids[count++] = bit;
	return set_fwd_lcores_list(cpuids, count);
}
2275
2276 void
2277 set_fwd_lcores_number(uint16_t nb_lc)
2278 {
2279         if (nb_lc > nb_cfg_lcores) {
2280                 printf("nb fwd cores %u > %u (max. number of configured "
2281                        "lcores) - ignored\n",
2282                        (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
2283                 return;
2284         }
2285         nb_fwd_lcores = (lcoreid_t) nb_lc;
2286         printf("Number of forwarding cores set to %u\n",
2287                (unsigned int) nb_fwd_lcores);
2288 }
2289
2290 void
2291 set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
2292 {
2293         unsigned int i;
2294         portid_t port_id;
2295         int record_now;
2296
2297         record_now = 0;
2298  again:
2299         for (i = 0; i < nb_pt; i++) {
2300                 port_id = (portid_t) portlist[i];
2301                 if (port_id_is_invalid(port_id, ENABLED_WARN))
2302                         return;
2303                 if (record_now)
2304                         fwd_ports_ids[i] = port_id;
2305         }
2306         if (record_now == 0) {
2307                 record_now = 1;
2308                 goto again;
2309         }
2310         nb_cfg_ports = (portid_t) nb_pt;
2311         if (nb_fwd_ports != (portid_t) nb_pt) {
2312                 printf("previous number of forwarding ports %u - changed to "
2313                        "number of configured ports %u\n",
2314                        (unsigned int) nb_fwd_ports, nb_pt);
2315                 nb_fwd_ports = (portid_t) nb_pt;
2316         }
2317 }
2318
2319 void
2320 set_fwd_ports_mask(uint64_t portmask)
2321 {
2322         unsigned int portlist[64];
2323         unsigned int nb_pt;
2324         unsigned int i;
2325
2326         if (portmask == 0) {
2327                 printf("Invalid NULL mask of ports\n");
2328                 return;
2329         }
2330         nb_pt = 0;
2331         RTE_ETH_FOREACH_DEV(i) {
2332                 if (! ((uint64_t)(1ULL << i) & portmask))
2333                         continue;
2334                 portlist[nb_pt++] = i;
2335         }
2336         set_fwd_ports_list(portlist, nb_pt);
2337 }
2338
2339 void
2340 set_fwd_ports_number(uint16_t nb_pt)
2341 {
2342         if (nb_pt > nb_cfg_ports) {
2343                 printf("nb fwd ports %u > %u (number of configured "
2344                        "ports) - ignored\n",
2345                        (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
2346                 return;
2347         }
2348         nb_fwd_ports = (portid_t) nb_pt;
2349         printf("Number of forwarding ports set to %u\n",
2350                (unsigned int) nb_fwd_ports);
2351 }
2352
2353 int
2354 port_is_forwarding(portid_t port_id)
2355 {
2356         unsigned int i;
2357
2358         if (port_id_is_invalid(port_id, ENABLED_WARN))
2359                 return -1;
2360
2361         for (i = 0; i < nb_fwd_ports; i++) {
2362                 if (fwd_ports_ids[i] == port_id)
2363                         return 1;
2364         }
2365
2366         return 0;
2367 }
2368
2369 void
2370 set_nb_pkt_per_burst(uint16_t nb)
2371 {
2372         if (nb > MAX_PKT_BURST) {
2373                 printf("nb pkt per burst: %u > %u (maximum packet per burst) "
2374                        " ignored\n",
2375                        (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
2376                 return;
2377         }
2378         nb_pkt_per_burst = nb;
2379         printf("Number of packets per burst set to %u\n",
2380                (unsigned int) nb_pkt_per_burst);
2381 }
2382
2383 static const char *
2384 tx_split_get_name(enum tx_pkt_split split)
2385 {
2386         uint32_t i;
2387
2388         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2389                 if (tx_split_name[i].split == split)
2390                         return tx_split_name[i].name;
2391         }
2392         return NULL;
2393 }
2394
2395 void
2396 set_tx_pkt_split(const char *name)
2397 {
2398         uint32_t i;
2399
2400         for (i = 0; i != RTE_DIM(tx_split_name); i++) {
2401                 if (strcmp(tx_split_name[i].name, name) == 0) {
2402                         tx_pkt_split = tx_split_name[i].split;
2403                         return;
2404                 }
2405         }
2406         printf("unknown value: \"%s\"\n", name);
2407 }
2408
2409 void
2410 show_tx_pkt_segments(void)
2411 {
2412         uint32_t i, n;
2413         const char *split;
2414
2415         n = tx_pkt_nb_segs;
2416         split = tx_split_get_name(tx_pkt_split);
2417
2418         printf("Number of segments: %u\n", n);
2419         printf("Segment sizes: ");
2420         for (i = 0; i != n - 1; i++)
2421                 printf("%hu,", tx_pkt_seg_lengths[i]);
2422         printf("%hu\n", tx_pkt_seg_lengths[i]);
2423         printf("Split packet: %s\n", split);
2424 }
2425
2426 void
2427 set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
2428 {
2429         uint16_t tx_pkt_len;
2430         unsigned i;
2431
2432         if (nb_segs >= (unsigned) nb_txd) {
2433                 printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
2434                        nb_segs, (unsigned int) nb_txd);
2435                 return;
2436         }
2437
2438         /*
2439          * Check that each segment length is greater or equal than
2440          * the mbuf data sise.
2441          * Check also that the total packet length is greater or equal than the
2442          * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
2443          */
2444         tx_pkt_len = 0;
2445         for (i = 0; i < nb_segs; i++) {
2446                 if (seg_lengths[i] > (unsigned) mbuf_data_size) {
2447                         printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
2448                                i, seg_lengths[i], (unsigned) mbuf_data_size);
2449                         return;
2450                 }
2451                 tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
2452         }
2453         if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
2454                 printf("total packet length=%u < %d - give up\n",
2455                                 (unsigned) tx_pkt_len,
2456                                 (int)(sizeof(struct ether_hdr) + 20 + 8));
2457                 return;
2458         }
2459
2460         for (i = 0; i < nb_segs; i++)
2461                 tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
2462
2463         tx_pkt_length  = tx_pkt_len;
2464         tx_pkt_nb_segs = (uint8_t) nb_segs;
2465 }
2466
2467 void
2468 setup_gro(const char *onoff, portid_t port_id)
2469 {
2470         if (!rte_eth_dev_is_valid_port(port_id)) {
2471                 printf("invalid port id %u\n", port_id);
2472                 return;
2473         }
2474         if (test_done == 0) {
2475                 printf("Before enable/disable GRO,"
2476                                 " please stop forwarding first\n");
2477                 return;
2478         }
2479         if (strcmp(onoff, "on") == 0) {
2480                 if (gro_ports[port_id].enable != 0) {
2481                         printf("Port %u has enabled GRO. Please"
2482                                         " disable GRO first\n", port_id);
2483                         return;
2484                 }
2485                 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2486                         gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4;
2487                         gro_ports[port_id].param.max_flow_num =
2488                                 GRO_DEFAULT_FLOW_NUM;
2489                         gro_ports[port_id].param.max_item_per_flow =
2490                                 GRO_DEFAULT_ITEM_NUM_PER_FLOW;
2491                 }
2492                 gro_ports[port_id].enable = 1;
2493         } else {
2494                 if (gro_ports[port_id].enable == 0) {
2495                         printf("Port %u has disabled GRO\n", port_id);
2496                         return;
2497                 }
2498                 gro_ports[port_id].enable = 0;
2499         }
2500 }
2501
2502 void
2503 setup_gro_flush_cycles(uint8_t cycles)
2504 {
2505         if (test_done == 0) {
2506                 printf("Before change flush interval for GRO,"
2507                                 " please stop forwarding first.\n");
2508                 return;
2509         }
2510
2511         if (cycles > GRO_MAX_FLUSH_CYCLES || cycles <
2512                         GRO_DEFAULT_FLUSH_CYCLES) {
2513                 printf("The flushing cycle be in the range"
2514                                 " of 1 to %u. Revert to the default"
2515                                 " value %u.\n",
2516                                 GRO_MAX_FLUSH_CYCLES,
2517                                 GRO_DEFAULT_FLUSH_CYCLES);
2518                 cycles = GRO_DEFAULT_FLUSH_CYCLES;
2519         }
2520
2521         gro_flush_cycles = cycles;
2522 }
2523
2524 void
2525 show_gro(portid_t port_id)
2526 {
2527         struct rte_gro_param *param;
2528         uint32_t max_pkts_num;
2529
2530         param = &gro_ports[port_id].param;
2531
2532         if (!rte_eth_dev_is_valid_port(port_id)) {
2533                 printf("Invalid port id %u.\n", port_id);
2534                 return;
2535         }
2536         if (gro_ports[port_id].enable) {
2537                 printf("GRO type: TCP/IPv4\n");
2538                 if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
2539                         max_pkts_num = param->max_flow_num *
2540                                 param->max_item_per_flow;
2541                 } else
2542                         max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES;
2543                 printf("Max number of packets to perform GRO: %u\n",
2544                                 max_pkts_num);
2545                 printf("Flushing cycles: %u\n", gro_flush_cycles);
2546         } else
2547                 printf("Port %u doesn't enable GRO.\n", port_id);
2548 }
2549
2550 void
2551 setup_gso(const char *mode, portid_t port_id)
2552 {
2553         if (!rte_eth_dev_is_valid_port(port_id)) {
2554                 printf("invalid port id %u\n", port_id);
2555                 return;
2556         }
2557         if (strcmp(mode, "on") == 0) {
2558                 if (test_done == 0) {
2559                         printf("before enabling GSO,"
2560                                         " please stop forwarding first\n");
2561                         return;
2562                 }
2563                 gso_ports[port_id].enable = 1;
2564         } else if (strcmp(mode, "off") == 0) {
2565                 if (test_done == 0) {
2566                         printf("before disabling GSO,"
2567                                         " please stop forwarding first\n");
2568                         return;
2569                 }
2570                 gso_ports[port_id].enable = 0;
2571         }
2572 }
2573
2574 char*
2575 list_pkt_forwarding_modes(void)
2576 {
2577         static char fwd_modes[128] = "";
2578         const char *separator = "|";
2579         struct fwd_engine *fwd_eng;
2580         unsigned i = 0;
2581
2582         if (strlen (fwd_modes) == 0) {
2583                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2584                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2585                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2586                         strncat(fwd_modes, separator,
2587                                         sizeof(fwd_modes) - strlen(fwd_modes) - 1);
2588                 }
2589                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2590         }
2591
2592         return fwd_modes;
2593 }
2594
2595 char*
2596 list_pkt_forwarding_retry_modes(void)
2597 {
2598         static char fwd_modes[128] = "";
2599         const char *separator = "|";
2600         struct fwd_engine *fwd_eng;
2601         unsigned i = 0;
2602
2603         if (strlen(fwd_modes) == 0) {
2604                 while ((fwd_eng = fwd_engines[i++]) != NULL) {
2605                         if (fwd_eng == &rx_only_engine)
2606                                 continue;
2607                         strncat(fwd_modes, fwd_eng->fwd_mode_name,
2608                                         sizeof(fwd_modes) -
2609                                         strlen(fwd_modes) - 1);
2610                         strncat(fwd_modes, separator,
2611                                         sizeof(fwd_modes) -
2612                                         strlen(fwd_modes) - 1);
2613                 }
2614                 fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
2615         }
2616
2617         return fwd_modes;
2618 }
2619
2620 void
2621 set_pkt_forwarding_mode(const char *fwd_mode_name)
2622 {
2623         struct fwd_engine *fwd_eng;
2624         unsigned i;
2625
2626         i = 0;
2627         while ((fwd_eng = fwd_engines[i]) != NULL) {
2628                 if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
2629                         printf("Set %s packet forwarding mode%s\n",
2630                                fwd_mode_name,
2631                                retry_enabled == 0 ? "" : " with retry");
2632                         cur_fwd_eng = fwd_eng;
2633                         return;
2634                 }
2635                 i++;
2636         }
2637         printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
2638 }
2639
/* Set the global verbosity level used by the forwarding engines. */
void
set_verbose_level(uint16_t vb_level)
{
	printf("Change verbose level from %u to %u\n",
	       (unsigned int) verbose_level, (unsigned int) vb_level);
	verbose_level = vb_level;
}
2647
2648 void
2649 vlan_extend_set(portid_t port_id, int on)
2650 {
2651         int diag;
2652         int vlan_offload;
2653
2654         if (port_id_is_invalid(port_id, ENABLED_WARN))
2655                 return;
2656
2657         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2658
2659         if (on)
2660                 vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
2661         else
2662                 vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;
2663
2664         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2665         if (diag < 0)
2666                 printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed "
2667                "diag=%d\n", port_id, on, diag);
2668 }
2669
2670 void
2671 rx_vlan_strip_set(portid_t port_id, int on)
2672 {
2673         int diag;
2674         int vlan_offload;
2675
2676         if (port_id_is_invalid(port_id, ENABLED_WARN))
2677                 return;
2678
2679         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2680
2681         if (on)
2682                 vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
2683         else
2684                 vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;
2685
2686         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2687         if (diag < 0)
2688                 printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed "
2689                "diag=%d\n", port_id, on, diag);
2690 }
2691
2692 void
2693 rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
2694 {
2695         int diag;
2696
2697         if (port_id_is_invalid(port_id, ENABLED_WARN))
2698                 return;
2699
2700         diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
2701         if (diag < 0)
2702                 printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed "
2703                "diag=%d\n", port_id, queue_id, on, diag);
2704 }
2705
2706 void
2707 rx_vlan_filter_set(portid_t port_id, int on)
2708 {
2709         int diag;
2710         int vlan_offload;
2711
2712         if (port_id_is_invalid(port_id, ENABLED_WARN))
2713                 return;
2714
2715         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2716
2717         if (on)
2718                 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
2719         else
2720                 vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;
2721
2722         diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
2723         if (diag < 0)
2724                 printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed "
2725                "diag=%d\n", port_id, on, diag);
2726 }
2727
2728 int
2729 rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
2730 {
2731         int diag;
2732
2733         if (port_id_is_invalid(port_id, ENABLED_WARN))
2734                 return 1;
2735         if (vlan_id_is_invalid(vlan_id))
2736                 return 1;
2737         diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
2738         if (diag == 0)
2739                 return 0;
2740         printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed "
2741                "diag=%d\n",
2742                port_id, vlan_id, on, diag);
2743         return -1;
2744 }
2745
2746 void
2747 rx_vlan_all_filter_set(portid_t port_id, int on)
2748 {
2749         uint16_t vlan_id;
2750
2751         if (port_id_is_invalid(port_id, ENABLED_WARN))
2752                 return;
2753         for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
2754                 if (rx_vft_set(port_id, vlan_id, on))
2755                         break;
2756         }
2757 }
2758
2759 void
2760 vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
2761 {
2762         int diag;
2763
2764         if (port_id_is_invalid(port_id, ENABLED_WARN))
2765                 return;
2766
2767         diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
2768         if (diag == 0)
2769                 return;
2770
2771         printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed "
2772                "diag=%d\n",
2773                port_id, vlan_type, tp_id, diag);
2774 }
2775
2776 void
2777 tx_vlan_set(portid_t port_id, uint16_t vlan_id)
2778 {
2779         int vlan_offload;
2780         if (port_id_is_invalid(port_id, ENABLED_WARN))
2781                 return;
2782         if (vlan_id_is_invalid(vlan_id))
2783                 return;
2784
2785         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2786         if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
2787                 printf("Error, as QinQ has been enabled.\n");
2788                 return;
2789         }
2790
2791         tx_vlan_reset(port_id);
2792         ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
2793         ports[port_id].tx_vlan_id = vlan_id;
2794 }
2795
2796 void
2797 tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
2798 {
2799         int vlan_offload;
2800         if (port_id_is_invalid(port_id, ENABLED_WARN))
2801                 return;
2802         if (vlan_id_is_invalid(vlan_id))
2803                 return;
2804         if (vlan_id_is_invalid(vlan_id_outer))
2805                 return;
2806
2807         vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
2808         if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
2809                 printf("Error, as QinQ hasn't been enabled.\n");
2810                 return;
2811         }
2812
2813         tx_vlan_reset(port_id);
2814         ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
2815         ports[port_id].tx_vlan_id = vlan_id;
2816         ports[port_id].tx_vlan_id_outer = vlan_id_outer;
2817 }
2818
2819 void
2820 tx_vlan_reset(portid_t port_id)
2821 {
2822         if (port_id_is_invalid(port_id, ENABLED_WARN))
2823                 return;
2824         ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
2825                                 TESTPMD_TX_OFFLOAD_INSERT_QINQ);
2826         ports[port_id].tx_vlan_id = 0;
2827         ports[port_id].tx_vlan_id_outer = 0;
2828 }
2829
/*
 * Set or clear the port-based VLAN insert id (PVID).
 * NOTE(review): the return value of rte_eth_dev_set_vlan_pvid() is
 * ignored, so driver failures are silent here.
 */
void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
2838
2839 void
2840 set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
2841 {
2842         uint16_t i;
2843         uint8_t existing_mapping_found = 0;
2844
2845         if (port_id_is_invalid(port_id, ENABLED_WARN))
2846                 return;
2847
2848         if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
2849                 return;
2850
2851         if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
2852                 printf("map_value not in required range 0..%d\n",
2853                                 RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
2854                 return;
2855         }
2856
2857         if (!is_rx) { /*then tx*/
2858                 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
2859                         if ((tx_queue_stats_mappings[i].port_id == port_id) &&
2860                             (tx_queue_stats_mappings[i].queue_id == queue_id)) {
2861                                 tx_queue_stats_mappings[i].stats_counter_id = map_value;
2862                                 existing_mapping_found = 1;
2863                                 break;
2864                         }
2865                 }
2866                 if (!existing_mapping_found) { /* A new additional mapping... */
2867                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
2868                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
2869                         tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
2870                         nb_tx_queue_stats_mappings++;
2871                 }
2872         }
2873         else { /*rx*/
2874                 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
2875                         if ((rx_queue_stats_mappings[i].port_id == port_id) &&
2876                             (rx_queue_stats_mappings[i].queue_id == queue_id)) {
2877                                 rx_queue_stats_mappings[i].stats_counter_id = map_value;
2878                                 existing_mapping_found = 1;
2879                                 break;
2880                         }
2881                 }
2882                 if (!existing_mapping_found) { /* A new additional mapping... */
2883                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
2884                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
2885                         rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
2886                         nb_rx_queue_stats_mappings++;
2887                 }
2888         }
2889 }
2890
/* Toggle hiding of zero-valued entries in the xstats display. */
void
set_xstats_hide_zero(uint8_t on_off)
{
	xstats_hide_zero = on_off;
}
2896
/*
 * Print the FDIR input-field masks.  Which fields are shown depends on
 * the global FDIR mode: tunnel mode prints MAC/tunnel masks, while all
 * other modes except MAC-VLAN print the IPv4/IPv6 and L4 port masks.
 * Multi-byte masks are converted from network to host byte order.
 */
static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
			" tunnel_id: 0x%08x",
			mask->mac_addr_byte_mask, mask->tunnel_type_mask,
			rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
			rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
			rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
			rte_be_to_cpu_16(mask->src_port_mask),
			rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
			rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
			rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}
2931
2932 static inline void
2933 print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
2934 {
2935         struct rte_eth_flex_payload_cfg *cfg;
2936         uint32_t i, j;
2937
2938         for (i = 0; i < flex_conf->nb_payloads; i++) {
2939                 cfg = &flex_conf->flex_set[i];
2940                 if (cfg->type == RTE_ETH_RAW_PAYLOAD)
2941                         printf("\n    RAW:  ");
2942                 else if (cfg->type == RTE_ETH_L2_PAYLOAD)
2943                         printf("\n    L2_PAYLOAD:  ");
2944                 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
2945                         printf("\n    L3_PAYLOAD:  ");
2946                 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
2947                         printf("\n    L4_PAYLOAD:  ");
2948                 else
2949                         printf("\n    UNKNOWN PAYLOAD(%u):  ", cfg->type);
2950                 for (j = 0; j < num; j++)
2951                         printf("  %-5u", cfg->src_offset[j]);
2952         }
2953         printf("\n");
2954 }
2955
2956 static char *
2957 flowtype_to_str(uint16_t flow_type)
2958 {
2959         struct flow_type_info {
2960                 char str[32];
2961                 uint16_t ftype;
2962         };
2963
2964         uint8_t i;
2965         static struct flow_type_info flowtype_str_table[] = {
2966                 {"raw", RTE_ETH_FLOW_RAW},
2967                 {"ipv4", RTE_ETH_FLOW_IPV4},
2968                 {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
2969                 {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
2970                 {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
2971                 {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
2972                 {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
2973                 {"ipv6", RTE_ETH_FLOW_IPV6},
2974                 {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
2975                 {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
2976                 {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
2977                 {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
2978                 {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
2979                 {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
2980                 {"port", RTE_ETH_FLOW_PORT},
2981                 {"vxlan", RTE_ETH_FLOW_VXLAN},
2982                 {"geneve", RTE_ETH_FLOW_GENEVE},
2983                 {"nvgre", RTE_ETH_FLOW_NVGRE},
2984         };
2985
2986         for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
2987                 if (flowtype_str_table[i].ftype == flow_type)
2988                         return flowtype_str_table[i].str;
2989         }
2990
2991         return NULL;
2992 }
2993
/*
 * Print the configured FDIR flex masks: one row per flow type, with
 * @num mask bytes per row.  Unknown flow types are labeled "unknown".
 */
static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}
3010
3011 static inline void
3012 print_fdir_flow_type(uint32_t flow_types_mask)
3013 {
3014         int i;
3015         char *p;
3016
3017         for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
3018                 if (!(flow_types_mask & (1 << i)))
3019                         continue;
3020                 p = flowtype_to_str(i);
3021                 if (p)
3022                         printf(" %s", p);
3023                 else
3024                         printf(" unknown");
3025         }
3026         printf("\n");
3027 }
3028
/*
 * Display flow director (FDIR) configuration and statistics for a port:
 * mode, supported flow types, flex payload layout, field masks, and
 * the entry/collision counters reported by the driver.
 */
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;
	int ret;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("\n FDIR is not supported on port %-2d\n",
			port_id);
		return;
	}

	/* Query configuration and statistics from the driver.
	 * NOTE(review): the return values of both filter_ctrl calls are
	 * ignored; on failure the zero-initialized structs are printed. */
	memset(&fdir_info, 0, sizeof(fdir_info));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			       RTE_ETH_FILTER_INFO, &fdir_info);
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			       RTE_ETH_FILTER_STATS, &fdir_stat);
	printf("\n  %s FDIR infos for port %-2d     %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf("  MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf("  PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf("  PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf("  PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf("  SIGNATURE\n");
	else
		printf("  DISABLE\n");
	/* MAC-VLAN and tunnel modes do not report per-flow-type support. */
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf("  SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf("  FLEX PAYLOAD INFO:\n");
	printf("  max_len:       %-10"PRIu32"  payload_limit: %-10"PRIu32"\n"
	       "  payload_unit:  %-10"PRIu32"  payload_seg:   %-10"PRIu32"\n"
	       "  bitmask_unit:  %-10"PRIu32"  bitmask_num:   %-10"PRIu32"\n",
		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
		fdir_info.flex_payload_unit,
		fdir_info.max_flex_payload_segment_num,
		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf("  MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf("  FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf("  FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload);
	}
	printf("  guarant_count: %-10"PRIu32"  best_count:    %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf("  guarant_space: %-10"PRIu32"  best_space:    %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf("  collision:     %-10"PRIu32"  free:          %"PRIu32"\n"
	       "  maxhash:       %-10"PRIu32"  maxlen:        %"PRIu32"\n"
	       "  add:           %-10"PRIu64"  remove:        %"PRIu64"\n"
	       "  f_add:         %-10"PRIu64"  f_remove:      %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}
3104
/*
 * Store a flex mask for one flow type in the port's FDIR configuration.
 * An existing entry for the same flow type is overwritten; otherwise a
 * new entry is appended, if the table has room.
 * NOTE(review): port_id is not validated here — callers are expected to
 * pass a valid port index.
 */
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	/* Look for an entry already configured for this flow type. */
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	/* Not found: append a new entry, if the table is not full. */
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Can not set flex"
				" mask for flow_type(%u).", cfg->flow_type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_mask[idx],
			 cfg,
			 sizeof(struct rte_eth_fdir_flex_mask));
}
3134
/*
 * Store a flex payload configuration for one payload type in the port's
 * FDIR configuration.  An existing entry for the same payload type is
 * overwritten; otherwise a new entry is appended, if the table has room.
 * NOTE(review): port_id is not validated here — callers are expected to
 * pass a valid port index.
 */
void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	/* Look for an entry already configured for this payload type. */
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	/* Not found: append a new entry, if the table is not full. */
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Can not set"
				" flex payload for type(%u).", cfg->type);
			return;
		}
	}
	rte_memcpy(&flex_conf->flex_set[idx],
			 cfg,
			 sizeof(struct rte_eth_flex_payload_cfg));

}
3165
/*
 * Enable/disable RX (@is_rx != 0) or TX traffic for a VF of a port.
 * Only implemented for the ixgbe PMD; when that PMD is compiled out,
 * an "unsupported" message is printed instead.
 */
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
#ifdef RTE_LIBRTE_IXGBE_PMD
	int diag;

	if (is_rx)
		diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on);
	else
		diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on);

	if (diag == 0)
		return;
	printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n",
			is_rx ? "rx" : "tx", port_id, diag);
	return;
#endif
	/* Reached only when the ixgbe PMD is compiled out. */
	printf("VF %s setting not supported for port %d\n",
			is_rx ? "Rx" : "Tx", port_id);
	RTE_SET_USED(vf);
	RTE_SET_USED(on);
}
3188
3189 int
3190 set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
3191 {
3192         int diag;
3193         struct rte_eth_link link;
3194
3195         if (port_id_is_invalid(port_id, ENABLED_WARN))
3196                 return 1;
3197         rte_eth_link_get_nowait(port_id, &link);
3198         if (rate > link.link_speed) {
3199                 printf("Invalid rate value:%u bigger than link speed: %u\n",
3200                         rate, link.link_speed);
3201                 return 1;
3202         }
3203         diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
3204         if (diag == 0)
3205                 return diag;
3206         printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
3207                 port_id, diag);
3208         return diag;
3209 }
3210
/*
 * Set a Tx rate limit on the queues of VF vf selected by the bit-mask
 * q_msk on a port.
 *
 * Each PMD-specific implementation compiled in is tried in turn (ixgbe,
 * then bnxt) until one accepts the port. Returns 0 on success, the
 * driver's error code on failure, or -ENOTSUP when no compiled-in
 * driver claimed the port.
 */
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag = -ENOTSUP;

	/* Silence unused-parameter warnings when no PMD below is built in. */
	RTE_SET_USED(vf);
	RTE_SET_USED(rate);
	RTE_SET_USED(q_msk);

#ifdef RTE_LIBRTE_IXGBE_PMD
	if (diag == -ENOTSUP)
		diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
						       q_msk);
#endif
#ifdef RTE_LIBRTE_BNXT_PMD
	/* Only tried when the ixgbe call did not claim the port. */
	if (diag == -ENOTSUP)
		diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
	if (diag == 0)
		return diag;

	printf("set_vf_rate_limit for port_id=%d failed diag=%d\n",
		port_id, diag);
	return diag;
}
3236
3237 /*
3238  * Functions to manage the set of filtered Multicast MAC addresses.
3239  *
3240  * A pool of filtered multicast MAC addresses is associated with each port.
3241  * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
3242  * The address of the pool and the number of valid multicast MAC addresses
3243  * recorded in the pool are stored in the fields "mc_addr_pool" and
3244  * "mc_addr_nb" of the "rte_port" data structure.
3245  *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API requires
 * that it be supplied a contiguous array of multicast MAC addresses.
3248  * To comply with this constraint, the set of multicast addresses recorded
3249  * into the pool are systematically compacted at the beginning of the pool.
3250  * Hence, when a multicast address is removed from the pool, all following
3251  * addresses, if any, are copied back to keep the set contiguous.
3252  */
3253 #define MCAST_POOL_INC 32
3254
3255 static int
3256 mcast_addr_pool_extend(struct rte_port *port)
3257 {
3258         struct ether_addr *mc_pool;
3259         size_t mc_pool_size;
3260
3261         /*
3262          * If a free entry is available at the end of the pool, just
3263          * increment the number of recorded multicast addresses.
3264          */
3265         if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
3266                 port->mc_addr_nb++;
3267                 return 0;
3268         }
3269
3270         /*
3271          * [re]allocate a pool with MCAST_POOL_INC more entries.
3272          * The previous test guarantees that port->mc_addr_nb is a multiple
3273          * of MCAST_POOL_INC.
3274          */
3275         mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
3276                                                     MCAST_POOL_INC);
3277         mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
3278                                                 mc_pool_size);
3279         if (mc_pool == NULL) {
3280                 printf("allocation of pool of %u multicast addresses failed\n",
3281                        port->mc_addr_nb + MCAST_POOL_INC);
3282                 return -ENOMEM;
3283         }
3284
3285         port->mc_addr_pool = mc_pool;
3286         port->mc_addr_nb++;
3287         return 0;
3288
3289 }
3290
3291 static void
3292 mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
3293 {
3294         port->mc_addr_nb--;
3295         if (addr_idx == port->mc_addr_nb) {
3296                 /* No need to recompact the set of multicast addressses. */
3297                 if (port->mc_addr_nb == 0) {
3298                         /* free the pool of multicast addresses. */
3299                         free(port->mc_addr_pool);
3300                         port->mc_addr_pool = NULL;
3301                 }
3302                 return;
3303         }
3304         memmove(&port->mc_addr_pool[addr_idx],
3305                 &port->mc_addr_pool[addr_idx + 1],
3306                 sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
3307 }
3308
3309 static void
3310 eth_port_multicast_addr_list_set(portid_t port_id)
3311 {
3312         struct rte_port *port;
3313         int diag;
3314
3315         port = &ports[port_id];
3316         diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
3317                                             port->mc_addr_nb);
3318         if (diag == 0)
3319                 return;
3320         printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
3321                port->mc_addr_nb, port_id, -diag);
3322 }
3323
3324 void
3325 mcast_addr_add(portid_t port_id, struct ether_addr *mc_addr)
3326 {
3327         struct rte_port *port;
3328         uint32_t i;
3329
3330         if (port_id_is_invalid(port_id, ENABLED_WARN))
3331                 return;
3332
3333         port = &ports[port_id];
3334
3335         /*
3336          * Check that the added multicast MAC address is not already recorded
3337          * in the pool of multicast addresses.
3338          */
3339         for (i = 0; i < port->mc_addr_nb; i++) {
3340                 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
3341                         printf("multicast address already filtered by port\n");
3342                         return;
3343                 }
3344         }
3345
3346         if (mcast_addr_pool_extend(port) != 0)
3347                 return;
3348         ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
3349         eth_port_multicast_addr_list_set(port_id);
3350 }
3351
3352 void
3353 mcast_addr_remove(portid_t port_id, struct ether_addr *mc_addr)
3354 {
3355         struct rte_port *port;
3356         uint32_t i;
3357
3358         if (port_id_is_invalid(port_id, ENABLED_WARN))
3359                 return;
3360
3361         port = &ports[port_id];
3362
3363         /*
3364          * Search the pool of multicast MAC addresses for the removed address.
3365          */
3366         for (i = 0; i < port->mc_addr_nb; i++) {
3367                 if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
3368                         break;
3369         }
3370         if (i == port->mc_addr_nb) {
3371                 printf("multicast address not filtered by port %d\n", port_id);
3372                 return;
3373         }
3374
3375         mcast_addr_pool_remove(port, i);
3376         eth_port_multicast_addr_list_set(port_id);
3377 }
3378
3379 void
3380 port_dcb_info_display(portid_t port_id)
3381 {
3382         struct rte_eth_dcb_info dcb_info;
3383         uint16_t i;
3384         int ret;
3385         static const char *border = "================";
3386
3387         if (port_id_is_invalid(port_id, ENABLED_WARN))
3388                 return;
3389
3390         ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
3391         if (ret) {
3392                 printf("\n Failed to get dcb infos on port %-2d\n",
3393                         port_id);
3394                 return;
3395         }
3396         printf("\n  %s DCB infos for port %-2d  %s\n", border, port_id, border);
3397         printf("  TC NUMBER: %d\n", dcb_info.nb_tcs);
3398         printf("\n  TC :        ");
3399         for (i = 0; i < dcb_info.nb_tcs; i++)
3400                 printf("\t%4d", i);
3401         printf("\n  Priority :  ");
3402         for (i = 0; i < dcb_info.nb_tcs; i++)
3403                 printf("\t%4d", dcb_info.prio_tc[i]);
3404         printf("\n  BW percent :");
3405         for (i = 0; i < dcb_info.nb_tcs; i++)
3406                 printf("\t%4d%%", dcb_info.tc_bws[i]);
3407         printf("\n  RXQ base :  ");
3408         for (i = 0; i < dcb_info.nb_tcs; i++)
3409                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
3410         printf("\n  RXQ number :");
3411         for (i = 0; i < dcb_info.nb_tcs; i++)
3412                 printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
3413         printf("\n  TXQ base :  ");
3414         for (i = 0; i < dcb_info.nb_tcs; i++)
3415                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
3416         printf("\n  TXQ number :");
3417         for (i = 0; i < dcb_info.nb_tcs; i++)
3418                 printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
3419         printf("\n");
3420 }
3421
/*
 * Load a DDP package file into a freshly allocated buffer.
 *
 * On success, returns the buffer and stores its length in *size (when
 * size is not NULL); the caller owns the buffer and releases it with
 * close_ddp_package_file() or free(). On any failure, returns NULL and
 * leaves *size at 0, after printing a diagnostic.
 */
uint8_t *
open_ddp_package_file(const char *file_path, uint32_t *size)
{
	int fd = open(file_path, O_RDONLY);
	off_t pkg_size;
	uint8_t *buf = NULL;
	ssize_t nread;
	struct stat st_buf;

	if (size)
		*size = 0;

	if (fd == -1) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return NULL;
	}

	/* Only regular files are accepted as DDP packages. */
	if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return NULL;
	}

	pkg_size = st_buf.st_size;
	if (pkg_size < 0) {
		close(fd);
		printf("%s: File operations failed\n", __func__);
		return NULL;
	}

	/* No cast on malloc(): void * converts implicitly in C. */
	buf = malloc(pkg_size);
	if (!buf) {
		close(fd);
		printf("%s: Failed to malloc memory\n", __func__);
		return NULL;
	}

	/*
	 * Bug fix: only 'ret < 0' was checked before, so a short read left
	 * the caller with a partially filled buffer reported at full size.
	 * Require the whole file to be read in one call.
	 */
	nread = read(fd, buf, pkg_size);
	if (nread != (ssize_t)pkg_size) {
		close(fd);
		printf("%s: File read operation failed\n", __func__);
		free(buf);
		return NULL;
	}

	if (size)
		*size = pkg_size;

	close(fd);

	return buf;
}
3474
/*
 * Write size bytes from buf to file_path, creating or truncating the
 * file. Returns 0 on success, -1 on any failure (diagnostic printed).
 */
int
save_ddp_package_file(const char *file_path, uint8_t *buf, uint32_t size)
{
	FILE *fh = fopen(file_path, "wb");

	if (fh == NULL) {
		printf("%s: Failed to open %s\n", __func__, file_path);
		return -1;
	}

	if (fwrite(buf, 1, size, fh) != size) {
		fclose(fh);
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	/*
	 * Bug fix: fclose() flushes buffered data and can fail (e.g. on a
	 * full disk); its result was previously ignored, so a truncated
	 * file could be reported as saved successfully.
	 */
	if (fclose(fh) != 0) {
		printf("%s: File write operation failed\n", __func__);
		return -1;
	}

	return 0;
}
3495
/*
 * Release a buffer obtained from open_ddp_package_file().
 * Returns 0 when a buffer was freed, -1 when buf is NULL.
 */
int
close_ddp_package_file(uint8_t *buf)
{
	if (buf == NULL)
		return -1;

	free(buf);
	return 0;
}
3506
/*
 * Display the i40e queue regions of a port: per-region id, queue range,
 * user priorities and flow types.
 *
 * buf is taken as void * so the prototype does not depend on the i40e
 * PMD headers; it must point to a struct rte_pmd_i40e_queue_regions
 * (presumably filled by the i40e queue-region query API -- confirm with
 * callers). Compiled out (parameters ignored) when RTE_LIBRTE_I40E_PMD
 * is not defined.
 */
void
port_queue_region_info_display(portid_t port_id, void *buf)
{
#ifdef RTE_LIBRTE_I40E_PMD
	uint16_t i, j;
	struct rte_pmd_i40e_queue_regions *info =
		(struct rte_pmd_i40e_queue_regions *)buf;
	static const char *queue_region_info_stats_border = "-------";

	/* NOTE(review): there is no early return here, so the header and a
	 * zero count are still printed after this message. */
	if (!info->queue_region_number)
		printf("there is no region has been set before");

	printf("\n      %s All queue region info for port=%2d %s",
			queue_region_info_stats_border, port_id,
			queue_region_info_stats_border);
	printf("\n      queue_region_number: %-14u \n",
			info->queue_region_number);

	for (i = 0; i < info->queue_region_number; i++) {
		printf("\n      region_id: %-14u queue_number: %-14u "
			"queue_start_index: %-14u \n",
			info->region[i].region_id,
			info->region[i].queue_num,
			info->region[i].queue_start_index);

		printf("  user_priority_num is  %-14u :",
					info->region[i].user_priority_num);
		for (j = 0; j < info->region[i].user_priority_num; j++)
			printf(" %-14u ", info->region[i].user_priority[j]);

		printf("\n      flowtype_num is  %-14u :",
				info->region[i].flowtype_num);
		for (j = 0; j < info->region[i].flowtype_num; j++)
			printf(" %-14u ", info->region[i].hw_flowtype[j]);
	}
#else
	/* i40e PMD not compiled in: nothing to display. */
	RTE_SET_USED(port_id);
	RTE_SET_USED(buf);
#endif

	printf("\n\n");
}