/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*   BSD LICENSE
 *
 *   Copyright 2013-2014 6WIND S.A.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>

#include "testpmd.h"
static char *flowtype_to_str(uint16_t flow_type);

static const struct {
	enum tx_pkt_split split;
	const char *name;
} tx_split_name[] = {
	{
		.split = TX_PKT_SPLIT_OFF,
		.name = "off",
	},
	{
		.split = TX_PKT_SPLIT_ON,
		.name = "on",
	},
	{
		.split = TX_PKT_SPLIT_RND,
		.name = "rand",
	},
};

struct rss_type_info {
	char str[32];
	uint64_t rss_type;
};

static const struct rss_type_info rss_type_table[] = {
	{ "ipv4", ETH_RSS_IPV4 },
	{ "ipv4-frag", ETH_RSS_FRAG_IPV4 },
	{ "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP },
	{ "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP },
	{ "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP },
	{ "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER },
	{ "ipv6", ETH_RSS_IPV6 },
	{ "ipv6-frag", ETH_RSS_FRAG_IPV6 },
	{ "ipv6-tcp", ETH_RSS_NONFRAG_IPV6_TCP },
	{ "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP },
	{ "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP },
	{ "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER },
	{ "l2-payload", ETH_RSS_L2_PAYLOAD },
	{ "ipv6-ex", ETH_RSS_IPV6_EX },
	{ "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX },
	{ "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX },
	{ "port", ETH_RSS_PORT },
	{ "vxlan", ETH_RSS_VXLAN },
	{ "geneve", ETH_RSS_GENEVE },
	{ "nvgre", ETH_RSS_NVGRE },
};
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	char buf[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

void
nic_stats_display(portid_t port_id)
{
	static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS];
	static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS];
	static uint64_t prev_cycles[RTE_MAX_ETHPORTS];
	uint64_t diff_pkts_rx, diff_pkts_tx, diff_cycles;
	uint64_t mpps_rx, mpps_tx;
	struct rte_eth_stats stats;
	struct rte_port *port = &ports[port_id];
	uint8_t i;
	portid_t pid;

	static const char *nic_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_get(port_id, &stats);
	printf("\n  %s NIC statistics for port %-2d %s\n",
	       nic_stats_border, port_id, nic_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: "
		       "%-"PRIu64"\n",
		       stats.ipackets, stats.imissed, stats.ibytes);
		printf("  RX-errors: %-"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf: %-10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
		       "%-"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	} else {
		printf("  RX-packets: %10"PRIu64" RX-errors: %10"PRIu64
		       " RX-bytes: %10"PRIu64"\n",
		       stats.ipackets, stats.ierrors, stats.ibytes);
		printf("  RX-errors: %10"PRIu64"\n", stats.ierrors);
		printf("  RX-nombuf: %10"PRIu64"\n",
		       stats.rx_nombuf);
		printf("  TX-packets: %10"PRIu64" TX-errors: %10"PRIu64
		       " TX-bytes: %10"PRIu64"\n",
		       stats.opackets, stats.oerrors, stats.obytes);
	}

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets: %10"PRIu64
			       " RX-errors: %10"PRIu64
			       " RX-bytes: %10"PRIu64"\n",
			       i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]);
		}
	}
	if (port->tx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets: %10"PRIu64
			       " TX-bytes: %10"PRIu64"\n",
			       i, stats.q_opackets[i], stats.q_obytes[i]);
		}
	}

	diff_cycles = prev_cycles[port_id];
	prev_cycles[port_id] = rte_rdtsc();
	if (diff_cycles > 0)
		diff_cycles = prev_cycles[port_id] - diff_cycles;

	diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ?
		(stats.ipackets - prev_pkts_rx[port_id]) : 0;
	diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ?
		(stats.opackets - prev_pkts_tx[port_id]) : 0;
	prev_pkts_rx[port_id] = stats.ipackets;
	prev_pkts_tx[port_id] = stats.opackets;
	mpps_rx = diff_cycles > 0 ?
		diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0;
	mpps_tx = diff_cycles > 0 ?
		diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0;
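	/*
	 * Illustrative note on the arithmetic above: packets-per-second is
	 * recovered from the TSC as diff_pkts * tsc_hz / diff_cycles.  For
	 * example, 12,000,000 packets counted over 1,200,000,000 cycles of
	 * a 2.4 GHz TSC gives 12e6 * 2.4e9 / 1.2e9 = 24,000,000 pps.  The
	 * first call after startup reports 0 because diff_cycles is 0.
	 */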
	printf("\n  Throughput (since last show)\n");
	printf("  Rx-pps: %12"PRIu64"\n  Tx-pps: %12"PRIu64"\n",
			mpps_rx, mpps_tx);

	printf("  %s############################%s\n",
	       nic_stats_border, nic_stats_border);
}
void
nic_stats_clear(portid_t port_id)
{
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	rte_eth_stats_reset(port_id);
	printf("\n  NIC statistics for port %d cleared\n", port_id);
}

void
nic_xstats_display(portid_t port_id)
{
	struct rte_eth_xstat *xstats;
	int cnt_xstats, idx_xstat;
	struct rte_eth_xstat_name *xstats_names;

	printf("###### NIC extended statistics for port %-2d\n", port_id);
	if (!rte_eth_dev_is_valid_port(port_id)) {
		printf("Error: Invalid port number %i\n", port_id);
		return;
	}

	/* Get count */
	cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (cnt_xstats < 0) {
		printf("Error: Cannot get count of xstats\n");
		return;
	}

	/* Get id-name lookup table */
	xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats);
	if (xstats_names == NULL) {
		printf("Cannot allocate memory for xstats lookup\n");
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get_names(
			port_id, xstats_names, cnt_xstats)) {
		printf("Error: Cannot get xstats lookup\n");
		free(xstats_names);
		return;
	}

	/* Get stats themselves */
	xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats);
	if (xstats == NULL) {
		printf("Cannot allocate memory for xstats\n");
		free(xstats_names);
		return;
	}
	if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) {
		printf("Error: Unable to get xstats\n");
		free(xstats_names);
		free(xstats);
		return;
	}

	/* Display xstats */
	for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++)
		printf("%s: %"PRIu64"\n",
		       xstats_names[idx_xstat].name,
		       xstats[idx_xstat].value);
	free(xstats_names);
	free(xstats);
}
void
nic_xstats_clear(portid_t port_id)
{
	rte_eth_xstats_reset(port_id);
}

void
nic_stats_mapping_display(portid_t port_id)
{
	struct rte_port *port = &ports[port_id];
	uint16_t i;
	portid_t pid;

	static const char *nic_stats_mapping_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("Port id %d - either does not support queue statistic mapping or"
		       " no queue statistic mapping set\n", port_id);
		return;
	}

	printf("\n  %s NIC statistics mapping for port %-2d %s\n",
	       nic_stats_mapping_border, port_id, nic_stats_mapping_border);

	if (port->rx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if (rx_queue_stats_mappings[i].port_id == port_id) {
				printf("  RX-queue %2d mapped to Stats Reg %2d\n",
				       rx_queue_stats_mappings[i].queue_id,
				       rx_queue_stats_mappings[i].stats_counter_id);
			}
		}
		printf("\n");
	}

	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if (tx_queue_stats_mappings[i].port_id == port_id) {
				printf("  TX-queue %2d mapped to Stats Reg %2d\n",
				       tx_queue_stats_mappings[i].queue_id,
				       tx_queue_stats_mappings[i].stats_counter_id);
			}
		}
	}

	printf("  %s####################################%s\n",
	       nic_stats_mapping_border, nic_stats_mapping_border);
}
void
rx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %hhu, "
		       "RX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, RX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name);
	printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh);
	printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh);
	printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh);
	printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh);
	printf("\nRX drop packets: %s",
	       (qinfo.conf.rx_drop_en != 0) ? "on" : "off");
	printf("\nRX deferred start: %s",
	       (qinfo.conf.rx_deferred_start != 0) ? "on" : "off");
	printf("\nRX scattered packets: %s",
	       (qinfo.scattered_rx != 0) ? "on" : "off");
	printf("\nNumber of RXDs: %hu", qinfo.nb_desc);
	printf("\n");
}

void
tx_queue_infos_display(portid_t port_id, uint16_t queue_id)
{
	struct rte_eth_txq_info qinfo;
	int32_t rc;
	static const char *info_border = "*********************";

	rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	if (rc != 0) {
		printf("Failed to retrieve information for port: %hhu, "
		       "TX queue: %hu\nerror desc: %s(%d)\n",
		       port_id, queue_id, strerror(-rc), rc);
		return;
	}

	printf("\n%s Infos for port %-2u, TX queue %-2u %s",
	       info_border, port_id, queue_id, info_border);

	printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh);
	printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh);
	printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh);
	printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh);
	printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh);
	printf("\nTX flags: %#x", qinfo.conf.txq_flags);
	printf("\nTX deferred start: %s",
	       (qinfo.conf.tx_deferred_start != 0) ? "on" : "off");
	printf("\nNumber of TXDs: %hu", qinfo.nb_desc);
	printf("\n");
}
void
port_infos_display(portid_t port_id)
{
	struct rte_port *port;
	struct ether_addr mac_addr;
	struct rte_eth_link link;
	struct rte_eth_dev_info dev_info;
	int vlan_offload;
	struct rte_mempool * mp;
	static const char *info_border = "*********************";
	portid_t pid;

	if (port_id_is_invalid(port_id, ENABLED_WARN)) {
		printf("Valid port range is [0");
		FOREACH_PORT(pid, ports)
			printf(", %d", pid);
		printf("]\n");
		return;
	}
	port = &ports[port_id];
	rte_eth_link_get_nowait(port_id, &link);
	printf("\n%s Infos for port %-2d %s\n",
	       info_border, port_id, info_border);
	rte_eth_macaddr_get(port_id, &mac_addr);
	print_ethaddr("MAC address: ", &mac_addr);
	printf("\nConnect to socket: %u", port->socket_id);

	if (port_numa[port_id] != NUMA_NO_CONFIG) {
		mp = mbuf_pool_find(port_numa[port_id]);
		if (mp)
			printf("\nmemory allocation on the socket: %d",
			       port_numa[port_id]);
	} else
		printf("\nmemory allocation on the socket: %u", port->socket_id);

	printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
	printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
	printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
	       ("full-duplex") : ("half-duplex"));
	printf("Promiscuous mode: %s\n",
	       rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
	printf("Allmulticast mode: %s\n",
	       rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
	printf("Maximum number of MAC addresses: %u\n",
	       (unsigned int)(port->dev_info.max_mac_addrs));
	printf("Maximum number of MAC addresses of hash filtering: %u\n",
	       (unsigned int)(port->dev_info.max_hash_mac_addrs));

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload >= 0) {
		printf("VLAN offload:\n");
		if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD)
			printf("  strip on\n");
		else
			printf("  strip off\n");

		if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD)
			printf("  filter on\n");
		else
			printf("  filter off\n");

		if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)
			printf("  qinq(extend) on\n");
		else
			printf("  qinq(extend) off\n");
	}

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.hash_key_size > 0)
		printf("Hash key size in bytes: %u\n", dev_info.hash_key_size);
	if (dev_info.reta_size > 0)
		printf("Redirection table size: %u\n", dev_info.reta_size);
	if (!dev_info.flow_type_rss_offloads)
		printf("No flow type is supported.\n");
	else {
		uint16_t i;
		char *p;

		printf("Supported flow types:\n");
		for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < RTE_ETH_FLOW_MAX;
		     i++) {
			if (!(dev_info.flow_type_rss_offloads & (1ULL << i)))
				continue;
			p = flowtype_to_str(i);
			printf("  %s\n", (p ? p : "unknown"));
		}
	}

	printf("Max possible RX queues: %u\n", dev_info.max_rx_queues);
	printf("Max possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_max);
	printf("Min possible number of RXDs per queue: %hu\n",
	       dev_info.rx_desc_lim.nb_min);
	printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align);

	printf("Max possible TX queues: %u\n", dev_info.max_tx_queues);
	printf("Max possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_max);
	printf("Min possible number of TXDs per queue: %hu\n",
	       dev_info.tx_desc_lim.nb_min);
	printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align);
}
int
port_id_is_invalid(portid_t port_id, enum print_warning warning)
{
	if (port_id == (portid_t)RTE_PORT_ALL)
		return 0;

	if (port_id < RTE_MAX_ETHPORTS && ports[port_id].enabled)
		return 0;

	if (warning == ENABLED_WARN)
		printf("Invalid port %d\n", port_id);

	return 1;
}

static int
vlan_id_is_invalid(uint16_t vlan_id)
{
	if (vlan_id < 4096)
		return 0;
	printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
	return 1;
}

static int
port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
{
	uint64_t pci_len;

	if (reg_off & 0x3) {
		printf("Port register offset 0x%X not aligned on a 4-byte "
		       "boundary\n",
		       (unsigned)reg_off);
		return 1;
	}
	pci_len = ports[port_id].dev_info.pci_dev->mem_resource[0].len;
	if (reg_off >= pci_len) {
		printf("Port %d: register offset %u (0x%X) out of port PCI "
		       "resource (length=%"PRIu64")\n",
		       port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len);
		return 1;
	}
	return 0;
}

static int
reg_bit_pos_is_invalid(uint8_t bit_pos)
{
	if (bit_pos <= 31)
		return 0;
	printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
	return 1;
}

#define display_port_and_reg_off(port_id, reg_off) \
	printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))

static inline void
display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v);
}
void
port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_x))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
}

void
port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
			   uint8_t bit1_pos, uint8_t bit2_pos)
{
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v >>= l_bit;
	if (h_bit < 31)
		reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
	display_port_and_reg_off(port_id, (unsigned)reg_off);
	printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
	       ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
}
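/*
 * Worked example of the extraction above (illustrative only): reading
 * reg_v = 0xABCD and requesting bits [4, 11] shifts right by l_bit = 4
 * (giving 0xABC) and masks with (1 << 8) - 1 = 0xFF, leaving 0xBC.  The
 * "%0*X" width argument ((11 - 4) / 4) + 1 = 2 prints two hex digits.
 */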
void
port_reg_display(portid_t port_id, uint32_t reg_off)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
		 uint8_t bit_v)
{
	uint32_t reg_v;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit_pos))
		return;
	if (bit_v > 1) {
		printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	if (bit_v == 0)
		reg_v &= ~(1 << bit_pos);
	else
		reg_v |= (1 << bit_pos);
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
void
port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
		       uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
{
	uint32_t max_v;
	uint32_t reg_v;
	uint8_t  l_bit;
	uint8_t  h_bit;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	if (reg_bit_pos_is_invalid(bit1_pos))
		return;
	if (reg_bit_pos_is_invalid(bit2_pos))
		return;
	if (bit1_pos > bit2_pos)
		l_bit = bit2_pos, h_bit = bit1_pos;
	else
		l_bit = bit1_pos, h_bit = bit2_pos;

	if ((h_bit - l_bit) < 31)
		max_v = (1 << (h_bit - l_bit + 1)) - 1;
	else
		max_v = 0xFFFFFFFF;

	if (value > max_v) {
		printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
		       (unsigned)value, (unsigned)value,
		       (unsigned)max_v, (unsigned)max_v);
		return;
	}
	reg_v = port_id_pci_reg_read(port_id, reg_off);
	reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
	reg_v |= (value << l_bit); /* Set changed bits */
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}
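/*
 * Worked example of the read-modify-write above (illustrative only):
 * writing value 0x5 into bits [8, 11] computes max_v = 0xF, clears the
 * field with reg_v &= ~(0xF << 8) and sets it with reg_v |= (0x5 << 8),
 * leaving every other bit of the register untouched.
 */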
void
port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (port_reg_off_is_invalid(port_id, reg_off))
		return;
	port_id_pci_reg_write(port_id, reg_off, reg_v);
	display_port_reg_value(port_id, reg_off, reg_v);
}

void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	diag = rte_eth_dev_set_mtu(port_id, mtu);
	if (diag == 0)
		return;
	printf("Set MTU failed. diag=%d\n", diag);
}
/*
 * RX/TX ring descriptors display functions.
 */
int
rx_queue_id_is_invalid(queueid_t rxq_id)
{
	if (rxq_id < nb_rxq)
		return 0;
	printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq);
	return 1;
}

int
tx_queue_id_is_invalid(queueid_t txq_id)
{
	if (txq_id < nb_txq)
		return 0;
	printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
	return 1;
}

static int
rx_desc_id_is_invalid(uint16_t rxdesc_id)
{
	if (rxdesc_id < nb_rxd)
		return 0;
	printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
	       rxdesc_id, nb_rxd);
	return 1;
}

static int
tx_desc_id_is_invalid(uint16_t txdesc_id)
{
	if (txdesc_id < nb_txd)
		return 0;
	printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
	       txdesc_id, nb_txd);
	return 1;
}
static const struct rte_memzone *
ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
		 ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
	mz = rte_memzone_lookup(mz_name);
	if (mz == NULL)
		printf("%s ring memory zone of (port %d, queue %d) not "
		       "found (zone name = %s)\n",
		       ring_name, port_id, q_id, mz_name);
	return mz;
}
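/*
 * The zone name follows the "<driver>_<ring>_<port>_<queue>" convention
 * used when the PMD reserved the descriptor ring.  For example (driver
 * name illustrative), driver "rte_ixgbe_pmd" with ring "rx_ring", port 0
 * and queue 1 looks up the memzone "rte_ixgbe_pmd_rx_ring_0_1".
 */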
union igb_ring_dword {
	uint64_t dword;
	struct {
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		uint32_t hi;
		uint32_t lo;
#else
		uint32_t lo;
		uint32_t hi;
#endif
	} words;
};

struct igb_ring_desc_32_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
	union igb_ring_dword resv1;
	union igb_ring_dword resv2;
};

struct igb_ring_desc_16_bytes {
	union igb_ring_dword lo_dword;
	union igb_ring_dword hi_dword;
};

static inline void
ring_rxd_display_dword(union igb_ring_dword dword)
{
	printf("    0x%08X - 0x%08X\n", (unsigned)dword.words.lo,
	       (unsigned)dword.words.hi);
}
static inline void
ring_rx_descriptor_display(const struct rte_memzone *ring_mz,
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
			   uint8_t port_id,
#else
			   __rte_unused uint8_t port_id,
#endif
			   uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring =
		(struct igb_ring_desc_16_bytes *)ring_mz->addr;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	struct rte_eth_dev_info dev_info;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (strstr(dev_info.driver_name, "i40e") != NULL) {
		/* 32 bytes RX descriptor, i40e only */
		struct igb_ring_desc_32_bytes *ring =
			(struct igb_ring_desc_32_bytes *)ring_mz->addr;
		ring[desc_id].lo_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
		ring_rxd_display_dword(ring[desc_id].lo_dword);
		ring[desc_id].hi_dword.dword =
			rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
		ring_rxd_display_dword(ring[desc_id].hi_dword);
		ring[desc_id].resv1.dword =
			rte_le_to_cpu_64(ring[desc_id].resv1.dword);
		ring_rxd_display_dword(ring[desc_id].resv1);
		ring[desc_id].resv2.dword =
			rte_le_to_cpu_64(ring[desc_id].resv2.dword);
		ring_rxd_display_dword(ring[desc_id].resv2);

		return;
	}
#endif
	/* 16 bytes RX descriptor */
	ring[desc_id].lo_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	ring_rxd_display_dword(ring[desc_id].lo_dword);
	ring[desc_id].hi_dword.dword =
		rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	ring_rxd_display_dword(ring[desc_id].hi_dword);
}

static void
ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
{
	struct igb_ring_desc_16_bytes *ring;
	struct igb_ring_desc_16_bytes txd;

	ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr;
	txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword);
	txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword);
	printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
	       (unsigned)txd.lo_dword.words.lo,
	       (unsigned)txd.lo_dword.words.hi,
	       (unsigned)txd.hi_dword.words.lo,
	       (unsigned)txd.hi_dword.words.hi);
}
void
rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
{
	const struct rte_memzone *rx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (rx_queue_id_is_invalid(rxq_id))
		return;
	if (rx_desc_id_is_invalid(rxd_id))
		return;
	rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
	if (rx_mz == NULL)
		return;
	ring_rx_descriptor_display(rx_mz, port_id, rxd_id);
}

void
tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
{
	const struct rte_memzone *tx_mz;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (tx_queue_id_is_invalid(txq_id))
		return;
	if (tx_desc_id_is_invalid(txd_id))
		return;
	tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
	if (tx_mz == NULL)
		return;
	ring_tx_descriptor_display(tx_mz, txd_id);
}

void
fwd_lcores_config_display(void)
{
	lcoreid_t lc_id;

	printf("List of forwarding lcores:");
	for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++)
		printf(" %2u", fwd_lcores_cpuids[lc_id]);
	printf("\n");
}
void
rxtx_config_display(void)
{
	printf("  %s packet forwarding%s - CRC stripping %s - "
	       "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
	       retry_enabled == 0 ? "" : " with retry",
	       rx_mode.hw_strip_crc ? "enabled" : "disabled",
	       nb_pkt_per_burst);

	if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine)
		printf("  packet len=%u - nb packet segments=%d\n",
		       (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);

	struct rte_eth_rxconf *rx_conf = &ports[0].rx_conf;
	struct rte_eth_txconf *tx_conf = &ports[0].tx_conf;

	printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
	       nb_fwd_lcores, nb_fwd_ports);
	printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
	       nb_rxq, nb_rxd, rx_conf->rx_free_thresh);
	printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       rx_conf->rx_thresh.pthresh, rx_conf->rx_thresh.hthresh,
	       rx_conf->rx_thresh.wthresh);
	printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
	       nb_txq, nb_txd, tx_conf->tx_free_thresh);
	printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
	       tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh,
	       tx_conf->tx_thresh.wthresh);
	printf("  TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n",
	       tx_conf->tx_rs_thresh, tx_conf->txq_flags);
}
void
port_rss_reta_info(portid_t port_id,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t nb_entries)
{
	uint16_t i, idx, shift;
	int ret;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries);
	if (ret != 0) {
		printf("Failed to get RSS RETA info, return code = %d\n", ret);
		return;
	}

	for (i = 0; i < nb_entries; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (!(reta_conf[idx].mask & (1ULL << shift)))
			continue;
		printf("RSS RETA configuration: hash index=%u, queue=%u\n",
		       i, reta_conf[idx].reta[shift]);
	}
}
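/*
 * Worked example of the RETA indexing above (illustrative only): with
 * RTE_RETA_GROUP_SIZE = 64, entry i = 130 lives in group idx = 130 / 64
 * = 2 at shift = 130 % 64 = 2, and is printed only when bit 2 of
 * reta_conf[2].mask was set by the caller.
 */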
/*
 * Displays the RSS hash functions of a port, and, optionally, the RSS hash
 * key of the port.
 */
void
port_rss_hash_conf_show(portid_t port_id, char rss_info[], int show_rss_key)
{
	struct rte_eth_rss_conf rss_conf;
	uint8_t rss_key[RSS_HASH_KEY_LENGTH];
	uint64_t rss_hf;
	uint8_t i;
	int diag;
	struct rte_eth_dev_info dev_info;
	uint8_t hash_key_size;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	memset(&dev_info, 0, sizeof(dev_info));
	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.hash_key_size > 0 &&
			dev_info.hash_key_size <= sizeof(rss_key))
		hash_key_size = dev_info.hash_key_size;
	else {
		printf("dev_info did not provide a valid hash key size\n");
		return;
	}

	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_info, rss_type_table[i].str))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}

	/* Get RSS hash key if asked to display it */
	rss_conf.rss_key = (show_rss_key) ? rss_key : NULL;
	rss_conf.rss_key_len = hash_key_size;
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag != 0) {
		switch (diag) {
		case -ENODEV:
			printf("port index %d invalid\n", port_id);
			break;
		case -ENOTSUP:
			printf("operation not supported by device\n");
			break;
		default:
			printf("operation failed - diag=%d\n", diag);
			break;
		}
		return;
	}
	rss_hf = rss_conf.rss_hf;
	if (rss_hf == 0) {
		printf("RSS disabled\n");
		return;
	}
	printf("RSS functions:\n ");
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (rss_hf & rss_type_table[i].rss_type)
			printf("%s ", rss_type_table[i].str);
	}
	printf("\n");
	if (!show_rss_key)
		return;
	printf("RSS key:\n");
	for (i = 0; i < hash_key_size; i++)
		printf("%02X", rss_key[i]);
	printf("\n");
}

void
port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key,
			 unsigned int hash_key_len)
{
	struct rte_eth_rss_conf rss_conf;
	int diag;
	unsigned int i;

	rss_conf.rss_key = NULL;
	rss_conf.rss_key_len = hash_key_len;
	rss_conf.rss_hf = 0;
	for (i = 0; i < RTE_DIM(rss_type_table); i++) {
		if (!strcmp(rss_type_table[i].str, rss_type))
			rss_conf.rss_hf = rss_type_table[i].rss_type;
	}
	diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);
	if (diag == 0) {
		rss_conf.rss_key = hash_key;
		diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf);
	}
	if (diag == 0)
		return;

	switch (diag) {
	case -ENODEV:
		printf("port index %d invalid\n", port_id);
		break;
	case -ENOTSUP:
		printf("operation not supported by device\n");
		break;
	default:
		printf("operation failed - diag=%d\n", diag);
		break;
	}
}
/*
 * Setup forwarding configuration for each logical core.
 */
static void
setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
{
	streamid_t nb_fs_per_lcore;
	streamid_t nb_fs;
	streamid_t sm_id;
	lcoreid_t  nb_extra;
	lcoreid_t  nb_fc;
	lcoreid_t  nb_lc;
	lcoreid_t  lc_id;

	nb_fs = cfg->nb_fwd_streams;
	nb_fc = cfg->nb_fwd_lcores;
	if (nb_fs <= nb_fc) {
		nb_fs_per_lcore = 1;
		nb_extra = 0;
	} else {
		nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
		nb_extra = (lcoreid_t) (nb_fs % nb_fc);
	}

	nb_lc = (lcoreid_t) (nb_fc - nb_extra);
	sm_id = 0;
	for (lc_id = 0; lc_id < nb_lc; lc_id++) {
		fwd_lcores[lc_id]->stream_idx = sm_id;
		fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}

	/*
	 * Assign extra remaining streams, if any.
	 */
	nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
	for (lc_id = 0; lc_id < nb_extra; lc_id++) {
		fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
		fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
		sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
	}
}
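/*
 * Worked example of the distribution above (illustrative only): with
 * nb_fs = 7 streams and nb_fc = 3 lcores, nb_fs_per_lcore = 2 and
 * nb_extra = 1, so the first nb_lc = 2 lcores take 2 streams each
 * (stream_idx 0 and 2) and the remaining lcore takes 3 (stream_idx 4).
 */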
static void
simple_fwd_config_setup(void)
{
	portid_t i;
	portid_t j;
	portid_t inc = 2;

	if (port_topology == PORT_TOPOLOGY_CHAINED ||
	    port_topology == PORT_TOPOLOGY_LOOP) {
		inc = 1;
	} else if (nb_fwd_ports % 2) {
		printf("\nWarning! Cannot handle an odd number of ports "
		       "with the current port topology. Configuration "
		       "must be changed to have an even number of ports, "
		       "or relaunch application with "
		       "--port-topology=chained\n\n");
	}

	cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) cur_fwd_config.nb_fwd_ports;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	/*
	 * In the simple forwarding test, the number of forwarding cores
	 * must be lower or equal to the number of forwarding ports.
	 */
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t) cur_fwd_config.nb_fwd_ports;
	setup_fwd_config_of_each_lcore(&cur_fwd_config);

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
		if (port_topology != PORT_TOPOLOGY_LOOP)
			j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
		else
			j = i;
		fwd_streams[i]->rx_port   = fwd_ports_ids[i];
		fwd_streams[i]->rx_queue  = 0;
		fwd_streams[i]->tx_port   = fwd_ports_ids[j];
		fwd_streams[i]->tx_queue  = 0;
		fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port;
		fwd_streams[i]->retry_enabled = retry_enabled;

		if (port_topology == PORT_TOPOLOGY_PAIRED) {
			fwd_streams[j]->rx_port   = fwd_ports_ids[j];
			fwd_streams[j]->rx_queue  = 0;
			fwd_streams[j]->tx_port   = fwd_ports_ids[i];
			fwd_streams[j]->tx_queue  = 0;
			fwd_streams[j]->peer_addr = fwd_streams[j]->tx_port;
			fwd_streams[j]->retry_enabled = retry_enabled;
		}
	}
}
/*
 * For the RSS forwarding test, all streams are distributed over the lcores.
 * Each stream is composed of a RX queue to poll on a RX port for input
 * messages, associated with a TX queue of a TX port where to send forwarded
 * packets. All packets received on the RX queue of index "RxQj" of the RX
 * port "RxPi" are sent on the TX queue "TxQl" of the TX port "TxPk" according
 * to the two following rules:
 *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
 *    - TxQl = RxQj
 */
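/*
 * For example (illustrative only), with 2 ports and 2 queues per port the
 * 4 streams are: (P0,Q0)->(P1,Q0), (P0,Q1)->(P1,Q1), (P1,Q0)->(P0,Q0) and
 * (P1,Q1)->(P0,Q1).
 */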
static void
rss_fwd_config_setup(void)
{
	portid_t   rxp;
	portid_t   txp;
	queueid_t  rxq;
	queueid_t  nb_q;
	streamid_t sm_id;

	nb_q = nb_rxq;
	if (nb_q > nb_txq)
		nb_q = nb_txq;
	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);

	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;

	/* reinitialize forwarding streams */
	init_fwd_streams();

	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		struct fwd_stream *fs;

		fs = fwd_streams[sm_id];

		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		/*
		 * if we are in loopback, simply send stuff out through the
		 * ingress port
		 */
		if (port_topology == PORT_TOPOLOGY_LOOP)
			txp = rxp;

		fs->rx_port = fwd_ports_ids[rxp];
		fs->rx_queue = rxq;
		fs->tx_port = fwd_ports_ids[txp];
		fs->tx_queue = rxq;
		fs->peer_addr = fs->tx_port;
		fs->retry_enabled = retry_enabled;
		rxq = (queueid_t) (rxq + 1);
		if (rxq < nb_q)
			continue;
		/*
		 * rxq == nb_q:
		 * Restart from RX queue 0 on next RX port
		 */
		rxq = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
	}
}
/**
 * For the DCB forwarding test, each core is assigned on each traffic class.
 *
 * Each core is assigned a multi-stream, each stream being composed of
 * a RX queue to poll on a RX port for input messages, associated with
 * a TX queue of a TX port where to send forwarded packets. All RX and
 * TX queues are mapping to the same traffic class.
 * If VMDQ and DCB co-exist, each traffic class on different POOLs share
 * the same core.
 */
static void
dcb_fwd_config_setup(void)
{
	struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info;
	portid_t txp, rxp = 0;
	queueid_t txq, rxq = 0;
	lcoreid_t  lc_id;
	uint16_t nb_rx_queue, nb_tx_queue;
	uint16_t i, j, k, sm_id = 0;
	uint8_t tc = 0;

	cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);

	/* reinitialize forwarding streams */
	init_fwd_streams();
	sm_id = 0;
	txp = 1;
	/* get the dcb info on the first RX and TX ports */
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
	(void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);

	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		fwd_lcores[lc_id]->stream_nb = 0;
		fwd_lcores[lc_id]->stream_idx = sm_id;
		for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) {
			/* if the nb_queue is zero, means this tc is
			 * not enabled on the POOL
			 */
			if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0)
				break;
			k = fwd_lcores[lc_id]->stream_nb +
				fwd_lcores[lc_id]->stream_idx;
			rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base;
			txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base;
			nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
			nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue;
			for (j = 0; j < nb_rx_queue; j++) {
				struct fwd_stream *fs;

				fs = fwd_streams[k + j];
				fs->rx_port = fwd_ports_ids[rxp];
				fs->rx_queue = rxq + j;
				fs->tx_port = fwd_ports_ids[txp];
				fs->tx_queue = txq + j % nb_tx_queue;
				fs->peer_addr = fs->tx_port;
				fs->retry_enabled = retry_enabled;
			}
			fwd_lcores[lc_id]->stream_nb +=
				rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue;
		}
		sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb);

		tc++;
		if (tc < rxp_dcb_info.nb_tcs)
			continue;
		/* Restart from TC 0 on next RX port */
		tc = 0;
		if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
			rxp = (portid_t)
				(rxp + ((nb_ports >> 1) / nb_fwd_ports));
		else
			rxp = (portid_t) (rxp + 1);
		if (rxp >= nb_fwd_ports)
			return;
		/* get the dcb information on next RX and TX ports */
		if ((rxp & 0x1) == 0)
			txp = (portid_t) (rxp + 1);
		else
			txp = (portid_t) (rxp - 1);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info);
		rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info);
	}
}
static void
icmp_echo_config_setup(void)
{
	portid_t  rxp;
	queueid_t rxq;
	lcoreid_t lc_id;
	uint16_t  sm_id;

	if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t)
			(nb_txq * nb_fwd_ports);
	else
		cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
	cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
	cur_fwd_config.nb_fwd_streams =
		(streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports);
	if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores)
		cur_fwd_config.nb_fwd_lcores =
			(lcoreid_t)cur_fwd_config.nb_fwd_streams;
	if (verbose_level > 0) {
		printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n",
		       __FUNCTION__,
		       cur_fwd_config.nb_fwd_lcores,
		       cur_fwd_config.nb_fwd_ports,
		       cur_fwd_config.nb_fwd_streams);
	}

	/* reinitialize forwarding streams */
	init_fwd_streams();
	setup_fwd_config_of_each_lcore(&cur_fwd_config);
	rxp = 0; rxq = 0;
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
		if (verbose_level > 0)
			printf("  core=%d: \n", lc_id);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			struct fwd_stream *fs;

			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			fs->rx_port = fwd_ports_ids[rxp];
			fs->rx_queue = rxq;
			fs->tx_port = fs->rx_port;
			fs->tx_queue = rxq;
			fs->peer_addr = fs->tx_port;
			fs->retry_enabled = retry_enabled;
			if (verbose_level > 0)
				printf("  stream=%d port=%d rxq=%d txq=%d\n",
				       sm_id, fs->rx_port, fs->rx_queue,
				       fs->tx_queue);
			rxq = (queueid_t) (rxq + 1);
			if (rxq == nb_rxq) {
				rxq = 0;
				rxp = (portid_t) (rxp + 1);
			}
		}
	}
}

void
fwd_config_setup(void)
{
	cur_fwd_config.fwd_eng = cur_fwd_eng;
	if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) {
		icmp_echo_config_setup();
		return;
	}
	if ((nb_rxq > 1) && (nb_txq > 1)) {
		if (dcb_config)
			dcb_fwd_config_setup();
		else
			rss_fwd_config_setup();
	}
	else
		simple_fwd_config_setup();
}
static void
pkt_fwd_config_display(struct fwd_config *cfg)
{
	struct fwd_stream *fs;
	lcoreid_t  lc_id;
	streamid_t sm_id;

	printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - "
		"NUMA support %s, MP over anonymous pages %s\n",
		cfg->fwd_eng->fwd_mode_name,
		retry_enabled == 0 ? "" : " with retry",
		cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
		numa_support == 1 ? "enabled" : "disabled",
		mp_anon != 0 ? "enabled" : "disabled");

	if (retry_enabled)
		printf("TX retry num: %u, delay between TX retries: %uus\n",
			burst_tx_retry_num, burst_tx_delay_time);
	for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
		printf("Logical Core %u (socket %u) forwards packets on "
		       "%d streams:",
		       fwd_lcores_cpuids[lc_id],
		       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
		       fwd_lcores[lc_id]->stream_nb);
		for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
			fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
			printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
			       "P=%d/Q=%d (socket %u) ",
			       fs->rx_port, fs->rx_queue,
			       ports[fs->rx_port].socket_id,
			       fs->tx_port, fs->tx_queue,
			       ports[fs->tx_port].socket_id);
			print_ethaddr("peer=",
				      &peer_eth_addrs[fs->peer_addr]);
		}
		printf("\n");
	}
	printf("\n");
}
int
set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
{
	unsigned int i;
	unsigned int lcore_cpuid;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_lc; i++) {
		lcore_cpuid = lcorelist[i];
		if (! rte_lcore_is_enabled(lcore_cpuid)) {
			printf("lcore %u not enabled\n", lcore_cpuid);
			return -1;
		}
		if (lcore_cpuid == rte_get_master_lcore()) {
			printf("lcore %u cannot be masked on for running "
			       "packet forwarding, which is the master lcore "
			       "and reserved for command line parsing only\n",
			       lcore_cpuid);
			return -1;
		}
		if (record_now)
			fwd_lcores_cpuids[i] = lcore_cpuid;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_lcores = (lcoreid_t) nb_lc;
	if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
		printf("previous number of forwarding cores %u - changed to "
		       "number of configured cores %u\n",
		       (unsigned int) nb_fwd_lcores, nb_lc);
		nb_fwd_lcores = (lcoreid_t) nb_lc;
	}

	return 0;
}

int
set_fwd_lcores_mask(uint64_t lcoremask)
{
	unsigned int lcorelist[64];
	unsigned int nb_lc;
	unsigned int i;

	if (lcoremask == 0) {
		printf("Invalid NULL mask of cores\n");
		return -1;
	}
	nb_lc = 0;
	for (i = 0; i < 64; i++) {
		if (! ((uint64_t)(1ULL << i) & lcoremask))
			continue;
		lcorelist[nb_lc++] = i;
	}
	return set_fwd_lcores_list(lcorelist, nb_lc);
}

void
set_fwd_lcores_number(uint16_t nb_lc)
{
	if (nb_lc > nb_cfg_lcores) {
		printf("nb fwd cores %u > %u (max. number of configured "
		       "lcores) - ignored\n",
		       (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
		return;
	}
	nb_fwd_lcores = (lcoreid_t) nb_lc;
	printf("Number of forwarding cores set to %u\n",
	       (unsigned int) nb_fwd_lcores);
}
void
set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
{
	unsigned int i;
	portid_t port_id;
	int record_now;

	record_now = 0;
 again:
	for (i = 0; i < nb_pt; i++) {
		port_id = (portid_t) portlist[i];
		if (port_id_is_invalid(port_id, ENABLED_WARN))
			return;
		if (record_now)
			fwd_ports_ids[i] = port_id;
	}
	if (record_now == 0) {
		record_now = 1;
		goto again;
	}
	nb_cfg_ports = (portid_t) nb_pt;
	if (nb_fwd_ports != (portid_t) nb_pt) {
		printf("previous number of forwarding ports %u - changed to "
		       "number of configured ports %u\n",
		       (unsigned int) nb_fwd_ports, nb_pt);
		nb_fwd_ports = (portid_t) nb_pt;
	}
}

void
set_fwd_ports_mask(uint64_t portmask)
{
	unsigned int portlist[64];
	unsigned int nb_pt;
	unsigned int i;

	if (portmask == 0) {
		printf("Invalid NULL mask of ports\n");
		return;
	}
	nb_pt = 0;
	for (i = 0; i < (unsigned)RTE_MIN(64, RTE_MAX_ETHPORTS); i++) {
		if (! ((uint64_t)(1ULL << i) & portmask))
			continue;
		portlist[nb_pt++] = i;
	}
	set_fwd_ports_list(portlist, nb_pt);
}

void
set_fwd_ports_number(uint16_t nb_pt)
{
	if (nb_pt > nb_cfg_ports) {
		printf("nb fwd ports %u > %u (number of configured "
		       "ports) - ignored\n",
		       (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
		return;
	}
	nb_fwd_ports = (portid_t) nb_pt;
	printf("Number of forwarding ports set to %u\n",
	       (unsigned int) nb_fwd_ports);
}

int
port_is_forwarding(portid_t port_id)
{
	unsigned int i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return -1;

	for (i = 0; i < nb_fwd_ports; i++) {
		if (fwd_ports_ids[i] == port_id)
			return 1;
	}

	return 0;
}
void
set_nb_pkt_per_burst(uint16_t nb)
{
	if (nb > MAX_PKT_BURST) {
		printf("nb pkt per burst: %u > %u (maximum packet per burst) "
		       " ignored\n",
		       (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
		return;
	}
	nb_pkt_per_burst = nb;
	printf("Number of packets per burst set to %u\n",
	       (unsigned int) nb_pkt_per_burst);
}

static const char *
tx_split_get_name(enum tx_pkt_split split)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (tx_split_name[i].split == split)
			return tx_split_name[i].name;
	}
	return NULL;
}

void
set_tx_pkt_split(const char *name)
{
	uint32_t i;

	for (i = 0; i != RTE_DIM(tx_split_name); i++) {
		if (strcmp(tx_split_name[i].name, name) == 0) {
			tx_pkt_split = tx_split_name[i].split;
			return;
		}
	}
	printf("unknown value: \"%s\"\n", name);
}

void
show_tx_pkt_segments(void)
{
	uint32_t i, n;
	const char *split;

	n = tx_pkt_nb_segs;
	split = tx_split_get_name(tx_pkt_split);

	printf("Number of segments: %u\n", n);
	printf("Segment sizes: ");
	for (i = 0; i != n - 1; i++)
		printf("%hu,", tx_pkt_seg_lengths[i]);
	printf("%hu\n", tx_pkt_seg_lengths[i]);
	printf("Split packet: %s\n", split);
}
void
set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
{
	uint16_t tx_pkt_len;
	unsigned i;

	if (nb_segs >= (unsigned) nb_txd) {
		printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
		       nb_segs, (unsigned int) nb_txd);
		return;
	}

	/*
	 * Check that each segment length is not larger than the mbuf
	 * data size.
	 * Check also that the total packet length is greater than or
	 * equal to the size of an empty UDP/IP packet
	 * (sizeof(struct ether_hdr) + 20 + 8).
	 */
	tx_pkt_len = 0;
	for (i = 0; i < nb_segs; i++) {
		if (seg_lengths[i] > (unsigned) mbuf_data_size) {
			printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
			       i, seg_lengths[i], (unsigned) mbuf_data_size);
			return;
		}
		tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
	}
	if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
		printf("total packet length=%u < %d - give up\n",
		       (unsigned) tx_pkt_len,
		       (int)(sizeof(struct ether_hdr) + 20 + 8));
		return;
	}

	for (i = 0; i < nb_segs; i++)
		tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];

	tx_pkt_length  = tx_pkt_len;
	tx_pkt_nb_segs = (uint8_t) nb_segs;
}
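/*
 * Hypothetical usage sketch (not part of the original file): with
 * mbuf_data_size = 2048, the call below configures two 64-byte segments
 * for a 128-byte TX packet, which passes the minimum length check since
 * sizeof(struct ether_hdr) + 20 + 8 = 42.
 *
 *	unsigned lengths[] = { 64, 64 };
 *	set_tx_pkt_segments(lengths, 2);
 */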
void
list_pkt_forwarding_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned int i = 0;

	if (strlen (fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
					sizeof(fwd_modes) - strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}

	printf("Supported forwarding modes: %s\n", fwd_modes);
}
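/*
 * For example (engine names illustrative), with engines "io", "mac" and
 * "rxonly" the loop builds "io|mac|rxonly|" and the final assignment
 * overwrites the trailing separator with '\0', yielding "io|mac|rxonly".
 */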
void
list_pkt_forwarding_retry_modes(void)
{
	static char fwd_modes[128] = "";
	const char *separator = "|";
	struct fwd_engine *fwd_eng;
	unsigned int i = 0;

	if (strlen(fwd_modes) == 0) {
		while ((fwd_eng = fwd_engines[i++]) != NULL) {
			if (fwd_eng == &rx_only_engine)
				continue;
			strncat(fwd_modes, fwd_eng->fwd_mode_name,
					sizeof(fwd_modes) -
					strlen(fwd_modes) - 1);
			strncat(fwd_modes, separator,
					sizeof(fwd_modes) -
					strlen(fwd_modes) - 1);
		}
		fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0';
	}
}

void
set_pkt_forwarding_mode(const char *fwd_mode_name)
{
	struct fwd_engine *fwd_eng;
	unsigned i;

	i = 0;
	while ((fwd_eng = fwd_engines[i]) != NULL) {
		if (! strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) {
			printf("Set %s packet forwarding mode%s\n",
			       fwd_mode_name,
			       retry_enabled == 0 ? "" : " with retry");
			cur_fwd_eng = fwd_eng;
			return;
		}
		i++;
	}
	printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
}

void
set_verbose_level(uint16_t vb_level)
{
	printf("Change verbose level from %u to %u\n",
	       (unsigned int) verbose_level, (unsigned int) vb_level);
	verbose_level = vb_level;
}
void
vlan_extend_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_extend_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

void
rx_vlan_strip_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_strip_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

void
rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on);
	if (diag < 0)
		printf("rx_vlan_strip_set_on_queue(port_id=%d, queue_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, queue_id, on, diag);
}

void
rx_vlan_filter_set(portid_t port_id, int on)
{
	int diag;
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);

	if (on)
		vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
	else
		vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD;

	diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload);
	if (diag < 0)
		printf("rx_vlan_filter_set(port_id=%d, on=%d) failed "
		       "diag=%d\n", port_id, on, diag);
}

int
rx_vft_set(portid_t port_id, uint16_t vlan_id, int on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	if (vlan_id_is_invalid(vlan_id))
		return 1;
	diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
	if (diag == 0)
		return 0;
	printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_id, on, diag);
	return -1;
}

void
rx_vlan_all_filter_set(portid_t port_id, int on)
{
	uint16_t vlan_id;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	for (vlan_id = 0; vlan_id < 4096; vlan_id++) {
		if (rx_vft_set(port_id, vlan_id, on))
			break;
	}
}

void
vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id);
	if (diag == 0)
		return;

	printf("tx_vlan_tpid_set(port_id=%d, vlan_type=%d, tpid=%d) failed "
	       "diag=%d\n",
	       port_id, vlan_type, tp_id, diag);
}
void
tx_vlan_set(portid_t port_id, uint16_t vlan_id)
{
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) {
		printf("Error, as QinQ has been enabled.\n");
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_VLAN;
	ports[port_id].tx_vlan_id = vlan_id;
}

void
tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer)
{
	int vlan_offload;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	if (vlan_id_is_invalid(vlan_id_outer))
		return;

	vlan_offload = rte_eth_dev_get_vlan_offload(port_id);
	if (!(vlan_offload & ETH_VLAN_EXTEND_OFFLOAD)) {
		printf("Error, as QinQ hasn't been enabled.\n");
		return;
	}

	tx_vlan_reset(port_id);
	ports[port_id].tx_ol_flags |= TESTPMD_TX_OFFLOAD_INSERT_QINQ;
	ports[port_id].tx_vlan_id = vlan_id;
	ports[port_id].tx_vlan_id_outer = vlan_id_outer;
}

void
tx_vlan_reset(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ports[port_id].tx_ol_flags &= ~(TESTPMD_TX_OFFLOAD_INSERT_VLAN |
					TESTPMD_TX_OFFLOAD_INSERT_QINQ);
	ports[port_id].tx_vlan_id = 0;
	ports[port_id].tx_vlan_id_outer = 0;
}

void
tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on);
}
void
set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value)
{
	uint16_t i;
	uint8_t existing_mapping_found = 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id)))
		return;

	if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) {
		printf("map_value not in required range 0..%d\n",
		       RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
		return;
	}

	if (!is_rx) { /* It is a TX queue. */
		for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
			if ((tx_queue_stats_mappings[i].port_id == port_id) &&
			    (tx_queue_stats_mappings[i].queue_id == queue_id)) {
				tx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id;
			tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value;
			nb_tx_queue_stats_mappings++;
		}
	} else { /* It is a RX queue. */
		for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
			if ((rx_queue_stats_mappings[i].port_id == port_id) &&
			    (rx_queue_stats_mappings[i].queue_id == queue_id)) {
				rx_queue_stats_mappings[i].stats_counter_id = map_value;
				existing_mapping_found = 1;
				break;
			}
		}
		if (!existing_mapping_found) { /* A new additional mapping... */
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id;
			rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value;
			nb_rx_queue_stats_mappings++;
		}
	}
}
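/*
 * Hypothetical usage sketch (not part of the original file):
 *
 *	set_qmap(0, 1, 3, 5);
 *
 * maps RX queue 3 of port 0 (is_rx = 1) to per-queue stats counter 5,
 * which must be below RTE_ETHDEV_QUEUE_STAT_CNTRS.
 */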
static inline void
print_fdir_mask(struct rte_eth_fdir_masks *mask)
{
	printf("\n    vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask));

	if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x,"
		       " tunnel_id: 0x%08x",
		       mask->mac_addr_byte_mask, mask->tunnel_type_mask,
		       rte_be_to_cpu_32(mask->tunnel_id_mask));
	else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x",
		       rte_be_to_cpu_32(mask->ipv4_mask.src_ip),
		       rte_be_to_cpu_32(mask->ipv4_mask.dst_ip));

		printf("\n    src_port: 0x%04x, dst_port: 0x%04x",
		       rte_be_to_cpu_16(mask->src_port_mask),
		       rte_be_to_cpu_16(mask->dst_port_mask));

		printf("\n    src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3]));

		printf("\n    dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x",
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]),
		       rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3]));
	}

	printf("\n");
}

static inline void
print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_flex_payload_cfg *cfg;
	uint32_t i, j;

	for (i = 0; i < flex_conf->nb_payloads; i++) {
		cfg = &flex_conf->flex_set[i];
		if (cfg->type == RTE_ETH_RAW_PAYLOAD)
			printf("\n    RAW: ");
		else if (cfg->type == RTE_ETH_L2_PAYLOAD)
			printf("\n    L2_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L3_PAYLOAD)
			printf("\n    L3_PAYLOAD: ");
		else if (cfg->type == RTE_ETH_L4_PAYLOAD)
			printf("\n    L4_PAYLOAD: ");
		else
			printf("\n    UNKNOWN PAYLOAD(%u): ", cfg->type);
		for (j = 0; j < num; j++)
			printf("  %-5u", cfg->src_offset[j]);
	}
	printf("\n");
}
static char *
flowtype_to_str(uint16_t flow_type)
{
	struct flow_type_info {
		char str[32];
		uint16_t ftype;
	};

	uint8_t i;
	static struct flow_type_info flowtype_str_table[] = {
		{"raw", RTE_ETH_FLOW_RAW},
		{"ipv4", RTE_ETH_FLOW_IPV4},
		{"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4},
		{"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP},
		{"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP},
		{"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP},
		{"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER},
		{"ipv6", RTE_ETH_FLOW_IPV6},
		{"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6},
		{"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP},
		{"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP},
		{"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP},
		{"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER},
		{"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD},
		{"port", RTE_ETH_FLOW_PORT},
		{"vxlan", RTE_ETH_FLOW_VXLAN},
		{"geneve", RTE_ETH_FLOW_GENEVE},
		{"nvgre", RTE_ETH_FLOW_NVGRE},
	};

	for (i = 0; i < RTE_DIM(flowtype_str_table); i++) {
		if (flowtype_str_table[i].ftype == flow_type)
			return flowtype_str_table[i].str;
	}

	return NULL;
}

static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
	struct rte_eth_fdir_flex_mask *mask;
	uint32_t i, j;
	char *p;

	for (i = 0; i < flex_conf->nb_flexmasks; i++) {
		mask = &flex_conf->flex_mask[i];
		p = flowtype_to_str(mask->flow_type);
		printf("\n    %s:\t", p ? p : "unknown");
		for (j = 0; j < num; j++)
			printf(" %02x", mask->mask[j]);
	}
	printf("\n");
}

static inline void
print_fdir_flow_type(uint32_t flow_types_mask)
{
	int i;
	char *p;

	for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) {
		if (!(flow_types_mask & (1 << i)))
			continue;
		p = flowtype_to_str(i);
		if (p)
			printf(" %s", p);
		else
			printf(" unknown");
	}
	printf("\n");
}
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir_stats fdir_stat;
	struct rte_eth_fdir_info fdir_info;
	int ret;

	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
	if (ret < 0) {
		printf("\n FDIR is not supported on port %-2d\n",
			port_id);
		return;
	}

	memset(&fdir_info, 0, sizeof(fdir_info));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			RTE_ETH_FILTER_INFO, &fdir_info);
	memset(&fdir_stat, 0, sizeof(fdir_stat));
	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
			RTE_ETH_FILTER_STATS, &fdir_stat);
	printf("\n %s FDIR info for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);
	printf(" MODE: ");
	if (fdir_info.mode == RTE_FDIR_MODE_PERFECT)
		printf(" PERFECT\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN)
		printf(" PERFECT-MAC-VLAN\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
		printf(" PERFECT-TUNNEL\n");
	else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE)
		printf(" SIGNATURE\n");
	else
		printf(" DISABLE\n");
	if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN
		&& fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) {
		printf(" SUPPORTED FLOW TYPE: ");
		print_fdir_flow_type(fdir_info.flow_types_mask[0]);
	}
	printf(" FLEX PAYLOAD INFO:\n");
	printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n"
	       " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n"
	       " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n",
		fdir_info.max_flexpayload, fdir_info.flex_payload_limit,
		fdir_info.flex_payload_unit,
		fdir_info.max_flex_payload_segment_num,
		fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num);
	printf(" MASK: ");
	print_fdir_mask(&fdir_info.mask);
	if (fdir_info.flex_conf.nb_payloads > 0) {
		printf(" FLEX PAYLOAD SRC OFFSET:");
		print_fdir_flex_payload(&fdir_info.flex_conf,
				fdir_info.max_flexpayload);
	}
	if (fdir_info.flex_conf.nb_flexmasks > 0) {
		printf(" FLEX MASK CFG:");
		print_fdir_flex_mask(&fdir_info.flex_conf,
				fdir_info.max_flexpayload);
	}
	printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n",
	       fdir_stat.guarant_cnt, fdir_stat.best_cnt);
	printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n",
	       fdir_info.guarant_spc, fdir_info.best_spc);
	printf(" collision: %-10"PRIu32" free: %"PRIu32"\n"
	       " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n"
	       " add: %-10"PRIu64" remove: %"PRIu64"\n"
	       " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n",
	       fdir_stat.collision, fdir_stat.free,
	       fdir_stat.maxhash, fdir_stat.maxlen,
	       fdir_stat.add, fdir_stat.remove,
	       fdir_stat.f_add, fdir_stat.f_remove);
	printf(" %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}
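
/*
 * Record a flex mask in the per-port flow director configuration.
 * An existing entry for the same flow type is overwritten; otherwise the
 * mask is appended to the flex mask table, space permitting.
 */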
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_FLOW_MAX; i++) {
		if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_FLOW_MAX) {
		if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) {
			idx = flex_conf->nb_flexmasks;
			flex_conf->nb_flexmasks++;
		} else {
			printf("The flex mask table is full. Cannot set flex"
				" mask for flow_type(%u).\n", cfg->flow_type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_mask[idx],
			 cfg,
			 sizeof(struct rte_eth_fdir_flex_mask));
}
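
/*
 * Record a flex payload configuration in the per-port flow director
 * configuration, overwriting any existing entry of the same payload type.
 */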
void
fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg)
{
	struct rte_port *port;
	struct rte_eth_fdir_flex_conf *flex_conf;
	int i, idx = 0;

	port = &ports[port_id];
	flex_conf = &port->dev_conf.fdir_conf.flex_conf;
	for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) {
		if (cfg->type == flex_conf->flex_set[i].type) {
			idx = i;
			break;
		}
	}
	if (i >= RTE_ETH_PAYLOAD_MAX) {
		if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) {
			idx = flex_conf->nb_payloads;
			flex_conf->nb_payloads++;
		} else {
			printf("The flex payload table is full. Cannot set"
				" flex payload for type(%u).\n", cfg->type);
			return;
		}
	}
	(void)rte_memcpy(&flex_conf->flex_set[idx],
			 cfg,
			 sizeof(struct rte_eth_flex_payload_cfg));
}
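
/*
 * Enable or disable RX (is_rx != 0) or TX traffic for a given VF of a port.
 */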
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (is_rx)
		diag = rte_eth_dev_set_vf_rx(port_id, vf, on);
	else
		diag = rte_eth_dev_set_vf_tx(port_id, vf, on);
	if (diag == 0)
		return;
	if (is_rx)
		printf("rte_eth_dev_set_vf_rx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
	else
		printf("rte_eth_dev_set_vf_tx for port_id=%d failed "
		       "diag=%d\n", port_id, diag);
}
void
set_vf_rx_vlan(portid_t port_id, uint16_t vlan_id, uint64_t vf_mask, uint8_t on)
{
	int diag;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;
	if (vlan_id_is_invalid(vlan_id))
		return;
	diag = rte_eth_dev_set_vf_vlan_filter(port_id, vlan_id, vf_mask, on);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_vf_vlan_filter for port_id=%d failed "
	       "diag=%d\n", port_id, diag);
}
int
set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate)
{
	int diag;
	struct rte_eth_link link;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value %u: bigger than link speed %u\n",
			rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n",
		port_id, diag);
	return diag;
}
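
/*
 * Limit the aggregate transmit rate of the queues of a VF selected by the
 * queue mask "q_msk".  Same unit conventions as set_queue_rate_limit().
 */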
int
set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk)
{
	int diag;
	struct rte_eth_link link;

	if (q_msk == 0)
		return 0;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 1;
	rte_eth_link_get_nowait(port_id, &link);
	if (rate > link.link_speed) {
		printf("Invalid rate value %u: bigger than link speed %u\n",
			rate, link.link_speed);
		return 1;
	}
	diag = rte_eth_set_vf_rate_limit(port_id, vf, rate, q_msk);
	if (diag == 0)
		return diag;
	printf("rte_eth_set_vf_rate_limit for port_id=%d failed diag=%d\n",
		port_id, diag);
	return diag;
}
/*
 * Functions to manage the set of filtered Multicast MAC addresses.
 *
 * A pool of filtered multicast MAC addresses is associated with each port.
 * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses.
 * The address of the pool and the number of valid multicast MAC addresses
 * recorded in the pool are stored in the fields "mc_addr_pool" and
 * "mc_addr_nb" of the "rte_port" data structure.
 *
 * The function "rte_eth_dev_set_mc_addr_list" of the PMD API requires
 * that it be supplied a contiguous array of multicast MAC addresses.
 * To comply with this constraint, the set of multicast addresses recorded
 * in the pool is kept compacted at the beginning of the pool.
 * Hence, when a multicast address is removed from the pool, all following
 * addresses, if any, are copied back to keep the set contiguous.
 */
#define MCAST_POOL_INC 32
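
/*
 * Example of the compaction invariant (a sketch, not executed code):
 * with a pool recording three addresses [A, B, C] (mc_addr_nb = 3),
 * removing B (addr_idx = 1) memmove()s C down one slot, leaving [A, C]
 * with mc_addr_nb = 2.  Removing the last remaining address frees the
 * pool entirely.  The set therefore always forms the contiguous array
 * expected by rte_eth_dev_set_mc_addr_list().
 */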
static int
mcast_addr_pool_extend(struct rte_port *port)
{
	struct ether_addr *mc_pool;
	size_t mc_pool_size;

	/*
	 * If a free entry is available at the end of the pool, just
	 * increment the number of recorded multicast addresses.
	 */
	if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) {
		port->mc_addr_nb++;
		return 0;
	}

	/*
	 * [Re]allocate a pool with MCAST_POOL_INC more entries.
	 * The previous test guarantees that port->mc_addr_nb is a multiple
	 * of MCAST_POOL_INC.
	 */
	mc_pool_size = sizeof(struct ether_addr) * (port->mc_addr_nb +
						    MCAST_POOL_INC);
	mc_pool = (struct ether_addr *) realloc(port->mc_addr_pool,
						mc_pool_size);
	if (mc_pool == NULL) {
		printf("allocation of pool of %u multicast addresses failed\n",
		       port->mc_addr_nb + MCAST_POOL_INC);
		return -ENOMEM;
	}

	port->mc_addr_pool = mc_pool;
	port->mc_addr_nb++;
	return 0;
}
static void
mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
{
	port->mc_addr_nb--;
	if (addr_idx == port->mc_addr_nb) {
		/* No need to recompact the set of multicast addresses. */
		if (port->mc_addr_nb == 0) {
			/* Free the pool of multicast addresses. */
			free(port->mc_addr_pool);
			port->mc_addr_pool = NULL;
		}
		return;
	}
	memmove(&port->mc_addr_pool[addr_idx],
		&port->mc_addr_pool[addr_idx + 1],
		sizeof(struct ether_addr) * (port->mc_addr_nb - addr_idx));
}
static void
eth_port_multicast_addr_list_set(uint8_t port_id)
{
	struct rte_port *port;
	int diag;

	port = &ports[port_id];
	diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool,
					    port->mc_addr_nb);
	if (diag == 0)
		return;
	printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n",
	       port_id, port->mc_addr_nb, -diag);
}
void
mcast_addr_add(uint8_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Check that the added multicast MAC address is not already recorded
	 * in the pool of multicast addresses.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) {
			printf("multicast address already filtered by port\n");
			return;
		}
	}

	if (mcast_addr_pool_extend(port) != 0)
		return;
	/* The loop above left "i" at the first free slot in the pool. */
	ether_addr_copy(mc_addr, &port->mc_addr_pool[i]);
	eth_port_multicast_addr_list_set(port_id);
}
void
mcast_addr_remove(uint8_t port_id, struct ether_addr *mc_addr)
{
	struct rte_port *port;
	uint32_t i;

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	port = &ports[port_id];

	/*
	 * Search the pool of multicast MAC addresses for the removed address.
	 */
	for (i = 0; i < port->mc_addr_nb; i++) {
		if (is_same_ether_addr(mc_addr, &port->mc_addr_pool[i]))
			break;
	}
	if (i == port->mc_addr_nb) {
		printf("multicast address not filtered by port %d\n", port_id);
		return;
	}

	mcast_addr_pool_remove(port, i);
	eth_port_multicast_addr_list_set(port_id);
}
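
/*
 * Typical call sequence from the command-line layer (a sketch assuming a
 * valid port 0; the address value is arbitrary):
 *
 *	struct ether_addr addr = {{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }};
 *
 *	mcast_addr_add(0, &addr);	// pool = [01:00:5e:00:00:01], nb = 1
 *	mcast_addr_remove(0, &addr);	// pool freed again, nb = 0
 */

/*
 * Display the DCB (Data Center Bridging) traffic class configuration of a
 * port: priority and bandwidth per TC, plus the RX/TX queue ranges mapped
 * to each TC.
 */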
void
port_dcb_info_display(uint8_t port_id)
{
	struct rte_eth_dcb_info dcb_info;
	uint16_t i;
	int ret;
	static const char *border = "================";

	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return;

	ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info);
	if (ret) {
		printf("\n Failed to get DCB info on port %-2d\n",
			port_id);
		return;
	}
	printf("\n %s DCB info for port %-2d %s\n", border, port_id, border);
	printf(" TC NUMBER: %d\n", dcb_info.nb_tcs);
	printf(" TC :       ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", i);
	printf("\n Priority : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.prio_tc[i]);
	printf("\n BW percent :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d%%", dcb_info.tc_bws[i]);
	printf("\n RXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base);
	printf("\n RXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue);
	printf("\n TXQ base : ");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base);
	printf("\n TXQ number :");
	for (i = 0; i < dcb_info.nb_tcs; i++)
		printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue);
	printf("\n");
}