/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_gro.h>
#include <rte_gso.h>

#include "testpmd.h"

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

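/*
 * Illustrative sketch (not part of the original testpmd code): how the IP_*
 * macros above combine into the version_ihl field of an IPv4 header. The
 * helper name and the hard-coded UDP protocol are assumptions made for this
 * example only.
 */
static inline void
example_build_ipv4_hdr(struct ipv4_hdr *ip, uint32_t src_addr,
		       uint32_t dst_addr, uint16_t payload_len)
{
	ip->version_ihl = IP_VHL_DEF; /* version 4, header of 5 * 32-bit words */
	ip->type_of_service = 0;
	ip->total_length = rte_cpu_to_be_16(sizeof(*ip) + payload_len);
	ip->packet_id = 0;
	ip->fragment_offset = 0;
	ip->time_to_live = IP_DEFTTL;
	ip->next_proto_id = IPPROTO_UDP;
	ip->src_addr = rte_cpu_to_be_32(src_addr);
	ip->dst_addr = rte_cpu_to_be_32(dst_addr);
	ip->hdr_checksum = 0;
	ip->hdr_checksum = rte_ipv4_cksum(ip); /* checksum over the header only */
}
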
#define GRE_KEY_PRESENT 0x2000
#define GRE_KEY_LEN     4
#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT

/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif

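/*
 * Illustrative sketch (not part of the original testpmd code): _htons()
 * expands to an integer constant expression, so it can be used in case
 * labels, which is how the parsing functions below use it. The helper name
 * is an assumption made for this example only.
 */
static inline int
example_ethertype_is_ip(uint16_t ethertype)
{
	switch (ethertype) {
	case _htons(ETHER_TYPE_IPv4):
	case _htons(ETHER_TYPE_IPv6):
		return 1;
	default:
		return 0;
	}
}
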
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;
	uint8_t gso_enable;
	uint16_t l2_len;
	uint16_t l3_len;
	uint16_t l4_len;
	uint8_t l4_proto;
	uint8_t is_tunnel;
	uint16_t outer_ethertype;
	uint16_t outer_l2_len;
	uint16_t outer_l3_len;
	uint8_t outer_l4_proto;
	uint16_t tso_segsz;
	uint16_t tunnel_tso_segsz;
	uint32_t pkt_len;
};

/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;
	uint16_t proto;
} __attribute__((__packed__));

static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}

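/*
 * Illustrative sketch (not part of the original testpmd code): the checksum
 * field must be cleared before calling the helper above, otherwise the stale
 * value would be folded into the new pseudo-header checksum. The helper name
 * is an assumption made for this example only.
 */
static inline void
example_fill_udp_cksum(struct ipv4_hdr *ipv4_hdr, struct udp_hdr *udp_hdr)
{
	udp_hdr->dgram_cksum = 0;
	udp_hdr->dgram_cksum = get_udptcp_checksum(ipv4_hdr, udp_hdr,
						   _htons(ETHER_TYPE_IPv4));
}
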
/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
	info->l4_proto = ipv4_hdr->next_proto_id;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct udp_hdr);
	else
		info->l4_len = 0;
}

/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = sizeof(struct ipv6_hdr);
	info->l4_proto = ipv6_hdr->proto;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct udp_hdr);
	else
		info->l4_len = 0;
}

/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with one optional
 * vlan header. The l4_len argument is only set in case of TCP (useful for
 * TSO).
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	info->l2_len = sizeof(struct ether_hdr);
	info->ethertype = eth_hdr->ether_type;

	if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		info->l2_len += sizeof(struct vlan_hdr);
		info->ethertype = vlan_hdr->eth_proto;
	}

	switch (info->ethertype) {
	case _htons(ETHER_TYPE_IPv4):
		ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + info->l2_len);
		parse_ipv4(ipv4_hdr, info);
		break;
	case _htons(ETHER_TYPE_IPv6):
		ipv6_hdr = (struct ipv6_hdr *)((char *)eth_hdr + info->l2_len);
		parse_ipv6(ipv6_hdr, info);
		break;
	default:
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
		break;
	}
}

/* Parse a vxlan header */
static void
parse_vxlan(struct udp_hdr *udp_hdr,
	    struct testpmd_offload_info *info,
	    uint32_t pkt_type)
{
	struct ether_hdr *eth_hdr;

	/* check udp destination port, 4789 is the default vxlan port
	 * (rfc7348) or that the rx offload flag is set (i40e only
	 * currently) */
	if (udp_hdr->dst_port != _htons(4789) &&
	    RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;

	eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
		sizeof(struct udp_hdr) +
		sizeof(struct vxlan_hdr));

	parse_ethernet(eth_hdr, info);
	info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}

/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	/* check which fields are supported */
	if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
		return;

	gre_len += sizeof(struct simple_gre_hdr);

	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_KEY_LEN;

	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(ETHER_TYPE_IPv6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += gre_len;
}

/* Parse an encapsulated ip or ipv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr = encap_ip;
	struct ipv6_hdr *ipv6_hdr = encap_ip;
	uint8_t ip_version;

	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

	if (ip_version != 4 && ip_version != 6)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;

	if (ip_version == 4) {
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
	} else {
		parse_ipv6(ipv6_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv6);
	}
	info->l2_len = 0;
}

/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct udp_hdr *udp_hdr;
	struct tcp_hdr *tcp_hdr;
	struct sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;
	uint32_t max_pkt_len, tso_segsz = 0;

	/* ensure packet is large enough to require tso */
	if (!info->is_tunnel) {
		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
			info->tso_segsz;
		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tso_segsz;
	} else {
		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
			info->l2_len + info->l3_len + info->l4_len +
			info->tunnel_tso_segsz;
		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tunnel_tso_segsz;
	}

	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;

		ol_flags |= PKT_TX_IPV4;
		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
			ol_flags |= PKT_TX_IP_CKSUM;
		} else {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
				ol_flags |= PKT_TX_IP_CKSUM;
			else
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
		}
	} else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM)
				ol_flags |= PKT_TX_UDP_CKSUM;
			else {
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						info->ethertype);
			}
		}
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
		tcp_hdr->cksum = 0;
		if (tso_segsz)
			ol_flags |= PKT_TX_TCP_SEG;
		else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM)
			ol_flags |= PKT_TX_TCP_CKSUM;
		else {
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr,
					info->ethertype);
		}
		if (info->gso_enable)
			ol_flags |= PKT_TX_TCP_SEG;
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be
		 * offloaded */
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
			((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}

/* Calculate the checksum of outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags, int tso_enabled)
{
	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;
		ol_flags |= PKT_TX_OUTER_IPV4;

		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else
		ol_flags |= PKT_TX_OUTER_IPV6;

	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);

	/* The outer UDP checksum is done in software, as we have no hardware
	 * supporting it today and no API for it. On the other hand, for UDP
	 * tunneling such as VXLAN or Geneve, the outer UDP checksum can be
	 * set to zero.
	 *
	 * If the packet will be TSOed into smaller packets by the NIC, we
	 * cannot set/calculate a non-zero checksum, because it would be
	 * wrong after the packet is split into several smaller packets.
	 */
	if (tso_enabled)
		udp_hdr->dgram_cksum = 0;

	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum =
				rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
		else
			udp_hdr->dgram_cksum =
				rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
	}

	return ol_flags;
}

/*
 * Performs actual copying.
 * Returns number of segments in the destination mbuf on success,
 * or negative error code on failure.
 */
static int
mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
	uint16_t seglen[], uint8_t nb_seg)
{
	uint32_t dlen, slen, tlen;
	uint32_t i, len;
	const struct rte_mbuf *m;
	const uint8_t *src;
	uint8_t *dst;

	dlen = 0;
	slen = 0;
	tlen = 0;
	i = 0;
	m = ms;

	while (ms != NULL && i != nb_seg) {
		/* start reading from the next source segment */
		if (slen == 0) {
			slen = rte_pktmbuf_data_len(ms);
			src = rte_pktmbuf_mtod(ms, const uint8_t *);
		}
		/* start filling the next destination segment */
		if (dlen == 0) {
			dlen = RTE_MIN(seglen[i], slen);
			md[i]->data_len = dlen;
			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
		}
		len = RTE_MIN(slen, dlen);
		memcpy(dst, src, len);
		tlen += len;
		slen -= len;
		dlen -= len;
		src += len;
		dst += len;
		if (slen == 0)
			ms = ms->next;
		if (dlen == 0)
			i++;
	}

	if (ms != NULL)
		return -ENOBUFS;
	else if (tlen != m->pkt_len)
		return -EINVAL;

	md[0]->nb_segs = nb_seg;
	md[0]->pkt_len = tlen;
	md[0]->vlan_tci = m->vlan_tci;
	md[0]->vlan_tci_outer = m->vlan_tci_outer;
	md[0]->ol_flags = m->ol_flags;
	md[0]->tx_offload = m->tx_offload;

	return nb_seg;
}

/*
 * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
 * Copy packet contents and offload information into the new segmented mbuf.
 */
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
{
	int32_t n, rc;
	uint32_t i, len, nb_seg;
	struct rte_mempool *mp;
	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];

	mp = current_fwd_lcore()->mbp;

	if (tx_pkt_split == TX_PKT_SPLIT_RND)
		nb_seg = random() % tx_pkt_nb_segs + 1;
	else
		nb_seg = tx_pkt_nb_segs;
	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));

	/* calculate number of segments to use and their length. */
	len = 0;
	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
		len += seglen[i];
		md[i] = NULL;
	}
	n = pkt->pkt_len - len;

	/* update size of the last segment to fit rest of the packet */
	if (n >= 0) {
		seglen[i - 1] += n;
		len += n;
	}
	nb_seg = i;

	for (i = 0; i != nb_seg; i++) {
		p = rte_pktmbuf_alloc(mp);
		if (p == NULL) {
			RTE_LOG(ERR, USER1,
				"failed to allocate %u-th of %u mbuf "
				"from mempool: %s\n",
				nb_seg - i, nb_seg, mp->name);
			break;
		}

		md[i] = p;
		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
			RTE_LOG(ERR, USER1, "mempool %s, %u-th segment: "
				"expected seglen: %u, "
				"actual mbuf tailroom: %u\n",
				mp->name, i, seglen[i],
				rte_pktmbuf_tailroom(md[i]));
			break;
		}
	}

	/* all mbufs successfully allocated, do copy */
	if (i == nb_seg) {
		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
		if (rc < 0)
			RTE_LOG(ERR, USER1,
				"mbuf_copy_split for %p(len=%u, nb_seg=%u) "
				"into %u segments failed with error code: %d\n",
				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);

		/* figure out how many mbufs to free. */
		i = (rc >= 0) ? nb_seg : 0;
	}

	/* free unused mbufs */
	for (; i != nb_seg; i++) {
		rte_pktmbuf_free_seg(md[i]);
		md[i] = NULL;
	}

	return md[0];
}

/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP flag is only useful for tunnel packets.
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
	struct rte_gso_ctx *gso_ctx;
	struct rte_mbuf **tx_pkts_burst;
	struct rte_port *txp;
	struct rte_mbuf *m, *p;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	void *gro_ctx;
	uint16_t gro_pkts_num;
	uint8_t gro_enable;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t nb_prep;
	uint16_t i;
	uint64_t rx_ol_flags, tx_ol_flags;
	uint16_t testpmd_ol_flags;
	uint32_t retry;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	struct testpmd_offload_info info;
	uint16_t nb_segments = 0;
	int ret;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packets */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;
	gro_enable = gro_ports[fs->rx_port].enable;

	txp = &ports[fs->tx_port];
	testpmd_ol_flags = txp->tx_ol_flags;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
	if (gso_ports[fs->tx_port].enable)
		info.gso_enable = 1;

	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));

		m = pkts_burst[i];
		info.is_tunnel = 0;
		info.pkt_len = rte_pktmbuf_pkt_len(m);
		tx_ol_flags = 0;
		rx_ol_flags = m->ol_flags;

		/* Update the L3/L4 checksum error packet statistics */
		if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
			rx_bad_ip_csum += 1;
		if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
			rx_bad_l4_csum += 1;

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;
		/* check if it's a supported tunnel */
		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct udp_hdr *udp_hdr;

				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
					info.l3_len);
				parse_vxlan(udp_hdr, &info, m->packet_type);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN;
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_GRE;
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
			}
		}

		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}
		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
			testpmd_ol_flags);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
				testpmd_ol_flags,
				!!(tx_ol_flags & PKT_TX_TCP_SEG));
		}
		/* step 3: fill the mbuf meta data (flags and header lengths) */

		if (info.is_tunnel == 1) {
			if (info.tunnel_tso_segsz ||
			    (testpmd_ol_flags &
			     TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
			    (tx_ol_flags & PKT_TX_OUTER_IPV6)) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
				m->tso_segsz = info.tunnel_tso_segsz;
			} else {
				/* if there is an outer UDP cksum
				   processed in sw and the inner one in hw,
				   the outer checksum will be wrong as
				   the payload will be modified by the
				   hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
			m->tso_segsz = info.tso_segsz;
		}
		m->ol_flags = tx_ol_flags;

		/* Do split & copy for the packet. */
		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
			p = pkt_copy_split(m);
			if (p != NULL) {
				rte_pktmbuf_free(m);
				m = p;
				pkts_burst[i] = m;
			}
		}
		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			char buf[256];

			printf("-----------------\n");
			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
				fs->rx_port, m, m->pkt_len, m->nb_segs);
			/* dump rx parsed packet info */
			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d flags=%s\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len, buf);
			if (rx_ol_flags & PKT_RX_LRO)
				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
						TESTPMD_TX_OFFLOAD_UDP_CKSUM |
						TESTPMD_TX_OFFLOAD_TCP_CKSUM |
						TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
					info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if (info.is_tunnel == 1) {
				if ((testpmd_ol_flags &
					TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) ||
					(tx_ol_flags & PKT_TX_OUTER_IPV6))
					printf("tx: m->outer_l2_len=%d "
						"m->outer_l3_len=%d\n",
						m->outer_l2_len,
						m->outer_l3_len);
				if (info.tunnel_tso_segsz != 0 &&
						(m->ol_flags & PKT_TX_TCP_SEG))
					printf("tx: m->tso_segsz=%d\n",
						m->tso_segsz);
			} else if (info.tso_segsz != 0 &&
					(m->ol_flags & PKT_TX_TCP_SEG))
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
			printf("tx: flags=%s", buf);
			printf("\n");
		}
	}

	if (unlikely(gro_enable)) {
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
					&(gro_ports[fs->rx_port].param));
		} else {
			gro_ctx = current_fwd_lcore()->gro_ctx;
			nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);

			if (++fs->gro_times >= gro_flush_cycles) {
				gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
				if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
					gro_pkts_num = MAX_PKT_BURST - nb_rx;

				nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
						RTE_GRO_TCP_IPV4,
						&pkts_burst[nb_rx],
						gro_pkts_num);
				fs->gro_times = 0;
			}
		}
	}

	if (gso_ports[fs->tx_port].enable == 0)
		tx_pkts_burst = pkts_burst;
	else {
		gso_ctx = &(current_fwd_lcore()->gso_ctx);
		gso_ctx->gso_size = gso_max_segment_size;
		for (i = 0; i < nb_rx; i++) {
			ret = rte_gso_segment(pkts_burst[i], gso_ctx,
					&gso_segments[nb_segments],
					GSO_MAX_PKT_BURST - nb_segments);
			if (ret >= 0)
				nb_segments += ret;
			else {
				RTE_LOG(DEBUG, USER1,
					"Unable to segment packet");
				rte_pktmbuf_free(pkts_burst[i]);
			}
		}

		tx_pkts_burst = gso_segments;
		nb_rx = nb_segments;
	}

	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
			tx_pkts_burst, nb_rx);
	if (nb_prep != nb_rx)
		printf("Preparing packet burst to transmit failed: %s\n",
				rte_strerror(rte_errno));

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
			nb_prep);

	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&tx_pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};