4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <rte_debug.h>
38 #include "ip_frag_common.h"
/*
 * NOTE(review): this chunk is an extraction that carries the original
 * file's line numbers fused into each line; the jumps in those numbers
 * show several statements are MISSING (the function's opening brace, the
 * 'n = ...' loop-bound setup, the head of the rte_pktmbuf_adj() call
 * whose argument list survives below, the 'm = fp->frags[i].mb' /
 * 'curr_idx = i' updates, the hole-detection return, closing braces and
 * the final 'return m'). Verify against the upstream DPDK file
 * (lib/librte_ip_frag/rte_ipv4_reassembly.c) before relying on it.
 */
41 * Reassemble fragments into one packet.
44 ipv4_frag_reassemble(struct ip_frag_pkt *fp)
46 struct ipv4_hdr *ip_hdr;
47 struct rte_mbuf *m, *prev;
48 uint32_t i, n, ofs, first_len;
49 uint32_t curr_idx = 0;
/* length of the first (offset 0) fragment: the backward walk below
 * terminates once the chain reaches the fragment adjacent to it. */
51 first_len = fp->frags[IP_FIRST_FRAG_IDX].len;
54 /* start from the last fragment. */
55 m = fp->frags[IP_LAST_FRAG_IDX].mb;
56 ofs = fp->frags[IP_LAST_FRAG_IDX].ofs;
57 curr_idx = IP_LAST_FRAG_IDX;
/* walk from the last fragment towards the first, chaining each found
 * predecessor in front of the current tail. */
59 while (ofs != first_len) {
63 for (i = n; i != IP_FIRST_FRAG_IDX && ofs != first_len; i--) {
65 /* previous fragment found. */
66 if(fp->frags[i].ofs + fp->frags[i].len == ofs) {
68 RTE_ASSERT(curr_idx != i);
70 /* adjust start of the last fragment data. */
/* NOTE(review): the 'rte_pktmbuf_adj(m,' head of the call this
 * argument list belongs to is missing from this extraction. */
72 (uint16_t)(m->l2_len + m->l3_len));
73 rte_pktmbuf_chain(fp->frags[i].mb, m);
75 /* this mbuf should not be accessed directly */
76 fp->frags[curr_idx].mb = NULL;
79 /* update our last fragment and offset. */
81 ofs = fp->frags[i].ofs;
85 /* error - hole in the packet. */
/* strip the L2/L3 headers off the chained tail, hook it onto the
 * first fragment, and take ownership of the head mbuf. */
91 /* chain with the first fragment. */
92 rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len));
93 rte_pktmbuf_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m);
94 fp->frags[curr_idx].mb = NULL;
95 m = fp->frags[IP_FIRST_FRAG_IDX].mb;
96 fp->frags[IP_FIRST_FRAG_IDX].mb = NULL;
98 /* update mbuf fields for reassembled packet. */
/* hdr_checksum is zeroed below, so flag the mbuf for IP checksum
 * offload (presumably recomputed by the NIC/TX path — confirm). */
99 m->ol_flags |= PKT_TX_IP_CKSUM;
101 /* update ipv4 header for the reassembled packet */
102 ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
/* NOTE(review): the continuation line of this assignment (the value
 * added to fp->total_size, presumably the header length) is missing
 * from this extraction. */
104 ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
/* clear the fragment offset and MF bit; keep only the DF flag. */
106 ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
107 rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
108 ip_hdr->hdr_checksum = 0;
114 * Process new mbuf with fragment of IPV4 packet.
115 * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
117 * Table where to lookup/add the fragmented packet.
119 * Incoming mbuf with IPV4 fragment.
121 * Fragment arrival timestamp.
123 * Pointer to the IPV4 header inside the fragment.
125 * Pointer to mbuf for reassembled packet, or NULL if:
126 * - an error occurred.
127 * - not all fragments of the packet are collected yet.
/*
 * NOTE(review): extraction is missing lines in this function too (the
 * opening brace, the declaration of 'ip_len', the zero-length guard
 * whose body is the first IP_FRAG_MBUF2DR below, the error-path
 * returns, closing braces and the final 'return mb'); it also runs
 * past the end of this chunk. Verify against the upstream DPDK file.
 */
130 rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
131 struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
132 struct ipv4_hdr *ip_hdr)
134 struct ip_frag_pkt *fp;
135 struct ip_frag_key key;
136 const unaligned_uint64_t *psd;
137 uint16_t flag_offset, ip_ofs, ip_flag;
/* split the on-wire frag_off field into the offset part and MF bit. */
140 flag_offset = rte_be_to_cpu_16(ip_hdr->fragment_offset);
141 ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
142 ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
/* build the lookup key: 8 bytes covering src+dst address, plus the
 * IP identification field. */
144 psd = (unaligned_uint64_t *)&ip_hdr->src_addr;
145 /* use first 8 bytes only */
146 key.src_dst[0] = psd[0];
147 key.id = ip_hdr->packet_id;
148 key.key_len = IPV4_KEYLEN;
/* convert the 8-byte-unit offset into bytes; payload length is
 * total_length minus the L3 header length. */
150 ip_ofs *= IPV4_HDR_OFFSET_UNITS;
151 ip_len = rte_be_to_cpu_16(ip_hdr->total_length) - mb->l3_len;
153 IP_FRAG_LOG(DEBUG, "%s:%d:\n"
154 "mbuf: %p, tms: %" PRIu64
155 ", key: <%" PRIx64 ", %#x>, ofs: %u, len: %d, flags: %#x\n"
156 "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
157 "max_entries: %u, use_entries: %u\n\n",
159 mb, tms, key.src_dst[0], key.id, ip_ofs, ip_len, ip_flag,
160 tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries,
163 /* check that fragment length is greater than zero. */
/* invalid fragment: presumably queues the mbuf on the death row for
 * deferred freeing — confirm IP_FRAG_MBUF2DR's contract. */
165 IP_FRAG_MBUF2DR(dr, mb);
169 /* try to find/add entry into the fragment's table. */
170 if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) {
171 IP_FRAG_MBUF2DR(dr, mb);
175 IP_FRAG_LOG(DEBUG, "%s:%d:\n"
176 "tbl: %p, max_entries: %u, use_entries: %u\n"
177 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
178 ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
180 tbl, tbl->max_entries, tbl->use_entries,
181 fp, fp->key.src_dst[0], fp->key.id, fp->start,
182 fp->total_size, fp->frag_size, fp->last_idx);
185 /* process the fragmented packet. */
/* ip_frag_process returns the reassembled packet once all fragments
 * are in, NULL otherwise; the table entry is then marked in-use. */
186 mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
187 ip_frag_inuse(tbl, fp);
189 IP_FRAG_LOG(DEBUG, "%s:%d:\n"
191 "tbl: %p, max_entries: %u, use_entries: %u\n"
192 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
193 ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
194 __func__, __LINE__, mb,
195 tbl, tbl->max_entries, tbl->use_entries,
196 fp, fp->key.src_dst[0], fp->key.id, fp->start,
197 fp->total_size, fp->frag_size, fp->last_idx);