/*---------------------------------------------------------------------------
 * Copyright (c) 2009-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------
 */

/*
 * IPv4 and IPv6 Fragmentation Nodes
 */

#include "ip_frag.h"

#include <vnet/ip/ip.h>

typedef struct
{
  u8 ipv6;
  u16 header_offset;
  u16 mtu;
  u8 next;
  u16 n_fragments;
} ip_frag_trace_t;

static u8 *
format_ip_frag_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
  s = format (s, "IPv%s offset: %u mtu: %u fragments: %u",
              t->ipv6 ? "6" : "4", t->header_offset, t->mtu, t->n_fragments);
  return s;
}
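
/* Fragment identification counter, shared by the IPv4 and IPv6 paths
 * below. Note that it is a plain static global with no atomics, so
 * concurrent worker threads may hand out duplicate identifications. */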
static u32 running_fragment_id;

/*
 * Limitation: Does follow buffer chains in the packet to fragment,
 * but does not generate buffer chains. I.e. a fragment is always
 * contained within a single buffer and limited to the max buffer
 * size.
 */
void
ip4_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u32 ** buffer,
                      ip_frag_error_t * error)
{
  vlib_buffer_t *from_b;
  ip4_header_t *ip4;
  u16 mtu, len, max, rem, offset, ip_frag_id, ip_frag_offset;
  u8 *org_from_packet, more;

  from_b = vlib_get_buffer (vm, from_bi);
  offset = vnet_buffer (from_b)->ip_frag.header_offset;
  mtu = vnet_buffer (from_b)->ip_frag.mtu;
  org_from_packet = vlib_buffer_get_current (from_b);
  ip4 = (ip4_header_t *) (vlib_buffer_get_current (from_b) + offset);

  rem = clib_net_to_host_u16 (ip4->length) - sizeof (ip4_header_t);
  max =
    (mtu - sizeof (ip4_header_t) -
     vnet_buffer (from_b)->ip_frag.header_offset) & ~0x7;
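
  /* Fragment offsets are expressed in 8-octet units (RFC 791), so the
   * payload budget of every non-final fragment is rounded down to a
   * multiple of 8 with & ~0x7. */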

  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - offset -
       sizeof (ip4_header_t)))
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (mtu < sizeof (ip4_header_t))
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }

  if (ip4->flags_and_fragment_offset &
      clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
    {
      *error = IP_FRAG_ERROR_DONT_FRAGMENT_SET;
      return;
    }

  if (ip4_is_fragment (ip4))
    {
      ip_frag_id = ip4->fragment_id;
      ip_frag_offset = ip4_get_fragment_offset (ip4);
      more =
        !!(ip4->flags_and_fragment_offset &
           clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS));
    }
  else
    {
      ip_frag_id = (++running_fragment_id);
      ip_frag_offset = 0;
      more = 0;
    }
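
  /* When re-fragmenting a packet that is already a fragment, the
   * original identification, offset and MF flag are preserved so that
   * end-to-end reassembly still works; fresh packets get a new
   * identification. */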

  u8 *from_data = (void *) (ip4 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 ptr = 0, fo = 0;
  u16 left_in_from_buffer =
    from_b->current_length - offset - sizeof (ip4_header_t);
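
  /* ptr is the read offset into the current source buffer's payload;
   * fo is the running fragment offset, in bytes, within the whole
   * original packet. */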

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip4_header_t *to_ip4;
      u8 *to_data;

      len = (rem > (mtu - sizeof (ip4_header_t) - offset) ? max : rem);
      if (len != rem)	/* Last fragment does not need to be divisible by 8 */
        len &= ~0x7;
      if (!vlib_buffer_alloc (vm, &to_bi, 1))
        {
          *error = IP_FRAG_ERROR_MEMORY;
          return;
        }
      vec_add1 (*buffer, to_bi);
      to_b = vlib_get_buffer (vm, to_bi);
      vnet_buffer (to_b)->sw_if_index[VLIB_RX] =
        vnet_buffer (org_from_b)->sw_if_index[VLIB_RX];
      vnet_buffer (to_b)->sw_if_index[VLIB_TX] =
        vnet_buffer (org_from_b)->sw_if_index[VLIB_TX];
      /* Copy adj_index in case a DPO based node is sending the packet
       * for fragmentation, so each fragment is sent back to the proper
       * DPO next node and index. */
      vnet_buffer (to_b)->ip.adj_index[VLIB_RX] =
        vnet_buffer (org_from_b)->ip.adj_index[VLIB_RX];
      vnet_buffer (to_b)->ip.adj_index[VLIB_TX] =
        vnet_buffer (org_from_b)->ip.adj_index[VLIB_TX];

      /* Copy the leading encap bytes (offset) and the ip4 header */
      clib_memcpy (to_b->data, org_from_packet,
                   offset + sizeof (ip4_header_t));
      to_ip4 = vlib_buffer_get_current (to_b) + offset;
      to_data = (void *) (to_ip4 + 1);

      /* Spin through from buffers filling up the to buffer */
      u16 to_ptr = 0;
      u16 bytes_to_copy, left_in_to_buffer = len;
      while (1)
        {
          /* Figure out how many bytes we can safely copy */
          bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
            left_in_to_buffer : left_in_from_buffer;
          clib_memcpy (to_data + to_ptr, from_data + ptr, bytes_to_copy);
          left_in_to_buffer -= bytes_to_copy;
          ptr += bytes_to_copy;
          left_in_from_buffer -= bytes_to_copy;
          if (left_in_to_buffer == 0)
            break;

          ASSERT (left_in_from_buffer == 0);

          /* Move on to the next buffer in the source chain */
          if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
            {
              *error = IP_FRAG_ERROR_MALFORMED;
              return;
            }
          from_b = vlib_get_buffer (vm, from_b->next_buffer);
          from_data = (u8 *) vlib_buffer_get_current (from_b);
          ptr = 0;
          left_in_from_buffer = from_b->current_length;
          to_ptr += bytes_to_copy;
        }

      to_b->current_length = offset + len + sizeof (ip4_header_t);

      to_ip4->fragment_id = ip_frag_id;
      to_ip4->flags_and_fragment_offset =
        clib_host_to_net_u16 ((fo >> 3) + ip_frag_offset);
      to_ip4->flags_and_fragment_offset |=
        clib_host_to_net_u16 (((len != rem) || more) << 13);
      to_ip4->length = clib_host_to_net_u16 (len + sizeof (ip4_header_t));
      to_ip4->checksum = ip4_header_checksum (to_ip4);
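
      /* The offset field counts 8-octet units, hence fo >> 3; bit 13
       * (0x2000) is the More Fragments flag, set on every non-final
       * fragment (len != rem) and kept when the original packet was
       * itself a non-final fragment (more). */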

      if (vnet_buffer (org_from_b)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
        {
          /* Encapsulating ipv4 header */
          ip4_header_t *encap_header4 =
            (ip4_header_t *) vlib_buffer_get_current (to_b);
          encap_header4->length =
            clib_host_to_net_u16 (to_b->current_length);
          encap_header4->checksum = ip4_header_checksum (encap_header4);
        }
      else if (vnet_buffer (org_from_b)->
               ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
        {
          /* Encapsulating ipv6 header */
          ip6_header_t *encap_header6 =
            (ip6_header_t *) vlib_buffer_get_current (to_b);
          encap_header6->payload_length =
            clib_host_to_net_u16 (to_b->current_length -
                                  sizeof (*encap_header6));
        }

      rem -= len;
      fo += len;
    }
}
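
/* Helper for callers (e.g. tunnel or MAP feature nodes) to stash the
 * fragmentation parameters in the buffer's opaque metadata before
 * handing the packet to one of the fragmentation nodes. */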
void
ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 offset, u16 mtu,
                         u8 next_index, u8 flags)
{
  vnet_buffer (b)->ip_frag.header_offset = offset;
  vnet_buffer (b)->ip_frag.mtu = mtu;
  vnet_buffer (b)->ip_frag.next_index = next_index;
  vnet_buffer (b)->ip_frag.flags = flags;
}
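
/* Typical usage (a sketch; assumes the packet starts at the IPv4
 * header, hence offset 0 and no encap flags, that link_mtu is the
 * caller's next-hop MTU, and that the caller then enqueues b0 to the
 * ip4-frag node):
 *
 *   ip_frag_set_vnet_buffer (b0, 0, link_mtu, IP4_FRAG_NEXT_IP4_LOOKUP, 0);
 */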

static uword
ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_frag_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0, *frag_from, frag_left;
          vlib_buffer_t *p0;
          ip_frag_error_t error0;
          ip4_frag_next_t next0;

          //Note: The packet is not enqueued now.
          //It is instead put in a vector where other fragments
          //will be put as well.
          pi0 = from[0];
          from += 1;
          n_left_from -= 1;
          error0 = IP_FRAG_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          ip4_frag_do_fragment (vm, pi0, &buffer, &error0);
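
          /* On success "buffer" now holds the indices of all the
           * fragments; the original chain is freed below and only the
           * fragments are enqueued. */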

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_frag_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
              tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
              tr->ipv6 = 0;
              tr->n_fragments = vec_len (buffer);
              tr->next = vnet_buffer (p0)->ip_frag.next_index;
            }
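
          /* RFC 1191 path MTU discovery: with DF set the packet must
           * not be fragmented; instead an ICMP destination-unreachable
           * (fragmentation needed) carrying the MTU is sent back. */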
          if (error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
            {
              icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
                                           ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
                                           vnet_buffer (p0)->ip_frag.mtu);
              vlib_buffer_advance (p0,
                                   vnet_buffer (p0)->ip_frag.header_offset);
              next0 = IP4_FRAG_NEXT_ICMP_ERROR;
            }
          else
            next0 = (error0 == IP_FRAG_ERROR_NONE) ? vnet_buffer (p0)->
              ip_frag.next_index : IP4_FRAG_NEXT_DROP;

          if (error0 == IP_FRAG_ERROR_NONE)
            {
              /* Free the original buffer chain */
              vlib_buffer_free_one (vm, pi0);
              frag_sent += vec_len (buffer);
              small_packets += (vec_len (buffer) == 1);
            }
          else
            vlib_error_count (vm, ip4_frag_node.index, error0, 1);

          //Send fragments that were added in the frame
          frag_from = buffer;
          frag_left = vec_len (buffer);

          while (frag_left > 0)
            {
              while (frag_left > 0 && n_left_to_next > 0)
                {
                  u32 i;
                  i = to_next[0] = frag_from[0];
                  frag_from += 1;
                  frag_left -= 1;
                  to_next += 1;
                  n_left_to_next -= 1;

                  vlib_get_buffer (vm, i)->error =
                    error_node->errors[error0];
                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next,
                                                   i, next0);
                }
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index, to_next,
                                   n_left_to_next);
            }
          vec_reset_length (buffer);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, ip4_frag_node.index,
                               IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, ip4_frag_node.index,
                               IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}

void
ip6_frag_do_fragment (vlib_main_t * vm, u32 pi, u32 ** buffer,
                      ip_frag_error_t * error)
{
  vlib_buffer_t *p;
  ip6_header_t *ip6_hdr;
  ip6_frag_hdr_t *frag_hdr;
  u8 *payload, *next_header;

  p = vlib_get_buffer (vm, pi);
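
  /* Only the extension headers that may precede the fragment header
   * (hop-by-hop, destination options, routing) are skipped. payload[1]
   * is the extension header length in 8-octet units, not counting the
   * first 8 octets, hence the (payload[1] + 1) * 8 advance. */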

  //Parsing the IPv6 headers
  ip6_hdr =
    vlib_buffer_get_current (p) + vnet_buffer (p)->ip_frag.header_offset;
  payload = (u8 *) (ip6_hdr + 1);
  next_header = &ip6_hdr->protocol;
  if (*next_header == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (*next_header == IP_PROTOCOL_IP6_DESTINATION_OPTIONS)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (*next_header == IP_PROTOCOL_IPV6_ROUTE)
    {
      next_header = payload;
      payload += (payload[1] + 1) * 8;
    }

  if (PREDICT_FALSE
      (payload >= (u8 *) vlib_buffer_get_current (p) + p->current_length))
    {
      //A malicious packet could set an extension header with a too big size
      //and make us modify another vlib_buffer
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (p->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }
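
  /* Unlike the IPv4 path above, this code does not follow buffer
   * chains: a chained packet is rejected as malformed. */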

  u8 has_more;
  u16 initial_offset;
  if (*next_header == IP_PROTOCOL_IPV6_FRAGMENTATION)
    {
      //The fragmentation header is already there
      frag_hdr = (ip6_frag_hdr_t *) payload;
      has_more = ip6_frag_hdr_more (frag_hdr);
      initial_offset = ip6_frag_hdr_offset (frag_hdr);
    }
  else
    {
      //Insert a fragmentation header in the packet
      u8 nh = *next_header;
      *next_header = IP_PROTOCOL_IPV6_FRAGMENTATION;
      vlib_buffer_advance (p, -sizeof (*frag_hdr));
      u8 *start = vlib_buffer_get_current (p);
      memmove (start, start + sizeof (*frag_hdr),
               payload - (start + sizeof (*frag_hdr)));
      frag_hdr = (ip6_frag_hdr_t *) (payload - sizeof (*frag_hdr));
      frag_hdr->identification = ++running_fragment_id;
      frag_hdr->next_hdr = nh;
      frag_hdr->rsv = 0;
      has_more = 0;
      initial_offset = 0;
    }
  payload = (u8 *) (frag_hdr + 1);
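
  /* The unfragmentable part (IPv6 header plus the extension headers
   * walked above) has been shifted 8 bytes towards the buffer start,
   * opening the gap in which the new fragment header now sits, directly
   * in front of the fragmentable payload. */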

  u16 headers_len = payload - (u8 *) vlib_buffer_get_current (p);
  u16 max_payload = vnet_buffer (p)->ip_frag.mtu - headers_len;
  u16 rem = p->current_length - headers_len;
  u16 ptr = 0;

  if (max_payload < 8)
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }

  while (rem)
    {
      u32 bi;
      vlib_buffer_t *b;
      u16 len = (rem > max_payload) ? (max_payload & ~0x7) : rem;
      rem -= len;
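
      /* The first fragment (ptr == 0) reuses the original buffer in
       * place; each subsequent fragment gets a freshly allocated buffer
       * into which the headers are copied. */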
      if (ptr != 0)
        {
          if (!vlib_buffer_alloc (vm, &bi, 1))
            {
              *error = IP_FRAG_ERROR_MEMORY;
              return;
            }
          b = vlib_get_buffer (vm, bi);
          vnet_buffer (b)->sw_if_index[VLIB_RX] =
            vnet_buffer (p)->sw_if_index[VLIB_RX];
          vnet_buffer (b)->sw_if_index[VLIB_TX] =
            vnet_buffer (p)->sw_if_index[VLIB_TX];

          /* Copy adj_index in case a DPO based node is sending the packet
             for fragmentation, so each fragment is sent back to the proper
             DPO next node and index */
          vnet_buffer (b)->ip.adj_index[VLIB_RX] =
            vnet_buffer (p)->ip.adj_index[VLIB_RX];
          vnet_buffer (b)->ip.adj_index[VLIB_TX] =
            vnet_buffer (p)->ip.adj_index[VLIB_TX];

          clib_memcpy (vlib_buffer_get_current (b),
                       vlib_buffer_get_current (p), headers_len);
          clib_memcpy (vlib_buffer_get_current (b) + headers_len,
                       payload + ptr, len);
          frag_hdr =
            vlib_buffer_get_current (b) + headers_len - sizeof (*frag_hdr);
        }
      else
        {
          bi = pi;
          b = vlib_get_buffer (vm, bi);
          //frag_hdr was already set above for the first fragment
        }

      ip6_hdr =
        vlib_buffer_get_current (b) + vnet_buffer (p)->ip_frag.header_offset;
      frag_hdr->fragment_offset_and_more =
        ip6_frag_hdr_offset_and_more (initial_offset + (ptr >> 3),
                                      (rem || has_more));
      b->current_length = headers_len + len;
      ip6_hdr->payload_length =
        clib_host_to_net_u16 (b->current_length -
                              vnet_buffer (p)->ip_frag.header_offset -
                              sizeof (*ip6_hdr));
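
      /* The offset field again counts 8-octet units (ptr >> 3), and the
       * M bit stays set while payload remains (rem) or the original
       * packet was itself a non-final fragment (has_more).
       * payload_length covers everything after the fixed IPv6 header,
       * including the fragment header itself. */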

      if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
        {
          //Encapsulating ipv4 header
          ip4_header_t *encap_header4 =
            (ip4_header_t *) vlib_buffer_get_current (b);
          encap_header4->length = clib_host_to_net_u16 (b->current_length);
          encap_header4->checksum = ip4_header_checksum (encap_header4);
        }
      else if (vnet_buffer (p)->ip_frag.flags & IP_FRAG_FLAG_IP6_HEADER)
        {
          //Encapsulating ipv6 header
          ip6_header_t *encap_header6 =
            (ip6_header_t *) vlib_buffer_get_current (b);
          encap_header6->payload_length =
            clib_host_to_net_u16 (b->current_length -
                                  sizeof (*encap_header6));
        }

      vec_add1 (*buffer, bi);

      ptr += len;
    }
}

static uword
ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_frag_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0, *frag_from, frag_left;
          vlib_buffer_t *p0;
          ip_frag_error_t error0;
          ip6_frag_next_t next0;

          pi0 = from[0];
          from += 1;
          n_left_from -= 1;
          error0 = IP_FRAG_ERROR_NONE;

          p0 = vlib_get_buffer (vm, pi0);
          ip6_frag_do_fragment (vm, pi0, &buffer, &error0);
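
          /* The original buffer is reused as the first fragment, so
           * unlike the IPv4 path nothing is freed here on success. */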

          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip_frag_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->header_offset = vnet_buffer (p0)->ip_frag.header_offset;
              tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
              tr->ipv6 = 1;
              tr->n_fragments = vec_len (buffer);
              tr->next = vnet_buffer (p0)->ip_frag.next_index;
            }

          next0 = (error0 == IP_FRAG_ERROR_NONE) ? vnet_buffer (p0)->
            ip_frag.next_index : IP6_FRAG_NEXT_DROP;

          frag_sent += vec_len (buffer);
          small_packets += (vec_len (buffer) == 1);

          //Send fragments that were added in the frame
          frag_from = buffer;
          frag_left = vec_len (buffer);
          while (frag_left > 0)
            {
              while (frag_left > 0 && n_left_to_next > 0)
                {
                  u32 i;
                  i = to_next[0] = frag_from[0];
                  frag_from += 1;
                  frag_left -= 1;
                  to_next += 1;
                  n_left_to_next -= 1;

                  vlib_get_buffer (vm, i)->error =
                    error_node->errors[error0];
                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                                   to_next, n_left_to_next,
                                                   i, next0);
                }
              vlib_put_next_frame (vm, node, next_index, n_left_to_next);
              vlib_get_next_frame (vm, node, next_index, to_next,
                                   n_left_to_next);
            }
          vec_reset_length (buffer);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, ip6_frag_node.index,
                               IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, ip6_frag_node.index,
                               IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}

static char *ip4_frag_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_frag_error
#undef _
};
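
/* Both the IPv4 and the IPv6 node register this same string table,
 * since they share the common ip_frag error enum. */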

VLIB_REGISTER_NODE (ip4_frag_node) = {
  .function = ip4_frag,
  .name = IP4_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP4_FRAG_N_NEXT,
  .next_nodes = {
    [IP4_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP4_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP4_FRAG_NEXT_DROP] = "ip4-drop"
  },
};

VLIB_REGISTER_NODE (ip6_frag_node) = {
  .function = ip6_frag,
  .name = IP6_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP6_FRAG_N_NEXT,
  .next_nodes = {
    [IP6_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP6_FRAG_NEXT_DROP] = "ip6-drop"
  },
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */