/*---------------------------------------------------------------------------
 * Copyright (c) 2009-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *---------------------------------------------------------------------------
 */

/*
 * IPv4 Fragmentation Node
 */

#include "ip_frag.h"

#include <vnet/ip/ip.h>
typedef struct
{
  u8 ipv6;
  u16 mtu;
  u8 next;
  u16 n_fragments;
} ip_frag_trace_t;

static u8 *
format_ip_frag_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_frag_trace_t *t = va_arg (*args, ip_frag_trace_t *);
  s = format (s, "IPv%s mtu: %u fragments: %u",
	      t->ipv6 ? "6" : "4", t->mtu, t->n_fragments);
  return s;
}
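/*
 * Example (illustrative): for an IPv4 packet fragmented at MTU 1500 into
 * two pieces, the packet trace line produced by this formatter would read
 * something like:
 *
 *   IPv4 mtu: 1500 fragments: 2
 */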
static u32 running_fragment_id;

static void
frag_set_sw_if_index (vlib_buffer_t * to, vlib_buffer_t * from)
{
  vnet_buffer (to)->sw_if_index[VLIB_RX] =
    vnet_buffer (from)->sw_if_index[VLIB_RX];
  vnet_buffer (to)->sw_if_index[VLIB_TX] =
    vnet_buffer (from)->sw_if_index[VLIB_TX];

  /* Copy adj_index in case a DPO-based node is sending the packet for
   * fragmentation; the fragments are then sent back to the proper DPO
   * next node and index.
   */
  vnet_buffer (to)->ip.adj_index[VLIB_RX] =
    vnet_buffer (from)->ip.adj_index[VLIB_RX];
  vnet_buffer (to)->ip.adj_index[VLIB_TX] =
    vnet_buffer (from)->ip.adj_index[VLIB_TX];

  /* Copy QoS bits */
  if (PREDICT_TRUE (from->flags & VNET_BUFFER_F_QOS_DATA_VALID))
    {
      vnet_buffer2 (to)->qos = vnet_buffer2 (from)->qos;
      to->flags |= VNET_BUFFER_F_QOS_DATA_VALID;
    }
}
static vlib_buffer_t *
frag_buffer_alloc (vlib_buffer_t * org_b, u32 * bi)
{
  vlib_main_t *vm = vlib_get_main ();
  if (vlib_buffer_alloc (vm, bi, 1) != 1)
    return 0;

  vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
  vlib_buffer_copy_trace_flag (vm, org_b, *bi);

  return b;
}
/*
 * Limitation: This function follows buffer chains in the packet being
 * fragmented, but does not generate buffer chains. I.e. a fragment is
 * always contained within a single buffer and limited to the max buffer
 * size.
 */
void
ip4_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u32 ** buffer,
		      ip_frag_error_t * error)
{
  vlib_buffer_t *from_b;
  ip4_header_t *ip4;
  u16 mtu, len, max, rem, ip_frag_id, ip_frag_offset;
  u8 *org_from_packet, more;

  from_b = vlib_get_buffer (vm, from_bi);
  mtu = vnet_buffer (from_b)->ip_frag.mtu;
  org_from_packet = vlib_buffer_get_current (from_b);
  ip4 = (ip4_header_t *) vlib_buffer_get_current (from_b);

  /* Payload bytes left to fragment, and the max payload bytes per
   * fragment (fragment offsets are in units of 8 bytes, so every
   * fragment but the last must carry a multiple of 8 bytes). */
  rem = clib_net_to_host_u16 (ip4->length) - sizeof (ip4_header_t);
  max =
    (clib_min (mtu, VLIB_BUFFER_DATA_SIZE) - sizeof (ip4_header_t)) & ~0x7;
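  /*
   * Worked example (illustrative, assuming a 2048-byte buffer data size):
   * for mtu = 1500, max = (min (1500, 2048) - 20) & ~7 = 1480, i.e. each
   * fragment carries up to 1480 payload bytes, already a multiple of 8.
   */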
  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip4_header_t)))
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  if (mtu < sizeof (ip4_header_t))
    {
      *error = IP_FRAG_ERROR_CANT_FRAGMENT_HEADER;
      return;
    }

  if (ip4->flags_and_fragment_offset &
      clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
    {
      *error = IP_FRAG_ERROR_DONT_FRAGMENT_SET;
      return;
    }

  if (ip4_is_fragment (ip4))
    {
      /* Refragmenting an existing fragment: keep its id and offset, and
       * remember whether more fragments follow the original one. */
      ip_frag_id = ip4->fragment_id;
      ip_frag_offset = ip4_get_fragment_offset (ip4);
      more =
	!(!(ip4->flags_and_fragment_offset &
	    clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)));
    }
  else
    {
      ip_frag_id = (++running_fragment_id);
      ip_frag_offset = 0;
      more = 0;
    }
  u8 *from_data = (void *) (ip4 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;			/* running fragment offset in bytes */
  u16 left_in_from_buffer = from_b->current_length - sizeof (ip4_header_t);
  u16 ptr = 0;			/* read offset within the current from buffer */

  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip4_header_t *to_ip4;
      u8 *to_data;

      len = (rem > max ? max : rem);
      if (len != rem)		/* Last fragment does not need to be divisible by 8 */
	len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
	{
	  *error = IP_FRAG_ERROR_MEMORY;
	  return;
	}
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy ip4 header */
      clib_memcpy_fast (to_b->data, org_from_packet, sizeof (ip4_header_t));
      to_ip4 = vlib_buffer_get_current (to_b);
      to_data = (void *) (to_ip4 + 1);

      /* Spin through from buffers filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
	{
	  u16 bytes_to_copy;

	  /* Figure out how many bytes we can safely copy */
	  bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
	    left_in_to_buffer : left_in_from_buffer;
	  clib_memcpy_fast (to_data + to_ptr, from_data + ptr, bytes_to_copy);
	  left_in_to_buffer -= bytes_to_copy;
	  ptr += bytes_to_copy;
	  left_in_from_buffer -= bytes_to_copy;
	  if (left_in_to_buffer == 0)
	    break;

	  ASSERT (left_in_from_buffer == 0);
	  /* Move on to the next buffer in the chain */
	  if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      *error = IP_FRAG_ERROR_MALFORMED;
	      return;
	    }
	  from_b = vlib_get_buffer (vm, from_b->next_buffer);
	  from_data = (u8 *) vlib_buffer_get_current (from_b);
	  ptr = 0;
	  left_in_from_buffer = from_b->current_length;
	  to_ptr += bytes_to_copy;
	}
      to_b->current_length = len + sizeof (ip4_header_t);

      to_ip4->fragment_id = ip_frag_id;
      to_ip4->flags_and_fragment_offset =
	clib_host_to_net_u16 ((fo >> 3) + ip_frag_offset);
      to_ip4->flags_and_fragment_offset |=
	clib_host_to_net_u16 (((len != rem) || more) << 13);
      to_ip4->length = clib_host_to_net_u16 (len + sizeof (ip4_header_t));
      to_ip4->checksum = ip4_header_checksum (to_ip4);
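      /*
       * Worked example (illustrative): with max = 1480 and a 1980-byte
       * payload, the first fragment carries len = 1480 bytes at fo = 0,
       * so the offset field is 0 and MF (bit 13) is set because
       * len != rem. The second fragment carries the remaining 500 bytes
       * at fo = 1480, so the offset field is 1480 / 8 = 185 and MF is
       * clear (unless the original packet was itself a non-last fragment,
       * in which case `more` keeps MF set).
       */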
      if (vnet_buffer (org_from_b)->ip_frag.flags & IP_FRAG_FLAG_IP4_HEADER)
	{
	  /* Encapsulating ipv4 header */
	  ip4_header_t *encap_header4 =
	    (ip4_header_t *) vlib_buffer_get_current (to_b);
	  encap_header4->length =
	    clib_host_to_net_u16 (to_b->current_length);
	  encap_header4->checksum = ip4_header_checksum (encap_header4);
	}
      else if (vnet_buffer (org_from_b)->ip_frag.flags &
	       IP_FRAG_FLAG_IP6_HEADER)
	{
	  /* Encapsulating ipv6 header */
	  ip6_header_t *encap_header6 =
	    (ip6_header_t *) vlib_buffer_get_current (to_b);
	  encap_header6->payload_length =
	    clib_host_to_net_u16 (to_b->current_length -
				  sizeof (*encap_header6));
	}

      rem -= len;
      fo += len;
    }
}
void
ip_frag_set_vnet_buffer (vlib_buffer_t * b, u16 mtu, u8 next_index, u8 flags)
{
  vnet_buffer (b)->ip_frag.mtu = mtu;
  vnet_buffer (b)->ip_frag.next_index = next_index;
  vnet_buffer (b)->ip_frag.flags = flags;
}
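/*
 * Illustrative sketch (not compiled in): one plausible way a caller might
 * drive fragmentation directly. The helper name example_fragment_packet
 * is hypothetical; only ip_frag_set_vnet_buffer, ip4_frag_do_fragment and
 * the standard vlib buffer/vec calls below are real.
 */
#if 0
static void
example_fragment_packet (vlib_main_t * vm, u32 bi)
{
  u32 *fragments = 0;		/* vector of fragment buffer indices */
  ip_frag_error_t err = IP_FRAG_ERROR_NONE;

  /* Prime the buffer: fragment to a 576-byte MTU and send the
   * fragments on to ip4-lookup. */
  ip_frag_set_vnet_buffer (vlib_get_buffer (vm, bi), 576,
			   IP4_FRAG_NEXT_IP4_LOOKUP, 0);
  ip4_frag_do_fragment (vm, bi, &fragments, &err);

  if (err == IP_FRAG_ERROR_NONE)
    vlib_buffer_free_one (vm, bi);	/* caller frees the original */

  /* ...enqueue the buffer indices in `fragments` to the next node... */
  vec_free (fragments);
}
#endif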
static inline uword
frag_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * frame, u32 node_index, bool is_ip6)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, node_index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  u32 frag_sent = 0, small_packets = 0;
  u32 *buffer = 0;		/* vector of fragment buffer indices */
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0, *frag_from, frag_left;
	  vlib_buffer_t *p0;
	  ip_frag_error_t error0;
	  int next0;

	  /*
	   * Note: The packet is not enqueued now. It is instead put
	   * in a vector where other fragments will be put as well.
	   */
	  pi0 = from[0];
	  from += 1;
	  n_left_from -= 1;
	  error0 = IP_FRAG_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  if (is_ip6)
	    ip6_frag_do_fragment (vm, pi0, &buffer, &error0);
	  else
	    ip4_frag_do_fragment (vm, pi0, &buffer, &error0);
	  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ip_frag_trace_t *tr =
		vlib_add_trace (vm, node, p0, sizeof (*tr));
	      tr->mtu = vnet_buffer (p0)->ip_frag.mtu;
	      tr->ipv6 = is_ip6 ? 1 : 0;
	      tr->n_fragments = vec_len (buffer);
	      tr->next = vnet_buffer (p0)->ip_frag.next_index;
	    }
	  if (!is_ip6 && error0 == IP_FRAG_ERROR_DONT_FRAGMENT_SET)
	    {
	      icmp4_error_set_vnet_buffer (p0, ICMP4_destination_unreachable,
					   ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
					   vnet_buffer (p0)->ip_frag.mtu);
	      next0 = IP4_FRAG_NEXT_ICMP_ERROR;
	    }
	  else if (is_ip6)
	    next0 = (error0 == IP_FRAG_ERROR_NONE) ?
	      vnet_buffer (p0)->ip_frag.next_index : IP6_FRAG_NEXT_DROP;
	  else
	    next0 = (error0 == IP_FRAG_ERROR_NONE) ?
	      vnet_buffer (p0)->ip_frag.next_index : IP4_FRAG_NEXT_DROP;
	  if (error0 == IP_FRAG_ERROR_NONE)
	    {
	      /* Success: count the fragments and free the original chain */
	      frag_sent += vec_len (buffer);
	      small_packets += (vec_len (buffer) == 1);
	      vlib_buffer_free_one (vm, pi0);	/* Free original packet */
	    }
	  else
	    {
	      vlib_error_count (vm, node_index, error0, 1);
	      vec_add1 (buffer, pi0);	/* Get rid of the original buffer */
	    }
	  /* Send fragments that were added in the frame */
	  frag_from = buffer;
	  frag_left = vec_len (buffer);

	  while (frag_left > 0)
	    {
	      while (frag_left > 0 && n_left_to_next > 0)
		{
		  u32 i;
		  i = to_next[0] = frag_from[0];
		  frag_from += 1;
		  frag_left -= 1;
		  to_next += 1;
		  n_left_to_next -= 1;

		  vlib_get_buffer (vm, i)->error =
		    error_node->errors[error0];
		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
						   to_next, n_left_to_next,
						   i, next0);
		}
	      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	      vlib_get_next_frame (vm, node, next_index, to_next,
				   n_left_to_next);
	    }
	  vec_reset_length (buffer);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vec_free (buffer);

  vlib_node_increment_counter (vm, node_index,
			       IP_FRAG_ERROR_FRAGMENT_SENT, frag_sent);
  vlib_node_increment_counter (vm, node_index,
			       IP_FRAG_ERROR_SMALL_PACKET, small_packets);

  return frame->n_vectors;
}
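/*
 * Counter semantics (illustrative): fragmenting one 1980-byte payload at
 * MTU 1500 adds 2 to FRAGMENT_SENT; a packet that already fits in a
 * single fragment adds 1 to FRAGMENT_SENT and 1 to SMALL_PACKET.
 */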
static uword
ip4_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip4_frag_node.index,
			   0 /* is_ip6 */ );
}

static uword
ip6_frag (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  return frag_node_inline (vm, node, frame, ip6_frag_node.index,
			   1 /* is_ip6 */ );
}
/*
 * Fragments the packet given in from_bi. Fragments are returned in the
 * buffer vector. Caller must ensure the original packet is freed.
 */
void
ip6_frag_do_fragment (vlib_main_t * vm, u32 from_bi, u32 ** buffer,
		      ip_frag_error_t * error)
{
  vlib_buffer_t *from_b;
  ip6_header_t *ip6;
  u16 mtu, len, max, rem, ip_frag_id;

  from_b = vlib_get_buffer (vm, from_bi);
  mtu = vnet_buffer (from_b)->ip_frag.mtu;
  ip6 = (ip6_header_t *) vlib_buffer_get_current (from_b);

  rem = clib_net_to_host_u16 (ip6->payload_length);
  max = (mtu - sizeof (ip6_header_t) - sizeof (ip6_frag_hdr_t)) & ~0x7;	// TODO: Is max correct??
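  /*
   * Worked example (illustrative): for mtu = 1500, max =
   * (1500 - 40 - 8) & ~7 = 1448, i.e. each fragment carries up to 1448
   * payload bytes after the IPv6 header and the 8-byte fragment header.
   */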
  if (rem >
      (vlib_buffer_length_in_chain (vm, from_b) - sizeof (ip6_header_t)))
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  /* TODO: Look through header chain for fragmentation header */
  if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
    {
      *error = IP_FRAG_ERROR_MALFORMED;
      return;
    }

  u8 *from_data = (void *) (ip6 + 1);
  vlib_buffer_t *org_from_b = from_b;
  u16 fo = 0;			/* running fragment offset in bytes */
  u16 left_in_from_buffer = from_b->current_length - sizeof (ip6_header_t);
  u16 ptr = 0;			/* read offset within the current from buffer */

  ip_frag_id = ++running_fragment_id;	// Fix
  /* Do the actual fragmentation */
  while (rem)
    {
      u32 to_bi;
      vlib_buffer_t *to_b;
      ip6_header_t *to_ip6;
      ip6_frag_hdr_t *to_frag_hdr;
      u8 *to_data;

      len =
	(rem >
	 (mtu - sizeof (ip6_header_t) -
	  sizeof (ip6_frag_hdr_t)) ? max : rem);
      if (len != rem)		/* Last fragment does not need to be divisible by 8 */
	len &= ~0x7;
      if ((to_b = frag_buffer_alloc (org_from_b, &to_bi)) == 0)
	{
	  *error = IP_FRAG_ERROR_MEMORY;
	  return;
	}
      vec_add1 (*buffer, to_bi);
      frag_set_sw_if_index (to_b, org_from_b);

      /* Copy ip6 header */
      clib_memcpy_fast (to_b->data, ip6, sizeof (ip6_header_t));
      to_ip6 = vlib_buffer_get_current (to_b);
      to_frag_hdr = (ip6_frag_hdr_t *) (to_ip6 + 1);
      to_data = (void *) (to_frag_hdr + 1);
      /* Spin through from buffers filling up the to buffer */
      u16 left_in_to_buffer = len, to_ptr = 0;
      while (1)
	{
	  u16 bytes_to_copy;

	  /* Figure out how many bytes we can safely copy */
	  bytes_to_copy = left_in_to_buffer <= left_in_from_buffer ?
	    left_in_to_buffer : left_in_from_buffer;
	  clib_memcpy_fast (to_data + to_ptr, from_data + ptr, bytes_to_copy);
	  left_in_to_buffer -= bytes_to_copy;
	  ptr += bytes_to_copy;
	  left_in_from_buffer -= bytes_to_copy;
	  if (left_in_to_buffer == 0)
	    break;

	  ASSERT (left_in_from_buffer == 0);
	  /* Move on to the next buffer in the chain */
	  if (!(from_b->flags & VLIB_BUFFER_NEXT_PRESENT))
	    {
	      *error = IP_FRAG_ERROR_MALFORMED;
	      return;
	    }
	  from_b = vlib_get_buffer (vm, from_b->next_buffer);
	  from_data = (u8 *) vlib_buffer_get_current (from_b);
	  ptr = 0;
	  left_in_from_buffer = from_b->current_length;
	  to_ptr += bytes_to_copy;
	}
      to_b->current_length =
	len + sizeof (ip6_header_t) + sizeof (ip6_frag_hdr_t);

      /* Rewrite the IPv6 header and fill in the fragment header */
      to_ip6->payload_length =
	clib_host_to_net_u16 (len + sizeof (ip6_frag_hdr_t));
      to_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
      to_frag_hdr->fragment_offset_and_more =
	ip6_frag_hdr_offset_and_more ((fo >> 3), len != rem);
      to_frag_hdr->identification = ip_frag_id;
      to_frag_hdr->next_hdr = ip6->protocol;
      to_frag_hdr->rsv = 0;

      rem -= len;
      fo += len;
    }
}
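/*
 * Resulting fragment layout (illustrative):
 *
 *   [ IPv6 header | fragment header | up to `max` payload bytes ]
 *
 * E.g. a 1980-byte payload at mtu = 1500 yields a first fragment with
 * 1448 payload bytes (offset field 0, M-bit set) and a second with the
 * remaining 532 bytes (offset field 1448 / 8 = 181, M-bit clear).
 */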
static char *ip4_frag_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_frag_error
#undef _
};
VLIB_REGISTER_NODE (ip4_frag_node) = {
  .function = ip4_frag,
  .name = IP4_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP4_FRAG_N_NEXT,
  .next_nodes = {
    [IP4_FRAG_NEXT_IP4_REWRITE] = "ip4-rewrite",
    [IP4_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP4_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP4_FRAG_NEXT_ICMP_ERROR] = "ip4-icmp-error",
    [IP4_FRAG_NEXT_DROP] = "ip4-drop"
  },
};
VLIB_REGISTER_NODE (ip6_frag_node) = {
  .function = ip6_frag,
  .name = IP6_FRAG_NODE_NAME,
  .vector_size = sizeof (u32),
  .format_trace = format_ip_frag_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = IP_FRAG_N_ERROR,
  .error_strings = ip4_frag_error_strings,

  .n_next_nodes = IP6_FRAG_N_NEXT,
  .next_nodes = {
    [IP6_FRAG_NEXT_IP6_REWRITE] = "ip6-rewrite",
    [IP6_FRAG_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_FRAG_NEXT_IP6_LOOKUP] = "ip6-lookup",
    [IP6_FRAG_NEXT_DROP] = "ip6-drop"
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */