2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include "../ip/ip_frag.h"
21 IP6_MAP_NEXT_IP4_LOOKUP,
22 #ifdef MAP_SKIP_IP6_LOOKUP
23 IP6_MAP_NEXT_IP4_REWRITE,
25 IP6_MAP_NEXT_IP6_REASS,
26 IP6_MAP_NEXT_IP4_REASS,
27 IP6_MAP_NEXT_IP4_FRAGMENT,
28 IP6_MAP_NEXT_IP6_ICMP_RELAY,
29 IP6_MAP_NEXT_IP6_LOCAL,
/* Next-node indices for the ip6-map-ip6-reass node (IPv6 virtual reassembly). */
35 enum ip6_map_ip6_reass_next_e
37 IP6_MAP_IP6_REASS_NEXT_IP6_MAP,	/* fragment ready: hand back to ip6-map */
38 IP6_MAP_IP6_REASS_NEXT_DROP,	/* drop the buffer */
39 IP6_MAP_IP6_REASS_N_NEXT,	/* number of next nodes */
/* Next-node indices for the ip6-map-ip4-reass node (IPv4 virtual reassembly). */
42 enum ip6_map_ip4_reass_next_e
44 IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,	/* forward decapsulated IPv4 packet */
45 IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,	/* packet exceeds domain MTU: fragment */
46 IP6_MAP_IP4_REASS_NEXT_DROP,	/* drop the buffer */
47 IP6_MAP_IP4_REASS_N_NEXT,	/* number of next nodes */
/* Next-node indices for the ip6-map-icmp-relay node (ICMPv6 -> ICMPv4 relay). */
50 enum ip6_icmp_relay_next_e
52 IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,	/* forward the translated ICMPv4 error */
53 IP6_ICMP_RELAY_NEXT_DROP,	/* drop the buffer */
54 IP6_ICMP_RELAY_N_NEXT,	/* number of next nodes */
/* Forward declarations of the node registrations so earlier code can
 * reference node.index before the VLIB_REGISTER_NODE blocks below. */
57 vlib_node_registration_t ip6_map_ip4_reass_node;
58 vlib_node_registration_t ip6_map_ip6_reass_node;
59 static vlib_node_registration_t ip6_map_icmp_relay_node;	/* file-local: only referenced here */
66 } map_ip6_map_ip4_reass_trace_t;
/*
 * Packet-trace formatter for the ip6-map-ip4-reass node.
 * Prints the MAP domain index, the resolved L4 port, and whether the
 * fragment was cached (waiting for the first fragment) or forwarded.
 */
69 format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
71 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
72 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
73 map_ip6_map_ip4_reass_trace_t *t =
74 va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
75 return format (s, "MAP domain index: %d L4 port: %u Status: %s",
76 t->map_domain_index, t->port,
77 t->cached ? "cached" : "forwarded");
85 } map_ip6_map_ip6_reass_trace_t;
/*
 * Packet-trace formatter for the ip6-map-ip6-reass node.
 * Prints the fragment offset, payload length, and whether the fragment
 * left the node ("out") or was retained ("in").
 */
88 format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
90 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
91 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
92 map_ip6_map_ip6_reass_trace_t *t =
93 va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
94 return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
95 t->frag_len, t->out ? "out" : "in");
/*
 * MAP-E inbound anti-spoof check: derive the expected IPv6 source address
 * (prefix + suffix) from the inner IPv4 source address and port via the
 * domain's MAP rules, and compare it with the actual outer IPv6 source.
 * Returns true when the addresses are consistent (packet is legitimate).
 */
101 static_always_inline bool
102 ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
105 u16 sp4 = clib_net_to_host_u16 (port);
106 u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
107 u64 sal6 = map_get_pfx (d, sa4, sp4);	/* expected high 64 bits of IPv6 src */
108 u64 sar6 = map_get_sfx (d, sa4, sp4);	/* expected low 64 bits of IPv6 src */
111 (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
112 || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
/*
 * Run the inbound security check for a decapsulated packet and update
 * *next / *error accordingly.  Only meaningful for shared-address domains
 * (ea_bits_len or explicit rules present) with a non-zero PSID length.
 * Non-fragmented packets are checked in-line via ip6_map_sec_check();
 * IPv4 fragments are diverted to the ip4 virtual-reassembly node when
 * sec_check_frag is enabled (the port is unknown until reassembly).
 */
117 static_always_inline void
118 ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
119 ip6_header_t * ip6, u32 * next, u8 * error)
121 map_main_t *mm = &map_main;
122 if (d->ea_bits_len || d->rules)
124 if (d->psid_length > 0)
126 if (!ip4_is_fragment (ip4))
128 u16 port = ip4_map_get_port (ip4, MAP_SENDER);
133 ip6_map_sec_check (d, port, ip4,
134 ip6) ? MAP_ERROR_NONE :
135 MAP_ERROR_DECAP_SEC_CHECK;
139 *error = MAP_ERROR_BAD_PROTOCOL;
144 *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
/*
 * Optional fast path (compiled in with MAP_SKIP_IP6_LOOKUP): skip the
 * ip4-lookup node by stamping a precomputed adjacency index directly on
 * the buffer.  Load-balances across the adjacency block using the IPv4
 * flow hash.  Returns whether the bypass was applied.
 */
150 static_always_inline bool
151 ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
153 #ifdef MAP_SKIP_IP6_LOOKUP
154 map_main_t *mm = &map_main;
155 u32 adj_index0 = mm->adj4_index;
158 ip_lookup_main_t *lm4 = &ip4_main.lookup_main;
159 ip_adjacency_t *adj = ip_get_adjacency (lm4, mm->adj4_index);
162 u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT);
163 adj_index0 += (hash_c0 & (adj->n_adj - 1));	/* n_adj assumed power of two -- TODO confirm */
165 vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0;
/*
 * ip6-map node: decapsulate MAP-E (IPv4-in-IPv6) traffic arriving from the
 * IPv6 side.  Dual-loop (two packets per iteration) followed by a
 * single-packet loop.  Per packet:
 *   - IP-in-IP payload  -> find MAP domain, security check, then
 *     ip4-lookup / ip4-rewrite (bypass) / ip4-frag (over domain MTU)
 *   - ICMPv6            -> echo to ip6-local, errors to the ICMP relay node
 *   - IPv6 fragment     -> ip6 virtual reassembly
 *   - anything else     -> drop with MAP_ERROR_BAD_PROTOCOL
 * Returns the number of packets processed.
 */
176 ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
178 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
179 vlib_node_runtime_t *error_node =
180 vlib_node_get_runtime (vm, ip6_map_node.index);
181 map_main_t *mm = &map_main;
182 vlib_combined_counter_main_t *cm = mm->domain_counters;
183 u32 cpu_index = os_get_cpu_number ();
185 from = vlib_frame_vector_args (frame);
186 n_left_from = frame->n_vectors;
187 next_index = node->cached_next_index;
188 while (n_left_from > 0)
190 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: process two packets per iteration while at least four remain
 * (two current + two prefetched). */
193 while (n_left_from >= 4 && n_left_to_next >= 2)
196 vlib_buffer_t *p0, *p1;
197 u8 error0 = MAP_ERROR_NONE;
198 u8 error1 = MAP_ERROR_NONE;
199 map_domain_t *d0 = 0, *d1 = 0;
200 ip4_header_t *ip40, *ip41;
201 ip6_header_t *ip60, *ip61;
202 u16 port0 = 0, port1 = 0;
203 u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
204 u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
205 u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
207 /* Prefetch next iteration. */
209 vlib_buffer_t *p2, *p3;
211 p2 = vlib_get_buffer (vm, from[2]);
212 p3 = vlib_get_buffer (vm, from[3]);
214 vlib_prefetch_buffer_header (p2, LOAD);
215 vlib_prefetch_buffer_header (p3, LOAD);
217 /* IPv6 + IPv4 header + 8 bytes of ULP */
218 CLIB_PREFETCH (p2->data, 68, LOAD);
219 CLIB_PREFETCH (p3->data, 68, LOAD);
222 pi0 = to_next[0] = from[0];
223 pi1 = to_next[1] = from[1];
229 p0 = vlib_get_buffer (vm, pi0);
230 p1 = vlib_get_buffer (vm, pi1);
231 ip60 = vlib_buffer_get_current (p0);
232 ip61 = vlib_buffer_get_current (p1);
/* Strip the outer IPv6 header; current now points at the inner IPv4. */
233 vlib_buffer_advance (p0, sizeof (ip6_header_t));
234 vlib_buffer_advance (p1, sizeof (ip6_header_t));
235 ip40 = vlib_buffer_get_current (p0);
236 ip41 = vlib_buffer_get_current (p1);
239 * Encapsulated IPv4 packet
240 * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
241 * - Lookup/Rewrite or Fragment node in case of packet > MTU
242 * Fragmented IPv6 packet
244 * - Error -> Pass to ICMPv6/ICMPv4 relay
245 * - Info -> Pass to IPv6 local
246 * Anything else -> drop
249 (ip60->protocol == IP_PROTOCOL_IP_IN_IP
250 && clib_net_to_host_u16 (ip60->payload_length) > 20))
253 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
254 (ip4_address_t *) & ip40->src_address.
255 as_u32, &map_domain_index0, &error0);
257 else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
258 clib_net_to_host_u16 (ip60->payload_length) >
259 sizeof (icmp46_header_t))
261 icmp46_header_t *icmp = (void *) (ip60 + 1);
262 next0 = (icmp->type == ICMP6_echo_request
264 ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
265 IP6_MAP_NEXT_IP6_ICMP_RELAY;
267 else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
269 next0 = IP6_MAP_NEXT_IP6_REASS;
273 error0 = MAP_ERROR_BAD_PROTOCOL;
/* Same classification for the second packet of the pair. */
276 (ip61->protocol == IP_PROTOCOL_IP_IN_IP
277 && clib_net_to_host_u16 (ip61->payload_length) > 20))
280 ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
281 (ip4_address_t *) & ip41->src_address.
282 as_u32, &map_domain_index1, &error1);
284 else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
285 clib_net_to_host_u16 (ip61->payload_length) >
286 sizeof (icmp46_header_t))
288 icmp46_header_t *icmp = (void *) (ip61 + 1);
289 next1 = (icmp->type == ICMP6_echo_request
291 ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
292 IP6_MAP_NEXT_IP6_ICMP_RELAY;
294 else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
296 next1 = IP6_MAP_NEXT_IP6_REASS;
300 error1 = MAP_ERROR_BAD_PROTOCOL;
305 /* MAP inbound security check */
306 ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
308 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
309 next0 == IP6_MAP_NEXT_IP4_LOOKUP))
/* Inner IPv4 larger than the domain MTU: set up ip_frag metadata. */
313 && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
315 vnet_buffer (p0)->ip_frag.header_offset = 0;
316 vnet_buffer (p0)->ip_frag.flags = 0;
317 vnet_buffer (p0)->ip_frag.next_index =
318 IP4_FRAG_NEXT_IP4_LOOKUP;
319 vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
320 next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
325 ip6_map_ip4_lookup_bypass (p0,
327 IP6_MAP_NEXT_IP4_REWRITE : next0;
329 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
331 map_domain_index0, 1,
338 /* MAP inbound security check */
339 ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
341 if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
342 next1 == IP6_MAP_NEXT_IP4_LOOKUP))
346 && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
348 vnet_buffer (p1)->ip_frag.header_offset = 0;
349 vnet_buffer (p1)->ip_frag.flags = 0;
350 vnet_buffer (p1)->ip_frag.next_index =
351 IP4_FRAG_NEXT_IP4_LOOKUP;
352 vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
353 next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
358 ip6_map_ip4_lookup_bypass (p1,
360 IP6_MAP_NEXT_IP4_REWRITE : next1;
362 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
364 map_domain_index1, 1,
370 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
372 map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
373 tr->map_domain_index = map_domain_index0;
377 if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
379 map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
380 tr->map_domain_index = map_domain_index1;
/* Security-check failure: optionally emit an ICMPv6 admin-prohibited
 * style error instead of a silent drop. */
384 if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
386 /* Set ICMP parameters */
387 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
388 icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
389 ICMP6_destination_unreachable_source_address_failed_policy,
391 next0 = IP6_MAP_NEXT_ICMP;
395 next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
398 if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
400 /* Set ICMP parameters */
401 vlib_buffer_advance (p1, -sizeof (ip6_header_t));
402 icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
403 ICMP6_destination_unreachable_source_address_failed_policy,
405 next1 = IP6_MAP_NEXT_ICMP;
409 next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
/* ip6-local expects the outer IPv6 header back in place. */
413 if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
414 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
415 if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
416 vlib_buffer_advance (p1, -sizeof (ip6_header_t));
418 p0->error = error_node->errors[error0];
419 p1->error = error_node->errors[error1];
420 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
421 n_left_to_next, pi0, pi1, next0,
/* Single loop: same logic as above for one packet at a time. */
426 while (n_left_from > 0 && n_left_to_next > 0)
430 u8 error0 = MAP_ERROR_NONE;
431 map_domain_t *d0 = 0;
435 u32 map_domain_index0 = ~0;
436 u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
438 pi0 = to_next[0] = from[0];
444 p0 = vlib_get_buffer (vm, pi0);
445 ip60 = vlib_buffer_get_current (p0);
446 vlib_buffer_advance (p0, sizeof (ip6_header_t));
447 ip40 = vlib_buffer_get_current (p0);
450 * Encapsulated IPv4 packet
451 * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
452 * - Lookup/Rewrite or Fragment node in case of packet > MTU
453 * Fragmented IPv6 packet
455 * - Error -> Pass to ICMPv6/ICMPv4 relay
456 * - Info -> Pass to IPv6 local
457 * Anything else -> drop
460 (ip60->protocol == IP_PROTOCOL_IP_IN_IP
461 && clib_net_to_host_u16 (ip60->payload_length) > 20))
464 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
465 (ip4_address_t *) & ip40->src_address.
466 as_u32, &map_domain_index0, &error0);
468 else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
469 clib_net_to_host_u16 (ip60->payload_length) >
470 sizeof (icmp46_header_t))
472 icmp46_header_t *icmp = (void *) (ip60 + 1);
473 next0 = (icmp->type == ICMP6_echo_request
475 ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
476 IP6_MAP_NEXT_IP6_ICMP_RELAY;
/* Only reassemble fragments whose inner protocol is IP-in-IP
 * (stricter than the dual loop above). */
478 else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
479 (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
480 IP_PROTOCOL_IP_IN_IP))
482 next0 = IP6_MAP_NEXT_IP6_REASS;
486 error0 = MAP_ERROR_BAD_PROTOCOL;
491 /* MAP inbound security check */
492 ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
494 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
495 next0 == IP6_MAP_NEXT_IP4_LOOKUP))
499 && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
501 vnet_buffer (p0)->ip_frag.header_offset = 0;
502 vnet_buffer (p0)->ip_frag.flags = 0;
503 vnet_buffer (p0)->ip_frag.next_index =
504 IP4_FRAG_NEXT_IP4_LOOKUP;
505 vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
506 next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
511 ip6_map_ip4_lookup_bypass (p0,
513 IP6_MAP_NEXT_IP4_REWRITE : next0;
515 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
517 map_domain_index0, 1,
523 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
525 map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
526 tr->map_domain_index = map_domain_index0;
527 tr->port = (u16) port0;
530 if (mm->icmp6_enabled &&
531 (error0 == MAP_ERROR_DECAP_SEC_CHECK
532 || error0 == MAP_ERROR_NO_DOMAIN))
534 /* Set ICMP parameters */
535 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
536 icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
537 ICMP6_destination_unreachable_source_address_failed_policy,
539 next0 = IP6_MAP_NEXT_ICMP;
543 next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
547 if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
548 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
550 p0->error = error_node->errors[error0];
551 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
552 n_left_to_next, pi0, next0);
554 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
557 return frame->n_vectors;
/*
 * Walk the reassembly context and release every cached IPv6 fragment that
 * is now forwardable (the inner IPv4 header is known and, for non-last
 * fragments, the first bytes of the following fragment are available).
 * For each such fragment the inner IPv4 header is patched so that the
 * IPv6 fragment becomes a valid stand-alone IPv4 fragment (offset, MF
 * flag, length, fragment id derived from the IPv6 id, checksum), and the
 * buffer index is appended to *fragments_ready.
 */
561 static_always_inline void
562 ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
563 map_ip6_reass_t * r, u32 ** fragments_ready,
564 u32 ** fragments_to_drop)
568 ip6_frag_hdr_t *frag0;
/* Inner IPv4 header not yet seen (first fragment missing): nothing to do. */
571 if (!r->ip4_header.ip_version_and_header_length)
574 //The IP header is here, we need to check for packets
575 //that can be forwarded
577 for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
/* Skip empty slots and fragments still waiting for their successor's data. */
579 if (r->fragments[i].pi == ~0 ||
580 ((!r->fragments[i].next_data_len)
581 && (r->fragments[i].next_data_offset != (0xffff)))
584 p0 = vlib_get_buffer (vm, r->fragments[i].pi);
585 ip60 = vlib_buffer_get_current (p0);
586 frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
587 ip40 = (ip4_header_t *) (frag0 + 1);
589 if (ip6_frag_hdr_offset (frag0))
591 //Not first fragment, add the IPv4 header
592 clib_memcpy (ip40, &r->ip4_header, 20);
595 #ifdef MAP_IP6_REASS_COUNT_BYTES
597 clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
600 if (ip6_frag_hdr_more (frag0))
602 //Not last fragment, we copy end of next
603 clib_memcpy (u8_ptr_add (ip60, p0->current_length),
604 r->fragments[i].next_data, 20);
605 p0->current_length += 20;
606 ip60->payload_length = u16_net_add (ip60->payload_length, 20);
/* Rebuild the IPv4 fragmentation fields from the IPv6 fragment header. */
609 if (!ip4_is_fragment (ip40))
611 ip40->fragment_id = frag_id_6to4 (frag0->identification);
612 ip40->flags_and_fragment_offset =
613 clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
/* Inner packet was itself an IPv4 fragment: offsets accumulate. */
617 ip40->flags_and_fragment_offset =
618 clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
619 ip6_frag_hdr_offset (frag0));
622 if (ip6_frag_hdr_more (frag0))
623 ip40->flags_and_fragment_offset |=
624 clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
627 clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
629 ip40->checksum = ip4_header_checksum (ip40);
631 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
633 map_ip6_map_ip6_reass_trace_t *tr =
634 vlib_add_trace (vm, node, p0, sizeof (*tr));
635 tr->offset = ip4_get_fragment_offset (ip40);
636 tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
/* Hand the fragment off and clear its reassembly slot. */
640 vec_add1 (*fragments_ready, r->fragments[i].pi);
641 r->fragments[i].pi = ~0;
642 r->fragments[i].next_data_len = 0;
643 r->fragments[i].next_data_offset = 0;
644 map_main.ip6_reass_buffered_counter--;
646 //TODO: Best solution would be that ip6_map handles extension headers
647 // and ignores atomic fragment. But in the meantime, let's just copy the header.
649 u8 protocol = frag0->next_hdr;
650 memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
651 ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
653 vlib_buffer_advance (p0, sizeof (*frag0));
/* Enqueue a single buffer index to the ip6-map-ip6-reass drop next-node. */
658 map_ip6_drop_pi (u32 pi)
660 vlib_main_t *vm = vlib_get_main ();
661 vlib_node_runtime_t *n =
662 vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
663 vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
/* Enqueue a single buffer index to the ip6-map-ip4-reass drop next-node. */
667 map_ip4_drop_pi (u32 pi)
669 vlib_main_t *vm = vlib_get_main ();
670 vlib_node_runtime_t *n =
671 vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
672 vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
677 * TODO: We should count the number of successfully
678 * transmitted fragment bytes and compare that to the last fragment
679 * offset such that we can free the reassembly structure when all fragments
680 * have been forwarded.
/*
 * ip6-map-ip6-reass node: IPv6 virtual reassembly for MAP-E.  Fragments
 * are cached (not fully reassembled) until enough information is known to
 * patch each one into a self-contained IPv4 fragment; ready fragments are
 * sent back to ip6-map, failures to the drop node.  Reassembly state is
 * protected by map_ip6_reass_lock()/unlock().
 */
683 ip6_map_ip6_reass (vlib_main_t * vm,
684 vlib_node_runtime_t * node, vlib_frame_t * frame)
686 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
687 vlib_node_runtime_t *error_node =
688 vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
689 u32 *fragments_to_drop = NULL;
690 u32 *fragments_ready = NULL;
692 from = vlib_frame_vector_args (frame);
693 n_left_from = frame->n_vectors;
694 next_index = node->cached_next_index;
695 while (n_left_from > 0)
697 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
700 while (n_left_from > 0 && n_left_to_next > 0)
704 u8 error0 = MAP_ERROR_NONE;
706 ip6_frag_hdr_t *frag0;
711 pi0 = to_next[0] = from[0];
717 p0 = vlib_get_buffer (vm, pi0);
718 ip60 = vlib_buffer_get_current (p0);
719 frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
/* Byte offset of this fragment (mask out the M flag / reserved bits). */
721 clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
723 clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
/* 0xffff marks "last fragment: no successor data needed". */
725 ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
727 //FIXME: Support other extension headers, maybe
729 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
731 map_ip6_map_ip6_reass_trace_t *tr =
732 vlib_add_trace (vm, node, p0, sizeof (*tr));
734 tr->frag_len = frag_len;
738 map_ip6_reass_lock ();
740 map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
741 frag0->identification, frag0->next_hdr,
743 //FIXME: Use better error codes
744 if (PREDICT_FALSE (!r))
746 // Could not create a caching entry
747 error0 = MAP_ERROR_FRAGMENT_MEMORY;
749 else if (PREDICT_FALSE ((frag_len <= 20 &&
750 (ip6_frag_hdr_more (frag0) || (!offset)))))
752 //Very small fragment are restricted to the last one and
753 //can't be the first one
754 error0 = MAP_ERROR_FRAGMENT_MALFORMED;
757 if (map_ip6_reass_add_fragment
758 (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
760 map_ip6_reass_free (r, &fragments_to_drop);
761 error0 = MAP_ERROR_FRAGMENT_MEMORY;
765 #ifdef MAP_IP6_REASS_COUNT_BYTES
766 if (!ip6_frag_hdr_more (frag0))
767 r->expected_total = offset + frag_len;
/* Flush any fragments that became forwardable after this addition. */
769 ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
771 #ifdef MAP_IP6_REASS_COUNT_BYTES
772 if (r->forwarded >= r->expected_total)
773 map_ip6_reass_free (r, &fragments_to_drop);
776 map_ip6_reass_unlock ();
778 if (error0 == MAP_ERROR_NONE)
788 //All data from that packet was copied no need to keep it, but this is not an error
789 p0->error = error_node->errors[MAP_ERROR_NONE];
790 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
791 to_next, n_left_to_next,
793 IP6_MAP_IP6_REASS_NEXT_DROP);
798 p0->error = error_node->errors[error0];
799 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
801 IP6_MAP_IP6_REASS_NEXT_DROP);
804 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* Batch-dispatch the fragments collected above. */
807 map_send_all_to_node (vm, fragments_ready, node,
808 &error_node->errors[MAP_ERROR_NONE],
809 IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
810 map_send_all_to_node (vm, fragments_to_drop, node,
811 &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
812 IP6_MAP_IP6_REASS_NEXT_DROP);
814 vec_free (fragments_to_drop);
815 vec_free (fragments_ready);
816 return frame->n_vectors;
/*
 * ip6-map-ip4-reass node: IPv4 virtual reassembly used for the inbound
 * security check.  Non-first fragments carry no L4 port, so they are
 * cached until the first fragment reveals the port; cached fragments are
 * then looped back through this node ("fragments_to_loopback") and
 * security-checked against the outer IPv6 source.  Reassembly state is
 * protected by map_ip4_reass_lock()/unlock().
 */
823 ip6_map_ip4_reass (vlib_main_t * vm,
824 vlib_node_runtime_t * node, vlib_frame_t * frame)
826 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
827 vlib_node_runtime_t *error_node =
828 vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
829 map_main_t *mm = &map_main;
830 vlib_combined_counter_main_t *cm = mm->domain_counters;
831 u32 cpu_index = os_get_cpu_number ();
832 u32 *fragments_to_drop = NULL;
833 u32 *fragments_to_loopback = NULL;
835 from = vlib_frame_vector_args (frame);
836 n_left_from = frame->n_vectors;
837 next_index = node->cached_next_index;
838 while (n_left_from > 0)
840 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
843 while (n_left_from > 0 && n_left_to_next > 0)
847 u8 error0 = MAP_ERROR_NONE;
852 u32 map_domain_index0 = ~0;
853 u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
856 pi0 = to_next[0] = from[0];
862 p0 = vlib_get_buffer (vm, pi0);
863 ip40 = vlib_buffer_get_current (p0);
/* The outer IPv6 header sits immediately before the inner IPv4 header. */
864 ip60 = ((ip6_header_t *) ip40) - 1;
867 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
868 (ip4_address_t *) & ip40->src_address.as_u32,
869 &map_domain_index0, &error0);
871 map_ip4_reass_lock ();
872 //This node only deals with fragmented ip4
874 map_ip4_reass_get (ip40->src_address.as_u32,
875 ip40->dst_address.as_u32,
876 ip40->fragment_id, ip40->protocol,
878 if (PREDICT_FALSE (!r))
880 // Could not create a caching entry
881 error0 = MAP_ERROR_FRAGMENT_MEMORY;
883 else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
885 // This is a fragment
888 // We know the port already
891 else if (map_ip4_reass_add_fragment (r, pi0))
893 // Not enough space for caching
894 error0 = MAP_ERROR_FRAGMENT_MEMORY;
895 map_ip4_reass_free (r, &fragments_to_drop);
/* First fragment: extract the source L4 port. */
904 ip4_get_port (ip40, MAP_SENDER, p0->current_length)) < 0)
906 // Could not find port from first fragment. Stop reassembling.
907 error0 = MAP_ERROR_BAD_PROTOCOL;
909 map_ip4_reass_free (r, &fragments_to_drop);
913 // Found port. Remember it and loopback saved fragments
915 map_ip4_reass_get_fragments (r, &fragments_to_loopback);
918 #ifdef MAP_IP4_REASS_COUNT_BYTES
921 r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
922 if (!ip4_get_fragment_more (ip40))
924 ip4_get_fragment_offset (ip40) * 8 +
925 clib_host_to_net_u16 (ip40->length) - 20;
926 if (r->forwarded >= r->expected_total)
927 map_ip4_reass_free (r, &fragments_to_drop);
931 map_ip4_reass_unlock ();
/* Port known: run the MAP anti-spoof check against the IPv6 source. */
933 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
935 ip6_map_sec_check (d0, port0, ip40,
936 ip60) ? MAP_ERROR_NONE :
937 MAP_ERROR_DECAP_SEC_CHECK;
940 (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
941 && error0 == MAP_ERROR_NONE && !cached))
943 vnet_buffer (p0)->ip_frag.header_offset = 0;
944 vnet_buffer (p0)->ip_frag.flags = 0;
945 vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
946 vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
947 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
950 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
952 map_ip6_map_ip4_reass_trace_t *tr =
953 vlib_add_trace (vm, node, p0, sizeof (*tr));
954 tr->map_domain_index = map_domain_index0;
967 if (error0 == MAP_ERROR_NONE)
968 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
969 cpu_index, map_domain_index0,
971 clib_net_to_host_u16 (ip40->
975 MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
976 p0->error = error_node->errors[error0];
977 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
978 n_left_to_next, pi0, next0);
981 //Loopback when we reach the end of the input vector
982 if (n_left_from == 0 && vec_len (fragments_to_loopback))
984 from = vlib_frame_vector_args (frame);
985 u32 len = vec_len (fragments_to_loopback);
986 if (len <= VLIB_FRAME_SIZE)
988 clib_memcpy (from, fragments_to_loopback,
991 vec_reset_length (fragments_to_loopback);
/* More loopback fragments than a frame holds: process one frame's
 * worth now, keep the rest in the vector. */
996 fragments_to_loopback + (len -
998 sizeof (u32) * VLIB_FRAME_SIZE);
999 n_left_from = VLIB_FRAME_SIZE;
1000 _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
1004 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1006 map_send_all_to_node (vm, fragments_to_drop, node,
1007 &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
1008 IP6_MAP_IP4_REASS_NEXT_DROP);
1010 vec_free (fragments_to_drop);
1011 vec_free (fragments_to_loopback);
1012 return frame->n_vectors;
/*
 * ip6-map-icmp-relay node: translate ICMPv6 errors generated inside the
 * IPv6 tunnel into ICMPv4 errors addressed to the original IPv4 sender,
 * following RFC 2473 section 8.3.  The relayed packet reuses the original
 * buffer: the outer IPv6 + ICMPv6 + IPv6 headers are collapsed into a
 * fresh IPv4 + ICMPv4 header pair in place.
 */
1019 ip6_map_icmp_relay (vlib_main_t * vm,
1020 vlib_node_runtime_t * node, vlib_frame_t * frame)
1022 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1023 vlib_node_runtime_t *error_node =
1024 vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
1025 map_main_t *mm = &map_main;
1026 u32 cpu_index = os_get_cpu_number ();
1027 u16 *fragment_ids, *fid;
1029 from = vlib_frame_vector_args (frame);
1030 n_left_from = frame->n_vectors;
1031 next_index = node->cached_next_index;
1033 /* Get random fragment IDs for replies. */
1034 fid = fragment_ids =
1035 clib_random_buffer_get_data (&vm->random_buffer,
1036 n_left_from * sizeof (fragment_ids[0]));
1038 while (n_left_from > 0)
1040 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1043 while (n_left_from > 0 && n_left_to_next > 0)
1047 u8 error0 = MAP_ERROR_NONE;
1049 u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
1052 pi0 = to_next[0] = from[0];
1056 n_left_to_next -= 1;
1058 p0 = vlib_get_buffer (vm, pi0);
1059 ip60 = vlib_buffer_get_current (p0);
1060 u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
1067 * Original IPv4 header / packet
1071 * Original IPv4 header / packet
1074 /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
1077 error0 = MAP_ERROR_ICMP_RELAY;
1081 icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
/* Skip the 8-byte ICMPv6 header (two icmp46_header_t) to reach the
 * embedded (inner) IPv6 header of the offending packet. */
1082 ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
1084 if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
1086 error0 = MAP_ERROR_ICMP_RELAY;
1090 ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
1091 vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
1092 ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
1093 icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
1096 * Relay according to RFC2473, section 8.3
1098 switch (icmp60->type)
1100 case ICMP6_destination_unreachable:
1101 case ICMP6_time_exceeded:
1102 case ICMP6_parameter_problem:
1103 /* Type 3 - destination unreachable, Code 1 - host unreachable */
1104 new_icmp40->type = ICMP4_destination_unreachable;
1106 ICMP4_destination_unreachable_destination_unreachable_host;
1109 case ICMP6_packet_too_big:
1110 /* Type 3 - destination unreachable, Code 4 - packet too big */
1111 /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
1112 mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
/* Only relay "packet too big" when the inner IPv4 packet had DF set;
 * otherwise fragmentation would have handled it. */
1117 flags_and_fragment_offset &
1118 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
1120 error0 = MAP_ERROR_ICMP_RELAY;
1124 new_icmp40->type = ICMP4_destination_unreachable;
1126 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
1127 *((u32 *) (new_icmp40 + 1)) =
1128 clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
1132 error0 = MAP_ERROR_ICMP_RELAY;
1137 * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
1139 new_ip40->ip_version_and_header_length = 0x45;
1141 u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
1142 new_ip40->length = clib_host_to_net_u16 (nlen);
1143 new_ip40->fragment_id = fid[0];
1146 new_ip40->protocol = IP_PROTOCOL_ICMP;
1147 new_ip40->src_address = mm->icmp4_src_address;
1148 new_ip40->dst_address = inner_ip40->src_address;
1149 new_ip40->checksum = ip4_header_checksum (new_ip40);
/* Recompute the ICMPv4 checksum over the (possibly truncated) payload. */
1151 new_icmp40->checksum = 0;
1152 ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
1153 new_icmp40->checksum = ~ip_csum_fold (sum);
1155 vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1);
1158 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
1160 map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
1161 tr->map_domain_index = 0;
1166 (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
1167 p0->error = error_node->errors[error0];
1168 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1169 n_left_to_next, pi0, next0);
1171 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1174 return frame->n_vectors;
/* Human-readable error strings, generated from the MAP error macro list;
 * indexed by the MAP_ERROR_* enum and shared by all nodes below. */
1178 static char *map_error_strings[] = {
1179 #define _(sym,string) string,
/* Graph-node registration for the main ip6-map decapsulation node. */
1185 VLIB_REGISTER_NODE(ip6_map_node) = {
1186 .function = ip6_map,
1188 .vector_size = sizeof(u32),
1189 .format_trace = format_map_trace,
1190 .type = VLIB_NODE_TYPE_INTERNAL,
1192 .n_errors = MAP_N_ERROR,
1193 .error_strings = map_error_strings,
1195 .n_next_nodes = IP6_MAP_N_NEXT,
1197 [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1198 #ifdef MAP_SKIP_IP6_LOOKUP
1199 [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-rewrite-transit",
1201 [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
1202 [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
1203 [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
1204 [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
1205 [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
1206 [IP6_MAP_NEXT_DROP] = "error-drop",
1207 [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
/* Graph-node registration for the IPv6 virtual-reassembly node. */
1213 VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
1214 .function = ip6_map_ip6_reass,
1215 .name = "ip6-map-ip6-reass",
1216 .vector_size = sizeof(u32),
1217 .format_trace = format_ip6_map_ip6_reass_trace,
1218 .type = VLIB_NODE_TYPE_INTERNAL,
1219 .n_errors = MAP_N_ERROR,
1220 .error_strings = map_error_strings,
1221 .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
1223 [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
1224 [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
/* Graph-node registration for the IPv4 virtual-reassembly node. */
1230 VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
1231 .function = ip6_map_ip4_reass,
1232 .name = "ip6-map-ip4-reass",
1233 .vector_size = sizeof(u32),
1234 .format_trace = format_ip6_map_ip4_reass_trace,
1235 .type = VLIB_NODE_TYPE_INTERNAL,
1236 .n_errors = MAP_N_ERROR,
1237 .error_strings = map_error_strings,
1238 .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
1240 [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
1241 [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
1242 [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
/* Graph-node registration for the ICMPv6 -> ICMPv4 relay node. */
1248 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = {
1249 .function = ip6_map_icmp_relay,
1250 .name = "ip6-map-icmp-relay",
1251 .vector_size = sizeof(u32),
1252 .format_trace = format_map_trace, //FIXME
1253 .type = VLIB_NODE_TYPE_INTERNAL,
1254 .n_errors = MAP_N_ERROR,
1255 .error_strings = map_error_strings,
1256 .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
1258 [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
1259 [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
1265 * fd.io coding-style-patch-verification: ON
1268 * eval: (c-set-style "gnu")