2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
19 #include <vnet/ip/ip6_to_ip4.h>
20 #include <vnet/ip/reass/ip4_sv_reass.h>
/* NOTE(review): this excerpt is elided — the embedded original line numbers
 * show gaps, and the enum openers/closers are missing from view.  Code lines
 * are kept byte-identical; only comments are added. */
/* Next-node indices dispatched to by the main ip6-map node. */
24 IP6_MAP_NEXT_IP4_LOOKUP,
25 #ifdef MAP_SKIP_IP6_LOOKUP
26 IP6_MAP_NEXT_IP4_REWRITE,	/* direct rewrite via pre-resolved adjacency (see ip6_map_ip4_lookup_bypass) */
28 IP6_MAP_NEXT_IP4_REASS,	/* inner-IPv4 shallow virtual reassembly */
29 IP6_MAP_NEXT_IP4_FRAGMENT,	/* inner packet exceeds domain MTU */
30 IP6_MAP_NEXT_IP6_ICMP_RELAY,
31 IP6_MAP_NEXT_IP6_LOCAL,
/* Next-node indices of the IPv6 reassembly handoff path. */
37 enum ip6_map_ip6_reass_next_e
39 IP6_MAP_IP6_REASS_NEXT_IP6_MAP,
40 IP6_MAP_IP6_REASS_NEXT_DROP,
41 IP6_MAP_IP6_REASS_N_NEXT,
/* Next-node indices of ip6-map-post-ip4-reass (runs after sv-reassembly). */
44 enum ip6_map_post_ip4_reass_next_e
46 IP6_MAP_POST_IP4_REASS_NEXT_IP4_LOOKUP,
47 IP6_MAP_POST_IP4_REASS_NEXT_IP4_FRAGMENT,
48 IP6_MAP_POST_IP4_REASS_NEXT_DROP,
49 IP6_MAP_POST_IP4_REASS_N_NEXT,
/* Next-node indices of the ICMPv6 -> ICMPv4 relay node. */
52 enum ip6_icmp_relay_next_e
54 IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,
55 IP6_ICMP_RELAY_NEXT_DROP,
56 IP6_ICMP_RELAY_N_NEXT,
/* Forward declarations: these registrations are referenced (e.g. for
 * .index in error-node lookups) before their definitions below. */
59 vlib_node_registration_t ip6_map_post_ip4_reass_node;
60 vlib_node_registration_t ip6_map_ip6_reass_node;
61 static vlib_node_registration_t ip6_map_icmp_relay_node;
/* Tail of the post-ip4-reass trace record; its members are elided from this
 * view except for what the formatter below reads. */
68 } map_ip6_map_ip4_reass_trace_t;
/* Packet-trace formatter for ip6-map-post-ip4-reass: prints the MAP domain
 * index, the L4 source port (network order in the record) and whether the
 * buffer was cached or forwarded.  Declaration/brace lines are elided. */
71 format_ip6_map_post_ip4_reass_trace (u8 * s, va_list * args)
73 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
74 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
75 map_ip6_map_ip4_reass_trace_t *t =
76 va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
77 return format (s, "MAP domain index: %d L4 port: %u Status: %s",
78 t->map_domain_index, clib_net_to_host_u16 (t->port),
79 t->cached ? "cached" : "forwarded");
/* Tail of the IPv6-reassembly trace record; members elided from this view. */
87 } map_ip6_map_ip6_reass_trace_t;
/* Packet-trace formatter for the IPv6 reassembly path: prints fragment
 * offset, fragment length and in/out direction.  Brace lines elided. */
90 format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
92 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
93 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
94 map_ip6_map_ip6_reass_trace_t *t =
95 va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
96 return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
97 t->frag_len, t->out ? "out" : "in");
/* MAP-E inbound anti-spoof check: compute the IPv6 source address the MAP
 * rules of domain 'd' would derive from the inner IPv4 source address and
 * L4 port, and compare both 64-bit halves against the actual outer IPv6
 * source.  'port' arrives in network byte order.  The return statements
 * and the ip6 parameter line are elided from this excerpt — presumably it
 * returns true on match, false otherwise; confirm against full source. */
103 static_always_inline bool
104 ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
107 u16 sp4 = clib_net_to_host_u16 (port);
108 u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
109 u64 sal6 = map_get_pfx (d, sa4, sp4);	/* expected IPv6 prefix half */
110 u64 sar6 = map_get_sfx (d, sa4, sp4);	/* expected IPv6 suffix half */
113 (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
114 || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
/* Run the inbound security check on a decapsulated packet and set *next /
 * *error accordingly.  For non-fragmented inner IPv4 with a shared-address
 * domain (psid_length > 0), the port is extracted and ip6_map_sec_check()
 * decides between MAP_ERROR_NONE and MAP_ERROR_DECAP_SEC_CHECK.  Inner
 * fragments are diverted to shallow virtual reassembly when
 * sec_check_frag is enabled.  Several branch/brace lines are elided. */
119 static_always_inline void
120 ip6_map_security_check (map_domain_t * d, vlib_buffer_t * b0,
121 ip4_header_t * ip4, ip6_header_t * ip6, u32 * next,
124 map_main_t *mm = &map_main;
125 if (d->ea_bits_len || d->rules)
127 if (d->psid_length > 0)
129 if (!ip4_is_fragment (ip4))
131 u16 port = ip4_get_port (ip4, 1);
136 ip6_map_sec_check (d, port, ip4,
137 ip6) ? MAP_ERROR_NONE :
138 MAP_ERROR_DECAP_SEC_CHECK;
/* Port extraction failed — inner protocol carries no usable L4 port. */
142 *error = MAP_ERROR_BAD_PROTOCOL;
/* Fragmented inner IPv4: only checkable after virtual reassembly. */
147 if (mm->sec_check_frag)
149 vnet_buffer (b0)->ip.reass.next_index =
150 map_main.ip4_sv_reass_custom_next_index;
151 *next = IP6_MAP_NEXT_IP4_REASS;
/* If a pre-resolved IPv4 FIB entry exists (MAP_SKIP_IP6_LOOKUP build),
 * stamp its DPO index as the TX adjacency so ip4-lookup can be skipped.
 * The non-bypass path and return statements are elided from this view. */
158 static_always_inline bool
159 ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
161 #ifdef MAP_SKIP_IP6_LOOKUP
162 if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei)
164 vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
165 pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index;
/* Main ip6-map node function: decapsulates MAP-E (IPv4-in-IPv6) packets.
 * Classic VPP dual-loop + single-loop structure: the first inner loop
 * handles two buffers per iteration with prefetch, the second drains the
 * remainder one at a time.  NOTE(review): many interior lines (braces,
 * conditions, trailing arguments) are elided from this excerpt; code lines
 * are kept byte-identical and only comments are added. */
176 ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
178 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
179 vlib_node_runtime_t *error_node =
180 vlib_node_get_runtime (vm, ip6_map_node.index);
181 map_main_t *mm = &map_main;
182 vlib_combined_counter_main_t *cm = mm->domain_counters;
183 u32 thread_index = vm->thread_index;
185 from = vlib_frame_vector_args (frame);
186 n_left_from = frame->n_vectors;
187 next_index = node->cached_next_index;
188 while (n_left_from > 0)
190 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* Dual loop: two packets per iteration while at least four remain
 * (so the next pair can be prefetched). */
193 while (n_left_from >= 4 && n_left_to_next >= 2)
196 vlib_buffer_t *p0, *p1;
197 u8 error0 = MAP_ERROR_NONE;
198 u8 error1 = MAP_ERROR_NONE;
199 map_domain_t *d0 = 0, *d1 = 0;
200 ip4_header_t *ip40, *ip41;
201 ip6_header_t *ip60, *ip61;
202 u16 port0 = 0, port1 = 0;
203 u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
204 u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
205 u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
207 /* Prefetch next iteration. */
209 vlib_buffer_t *p2, *p3;
211 p2 = vlib_get_buffer (vm, from[2]);
212 p3 = vlib_get_buffer (vm, from[3]);
214 vlib_prefetch_buffer_header (p2, LOAD);
215 vlib_prefetch_buffer_header (p3, LOAD);
217 /* IPv6 + IPv4 header + 8 bytes of ULP */
218 CLIB_PREFETCH (p2->data, 68, LOAD);
219 CLIB_PREFETCH (p3->data, 68, LOAD);
222 pi0 = to_next[0] = from[0];
223 pi1 = to_next[1] = from[1];
229 p0 = vlib_get_buffer (vm, pi0);
230 p1 = vlib_get_buffer (vm, pi1);
/* Strip the outer IPv6 header; current then points at the inner IPv4. */
231 ip60 = vlib_buffer_get_current (p0);
232 ip61 = vlib_buffer_get_current (p1);
233 vlib_buffer_advance (p0, sizeof (ip6_header_t));
234 vlib_buffer_advance (p1, sizeof (ip6_header_t));
235 ip40 = vlib_buffer_get_current (p0);
236 ip41 = vlib_buffer_get_current (p1);
239 * Encapsulated IPv4 packet
240 * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
241 * - Lookup/Rewrite or Fragment node in case of packet > MTU
242 * Fragmented IPv6 packet
244 * - Error -> Pass to ICMPv6/ICMPv4 relay
245 * - Info -> Pass to IPv6 local
246 * Anything else -> drop
/* Classify packet 0 by outer IPv6 protocol (condition head elided). */
249 (ip60->protocol == IP_PROTOCOL_IP_IN_IP
250 && clib_net_to_host_u16 (ip60->payload_length) > 20)
253 ip4_map_get_domain ((ip4_address_t *) & ip40->
254 src_address.as_u32, &map_domain_index0,
257 else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
258 clib_net_to_host_u16 (ip60->payload_length) >
259 sizeof (icmp46_header_t))
261 icmp46_header_t *icmp = (void *) (ip60 + 1);
/* Echo request/reply handled locally; other ICMPv6 relayed to ICMPv4. */
262 next0 = (icmp->type == ICMP6_echo_request
264 ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
265 IP6_MAP_NEXT_IP6_ICMP_RELAY;
267 else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
269 error0 = MAP_ERROR_FRAGMENTED;
273 error0 = MAP_ERROR_BAD_PROTOCOL;
/* Same classification for packet 1. */
276 (ip61->protocol == IP_PROTOCOL_IP_IN_IP
277 && clib_net_to_host_u16 (ip61->payload_length) > 20)
280 ip4_map_get_domain ((ip4_address_t *) & ip41->
281 src_address.as_u32, &map_domain_index1,
284 else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
285 clib_net_to_host_u16 (ip61->payload_length) >
286 sizeof (icmp46_header_t))
288 icmp46_header_t *icmp = (void *) (ip61 + 1);
289 next1 = (icmp->type == ICMP6_echo_request
291 ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
292 IP6_MAP_NEXT_IP6_ICMP_RELAY;
294 else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
296 error1 = MAP_ERROR_FRAGMENTED;
300 error1 = MAP_ERROR_BAD_PROTOCOL;
305 /* MAP inbound security check */
306 ip6_map_security_check (d0, p0, ip40, ip60, &next0, &error0);
/* Packet 0 passed the check and heads for ip4-lookup: fragment it if
 * the inner length exceeds the domain MTU (condition head elided). */
308 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
309 next0 == IP6_MAP_NEXT_IP4_LOOKUP))
313 && (clib_host_to_net_u16 (ip40->length) > d0->mtu))
315 vnet_buffer (p0)->ip_frag.flags = 0;
316 vnet_buffer (p0)->ip_frag.next_index =
317 IP_FRAG_NEXT_IP4_LOOKUP;
318 vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
319 next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
/* Otherwise try the pre-resolved-adjacency bypass. */
324 ip6_map_ip4_lookup_bypass (p0,
326 IP6_MAP_NEXT_IP4_REWRITE : next0;
328 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
330 map_domain_index0, 1,
337 /* MAP inbound security check */
338 ip6_map_security_check (d1, p1, ip41, ip61, &next1, &error1);
340 if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
341 next1 == IP6_MAP_NEXT_IP4_LOOKUP))
345 && (clib_host_to_net_u16 (ip41->length) > d1->mtu))
347 vnet_buffer (p1)->ip_frag.flags = 0;
348 vnet_buffer (p1)->ip_frag.next_index =
349 IP_FRAG_NEXT_IP4_LOOKUP;
350 vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
351 next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
356 ip6_map_ip4_lookup_bypass (p1,
358 IP6_MAP_NEXT_IP4_REWRITE : next1;
360 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
362 map_domain_index1, 1,
368 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
370 map_add_trace (vm, node, p0, map_domain_index0, port0);
373 if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
375 map_add_trace (vm, node, p1, map_domain_index1, port1);
/* Security-check failures generate ICMPv6 admin-prohibited errors when
 * icmp6_enabled; first restore the outer IPv6 header. */
378 if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
380 /* Set ICMP parameters */
381 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
382 icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
383 ICMP6_destination_unreachable_source_address_failed_policy,
385 next0 = IP6_MAP_NEXT_ICMP;
389 next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
392 if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
394 /* Set ICMP parameters */
395 vlib_buffer_advance (p1, -sizeof (ip6_header_t));
396 icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
397 ICMP6_destination_unreachable_source_address_failed_policy,
399 next1 = IP6_MAP_NEXT_ICMP;
403 next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
/* ip6-local expects the buffer to start at the IPv6 header again. */
407 if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
408 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
409 if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
410 vlib_buffer_advance (p1, -sizeof (ip6_header_t));
412 p0->error = error_node->errors[error0];
413 p1->error = error_node->errors[error1];
414 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
415 n_left_to_next, pi0, pi1, next0,
/* Single loop: same processing as above for one buffer at a time. */
420 while (n_left_from > 0 && n_left_to_next > 0)
424 u8 error0 = MAP_ERROR_NONE;
425 map_domain_t *d0 = 0;
429 u32 map_domain_index0 = ~0;
430 u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
432 pi0 = to_next[0] = from[0];
438 p0 = vlib_get_buffer (vm, pi0);
439 ip60 = vlib_buffer_get_current (p0);
440 vlib_buffer_advance (p0, sizeof (ip6_header_t));
441 ip40 = vlib_buffer_get_current (p0);
444 * Encapsulated IPv4 packet
445 * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
446 * - Lookup/Rewrite or Fragment node in case of packet > MTU
447 * Fragmented IPv6 packet
449 * - Error -> Pass to ICMPv6/ICMPv4 relay
450 * - Info -> Pass to IPv6 local
451 * Anything else -> drop
454 (ip60->protocol == IP_PROTOCOL_IP_IN_IP
455 && clib_net_to_host_u16 (ip60->payload_length) > 20)
458 ip4_map_get_domain ((ip4_address_t *) & ip40->
459 src_address.as_u32, &map_domain_index0,
462 else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
463 clib_net_to_host_u16 (ip60->payload_length) >
464 sizeof (icmp46_header_t))
466 icmp46_header_t *icmp = (void *) (ip60 + 1);
467 next0 = (icmp->type == ICMP6_echo_request
469 ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
470 IP6_MAP_NEXT_IP6_ICMP_RELAY;
/* Unlike the dual loop, only IPv6 fragments whose next header is
 * IPv4-in-IPv6 are flagged as MAP_ERROR_FRAGMENTED here. */
472 else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
473 (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
474 IP_PROTOCOL_IP_IN_IP))
476 error0 = MAP_ERROR_FRAGMENTED;
/* Unrecognized protocol: hand the packet on to the next feature on
 * the arc rather than dropping it. */
480 /* XXX: Move get_domain to ip6_get_domain lookup on source */
481 //error0 = MAP_ERROR_BAD_PROTOCOL;
482 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
483 vnet_feature_next (&next0, p0);
488 /* MAP inbound security check */
489 ip6_map_security_check (d0, p0, ip40, ip60, &next0, &error0);
491 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
492 next0 == IP6_MAP_NEXT_IP4_LOOKUP))
496 && (clib_host_to_net_u16 (ip40->length) > d0->mtu))
498 vnet_buffer (p0)->ip_frag.flags = 0;
499 vnet_buffer (p0)->ip_frag.next_index =
500 IP_FRAG_NEXT_IP4_LOOKUP;
501 vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
502 next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
507 ip6_map_ip4_lookup_bypass (p0,
509 IP6_MAP_NEXT_IP4_REWRITE : next0;
511 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
513 map_domain_index0, 1,
519 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
521 map_add_trace (vm, node, p0, map_domain_index0, port0);
/* Single loop also relays MAP_ERROR_NO_DOMAIN via ICMPv6. */
524 if (mm->icmp6_enabled &&
525 (error0 == MAP_ERROR_DECAP_SEC_CHECK
526 || error0 == MAP_ERROR_NO_DOMAIN))
528 /* Set ICMP parameters */
529 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
530 icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
531 ICMP6_destination_unreachable_source_address_failed_policy,
533 next0 = IP6_MAP_NEXT_ICMP;
537 next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
541 if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
542 vlib_buffer_advance (p0, -sizeof (ip6_header_t));
544 p0->error = error_node->errors[error0];
545 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
546 n_left_to_next, pi0, next0);
548 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
551 return frame->n_vectors;
/* Drop helper: enqueue buffer index 'pi' to the IPv6-reass drop next node.
 * Declaration/brace lines are elided from this excerpt. */
556 map_ip6_drop_pi (u32 pi)
558 vlib_main_t *vm = vlib_get_main ();
559 vlib_node_runtime_t *n =
560 vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
561 vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
565 * ip6_map_post_ip4_reass
/* Node function run after IPv4 shallow virtual reassembly: now that the
 * L4 source port is known (vnet_buffer ip.reass.l4_src_port), redo the
 * MAP security check, fragment if over the domain MTU, and forward to
 * ip4-lookup.  NOTE(review): interior lines are elided; code lines kept
 * byte-identical, comments only added. */
568 ip6_map_post_ip4_reass (vlib_main_t * vm,
569 vlib_node_runtime_t * node, vlib_frame_t * frame)
571 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
572 vlib_node_runtime_t *error_node =
573 vlib_node_get_runtime (vm, ip6_map_post_ip4_reass_node.index);
574 map_main_t *mm = &map_main;
575 vlib_combined_counter_main_t *cm = mm->domain_counters;
576 u32 thread_index = vm->thread_index;
578 from = vlib_frame_vector_args (frame);
579 n_left_from = frame->n_vectors;
580 next_index = node->cached_next_index;
581 while (n_left_from > 0)
583 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
586 while (n_left_from > 0 && n_left_to_next > 0)
590 u8 error0 = MAP_ERROR_NONE;
595 u32 map_domain_index0 = ~0;
596 u32 next0 = IP6_MAP_POST_IP4_REASS_NEXT_IP4_LOOKUP;
598 pi0 = to_next[0] = from[0];
604 p0 = vlib_get_buffer (vm, pi0);
/* Buffer currently points at the inner IPv4; the outer IPv6 header
 * immediately precedes it in the buffer. */
605 ip40 = vlib_buffer_get_current (p0);
606 ip60 = ((ip6_header_t *) ip40) - 1;
609 ip4_map_get_domain ((ip4_address_t *) & ip40->src_address.as_u32,
610 &map_domain_index0, &error0);
/* Port recovered by shallow virtual reassembly. */
612 port0 = vnet_buffer (p0)->ip.reass.l4_src_port;
614 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
616 ip6_map_sec_check (d0, port0, ip40,
617 ip60) ? MAP_ERROR_NONE :
618 MAP_ERROR_DECAP_SEC_CHECK;
/* Fragment if the inner packet exceeds the domain MTU. */
621 (error0 == MAP_ERROR_NONE &&
622 d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu))
624 vnet_buffer (p0)->ip_frag.flags = 0;
625 vnet_buffer (p0)->ip_frag.next_index = IP_FRAG_NEXT_IP4_LOOKUP;
626 vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
627 next0 = IP6_MAP_POST_IP4_REASS_NEXT_IP4_FRAGMENT;
630 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
632 map_ip6_map_ip4_reass_trace_t *tr =
633 vlib_add_trace (vm, node, p0, sizeof (*tr));
634 tr->map_domain_index = map_domain_index0;
638 if (error0 == MAP_ERROR_NONE)
639 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
641 map_domain_index0, 1,
646 MAP_ERROR_NONE) ? next0 : IP6_MAP_POST_IP4_REASS_NEXT_DROP;
647 p0->error = error_node->errors[error0];
648 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
649 n_left_to_next, pi0, next0);
652 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
654 return frame->n_vectors;
/* ICMPv6 -> ICMPv4 relay node: rewrites selected ICMPv6 errors about
 * tunneled IPv4 packets into ICMPv4 errors addressed to the original IPv4
 * source (RFC 2473 section 8.3 relaying).  NOTE(review): interior lines
 * are elided; code lines kept byte-identical, comments only added. */
661 ip6_map_icmp_relay (vlib_main_t * vm,
662 vlib_node_runtime_t * node, vlib_frame_t * frame)
664 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
665 vlib_node_runtime_t *error_node =
666 vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
667 map_main_t *mm = &map_main;
668 u32 thread_index = vm->thread_index;
669 u16 *fragment_ids, *fid;
671 from = vlib_frame_vector_args (frame);
672 n_left_from = frame->n_vectors;
673 next_index = node->cached_next_index;
675 /* Get random fragment IDs for replies. */
677 clib_random_buffer_get_data (&vm->random_buffer,
678 n_left_from * sizeof (fragment_ids[0]));
680 while (n_left_from > 0)
682 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
685 while (n_left_from > 0 && n_left_to_next > 0)
689 u8 error0 = MAP_ERROR_NONE;
691 u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
694 pi0 = to_next[0] = from[0];
700 p0 = vlib_get_buffer (vm, pi0);
701 ip60 = vlib_buffer_get_current (p0);
702 u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
709 * Original IPv4 header / packet
713 * Original IPv4 header / packet
716 /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
719 error0 = MAP_ERROR_ICMP_RELAY;
/* Locate the embedded (inner) IPv6 header inside the ICMPv6 payload. */
723 icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
724 ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
/* Only errors about IPv4-in-IPv6 tunneled packets are relayable. */
726 if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
728 error0 = MAP_ERROR_ICMP_RELAY;
732 ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
733 vlib_buffer_advance (p0, 60);	/* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
734 ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
735 icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
738 * Relay according to RFC2473, section 8.3
740 switch (icmp60->type)
742 case ICMP6_destination_unreachable:
743 case ICMP6_time_exceeded:
744 case ICMP6_parameter_problem:
745 /* Type 3 - destination unreachable, Code 1 - host unreachable */
746 new_icmp40->type = ICMP4_destination_unreachable;
748 ICMP4_destination_unreachable_destination_unreachable_host;
751 case ICMP6_packet_too_big:
752 /* Type 3 - destination unreachable, Code 4 - packet too big */
753 /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
754 mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
/* PTB is only meaningful when the inner IPv4 packet had DF set
 * (condition head elided in this excerpt). */
758 (inner_ip40->flags_and_fragment_offset &
759 clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))
761 error0 = MAP_ERROR_ICMP_RELAY;
765 new_icmp40->type = ICMP4_destination_unreachable;
767 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
/* Clamp the advertised MTU to the IPv6 minimum of 1280. */
768 *((u32 *) (new_icmp40 + 1)) =
769 clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
/* Any other ICMPv6 type is not relayable. */
773 error0 = MAP_ERROR_ICMP_RELAY;
778 * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
/* Build the new outer IPv4 header for the relayed ICMPv4 error. */
780 new_ip40->ip_version_and_header_length = 0x45;
782 u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
783 new_ip40->length = clib_host_to_net_u16 (nlen);
784 new_ip40->fragment_id = fid[0];
787 new_ip40->protocol = IP_PROTOCOL_ICMP;
788 new_ip40->src_address = mm->icmp4_src_address;
789 new_ip40->dst_address = inner_ip40->src_address;
790 new_ip40->checksum = ip4_header_checksum (new_ip40);
/* Recompute the ICMPv4 checksum over the rewritten message. */
792 new_icmp40->checksum = 0;
793 ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
794 new_icmp40->checksum = ~ip_csum_fold (sum);
796 vlib_increment_simple_counter (&mm->icmp_relayed, thread_index, 0,
800 if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
802 map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
803 tr->map_domain_index = 0;
808 (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
809 p0->error = error_node->errors[error0];
810 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
811 n_left_to_next, pi0, next0);
813 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
816 return frame->n_vectors;
/* Error-counter strings generated from the MAP error macro list
 * (expansion and closing brace elided from this excerpt). */
820 static char *map_error_strings[] = {
821 #define _(sym,string) string,
/* Register ip6-map as a feature on the ip6-unicast arc, ordered between
 * full reassembly and flow classification. */
827 VNET_FEATURE_INIT (ip6_map_feature, static) =
829 .arc_name = "ip6-unicast",
830 .node_name = "ip6-map",
831 .runs_before = VNET_FEATURES ("ip6-flow-classify"),
832 .runs_after = VNET_FEATURES ("ip6-full-reassembly-feature"),
/* Graph-node registration for ip6-map; next-node names map 1:1 to the
 * ip6_map_next enum above.  Some registration lines are elided. */
835 VLIB_REGISTER_NODE(ip6_map_node) = {
838 .vector_size = sizeof(u32),
839 .format_trace = format_map_trace,
840 .type = VLIB_NODE_TYPE_INTERNAL,
842 .n_errors = MAP_N_ERROR,
843 .error_strings = map_error_strings,
845 .n_next_nodes = IP6_MAP_N_NEXT,
847 [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
848 #ifdef MAP_SKIP_IP6_LOOKUP
849 [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-load-balance",
851 [IP6_MAP_NEXT_IP4_REASS] = "ip4-sv-reassembly-custom-next",
852 [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
853 [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
854 [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
855 [IP6_MAP_NEXT_DROP] = "error-drop",
856 [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
/* Graph-node registration for the post-sv-reassembly node. */
862 VLIB_REGISTER_NODE(ip6_map_post_ip4_reass_node) = {
863 .function = ip6_map_post_ip4_reass,
864 .name = "ip6-map-post-ip4-reass",
865 .vector_size = sizeof(u32),
866 .format_trace = format_ip6_map_post_ip4_reass_trace,
867 .type = VLIB_NODE_TYPE_INTERNAL,
868 .n_errors = MAP_N_ERROR,
869 .error_strings = map_error_strings,
870 .n_next_nodes = IP6_MAP_POST_IP4_REASS_N_NEXT,
872 [IP6_MAP_POST_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
873 [IP6_MAP_POST_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
874 [IP6_MAP_POST_IP4_REASS_NEXT_DROP] = "error-drop",
/* Graph-node registration for the ICMPv6->ICMPv4 relay node. */
880 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = {
881 .function = ip6_map_icmp_relay,
882 .name = "ip6-map-icmp-relay",
883 .vector_size = sizeof(u32),
884 .format_trace = format_map_trace, //FIXME
885 .type = VLIB_NODE_TYPE_INTERNAL,
886 .n_errors = MAP_N_ERROR,
887 .error_strings = map_error_strings,
888 .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
890 [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
891 [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
/* Plugin init: register ip6-map-post-ip4-reass as a custom next node of
 * IPv4 shallow virtual reassembly; runs after map_init.  Declaration and
 * return lines are elided from this excerpt. */
897 ip6_map_init (vlib_main_t * vm)
899 map_main.ip4_sv_reass_custom_next_index =
900 ip4_sv_reass_custom_register_next_node
901 (ip6_map_post_ip4_reass_node.index);
905 VLIB_INIT_FUNCTION (ip6_map_init) =
907 .runs_after = VLIB_INITS ("map_init"),};
910 * fd.io coding-style-patch-verification: ON
913 * eval: (c-set-style "gnu")