2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include "../ip/ip_frag.h"
19 #define IP4_MAP_T_DUAL_LOOP 1
// Next-node indices used by the ip4-map-t classifier node: packets are
// dispatched to a per-protocol translation node (TCP/UDP, ICMP) or to the
// dedicated node for non-first fragments.
23 IP4_MAPT_NEXT_MAPT_TCP_UDP,
24 IP4_MAPT_NEXT_MAPT_ICMP,
25 IP4_MAPT_NEXT_MAPT_FRAGMENTED,
// Next-node indices for the ICMP translation node: normal IPv6 lookup,
// IPv6 fragmentation (when the translated packet exceeds the MTU), or drop.
32 IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
33 IP4_MAPT_ICMP_NEXT_IP6_FRAG,
34 IP4_MAPT_ICMP_NEXT_DROP,
36 } ip4_mapt_icmp_next_t;
// Next-node indices for the TCP/UDP translation node.
40 IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
41 IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
42 IP4_MAPT_TCP_UDP_NEXT_DROP,
43 IP4_MAPT_TCP_UDP_N_NEXT
44 } ip4_mapt_tcp_udp_next_t;
// Next-node indices for the node translating non-first IPv4 fragments.
48 IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
49 IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
50 IP4_MAPT_FRAGMENTED_NEXT_DROP,
51 IP4_MAPT_FRAGMENTED_N_NEXT
52 } ip4_mapt_fragmented_next_t;
54 //This is used to pass information within the buffer data.
55 //Buffer structure being too small to contain big structures like this.
// Prepended in front of the IPv4 packet by the classifier node and consumed
// by the per-protocol translation nodes; it carries the already-computed
// IPv6 source/destination addresses (saddr/daddr) between graph nodes.
57 typedef CLIB_PACKED (struct {
60 //IPv6 header + Fragmentation header will be here
61 //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
63 }) ip4_mapt_pseudo_header_t;
// IPv4 fragment IDs are carried unchanged into the IPv6 fragment header
// (IPv6 identification is wider, so the 16-bit value fits as-is).
66 #define frag_id_4to6(id) (id)
68 //TODO: Find the right place in memory for this.
// Maps an ICMPv4 parameter-problem pointer value to the corresponding
// byte offset in the translated IPv6 header (per the stateless
// IP/ICMP translation algorithm). Indexed by the original pointer byte.
70 static u8 icmp_to_icmp6_updater_pointer_table[] =
// Cache the destination port of a first fragment in the MAP reassembly
// table so that subsequent (non-first) fragments of the same datagram can
// be translated without L4 headers. The lookup key uses IP_PROTOCOL_ICMP6
// in place of ICMP so it matches the post-translation protocol.
// Reassembly state is accessed under the global reass lock.
79 static_always_inline int
80 ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
83 map_ip4_reass_lock ();
85 map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
88 IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
93 map_ip4_reass_unlock ();
// Retrieve the port previously cached by ip4_map_fragment_cache for a
// non-first fragment. Returns the cached port, or -1 when no reassembly
// entry (or no port) is known for this datagram. Same ICMP->ICMP6 keying
// and locking discipline as the cache function above.
97 static_always_inline i32
98 ip4_map_fragment_get_port (ip4_header_t * ip4)
101 map_ip4_reass_lock ();
103 map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
106 IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
108 i32 ret = r ? r->port : -1;
109 map_ip4_reass_unlock ();
114 /* Statelessly translates an ICMP packet into ICMPv6.
116 * Warning: The checksum will need to be recomputed.
// Rewrites type/code in place following the stateless IP/ICMP translation
// type mapping (RFC 6145 style). On success, *receiver_port holds the MAP
// port extracted from the message (echo identifier, or the embedded inner
// packet's port for error messages), and *inner_ip4 points at the quoted
// inner IPv4 header for error types. Unsupported types/codes cause the
// caller to drop the packet.
119 static_always_inline int
120 ip4_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len,
121 i32 * receiver_port, ip4_header_t ** inner_ip4)
// Echo messages: the port is the ICMP identifier (bytes 4-5 of the header).
126 case ICMP4_echo_reply:
127 *receiver_port = ((u16 *) icmp)[2];
128 icmp->type = ICMP6_echo_reply;
130 case ICMP4_echo_request:
131 *receiver_port = ((u16 *) icmp)[2];
132 icmp->type = ICMP6_echo_request;
// Error messages: the inner (quoted) IPv4 packet starts 8 bytes in;
// the port is recovered from the inner packet's L4 header.
134 case ICMP4_destination_unreachable:
135 *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
136 *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
140 case ICMP4_destination_unreachable_destination_unreachable_net: //0
141 case ICMP4_destination_unreachable_destination_unreachable_host: //1
142 icmp->type = ICMP6_destination_unreachable;
143 icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
145 case ICMP4_destination_unreachable_protocol_unreachable: //2
146 icmp->type = ICMP6_parameter_problem;
147 icmp->code = ICMP6_parameter_problem_unrecognized_next_header;
149 case ICMP4_destination_unreachable_port_unreachable: //3
150 icmp->type = ICMP6_destination_unreachable;
151 icmp->code = ICMP6_destination_unreachable_port_unreachable;
// "Fragmentation needed" becomes Packet Too Big; the advertised MTU is
// adjusted by +20 for the IPv4->IPv6 header size difference.
153 case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4
155 ICMP6_packet_too_big;
158 u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
160 advertised_mtu += 20;
162 advertised_mtu = 1000; //FIXME ! (RFC 1191 - plateau value)
164 //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20)
165 *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (advertised_mtu);
169 case ICMP4_destination_unreachable_source_route_failed: //5
170 case ICMP4_destination_unreachable_destination_network_unknown: //6
171 case ICMP4_destination_unreachable_destination_host_unknown: //7
172 case ICMP4_destination_unreachable_source_host_isolated: //8
173 case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11
174 case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12
176 ICMP6_destination_unreachable;
177 icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
179 case ICMP4_destination_unreachable_network_administratively_prohibited: //9
180 case ICMP4_destination_unreachable_host_administratively_prohibited: //10
181 case ICMP4_destination_unreachable_communication_administratively_prohibited: //13
182 case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15
183 icmp->type = ICMP6_destination_unreachable;
185 ICMP6_destination_unreachable_destination_administratively_prohibited;
187 case ICMP4_destination_unreachable_host_precedence_violation: //14
193 case ICMP4_time_exceeded: //11
194 *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
195 *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
196 icmp->type = ICMP6_time_exceeded;
197 //icmp->code = icmp->code //unchanged
200 case ICMP4_parameter_problem:
201 *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8);
202 *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8);
206 case ICMP4_parameter_problem_pointer_indicates_error:
207 case ICMP4_parameter_problem_bad_length:
208 icmp->type = ICMP6_parameter_problem;
209 icmp->code = ICMP6_parameter_problem_erroneous_header_field;
// Translate the parameter-problem pointer via the offset remap table.
212 icmp_to_icmp6_updater_pointer_table[*((u8 *) (icmp + 1))];
216 *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (ptr);
220 //All other codes cause dropping the packet
226 //All other types cause dropping the packet
// Translate a full ICMPv4 packet (outer IPv4 + ICMP, and for error messages
// the quoted inner IPv4 packet) into IPv6/ICMPv6 in place. The buffer is
// grown at the front (vlib_buffer_advance with a negative offset) to make
// room for the larger IPv6 header(s), the inner quoted packet is translated
// with an incremental L4-checksum fixup, and finally the outer ICMPv6
// checksum is recomputed from scratch over the IPv6 pseudo-header.
// On failure *error is set (MAP_ERROR_ICMP / MAP_ERROR_FRAGMENTED).
233 static_always_inline void
234 _ip4_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error)
236 ip4_header_t *ip4, *inner_ip4;
237 ip6_header_t *ip6, *inner_ip6;
239 icmp46_header_t *icmp;
242 u16 *inner_L4_checksum = 0;
243 ip6_frag_hdr_t *inner_frag;
245 u32 inner_frag_offset;
248 ip4 = vlib_buffer_get_current (p);
249 ip_len = clib_net_to_host_u16 (ip4->length);
250 ASSERT (ip_len <= p->current_length);
// First rewrite ICMP type/code and extract the receiver port / inner packet.
252 icmp = (icmp46_header_t *) (ip4 + 1);
253 if (ip4_icmp_to_icmp6_in_place (icmp, ip_len - sizeof (*ip4),
254 &recv_port, &inner_ip4))
256 *error = MAP_ERROR_ICMP;
262 // In case of 1:1 mapping, we don't care about the port
263 if (d->ea_bits_len == 0 && d->rules)
269 *error = MAP_ERROR_ICMP;
276 //We have 2 headers to translate.
277 //We need to make some room in the middle of the packet
279 if (PREDICT_FALSE (ip4_is_fragment (inner_ip4)))
281 //Here it starts getting really tricky
282 //We will add a fragmentation header in the inner packet
284 if (!ip4_is_first_fragment (inner_ip4))
286 //For now we do not handle unless it is the first fragment
287 //Ideally we should handle the case as we are in slow path already
288 *error = MAP_ERROR_FRAGMENTED;
// Grow headroom for two IPv4->IPv6 header expansions plus an inner
// IPv6 fragment header, then move the outer headers forward.
292 vlib_buffer_advance (p,
293 -2 * (sizeof (*ip6) - sizeof (*ip4)) -
294 sizeof (*inner_frag));
295 ip6 = vlib_buffer_get_current (p);
296 clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4,
299 (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
300 icmp = (icmp46_header_t *) (ip4 + 1);
303 (ip6_header_t *) u8_ptr_add (inner_ip4,
304 sizeof (*ip4) - sizeof (*ip6) -
305 sizeof (*inner_frag));
307 (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, sizeof (*inner_ip6));
308 ip6->payload_length =
309 u16_net_add (ip4->length,
310 sizeof (*ip6) - 2 * sizeof (*ip4) +
311 sizeof (*inner_frag));
// Preserve the original fragment id/offset/MF bit for the inner header.
312 inner_frag_id = frag_id_4to6 (inner_ip4->fragment_id);
313 inner_frag_offset = ip4_get_fragment_offset (inner_ip4);
315 ! !(inner_ip4->flags_and_fragment_offset &
316 clib_net_to_host_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS));
// Non-fragmented inner packet: only two header expansions, no frag header.
320 vlib_buffer_advance (p, -2 * (sizeof (*ip6) - sizeof (*ip4)));
321 ip6 = vlib_buffer_get_current (p);
322 clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4,
325 (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
326 icmp = (icmp46_header_t *) u8_ptr_add (ip4, sizeof (*ip4));
328 (ip6_header_t *) u8_ptr_add (inner_ip4,
329 sizeof (*ip4) - sizeof (*ip6));
330 ip6->payload_length =
331 u16_net_add (ip4->length, sizeof (*ip6) - 2 * sizeof (*ip4));
// Locate the inner L4 checksum and incrementally remove the IPv4
// pseudo-header address contribution (IPv6 addresses are added back later).
335 if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_TCP))
337 inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum;
339 ip_csum_fold (ip_csum_sub_even
341 *((u64 *) (&inner_ip4->src_address))));
343 else if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_UDP))
345 inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum;
346 if (!*inner_L4_checksum)
348 //The inner packet was first translated, and therefore came from IPv6.
349 //As the packet was an IPv6 packet, the UDP checksum can't be NULL
350 *error = MAP_ERROR_ICMP;
354 ip_csum_fold (ip_csum_sub_even
356 *((u64 *) (&inner_ip4->src_address))));
358 else if (inner_ip4->protocol == IP_PROTOCOL_ICMP)
360 //We have an ICMP inside an ICMP
361 //It needs to be translated, but not for error ICMP messages
362 icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1);
363 csum = inner_icmp->checksum;
364 //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place
365 csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp));
366 inner_icmp->type = (inner_icmp->type == ICMP4_echo_request) ?
367 ICMP6_echo_request : ICMP6_echo_reply;
368 csum = ip_csum_add_even (csum, *((u16 *) inner_icmp));
// ICMPv6 (unlike ICMPv4) includes a pseudo-header in its checksum:
// add the next-header value and payload length contributions here.
370 ip_csum_add_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6));
372 ip_csum_add_even (csum, inner_ip4->length - sizeof (*inner_ip4));
373 inner_icmp->checksum = ip_csum_fold (csum);
374 inner_L4_checksum = &inner_icmp->checksum;
375 inner_ip4->protocol = IP_PROTOCOL_ICMP6;
379 ASSERT (0); // We had a port from that, so it is udp or tcp or ICMP
382 //FIXME: Security check with the port found in the inner packet
384 csum = *inner_L4_checksum; //Initial checksum of the inner L4 header
385 //FIXME: Shouldn't we remove ip addresses from there ?
// Build the inner IPv6 header from the inner IPv4 fields.
387 inner_ip6->ip_version_traffic_class_and_flow_label =
388 clib_host_to_net_u32 ((6 << 28) + (inner_ip4->tos << 20));
389 inner_ip6->payload_length =
390 u16_net_add (inner_ip4->length, -sizeof (*inner_ip4));
391 inner_ip6->hop_limit = inner_ip4->ttl;
392 inner_ip6->protocol = inner_ip4->protocol;
394 //Note that the source address is within the domain
395 //while the destination address is the one outside the domain
396 ip4_map_t_embedded_address (d, &inner_ip6->dst_address,
397 &inner_ip4->dst_address);
398 inner_ip6->src_address.as_u64[0] =
399 map_get_pfx_net (d, inner_ip4->src_address.as_u32, recv_port);
400 inner_ip6->src_address.as_u64[1] =
401 map_get_sfx_net (d, inner_ip4->src_address.as_u32, recv_port);
403 if (PREDICT_FALSE (inner_frag != NULL))
405 inner_frag->next_hdr = inner_ip6->protocol;
406 inner_frag->identification = inner_frag_id;
408 inner_frag->fragment_offset_and_more =
409 ip6_frag_hdr_offset_and_more (inner_frag_offset, inner_frag_more);
410 inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
411 inner_ip6->payload_length =
412 clib_host_to_net_u16 (clib_net_to_host_u16
413 (inner_ip6->payload_length) +
414 sizeof (*inner_frag));
// Add the new IPv6 pseudo-header address contribution to the inner L4 csum.
417 csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
418 csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
419 csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
420 csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
421 *inner_L4_checksum = ip_csum_fold (csum);
// Simple (non-error) ICMP: single header expansion for the outer packet.
426 vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
427 ip6 = vlib_buffer_get_current (p);
428 ip6->payload_length =
429 clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
433 //Translate outer IPv6
434 ip6->ip_version_traffic_class_and_flow_label =
435 clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
437 ip6->hop_limit = ip4->ttl;
438 ip6->protocol = IP_PROTOCOL_ICMP6;
440 ip4_map_t_embedded_address (d, &ip6->src_address, &ip4->src_address);
441 ip6->dst_address.as_u64[0] =
442 map_get_pfx_net (d, ip4->dst_address.as_u32, recv_port);
443 ip6->dst_address.as_u64[1] =
444 map_get_sfx_net (d, ip4->dst_address.as_u32, recv_port);
446 //Truncate when the packet exceeds the minimal IPv6 MTU
447 if (p->current_length > 1280)
449 ip6->payload_length = clib_host_to_net_u16 (1280 - sizeof (*ip6));
450 p->current_length = 1280; //Looks too simple to be correct...
453 //TODO: We could do an easy diff-checksum for echo requests/replies
454 //Recompute ICMP checksum
// Full ICMPv6 checksum: IPv6 pseudo-header fields plus the ICMP payload.
456 csum = ip_csum_with_carry (0, ip6->payload_length);
457 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
458 csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
459 csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
460 csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
461 csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
463 ip_incremental_checksum (csum, icmp,
464 clib_net_to_host_u16 (ip6->payload_length));
465 icmp->checksum = ~ip_csum_fold (csum);
// Graph-node dispatch function for ICMP translation: for each buffer, skip
// the classifier's pseudo-header (unused here — addresses are recomputed
// from the embedded receiver port), run _ip4_map_t_icmp, then send to IPv6
// lookup, the IPv6 fragmentation node (if over the domain MTU), or drop.
// TX byte/packet counters are incremented per MAP domain on success.
469 ip4_map_t_icmp (vlib_main_t * vm,
470 vlib_node_runtime_t * node, vlib_frame_t * frame)
472 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
473 vlib_node_runtime_t *error_node =
474 vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
475 from = vlib_frame_vector_args (frame);
476 n_left_from = frame->n_vectors;
477 next_index = node->cached_next_index;
478 vlib_combined_counter_main_t *cm = map_main.domain_counters;
479 u32 cpu_index = os_get_cpu_number ();
481 while (n_left_from > 0)
483 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
485 while (n_left_from > 0 && n_left_to_next > 0)
489 ip4_mapt_icmp_next_t next0;
494 next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
495 pi0 = to_next[0] = from[0];
500 error0 = MAP_ERROR_NONE;
502 p0 = vlib_get_buffer (vm, pi0);
503 vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
// Counter length is the pre-translation IPv4 packet length.
505 clib_net_to_host_u16 (((ip4_header_t *)
506 vlib_buffer_get_current (p0))->length);
508 pool_elt_at_index (map_main.domains,
509 vnet_buffer (p0)->map_t.map_domain_index);
510 _ip4_map_t_icmp (d0, p0, &error0);
512 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
514 vnet_buffer (p0)->ip_frag.header_offset = 0;
515 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
516 vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
517 next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
519 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
521 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
523 vnet_buffer (p0)->map_t.
524 map_domain_index, 1, len0);
528 next0 = IP4_MAPT_ICMP_NEXT_DROP;
530 p0->error = error_node->errors[error0];
531 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
532 to_next, n_left_to_next, pi0,
535 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
537 return frame->n_vectors;
// Graph-node dispatch function translating non-first IPv4 fragments.
// These carry no L4 header, so the IPv6 addresses come entirely from the
// classifier's pseudo-header (which used the port cached from the first
// fragment). The IPv4 header is replaced in place by an IPv6 header plus
// an IPv6 fragment extension header.
541 ip4_map_t_fragmented (vlib_main_t * vm,
542 vlib_node_runtime_t * node, vlib_frame_t * frame)
544 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
545 from = vlib_frame_vector_args (frame);
546 n_left_from = frame->n_vectors;
547 next_index = node->cached_next_index;
549 while (n_left_from > 0)
551 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
553 while (n_left_from > 0 && n_left_to_next > 0)
559 ip6_frag_hdr_t *frag0;
560 ip4_mapt_pseudo_header_t *pheader0;
561 ip4_mapt_fragmented_next_t next0;
563 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
564 pi0 = to_next[0] = from[0];
570 p0 = vlib_get_buffer (vm, pi0);
572 //Accessing pseudo header
573 pheader0 = vlib_buffer_get_current (p0);
574 vlib_buffer_advance (p0, sizeof (*pheader0));
576 //Accessing ip4 header
577 ip40 = vlib_buffer_get_current (p0);
// Overlay the IPv6 + fragment headers over the space freed by the
// (larger-than-needed) IPv4 header: ip6 starts sizeof(ip6)+sizeof(frag)
// - sizeof(ip4) bytes before the current IPv4 header.
579 (ip6_frag_hdr_t *) u8_ptr_add (ip40,
580 sizeof (*ip40) - sizeof (*frag0));
582 (ip6_header_t *) u8_ptr_add (ip40,
583 sizeof (*ip40) - sizeof (*frag0) -
585 vlib_buffer_advance (p0,
586 sizeof (*ip40) - sizeof (*ip60) -
589 //We know that the protocol was one of ICMP, TCP or UDP
590 //because the first fragment was found and cached
593 IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip40->protocol;
594 frag0->identification = frag_id_4to6 (ip40->fragment_id);
596 frag0->fragment_offset_and_more =
597 ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip40),
599 (ip40->flags_and_fragment_offset) &
600 IP4_HEADER_FLAG_MORE_FRAGMENTS);
602 ip60->ip_version_traffic_class_and_flow_label =
603 clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
604 ip60->payload_length =
605 clib_host_to_net_u16 (clib_net_to_host_u16 (ip40->length) -
606 sizeof (*ip40) + sizeof (*frag0));
607 ip60->hop_limit = ip40->ttl;
608 ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
// Addresses were precomputed by the classifier and passed via pheader.
609 ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
610 ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
611 ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
612 ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
614 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
616 vnet_buffer (p0)->ip_frag.header_offset = 0;
617 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
618 vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
619 next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
622 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
623 to_next, n_left_to_next, pi0,
626 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
628 return frame->n_vectors;
// Graph-node dispatch function for TCP/UDP translation. Processes packets
// two at a time when IP4_MAP_T_DUAL_LOOP is defined, then one at a time.
// Per packet: consume the classifier pseudo-header, compute a missing UDP
// checksum (optional over IPv4, mandatory over IPv6), incrementally patch
// the L4 checksum from IPv4 to IPv6 pseudo-header addresses, replace the
// IPv4 header with IPv6 (plus a fragment extension header when the IPv4
// packet was a first fragment with MF set), and enqueue to IPv6 lookup or
// to the IPv6 fragmentation node when over the domain MTU.
632 ip4_map_t_tcp_udp (vlib_main_t * vm,
633 vlib_node_runtime_t * node, vlib_frame_t * frame)
635 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
636 from = vlib_frame_vector_args (frame);
637 n_left_from = frame->n_vectors;
638 next_index = node->cached_next_index;
640 while (n_left_from > 0)
642 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
644 #ifdef IP4_MAP_T_DUAL_LOOP
645 while (n_left_from >= 4 && n_left_to_next >= 2)
648 vlib_buffer_t *p0, *p1;
649 ip4_header_t *ip40, *ip41;
650 ip6_header_t *ip60, *ip61;
651 ip_csum_t csum0, csum1;
652 u16 *checksum0, *checksum1;
653 ip6_frag_hdr_t *frag0, *frag1;
654 u32 frag_id0, frag_id1;
655 ip4_mapt_pseudo_header_t *pheader0, *pheader1;
656 ip4_mapt_tcp_udp_next_t next0, next1;
658 pi0 = to_next[0] = from[0];
659 pi1 = to_next[1] = from[1];
665 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
666 next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
667 p0 = vlib_get_buffer (vm, pi0);
668 p1 = vlib_get_buffer (vm, pi1);
670 //Accessing pseudo header
671 pheader0 = vlib_buffer_get_current (p0);
672 pheader1 = vlib_buffer_get_current (p1);
673 vlib_buffer_advance (p0, sizeof (*pheader0));
674 vlib_buffer_advance (p1, sizeof (*pheader1));
676 //Accessing ip4 header
677 ip40 = vlib_buffer_get_current (p0);
678 ip41 = vlib_buffer_get_current (p1);
// checksum_offset was set by the classifier (TCP: 36, UDP: 26 bytes in).
680 (u16 *) u8_ptr_add (ip40,
681 vnet_buffer (p0)->map_t.checksum_offset);
683 (u16 *) u8_ptr_add (ip41,
684 vnet_buffer (p1)->map_t.checksum_offset);
686 //UDP checksum is optional over IPv4 but mandatory for IPv6
687 //We do not check udp->length sanity but use our safe computed value instead
689 (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP))
692 clib_host_to_net_u16 (ip40->length) - sizeof (*ip40);
694 (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40));
696 csum = ip_incremental_checksum (0, udp, udp_len);
698 ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
700 ip_csum_with_carry (csum,
701 clib_host_to_net_u16 (IP_PROTOCOL_UDP));
703 ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address)));
704 *checksum0 = ~ip_csum_fold (csum);
707 (!*checksum1 && ip41->protocol == IP_PROTOCOL_UDP))
710 clib_host_to_net_u16 (ip41->length) - sizeof (*ip40);
712 (udp_header_t *) u8_ptr_add (ip41, sizeof (*ip40));
714 csum = ip_incremental_checksum (0, udp, udp_len);
716 ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
718 ip_csum_with_carry (csum,
719 clib_host_to_net_u16 (IP_PROTOCOL_UDP));
721 ip_csum_with_carry (csum, *((u64 *) (&ip41->src_address)));
722 *checksum1 = ~ip_csum_fold (csum);
// Remove IPv4 pseudo-header addresses from the L4 checksum; the IPv6
// addresses are added back after the header swap below.
725 csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32);
726 csum1 = ip_csum_sub_even (*checksum1, ip41->src_address.as_u32);
727 csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32);
728 csum1 = ip_csum_sub_even (csum1, ip41->dst_address.as_u32);
730 // Deal with fragmented packets
731 if (PREDICT_FALSE (ip40->flags_and_fragment_offset &
733 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
736 (ip6_header_t *) u8_ptr_add (ip40,
737 sizeof (*ip40) - sizeof (*ip60) -
740 (ip6_frag_hdr_t *) u8_ptr_add (ip40,
743 frag_id0 = frag_id_4to6 (ip40->fragment_id);
744 vlib_buffer_advance (p0,
745 sizeof (*ip40) - sizeof (*ip60) -
751 (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) -
753 vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60));
757 if (PREDICT_FALSE (ip41->flags_and_fragment_offset &
759 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
762 (ip6_header_t *) u8_ptr_add (ip41,
763 sizeof (*ip40) - sizeof (*ip60) -
766 (ip6_frag_hdr_t *) u8_ptr_add (ip41,
769 frag_id1 = frag_id_4to6 (ip41->fragment_id);
770 vlib_buffer_advance (p1,
771 sizeof (*ip40) - sizeof (*ip60) -
777 (ip6_header_t *) (((u8 *) ip41) + sizeof (*ip40) -
779 vlib_buffer_advance (p1, sizeof (*ip40) - sizeof (*ip60));
// Fill the IPv6 headers from the IPv4 fields.
783 ip60->ip_version_traffic_class_and_flow_label =
784 clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
785 ip61->ip_version_traffic_class_and_flow_label =
786 clib_host_to_net_u32 ((6 << 28) + (ip41->tos << 20));
787 ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40));
788 ip61->payload_length = u16_net_add (ip41->length, -sizeof (*ip40));
789 ip60->hop_limit = ip40->ttl;
790 ip61->hop_limit = ip41->ttl;
791 ip60->protocol = ip40->protocol;
792 ip61->protocol = ip41->protocol;
794 if (PREDICT_FALSE (frag0 != NULL))
796 frag0->next_hdr = ip60->protocol;
797 frag0->identification = frag_id0;
// First fragment: offset 0, more-fragments set.
799 frag0->fragment_offset_and_more =
800 ip6_frag_hdr_offset_and_more (0, 1);
801 ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
802 ip60->payload_length =
803 u16_net_add (ip60->payload_length, sizeof (*frag0));
806 if (PREDICT_FALSE (frag1 != NULL))
808 frag1->next_hdr = ip61->protocol;
809 frag1->identification = frag_id1;
811 frag1->fragment_offset_and_more =
812 ip6_frag_hdr_offset_and_more (0, 1);
813 ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
814 ip61->payload_length =
815 u16_net_add (ip61->payload_length, sizeof (*frag0));
818 //Finally copying the address
819 ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
820 ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0];
821 ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
822 ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1];
823 ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
824 ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0];
825 ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
826 ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1];
// Add the IPv6 pseudo-header address contribution to the L4 checksums.
828 csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]);
829 csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[0]);
830 csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]);
831 csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[1]);
832 csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]);
833 csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[0]);
834 csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]);
835 csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[1]);
836 *checksum0 = ip_csum_fold (csum0);
837 *checksum1 = ip_csum_fold (csum1);
839 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
841 vnet_buffer (p0)->ip_frag.header_offset = 0;
842 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
843 vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
844 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
847 if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
849 vnet_buffer (p1)->ip_frag.header_offset = 0;
850 vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
851 vnet_buffer (p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
852 next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
855 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
856 to_next, n_left_to_next, pi0, pi1,
// Single-packet loop: same logic as above for one buffer at a time.
861 while (n_left_from > 0 && n_left_to_next > 0)
869 ip6_frag_hdr_t *frag0;
871 ip4_mapt_pseudo_header_t *pheader0;
872 ip4_mapt_tcp_udp_next_t next0;
874 pi0 = to_next[0] = from[0];
880 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
881 p0 = vlib_get_buffer (vm, pi0);
883 //Accessing pseudo header
884 pheader0 = vlib_buffer_get_current (p0);
885 vlib_buffer_advance (p0, sizeof (*pheader0));
887 //Accessing ip4 header
888 ip40 = vlib_buffer_get_current (p0);
890 (u16 *) u8_ptr_add (ip40,
891 vnet_buffer (p0)->map_t.checksum_offset);
893 //UDP checksum is optional over IPv4 but mandatory for IPv6
894 //We do not check udp->length sanity but use our safe computed value instead
896 (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP))
899 clib_host_to_net_u16 (ip40->length) - sizeof (*ip40);
901 (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40));
903 csum = ip_incremental_checksum (0, udp, udp_len);
905 ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
907 ip_csum_with_carry (csum,
908 clib_host_to_net_u16 (IP_PROTOCOL_UDP));
910 ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address)));
911 *checksum0 = ~ip_csum_fold (csum);
914 csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32);
915 csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32);
917 // Deal with fragmented packets
918 if (PREDICT_FALSE (ip40->flags_and_fragment_offset &
920 (IP4_HEADER_FLAG_MORE_FRAGMENTS)))
923 (ip6_header_t *) u8_ptr_add (ip40,
924 sizeof (*ip40) - sizeof (*ip60) -
927 (ip6_frag_hdr_t *) u8_ptr_add (ip40,
930 frag_id0 = frag_id_4to6 (ip40->fragment_id);
931 vlib_buffer_advance (p0,
932 sizeof (*ip40) - sizeof (*ip60) -
938 (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) -
940 vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60));
944 ip60->ip_version_traffic_class_and_flow_label =
945 clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20));
946 ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40));
947 ip60->hop_limit = ip40->ttl;
948 ip60->protocol = ip40->protocol;
950 if (PREDICT_FALSE (frag0 != NULL))
952 frag0->next_hdr = ip60->protocol;
953 frag0->identification = frag_id0;
955 frag0->fragment_offset_and_more =
956 ip6_frag_hdr_offset_and_more (0, 1);
957 ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
958 ip60->payload_length =
959 u16_net_add (ip60->payload_length, sizeof (*frag0));
962 //Finally copying the address
963 ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
964 ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
965 ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
966 ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
968 csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]);
969 csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]);
970 csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]);
971 csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]);
972 *checksum0 = ip_csum_fold (csum0);
974 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
976 //Send to fragmentation node if necessary
977 vnet_buffer (p0)->ip_frag.header_offset = 0;
978 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
979 vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
980 next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
983 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
984 to_next, n_left_to_next, pi0,
987 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
990 return frame->n_vectors;
// Classify an IPv4 packet for MAP-T translation: select the next node
// (fragmented / tcp-udp / icmp), extract the destination port used for the
// MAP address derivation, record the L4 checksum offset in buffer metadata,
// and flag malformed or unsupported-protocol packets via *error0.
// For 1:1 domains (ea_bits_len == 0 with rules) the port is irrelevant.
993 static_always_inline void
994 ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
995 ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
996 u8 * error0, ip4_mapt_next_t * next0)
998 if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
1000 *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
1001 if (d0->ea_bits_len == 0 && d0->rules)
// Non-first fragment: port must come from the reassembly cache.
1007 *dst_port0 = ip4_map_fragment_get_port (ip40);
1008 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
// TCP: dst port at IPv4+2, checksum at IPv4(20)+16 = offset 36.
1011 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
1013 vnet_buffer (p0)->map_t.checksum_offset = 36;
1014 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
1015 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
1016 *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
// UDP: dst port at IPv4+2, checksum at IPv4(20)+6 = offset 26.
1018 else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
1020 vnet_buffer (p0)->map_t.checksum_offset = 26;
1021 *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
1022 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
1023 *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
1025 else if (ip40->protocol == IP_PROTOCOL_ICMP)
1027 *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
1028 if (d0->ea_bits_len == 0 && d0->rules)
// NOTE(review): this compares the ICMP *code* field against the
// echo-request type constant — looks like it should inspect ->type;
// verify against upstream before changing (elided lines may differ).
1030 else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
1032 || ((icmp46_header_t *)
1034 sizeof (*ip40)))->code == ICMP4_echo_request)
// Echo messages: the port is the ICMP identifier at IPv4+6.
1035 *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
1039 *error0 = MAP_ERROR_BAD_PROTOCOL;
1044 ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
1046 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1047 vlib_node_runtime_t *error_node =
1048 vlib_node_get_runtime (vm, ip4_map_t_node.index);
1049 from = vlib_frame_vector_args (frame);
1050 n_left_from = frame->n_vectors;
1051 next_index = node->cached_next_index;
1052 vlib_combined_counter_main_t *cm = map_main.domain_counters;
1053 u32 cpu_index = os_get_cpu_number ();
1055 while (n_left_from > 0)
1057 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1059 #ifdef IP4_MAP_T_DUAL_LOOP
1060 while (n_left_from >= 4 && n_left_to_next >= 2)
1063 vlib_buffer_t *p0, *p1;
1064 ip4_header_t *ip40, *ip41;
1065 map_domain_t *d0, *d1;
1066 ip4_mapt_next_t next0 = 0, next1 = 0;
1067 u16 ip4_len0, ip4_len1;
1069 i32 dst_port0, dst_port1;
1070 ip4_mapt_pseudo_header_t *pheader0, *pheader1;
1072 pi0 = to_next[0] = from[0];
1073 pi1 = to_next[1] = from[1];
1077 n_left_to_next -= 2;
1078 error0 = MAP_ERROR_NONE;
1079 error1 = MAP_ERROR_NONE;
1081 p0 = vlib_get_buffer (vm, pi0);
1082 p1 = vlib_get_buffer (vm, pi1);
1083 ip40 = vlib_buffer_get_current (p0);
1084 ip41 = vlib_buffer_get_current (p1);
1085 ip4_len0 = clib_host_to_net_u16 (ip40->length);
1086 ip4_len1 = clib_host_to_net_u16 (ip41->length);
1088 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
1089 ip40->ip_version_and_header_length != 0x45))
1091 error0 = MAP_ERROR_UNKNOWN;
1092 next0 = IP4_MAPT_NEXT_DROP;
1095 if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
1096 ip41->ip_version_and_header_length != 0x45))
1098 error1 = MAP_ERROR_UNKNOWN;
1099 next1 = IP4_MAPT_NEXT_DROP;
1102 d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
1103 &vnet_buffer (p0)->map_t.map_domain_index);
1104 d1 = ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
1105 &vnet_buffer (p1)->map_t.map_domain_index);
1107 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
1108 vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
1113 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
1115 ip4_map_t_classify (p1, d1, ip41, ip4_len1, &dst_port1, &error1,
1118 //Add MAP-T pseudo header in front of the packet
1119 vlib_buffer_advance (p0, -sizeof (*pheader0));
1120 vlib_buffer_advance (p1, -sizeof (*pheader1));
1121 pheader0 = vlib_buffer_get_current (p0);
1122 pheader1 = vlib_buffer_get_current (p1);
1124 //Save addresses within the packet
1125 ip4_map_t_embedded_address (d0, &pheader0->saddr,
1126 &ip40->src_address);
1127 ip4_map_t_embedded_address (d1, &pheader1->saddr,
1128 &ip41->src_address);
1129 pheader0->daddr.as_u64[0] =
1130 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
1131 pheader0->daddr.as_u64[1] =
1132 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
1133 pheader1->daddr.as_u64[0] =
1134 map_get_pfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
1135 pheader1->daddr.as_u64[1] =
1136 map_get_sfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
1139 (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
1140 && (d0->ea_bits_len != 0 || !d0->rules)
1141 && ip4_map_fragment_cache (ip40, dst_port0)))
1143 error0 = MAP_ERROR_FRAGMENT_MEMORY;
1147 (ip4_is_first_fragment (ip41) && (dst_port1 != -1)
1148 && (d1->ea_bits_len != 0 || !d1->rules)
1149 && ip4_map_fragment_cache (ip41, dst_port1)))
1151 error1 = MAP_ERROR_FRAGMENT_MEMORY;
1155 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
1157 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
1159 vnet_buffer (p0)->map_t.
1160 map_domain_index, 1,
1161 clib_net_to_host_u16 (ip40->
1166 (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
1168 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
1170 vnet_buffer (p1)->map_t.
1171 map_domain_index, 1,
1172 clib_net_to_host_u16 (ip41->
1176 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
1177 next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
1178 p0->error = error_node->errors[error0];
1179 p1->error = error_node->errors[error1];
1180 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1181 n_left_to_next, pi0, pi1, next0,
1186 while (n_left_from > 0 && n_left_to_next > 0)
1192 ip4_mapt_next_t next0;
1196 ip4_mapt_pseudo_header_t *pheader0;
1198 pi0 = to_next[0] = from[0];
1202 n_left_to_next -= 1;
1203 error0 = MAP_ERROR_NONE;
1205 p0 = vlib_get_buffer (vm, pi0);
1206 ip40 = vlib_buffer_get_current (p0);
1207 ip4_len0 = clib_host_to_net_u16 (ip40->length);
1208 if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
1209 ip40->ip_version_and_header_length != 0x45))
1211 error0 = MAP_ERROR_UNKNOWN;
1212 next0 = IP4_MAPT_NEXT_DROP;
1215 d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
1216 &vnet_buffer (p0)->map_t.map_domain_index);
1218 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
1221 ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
1224 //Add MAP-T pseudo header in front of the packet
1225 vlib_buffer_advance (p0, -sizeof (*pheader0));
1226 pheader0 = vlib_buffer_get_current (p0);
1228 //Save addresses within the packet
1229 ip4_map_t_embedded_address (d0, &pheader0->saddr,
1230 &ip40->src_address);
1231 pheader0->daddr.as_u64[0] =
1232 map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
1233 pheader0->daddr.as_u64[1] =
1234 map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
1236 //It is important to cache at this stage because the result might be necessary
1237 //for packets within the same vector.
1238 //Actually, this approach even provides some limited out-of-order fragments support
1240 (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
1241 && (d0->ea_bits_len != 0 || !d0->rules)
1242 && ip4_map_fragment_cache (ip40, dst_port0)))
1244 error0 = MAP_ERROR_UNKNOWN;
1248 (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
1250 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
1252 vnet_buffer (p0)->map_t.
1253 map_domain_index, 1,
1254 clib_net_to_host_u16 (ip40->
1258 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
1259 p0->error = error_node->errors[error0];
1260 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1261 to_next, n_left_to_next, pi0,
1264 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1266 return frame->n_vectors;
1269 static char *map_t_error_strings[] = {
1270 #define _(sym,string) string,
1276 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
1277 .function = ip4_map_t_fragmented,
1278 .name = "ip4-map-t-fragmented",
1279 .vector_size = sizeof(u32),
1280 .format_trace = format_map_trace,
1281 .type = VLIB_NODE_TYPE_INTERNAL,
1283 .n_errors = MAP_N_ERROR,
1284 .error_strings = map_t_error_strings,
1286 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
1288 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
1289 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1290 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
1296 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
1297 .function = ip4_map_t_icmp,
1298 .name = "ip4-map-t-icmp",
1299 .vector_size = sizeof(u32),
1300 .format_trace = format_map_trace,
1301 .type = VLIB_NODE_TYPE_INTERNAL,
1303 .n_errors = MAP_N_ERROR,
1304 .error_strings = map_t_error_strings,
1306 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
1308 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
1309 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1310 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
1316 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
1317 .function = ip4_map_t_tcp_udp,
1318 .name = "ip4-map-t-tcp-udp",
1319 .vector_size = sizeof(u32),
1320 .format_trace = format_map_trace,
1321 .type = VLIB_NODE_TYPE_INTERNAL,
1323 .n_errors = MAP_N_ERROR,
1324 .error_strings = map_t_error_strings,
1326 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
1328 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
1329 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1330 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1336 VLIB_REGISTER_NODE(ip4_map_t_node) = {
1337 .function = ip4_map_t,
1338 .name = "ip4-map-t",
1339 .vector_size = sizeof(u32),
1340 .format_trace = format_map_trace,
1341 .type = VLIB_NODE_TYPE_INTERNAL,
1343 .n_errors = MAP_N_ERROR,
1344 .error_strings = map_t_error_strings,
1346 .n_next_nodes = IP4_MAPT_N_NEXT,
1348 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
1349 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
1350 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
1351 [IP4_MAPT_NEXT_DROP] = "error-drop",
1357 * fd.io coding-style-patch-verification: ON
1360 * eval: (c-set-style "gnu")