2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include "../ip/ip_frag.h"
19 #define IP4_MAP_T_DUAL_LOOP 1
//NOTE(review): this chunk is lossy - the "typedef enum {" openers of the
//first two enums below (and the remaining members of the first enum,
//e.g. a drop/N_NEXT entry) are not visible here; confirm against the
//full file.

//Next nodes of the ip4-map-t classifier node: one per-protocol
//translation node (TCP/UDP, ICMP, non-first fragments).
  IP4_MAPT_NEXT_MAPT_TCP_UDP,
  IP4_MAPT_NEXT_MAPT_ICMP,
  IP4_MAPT_NEXT_MAPT_FRAGMENTED,

//Next nodes of the ICMP translation node.
  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
  IP4_MAPT_ICMP_NEXT_IP6_FRAG,   //translated packet exceeds domain MTU
  IP4_MAPT_ICMP_NEXT_DROP,
} ip4_mapt_icmp_next_t;

//Next nodes of the TCP/UDP translation node.
  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
  IP4_MAPT_TCP_UDP_NEXT_DROP,
  IP4_MAPT_TCP_UDP_N_NEXT
} ip4_mapt_tcp_udp_next_t;

//Next nodes of the non-first-fragment translation node.
  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
  IP4_MAPT_FRAGMENTED_NEXT_DROP,
  IP4_MAPT_FRAGMENTED_N_NEXT
} ip4_mapt_fragmented_next_t;
//This is used to pass information within the buffer data.
//Buffer structure being too small to contain big structures like this.
typedef CLIB_PACKED(struct {
  //NOTE(review): the members of this struct are not visible in this
  //chunk. Later code references pheader->saddr / pheader->daddr
  //(pre-computed IPv6 source/destination addresses) - confirm the exact
  //layout against the full file.
  //IPv6 header + Fragmentation header will be here
  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
}) ip4_mapt_pseudo_header_t;
//The 16-bit IPv4 fragment id is carried unchanged into the IPv6
//fragment header identification field.
#define frag_id_4to6(id) (id)

//TODO: Find the right place in memory for this.
//Lookup table used by ip4_icmp_to_icmp6_in_place() to remap an ICMPv4
//parameter-problem pointer to the corresponding offset in the
//translated IPv6 header.
//NOTE(review): the table initializer is not visible in this chunk.
static u8 icmp_to_icmp6_updater_pointer_table[] =
//Cache the destination port taken from an IPv4 first-fragment so that
//later fragments of the same flow (which carry no L4 header) can be
//translated - see ip4_map_fragment_get_port().
//NOTE(review): the tail of this function (the reassembly-entry port
//update and the return statement) is elided from this chunk.
static_always_inline int
ip4_map_fragment_cache (ip4_header_t *ip4, u16 port)
  //ICMP flows are keyed under IP_PROTOCOL_ICMP6 so the cached entry
  //matches the protocol value used after translation.
  map_ip4_reass_t *r = map_ip4_reass_get(ip4->src_address.as_u32, ip4->dst_address.as_u32,
      (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
  map_ip4_reass_unlock();
//Look up the port previously cached by ip4_map_fragment_cache() for a
//non-first fragment. Returns -1 when no reassembly entry exists.
//NOTE(review): the return statement is elided from this chunk.
static_always_inline i32
ip4_map_fragment_get_port (ip4_header_t *ip4)
  //Same lookup key as the caching side (ICMP mapped to ICMP6).
  map_ip4_reass_t *r = map_ip4_reass_get(ip4->src_address.as_u32, ip4->dst_address.as_u32,
      (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
  i32 ret = r?r->port:-1;
  map_ip4_reass_unlock();
/* Statelessly translates an ICMP packet into ICMPv6.
 * Warning: The checksum will need to be recomputed.
 *
 * @param icmp           ICMPv4 header, rewritten to ICMPv6 in place
 * @param icmp_len       ICMP message length (IP length minus IPv4 header)
 * @param receiver_port  out: port / echo identifier used for MAP lookup
 * @param inner_ip4      out: embedded IPv4 header of ICMP error messages
 * @return 0 on success, non-zero when the packet must be dropped
 *         (NOTE(review): the actual return statements are elided from
 *         this chunk)
 *
 * NOTE(review): this chunk is lossy - the "break;" statements, drop
 * returns, and some braces between the visible lines are missing here,
 * not in the original file; do not read the case labels as fall-through.
 */
static_always_inline int
ip4_icmp_to_icmp6_in_place (icmp46_header_t *icmp, u32 icmp_len,
                            i32 *receiver_port, ip4_header_t **inner_ip4)
  switch (icmp->type) {
    case ICMP4_echo_reply:
      //Echo identifier lives at bytes 4-5 of the ICMP header.
      *receiver_port = ((u16 *)icmp)[2];
      icmp->type = ICMP6_echo_reply;
    case ICMP4_echo_request:
      *receiver_port = ((u16 *)icmp)[2];
      icmp->type = ICMP6_echo_request;
    case ICMP4_destination_unreachable:
      //Error messages embed the offending IPv4 packet 8 bytes into the
      //ICMP message.
      *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
      *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
      switch (icmp->code) {
        case ICMP4_destination_unreachable_destination_unreachable_net: //0
        case ICMP4_destination_unreachable_destination_unreachable_host: //1
          icmp->type = ICMP6_destination_unreachable;
          icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
        case ICMP4_destination_unreachable_protocol_unreachable: //2
          icmp->type = ICMP6_parameter_problem;
          icmp->code = ICMP6_parameter_problem_unrecognized_next_header;
        case ICMP4_destination_unreachable_port_unreachable: //3
          icmp->type = ICMP6_destination_unreachable;
          icmp->code = ICMP6_destination_unreachable_port_unreachable;
        case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4
          icmp->type = ICMP6_packet_too_big;
          //NOTE(review): the conditional between the next two
          //assignments is elided here - as visible they would simply
          //overwrite each other.
          u32 advertised_mtu = clib_net_to_host_u32(*((u32 *)(icmp + 1)));
          advertised_mtu += 20;
          advertised_mtu = 1000; //FIXME ! (RFC 1191 - plateau value)
          //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20)
          *((u32 *)(icmp + 1)) = clib_host_to_net_u32(advertised_mtu);
        case ICMP4_destination_unreachable_source_route_failed: //5
        case ICMP4_destination_unreachable_destination_network_unknown: //6
        case ICMP4_destination_unreachable_destination_host_unknown: //7
        case ICMP4_destination_unreachable_source_host_isolated: //8
        case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11
        case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12
          icmp->type = ICMP6_destination_unreachable;
          icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
        case ICMP4_destination_unreachable_network_administratively_prohibited: //9
        case ICMP4_destination_unreachable_host_administratively_prohibited: //10
        case ICMP4_destination_unreachable_communication_administratively_prohibited: //13
        case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15
          icmp->type = ICMP6_destination_unreachable;
          icmp->code = ICMP6_destination_unreachable_destination_administratively_prohibited;
        case ICMP4_destination_unreachable_host_precedence_violation: //14
    case ICMP4_time_exceeded: //11
      *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
      *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
      icmp->type = ICMP6_time_exceeded;
      //icmp->code = icmp->code //unchanged
    case ICMP4_parameter_problem:
      *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
      *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
      switch (icmp->code) {
        case ICMP4_parameter_problem_pointer_indicates_error:
        case ICMP4_parameter_problem_bad_length:
          icmp->type = ICMP6_parameter_problem;
          icmp->code = ICMP6_parameter_problem_erroneous_header_field;
          //Remap the IPv4 problem pointer to its IPv6 header offset.
          u8 ptr = icmp_to_icmp6_updater_pointer_table[*((u8 *)(icmp + 1))];
          *((u32 *)(icmp + 1)) = clib_host_to_net_u32(ptr);
        //All other codes cause dropping the packet
      //All other types cause dropping the packet
//Translate one IPv4 ICMP packet to IPv6/ICMPv6 in place: outer IPv4
//header, ICMP header, and - for error messages - the embedded IPv4
//packet (including an inner L4 checksum fix-up). On failure *error is
//set for the caller.
//
//NOTE(review): this chunk is lossy - several declarations (ip_len,
//recv_port, csum, inner_frag_id, inner_frag_more, ...), "else" lines,
//early returns and closing braces are missing here, not in the
//original file.
static_always_inline void
_ip4_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
  ip4_header_t *ip4, *inner_ip4;
  ip6_header_t *ip6, *inner_ip6;
  icmp46_header_t *icmp;
  u16 *inner_L4_checksum = 0;
  ip6_frag_hdr_t *inner_frag;
  u32 inner_frag_offset;

  ip4 = vlib_buffer_get_current(p);
  ip_len = clib_net_to_host_u16(ip4->length);
  ASSERT(ip_len <= p->current_length);

  icmp = (icmp46_header_t *)(ip4 + 1);
  //First rewrite the ICMP header itself; on failure flag and bail out.
  if (ip4_icmp_to_icmp6_in_place(icmp, ip_len - sizeof(*ip4),
                                 &recv_port, &inner_ip4)) {
    *error = MAP_ERROR_ICMP;

  // In case of 1:1 mapping, we don't care about the port
  if(d->ea_bits_len == 0 && d->rules) {
    //NOTE(review): this error assignment belongs to an elided "else"
    //branch (port lookup failure), not to the 1:1 branch above.
    *error = MAP_ERROR_ICMP;

  //We have 2 headers to translate.
  //We need to make some room in the middle of the packet
  if (PREDICT_FALSE(ip4_is_fragment(inner_ip4))) {
    //Here it starts getting really tricky
    //We will add a fragmentation header in the inner packet
    if (!ip4_is_first_fragment(inner_ip4)) {
      //For now we do not handle unless it is the first fragment
      //Ideally we should handle the case as we are in slow path already
      *error = MAP_ERROR_FRAGMENTED;

    //Grow the buffer headroom by two (ip6 - ip4) deltas plus the inner
    //fragment header, then move the outer ip4 + 8 ICMP bytes forward.
    vlib_buffer_advance(p, - 2*(sizeof(*ip6) - sizeof(*ip4)) - sizeof(*inner_frag));
    ip6 = vlib_buffer_get_current(p);
    memcpy(u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4)), ip4, 20 + 8);
    ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
    icmp = (icmp46_header_t *) (ip4 + 1);
    inner_ip6 = (ip6_header_t *) u8_ptr_add(inner_ip4, sizeof(*ip4) - sizeof(*ip6) - sizeof(*inner_frag));
    inner_frag = (ip6_frag_hdr_t *) u8_ptr_add(inner_ip6, sizeof(*inner_ip6));
    ip6->payload_length = u16_net_add(ip4->length, sizeof(*ip6) - 2*sizeof(*ip4) + sizeof(*inner_frag));
    inner_frag_id = frag_id_4to6(inner_ip4->fragment_id);
    inner_frag_offset = ip4_get_fragment_offset(inner_ip4);
    inner_frag_more = !!(inner_ip4->flags_and_fragment_offset & clib_net_to_host_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS));
    //NOTE(review): the "} else {" separating the fragmented /
    //non-fragmented inner-packet paths is elided here; the following
    //lines are the non-fragmented variant (no inner fragment header).
    vlib_buffer_advance(p, - 2*(sizeof(*ip6) - sizeof(*ip4)));
    ip6 = vlib_buffer_get_current(p);
    memcpy(u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4)), ip4, 20 + 8);
    ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
    icmp = (icmp46_header_t *) u8_ptr_add(ip4, sizeof(*ip4));
    inner_ip6 = (ip6_header_t *) u8_ptr_add(inner_ip4, sizeof(*ip4) - sizeof(*ip6));
    ip6->payload_length = u16_net_add(ip4->length, sizeof(*ip6) - 2*sizeof(*ip4));

  //Remove the inner IPv4 pseudo-header contribution from the inner L4
  //checksum (the IPv6 pseudo-header is added back further below).
  if (PREDICT_TRUE(inner_ip4->protocol == IP_PROTOCOL_TCP)) {
    inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum;
    *inner_L4_checksum = ip_csum_fold(ip_csum_sub_even(*inner_L4_checksum, *((u64 *) (&inner_ip4->src_address))));
  } else if (PREDICT_TRUE(inner_ip4->protocol == IP_PROTOCOL_UDP)) {
    inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum;
    if (!*inner_L4_checksum) {
      //The inner packet was first translated, and therefore came from IPv6.
      //As the packet was an IPv6 packet, the UDP checksum can't be NULL
      *error = MAP_ERROR_ICMP;
    *inner_L4_checksum = ip_csum_fold(ip_csum_sub_even(*inner_L4_checksum, *((u64 *)(&inner_ip4->src_address))));
  } else if (inner_ip4->protocol == IP_PROTOCOL_ICMP) {
    //We have an ICMP inside an ICMP
    //It needs to be translated, but not for error ICMP messages
    icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1);
    csum = inner_icmp->checksum;
    //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place
    csum = ip_csum_sub_even(csum, *((u16 *)inner_icmp));
    inner_icmp->type = (inner_icmp->type == ICMP4_echo_request)?
        ICMP6_echo_request:ICMP6_echo_reply;
    csum = ip_csum_add_even(csum, *((u16 *)inner_icmp));
    //ICMPv6 checksums include an IPv6 pseudo-header (protocol + length).
    csum = ip_csum_add_even(csum, clib_host_to_net_u16(IP_PROTOCOL_ICMP6));
    csum = ip_csum_add_even(csum, inner_ip4->length - sizeof(*inner_ip4));
    inner_icmp->checksum = ip_csum_fold(csum);
    inner_L4_checksum = &inner_icmp->checksum;
    inner_ip4->protocol = IP_PROTOCOL_ICMP6;
    //NOTE(review): this ASSERT belongs to an elided final "else" branch.
    ASSERT(0); // We had a port from that, so it is udp or tcp or ICMP

  //FIXME: Security check with the port found in the inner packet

  csum = *inner_L4_checksum; //Initial checksum of the inner L4 header
  //FIXME: Shouldn't we remove ip addresses from there ?

  //Translate the inner IPv4 header to IPv6 in place.
  inner_ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (inner_ip4->tos << 20));
  inner_ip6->payload_length = u16_net_add(inner_ip4->length, - sizeof(*inner_ip4));
  inner_ip6->hop_limit = inner_ip4->ttl;
  inner_ip6->protocol = inner_ip4->protocol;

  //Note that the source address is within the domain
  //while the destination address is the one outside the domain
  ip4_map_t_embedded_address(d, &inner_ip6->dst_address, &inner_ip4->dst_address);
  inner_ip6->src_address.as_u64[0] = map_get_pfx_net(d, inner_ip4->src_address.as_u32, recv_port);
  inner_ip6->src_address.as_u64[1] = map_get_sfx_net(d, inner_ip4->src_address.as_u32, recv_port);

  if (PREDICT_FALSE(inner_frag != NULL)) {
    //Chain the synthesized IPv6 fragment header into the inner packet.
    inner_frag->next_hdr = inner_ip6->protocol;
    inner_frag->identification = inner_frag_id;
    inner_frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(inner_frag_offset, inner_frag_more);
    inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
    inner_ip6->payload_length = clib_host_to_net_u16(
        clib_net_to_host_u16(inner_ip6->payload_length) + sizeof(*inner_frag));

  //Add the new IPv6 pseudo-header (addresses) into the inner L4 checksum.
  csum = ip_csum_add_even(csum, inner_ip6->src_address.as_u64[0]);
  csum = ip_csum_add_even(csum, inner_ip6->src_address.as_u64[1]);
  csum = ip_csum_add_even(csum, inner_ip6->dst_address.as_u64[0]);
  csum = ip_csum_add_even(csum, inner_ip6->dst_address.as_u64[1]);
  *inner_L4_checksum = ip_csum_fold(csum);

  //NOTE(review): these three lines are the elided "else" path (no inner
  //packet): simply shift the buffer by the ip6/ip4 header size delta.
  vlib_buffer_advance(p, sizeof(*ip4) - sizeof(*ip6));
  ip6 = vlib_buffer_get_current(p);
  ip6->payload_length = clib_host_to_net_u16(clib_net_to_host_u16(ip4->length) - sizeof(*ip4));

  //Translate outer IPv6
  ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip4->tos << 20));
  ip6->hop_limit = ip4->ttl;
  ip6->protocol = IP_PROTOCOL_ICMP6;
  ip4_map_t_embedded_address(d, &ip6->src_address, &ip4->src_address);
  ip6->dst_address.as_u64[0] = map_get_pfx_net(d, ip4->dst_address.as_u32, recv_port);
  ip6->dst_address.as_u64[1] = map_get_sfx_net(d, ip4->dst_address.as_u32, recv_port);

  //Truncate when the packet exceeds the minimal IPv6 MTU
  if (p->current_length > 1280) {
    ip6->payload_length = clib_host_to_net_u16(1280 - sizeof(*ip6));
    p->current_length = 1280; //Looks too simple to be correct...

  //TODO: We could do an easy diff-checksum for echo requests/replies
  //Recompute ICMP checksum
  csum = ip_csum_with_carry(0, ip6->payload_length);
  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(ip6->protocol));
  csum = ip_csum_with_carry(csum, ip6->src_address.as_u64[0]);
  csum = ip_csum_with_carry(csum, ip6->src_address.as_u64[1]);
  csum = ip_csum_with_carry(csum, ip6->dst_address.as_u64[0]);
  csum = ip_csum_with_carry(csum, ip6->dst_address.as_u64[1]);
  csum = ip_incremental_checksum(csum, icmp, clib_net_to_host_u16(ip6->payload_length));
  icmp->checksum = ~ip_csum_fold (csum);
//VPP graph-node function: translates ICMPv4 packets to ICMPv6 (via
//_ip4_map_t_icmp) and forwards them to ip6-lookup, or to the ip6
//fragmentation node when the result exceeds the domain MTU.
//NOTE(review): the return-type line, the "vlib_frame_t *frame"
//parameter line, several per-packet declarations (pi0, p0, d0, error0,
//len0) and some braces are elided from this chunk.
ip4_map_t_icmp (vlib_main_t *vm,
                vlib_node_runtime_t *node,
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_t_icmp_node.index);
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 cpu_index = os_get_cpu_number();

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from > 0 && n_left_to_next > 0) {
      ip4_mapt_icmp_next_t next0;

      next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
      pi0 = to_next[0] = from[0];
      error0 = MAP_ERROR_NONE;

      p0 = vlib_get_buffer(vm, pi0);
      vlib_buffer_advance(p0, sizeof(ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
      len0 = clib_net_to_host_u16(((ip4_header_t *)vlib_buffer_get_current(p0))->length);
      d0 = pool_elt_at_index(map_main.domains, vnet_buffer(p0)->map_t.map_domain_index);
      _ip4_map_t_icmp(d0, p0, &error0);

      //Divert to ip6 fragmentation when the translated packet exceeds
      //the domain MTU.
      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
        next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
        //TX counter accounted here (the classifier skips ICMP packets).
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
        //NOTE(review): this drop assignment belongs to an elided "else".
        next0 = IP4_MAPT_ICMP_NEXT_DROP;
      p0->error = error_node->errors[error0];
      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                      to_next, n_left_to_next, pi0,
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
//VPP graph-node function: translates non-first IPv4 fragments. These
//carry no L4 header, so the IPv6 addresses come pre-computed in the
//pseudo-header and an IPv6 fragment header is synthesized from the
//IPv4 fragmentation fields.
//NOTE(review): the return-type / "vlib_frame_t *frame" lines and some
//per-packet declarations (pi0, p0, ip40, ip60) are elided from this
//chunk.
ip4_map_t_fragmented (vlib_main_t *vm,
                      vlib_node_runtime_t *node,
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from > 0 && n_left_to_next > 0) {
      ip6_frag_hdr_t *frag0;
      ip4_mapt_pseudo_header_t *pheader0;
      ip4_mapt_fragmented_next_t next0;

      next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
      pi0 = to_next[0] = from[0];
      p0 = vlib_get_buffer(vm, pi0);

      //Accessing pseudo header
      pheader0 = vlib_buffer_get_current(p0);
      vlib_buffer_advance(p0, sizeof(*pheader0));

      //Accessing ip4 header
      ip40 = vlib_buffer_get_current(p0);
      //ip6 + fragment header is 28 bytes longer than ip4: overlay them
      //backwards over the ip4 header and rewind the buffer accordingly.
      frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
      ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0) - sizeof(*ip60));
      vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));

      //We know that the protocol was one of ICMP, TCP or UDP
      //because the first fragment was found and cached
      frag0->next_hdr = (ip40->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip40->protocol;
      frag0->identification = frag_id_4to6(ip40->fragment_id);
      frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(
          ip4_get_fragment_offset(ip40),
          clib_net_to_host_u16(ip40->flags_and_fragment_offset) & IP4_HEADER_FLAG_MORE_FRAGMENTS);

      ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
      ip60->payload_length = clib_host_to_net_u16(clib_net_to_host_u16(ip40->length) - sizeof(*ip40) + sizeof(*frag0));
      ip60->hop_limit = ip40->ttl;
      ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
      //Addresses were pre-computed by the classifier node.
      ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
      ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
      ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
      ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];

      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
        next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;

      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                      to_next, n_left_to_next, pi0,
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
//VPP graph-node function: translates IPv4 TCP/UDP packets (including
//first fragments) to IPv6, fixing the L4 checksum incrementally
//(subtract IPv4 pseudo-header, add IPv6 pseudo-header). A dual-packet
//fast loop is compiled in when IP4_MAP_T_DUAL_LOOP is defined.
//NOTE(review): this chunk is lossy - the return-type / "vlib_frame_t
//*frame" lines, several declarations (pi0, pi1, and presumably the
//frag0/frag1 = NULL initializations implied by the "frag0 != NULL"
//tests below), "else" lines and closing braces are missing here, not
//in the original file.
ip4_map_t_tcp_udp(vlib_main_t *vm,
                  vlib_node_runtime_t *node,
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
    //Dual-packet loop: same logic as the single loop below, unrolled x2.
    while (n_left_from >= 4 && n_left_to_next >= 2) {
      vlib_buffer_t *p0, *p1;
      ip4_header_t *ip40, *ip41;
      ip6_header_t *ip60, *ip61;
      ip_csum_t csum0, csum1;
      u16 *checksum0, *checksum1;
      ip6_frag_hdr_t *frag0, *frag1;
      u32 frag_id0, frag_id1;
      ip4_mapt_pseudo_header_t *pheader0, *pheader1;
      ip4_mapt_tcp_udp_next_t next0, next1;

      pi0 = to_next[0] = from[0];
      pi1 = to_next[1] = from[1];
      next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
      next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
      p0 = vlib_get_buffer(vm, pi0);
      p1 = vlib_get_buffer(vm, pi1);

      //Accessing pseudo header
      pheader0 = vlib_buffer_get_current(p0);
      pheader1 = vlib_buffer_get_current(p1);
      vlib_buffer_advance(p0, sizeof(*pheader0));
      vlib_buffer_advance(p1, sizeof(*pheader1));

      //Accessing ip4 header
      ip40 = vlib_buffer_get_current(p0);
      ip41 = vlib_buffer_get_current(p1);
      //checksum_offset was set by the classifier (36 for TCP, 26 for UDP).
      checksum0 = (u16 *) u8_ptr_add(ip40, vnet_buffer(p0)->map_t.checksum_offset);
      checksum1 = (u16 *) u8_ptr_add(ip41, vnet_buffer(p1)->map_t.checksum_offset);

      //UDP checksum is optional over IPv4 but mandatory for IPv6
      //We do not check udp->length sanity but use our safe computed value instead
      if (PREDICT_FALSE(!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) {
        u16 udp_len = clib_host_to_net_u16(ip40->length) - sizeof(*ip40);
        udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip40, sizeof(*ip40));
        csum = ip_incremental_checksum(0, udp, udp_len);
        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
        csum = ip_csum_with_carry(csum, *((u64 *)(&ip40->src_address)));
        *checksum0 = ~ip_csum_fold(csum);
      if (PREDICT_FALSE(!*checksum1 && ip41->protocol == IP_PROTOCOL_UDP)) {
        u16 udp_len = clib_host_to_net_u16(ip41->length) - sizeof(*ip40);
        udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip41, sizeof(*ip40));
        csum = ip_incremental_checksum(0, udp, udp_len);
        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
        csum = ip_csum_with_carry(csum, *((u64 *)(&ip41->src_address)));
        *checksum1 = ~ip_csum_fold(csum);

      //Remove the IPv4 pseudo-header addresses from the L4 checksums.
      csum0 = ip_csum_sub_even(*checksum0, ip40->src_address.as_u32);
      csum1 = ip_csum_sub_even(*checksum1, ip41->src_address.as_u32);
      csum0 = ip_csum_sub_even(csum0, ip40->dst_address.as_u32);
      csum1 = ip_csum_sub_even(csum1, ip41->dst_address.as_u32);

      // Deal with fragmented packets
      //Only the more-fragments flag is tested: non-first fragments
      //(offset != 0) were diverted to the fragmented node by the classifier.
      if (PREDICT_FALSE(ip40->flags_and_fragment_offset &
                        clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
        ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
        frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
        frag_id0 = frag_id_4to6(ip40->fragment_id);
        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
        //NOTE(review): the "} else {" separating the two layouts is
        //elided here.
        ip60 = (ip6_header_t *) (((u8 *)ip40) + sizeof(*ip40) - sizeof(*ip60));
        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60));
      if (PREDICT_FALSE(ip41->flags_and_fragment_offset &
                        clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
        ip61 = (ip6_header_t *) u8_ptr_add(ip41, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
        frag1 = (ip6_frag_hdr_t *) u8_ptr_add(ip41, sizeof(*ip40) - sizeof(*frag0));
        frag_id1 = frag_id_4to6(ip41->fragment_id);
        vlib_buffer_advance(p1, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
        ip61 = (ip6_header_t *) (((u8 *)ip41) + sizeof(*ip40) - sizeof(*ip60));
        vlib_buffer_advance(p1, sizeof(*ip40) - sizeof(*ip60));

      //Build the IPv6 headers from the IPv4 ones.
      ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
      ip61->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip41->tos << 20));
      ip60->payload_length = u16_net_add(ip40->length, - sizeof(*ip40));
      ip61->payload_length = u16_net_add(ip41->length, - sizeof(*ip40));
      ip60->hop_limit = ip40->ttl;
      ip61->hop_limit = ip41->ttl;
      ip60->protocol = ip40->protocol;
      ip61->protocol = ip41->protocol;

      if (PREDICT_FALSE(frag0 != NULL)) {
        //First fragment: offset 0, more-fragments set.
        frag0->next_hdr = ip60->protocol;
        frag0->identification = frag_id0;
        frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
        ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
        ip60->payload_length = u16_net_add(ip60->payload_length, sizeof(*frag0));
      if (PREDICT_FALSE(frag1 != NULL)) {
        frag1->next_hdr = ip61->protocol;
        frag1->identification = frag_id1;
        frag1->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
        ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
        ip61->payload_length = u16_net_add(ip61->payload_length, sizeof(*frag0));

      //Finally copying the address
      ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
      ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0];
      ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
      ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1];
      ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
      ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0];
      ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
      ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1];

      //Add the IPv6 pseudo-header addresses into the L4 checksums.
      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[0]);
      csum1 = ip_csum_add_even(csum1, ip61->src_address.as_u64[0]);
      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[1]);
      csum1 = ip_csum_add_even(csum1, ip61->src_address.as_u64[1]);
      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[0]);
      csum1 = ip_csum_add_even(csum1, ip61->dst_address.as_u64[0]);
      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[1]);
      csum1 = ip_csum_add_even(csum1, ip61->dst_address.as_u64[1]);
      *checksum0 = ip_csum_fold(csum0);
      *checksum1 = ip_csum_fold(csum1);

      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
        next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
      if(vnet_buffer(p1)->map_t.mtu < p1->current_length) {
        vnet_buffer(p1)->ip_frag.header_offset = 0;
        vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
        vnet_buffer(p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
        next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;

      vlib_validate_buffer_enqueue_x2(vm, node, next_index,
                                      to_next, n_left_to_next, pi0, pi1,

    //Single-packet loop (same logic as above).
    while (n_left_from > 0 && n_left_to_next > 0) {
      ip6_frag_hdr_t *frag0;
      ip4_mapt_pseudo_header_t *pheader0;
      ip4_mapt_tcp_udp_next_t next0;

      pi0 = to_next[0] = from[0];
      next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
      p0 = vlib_get_buffer(vm, pi0);

      //Accessing pseudo header
      pheader0 = vlib_buffer_get_current(p0);
      vlib_buffer_advance(p0, sizeof(*pheader0));

      //Accessing ip4 header
      ip40 = vlib_buffer_get_current(p0);
      checksum0 = (u16 *) u8_ptr_add(ip40, vnet_buffer(p0)->map_t.checksum_offset);

      //UDP checksum is optional over IPv4 but mandatory for IPv6
      //We do not check udp->length sanity but use our safe computed value instead
      if (PREDICT_FALSE(!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) {
        u16 udp_len = clib_host_to_net_u16(ip40->length) - sizeof(*ip40);
        udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip40, sizeof(*ip40));
        csum = ip_incremental_checksum(0, udp, udp_len);
        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
        csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
        csum = ip_csum_with_carry(csum, *((u64 *)(&ip40->src_address)));
        *checksum0 = ~ip_csum_fold(csum);

      csum0 = ip_csum_sub_even(*checksum0, ip40->src_address.as_u32);
      csum0 = ip_csum_sub_even(csum0, ip40->dst_address.as_u32);

      // Deal with fragmented packets
      if (PREDICT_FALSE(ip40->flags_and_fragment_offset &
                        clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
        ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
        frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
        frag_id0 = frag_id_4to6(ip40->fragment_id);
        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
        //NOTE(review): elided "} else {" here.
        ip60 = (ip6_header_t *) (((u8 *)ip40) + sizeof(*ip40) - sizeof(*ip60));
        vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60));

      ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
      ip60->payload_length = u16_net_add(ip40->length, - sizeof(*ip40));
      ip60->hop_limit = ip40->ttl;
      ip60->protocol = ip40->protocol;

      if (PREDICT_FALSE(frag0 != NULL)) {
        frag0->next_hdr = ip60->protocol;
        frag0->identification = frag_id0;
        frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
        ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
        ip60->payload_length = u16_net_add(ip60->payload_length, sizeof(*frag0));

      //Finally copying the address
      ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
      ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
      ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
      ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];

      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[0]);
      csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[1]);
      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[0]);
      csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[1]);
      *checksum0 = ip_csum_fold(csum0);

      if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        //Send to fragmentation node if necessary
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
        next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;

      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                      to_next, n_left_to_next, pi0,
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
  return frame->n_vectors;
//Classify an incoming IPv4 packet: pick the next translation node,
//extract the destination port (used for the MAP IPv6 address
//computation) and flag malformed / unsupported packets.
//NOTE(review): this chunk is lossy - the 1:1-mapping branch bodies,
//some closing braces and the final "else" line before the
//BAD_PROTOCOL assignment are elided here.
static_always_inline void
ip4_map_t_classify(vlib_buffer_t *p0, map_domain_t *d0, ip4_header_t *ip40, u16 ip4_len0,
                   i32 *dst_port0, u8 *error0, ip4_mapt_next_t *next0)
  //Non-first fragments carry no L4 header: use the port cached from
  //the first fragment.
  if (PREDICT_FALSE(ip4_get_fragment_offset(ip40))) {
    *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
    if(d0->ea_bits_len == 0 && d0->rules) {
      *dst_port0 = ip4_map_fragment_get_port(ip40);
      *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
  } else if (PREDICT_TRUE(ip40->protocol == IP_PROTOCOL_TCP)) {
    //36 = 20 (IPv4 header) + 16 (offset of the TCP checksum field).
    vnet_buffer(p0)->map_t.checksum_offset = 36;
    *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
    *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
    //Destination port: 2 bytes into the L4 header.
    *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 2));
  } else if (PREDICT_TRUE(ip40->protocol == IP_PROTOCOL_UDP)) {
    //26 = 20 (IPv4 header) + 6 (offset of the UDP checksum field).
    vnet_buffer(p0)->map_t.checksum_offset = 26;
    *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
    *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
    *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 2));
  } else if (ip40->protocol == IP_PROTOCOL_ICMP) {
    *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
    if(d0->ea_bits_len == 0 && d0->rules)
    //BUG(review): this tests the ICMP *code* byte, but ICMP4_echo_reply
    //and ICMP4_echo_request are *type* values (0 and 8); ->type is
    //almost certainly intended. As written, e.g. a
    //destination-unreachable message with code 0 would match the
    //echo-reply comparison. Confirm and fix against the full file.
    else if (((icmp46_header_t *) u8_ptr_add(ip40, sizeof(*ip40)))->code == ICMP4_echo_reply ||
             ((icmp46_header_t *) u8_ptr_add(ip40, sizeof(*ip40)))->code == ICMP4_echo_request)
      //Echo identifier: 6 bytes into the ICMP header.
      *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 6));
    //NOTE(review): this belongs to an elided final "else" branch.
    *error0 = MAP_ERROR_BAD_PROTOCOL;
//VPP graph-node function: entry point of the IPv4 MAP-T feature.
//Validates the IPv4 header, classifies the packet, prepends the
//pseudo-header carrying the pre-computed IPv6 addresses, caches
//first-fragment ports, and dispatches to the per-protocol translation
//nodes.
//NOTE(review): this chunk is lossy AND the function continues past the
//end of the visible chunk (the single-packet loop and epilogue are cut
//off). Return-type / "vlib_frame_t *frame" lines, several declarations
//(pi0, pi1, error0, error1) and various braces are also elided.
ip4_map_t (vlib_main_t *vm,
           vlib_node_runtime_t *node,
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_t_node.index);
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 cpu_index = os_get_cpu_number();

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
    while (n_left_from >= 4 && n_left_to_next >= 2) {
      vlib_buffer_t *p0, *p1;
      ip4_header_t *ip40, *ip41;
      map_domain_t *d0, *d1;
      ip4_mapt_next_t next0 = 0, next1 = 0;
      u16 ip4_len0, ip4_len1;
      i32 dst_port0, dst_port1;
      ip4_mapt_pseudo_header_t *pheader0, *pheader1;

      pi0 = to_next[0] = from[0];
      pi1 = to_next[1] = from[1];
      error0 = MAP_ERROR_NONE;
      error1 = MAP_ERROR_NONE;

      p0 = vlib_get_buffer(vm, pi0);
      p1 = vlib_get_buffer(vm, pi1);
      ip40 = vlib_buffer_get_current(p0);
      ip41 = vlib_buffer_get_current(p1);
      //NOTE(review): clib_host_to_net_u16 is used to convert the
      //network-order length to host order - byte-wise identical to
      //clib_net_to_host_u16, but the latter would express the intent.
      ip4_len0 = clib_host_to_net_u16(ip40->length);
      ip4_len1 = clib_host_to_net_u16(ip41->length);

      //Drop truncated packets and anything that is not a plain
      //20-byte IPv4 header (version 4, IHL 5).
      if (PREDICT_FALSE(p0->current_length < ip4_len0 ||
                        ip40->ip_version_and_header_length != 0x45)) {
        error0 = MAP_ERROR_UNKNOWN;
        next0 = IP4_MAPT_NEXT_DROP;
      if (PREDICT_FALSE(p1->current_length < ip4_len1 ||
                        ip41->ip_version_and_header_length != 0x45)) {
        error1 = MAP_ERROR_UNKNOWN;
        next1 = IP4_MAPT_NEXT_DROP;
      d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
                              &vnet_buffer(p0)->map_t.map_domain_index);
      d1 = ip4_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX],
                              &vnet_buffer(p1)->map_t.map_domain_index);

      //~0 disables the MTU check in the downstream translation nodes.
      vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
      vnet_buffer(p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;

      ip4_map_t_classify(p0, d0, ip40, ip4_len0, &dst_port0, &error0, &next0);
      ip4_map_t_classify(p1, d1, ip41, ip4_len1, &dst_port1, &error1, &next1);

      //Add MAP-T pseudo header in front of the packet
      vlib_buffer_advance(p0, - sizeof(*pheader0));
      vlib_buffer_advance(p1, - sizeof(*pheader1));
      pheader0 = vlib_buffer_get_current(p0);
      pheader1 = vlib_buffer_get_current(p1);

      //Save addresses within the packet
      ip4_map_t_embedded_address(d0, &pheader0->saddr, &ip40->src_address);
      ip4_map_t_embedded_address(d1, &pheader1->saddr, &ip41->src_address);
      pheader0->daddr.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
      pheader0->daddr.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
      pheader1->daddr.as_u64[0] = map_get_pfx_net(d1, ip41->dst_address.as_u32, (u16)dst_port1);
      pheader1->daddr.as_u64[1] = map_get_sfx_net(d1, ip41->dst_address.as_u32, (u16)dst_port1);

      //Cache the port of first fragments (not needed for 1:1 domains)
      //so later fragments can be translated.
      if (PREDICT_FALSE(ip4_is_first_fragment(ip40) && (dst_port0 != -1) &&
                        (d0->ea_bits_len != 0 || !d0->rules) &&
                        ip4_map_fragment_cache(ip40, dst_port0))) {
        error0 = MAP_ERROR_FRAGMENT_MEMORY;
      if (PREDICT_FALSE(ip4_is_first_fragment(ip41) && (dst_port1 != -1) &&
                        (d1->ea_bits_len != 0 || !d1->rules) &&
                        ip4_map_fragment_cache(ip41, dst_port1))) {
        error1 = MAP_ERROR_FRAGMENT_MEMORY;
      //ICMP TX accounting is deferred to the ICMP node.
      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) {
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
                                        clib_net_to_host_u16(ip40->length));
      if (PREDICT_TRUE(error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP)) {
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
                                        vnet_buffer(p1)->map_t.map_domain_index, 1,
                                        clib_net_to_host_u16(ip41->length));
      next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
      next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
      p0->error = error_node->errors[error0];
      p1->error = error_node->errors[error1];
      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                      n_left_to_next, pi0, pi1, next0, next1);

    //Single-packet loop (truncated: continues past this chunk).
    while (n_left_from > 0 && n_left_to_next > 0) {
      ip4_mapt_next_t next0;
      ip4_mapt_pseudo_header_t *pheader0;

      pi0 = to_next[0] = from[0];
      error0 = MAP_ERROR_NONE;

      p0 = vlib_get_buffer(vm, pi0);
      ip40 = vlib_buffer_get_current(p0);
      ip4_len0 = clib_host_to_net_u16(ip40->length);
966 if (PREDICT_FALSE(p0->current_length < ip4_len0 ||
967 ip40->ip_version_and_header_length != 0x45)) {
968 error0 = MAP_ERROR_UNKNOWN;
969 next0 = IP4_MAPT_NEXT_DROP;
972 d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
973 &vnet_buffer(p0)->map_t.map_domain_index);
975 vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
978 ip4_map_t_classify(p0, d0, ip40, ip4_len0, &dst_port0, &error0, &next0);
980 //Add MAP-T pseudo header in front of the packet
981 vlib_buffer_advance(p0, - sizeof(*pheader0));
982 pheader0 = vlib_buffer_get_current(p0);
984 //Save addresses within the packet
985 ip4_map_t_embedded_address(d0, &pheader0->saddr, &ip40->src_address);
986 pheader0->daddr.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
987 pheader0->daddr.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
989 //It is important to cache at this stage because the result might be necessary
990 //for packets within the same vector.
991 //Actually, this approach even provides some limited out-of-order fragments support
992 if (PREDICT_FALSE(ip4_is_first_fragment(ip40) && (dst_port0 != -1) &&
993 (d0->ea_bits_len != 0 || !d0->rules) &&
994 ip4_map_fragment_cache(ip40, dst_port0))) {
995 error0 = MAP_ERROR_UNKNOWN;
998 if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) {
999 vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_TX, cpu_index,
1000 vnet_buffer(p0)->map_t.map_domain_index, 1,
1001 clib_net_to_host_u16(ip40->length));
1004 next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
1005 p0->error = error_node->errors[error0];
1006 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1007 to_next, n_left_to_next, pi0,
1010 vlib_put_next_frame(vm, node, next_index, n_left_to_next);
1012 return frame->n_vectors;
// Error counter strings for all four MAP-T nodes: one string per entry of
// the MAP error list (the foreach_map_error expansion and #undef are elided
// from this listing).
1015 static char *map_t_error_strings[] = {
1016 #define _(sym,string) string,
// Graph node translating non-first IPv4 fragments, which carry no L4 header;
// the port comes from the cache filled by ip4_map_fragment_cache on the
// first fragment (see the classifier node above).
1021 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
1022 .function = ip4_map_t_fragmented,
1023 .name = "ip4-map-t-fragmented",
1024 .vector_size = sizeof(u32),
1025 .format_trace = format_map_trace,
1026 .type = VLIB_NODE_TYPE_INTERNAL,
1028 .n_errors = MAP_N_ERROR,
1029 .error_strings = map_t_error_strings,
1031 .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
// next-node mapping (the ".next_nodes = {" opener is elided in this listing)
1033 [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
1034 [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1035 [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
// Graph node handling ICMP packets — translation to ICMPv6 (see the
// icmp_to_icmp6 helpers earlier in this file).
1039 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
1040 .function = ip4_map_t_icmp,
1041 .name = "ip4-map-t-icmp",
1042 .vector_size = sizeof(u32),
1043 .format_trace = format_map_trace,
1044 .type = VLIB_NODE_TYPE_INTERNAL,
1046 .n_errors = MAP_N_ERROR,
1047 .error_strings = map_t_error_strings,
1049 .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
// next-node mapping (the ".next_nodes = {" opener is elided in this listing)
1051 [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
1052 [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1053 [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
// Graph node handling first-fragment / unfragmented TCP and UDP packets.
1057 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
1058 .function = ip4_map_t_tcp_udp,
1059 .name = "ip4-map-t-tcp-udp",
1060 .vector_size = sizeof(u32),
1061 .format_trace = format_map_trace,
1062 .type = VLIB_NODE_TYPE_INTERNAL,
1064 .n_errors = MAP_N_ERROR,
1065 .error_strings = map_t_error_strings,
1067 .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
// next-node mapping (the ".next_nodes = {" opener is elided in this listing)
1069 [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
1070 [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1071 [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
// Entry node of the MAP-T IPv4 path: classifies each packet and dispatches
// to the three translation nodes registered above, or drops on error.
1075 VLIB_REGISTER_NODE(ip4_map_t_node) = {
1076 .function = ip4_map_t,
1077 .name = "ip4-map-t",
1078 .vector_size = sizeof(u32),
1079 .format_trace = format_map_trace,
1080 .type = VLIB_NODE_TYPE_INTERNAL,
1082 .n_errors = MAP_N_ERROR,
1083 .error_strings = map_t_error_strings,
1085 .n_next_nodes = IP4_MAPT_N_NEXT,
// next-node mapping (the ".next_nodes = {" opener and the closing braces of
// this registration are elided in this listing)
1087 [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
1088 [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
1089 [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
1090 [IP4_MAPT_NEXT_DROP] = "error-drop",