/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include "../ip/ip_frag.h"
#include <vnet/ip/ip4_to_ip6.h>

#define IP4_MAP_T_DUAL_LOOP 1

typedef enum
{
  IP4_MAPT_NEXT_MAPT_TCP_UDP,
  IP4_MAPT_NEXT_MAPT_ICMP,
  IP4_MAPT_NEXT_MAPT_FRAGMENTED,
  IP4_MAPT_NEXT_DROP,
  IP4_MAPT_N_NEXT
} ip4_mapt_next_t;

typedef enum
{
  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
  IP4_MAPT_ICMP_NEXT_IP6_FRAG,
  IP4_MAPT_ICMP_NEXT_DROP,
  IP4_MAPT_ICMP_N_NEXT
} ip4_mapt_icmp_next_t;

typedef enum
{
  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
  IP4_MAPT_TCP_UDP_NEXT_DROP,
  IP4_MAPT_TCP_UDP_N_NEXT
} ip4_mapt_tcp_udp_next_t;

typedef enum
{
  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
  IP4_MAPT_FRAGMENTED_NEXT_DROP,
  IP4_MAPT_FRAGMENTED_N_NEXT
} ip4_mapt_fragmented_next_t;

//This is used to pass information within the buffer data.
//The buffer metadata is too small to carry structures this large.
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip6_address_t daddr;
  ip6_address_t saddr;
  //IPv6 header + Fragmentation header will be here
  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
  u8 unused[28];
}) ip4_mapt_pseudo_header_t;
/* *INDENT-ON* */
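
/*
 * Layout sanity sketch (an added check, assuming a C11 toolchain and
 * vnet's 40-byte ip6_header_t / 8-byte fragmentation header): the
 * in-place rewrite of the 20-byte IPv4 header needs
 * 40 + 8 - 20 = 28 bytes of headroom, which is exactly the unused[]
 * pad on top of the two stashed addresses.
 */
_Static_assert (sizeof (ip4_mapt_pseudo_header_t) ==
		2 * sizeof (ip6_address_t) + 28,
		"pseudo header must provide ip6+frag-ip4 rewrite headroom");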

static_always_inline int
ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
                       ip4->fragment_id,
                       (ip4->protocol ==
                        IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
                       &ignore);
  if (r)
    r->port = port;

  map_ip4_reass_unlock ();
  return !r;
}

static_always_inline i32
ip4_map_fragment_get_port (ip4_header_t * ip4)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r =
    map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
                       ip4->fragment_id,
                       (ip4->protocol ==
                        IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
                       &ignore);
  i32 ret = r ? r->port : -1;
  map_ip4_reass_unlock ();
  return ret;
}
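
/*
 * Descriptive note on the two helpers above: the first fragment of an
 * IPv4 datagram is the only one carrying the L4 ports, so the steering
 * node records the MAP port against the (src, dst, fragment_id, proto)
 * reassembly key via ip4_map_fragment_cache(), which returns non-zero
 * when no reassembly slot can be obtained. Later fragments, which have
 * no L4 header, recover the port through ip4_map_fragment_get_port();
 * a -1 result means the first fragment was never seen (or the tracker
 * ran out of memory) and the fragment cannot be translated.
 */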

typedef struct
{
  map_domain_t *d;
  u16 id;
} icmp_to_icmp6_ctx_t;

static int
ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;
  map_main_t *mm = &map_main;

  if (mm->is_ce)
    {
      //In CE mode, the source address is the one inside the domain
      ip6->src_address.as_u64[0] =
	map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
      ip6->src_address.as_u64[1] =
	map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
      ip4_map_t_embedded_address (ctx->d, &ip6->dst_address,
				  &ip4->dst_address);
    }
  else
    {
      ip4_map_t_embedded_address (ctx->d, &ip6->src_address,
				  &ip4->src_address);
      ip6->dst_address.as_u64[0] =
	map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
      ip6->dst_address.as_u64[1] =
	map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
    }

  return 0;
}

static int
ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6,
			      void *arg)
{
  icmp_to_icmp6_ctx_t *ctx = arg;
  map_main_t *mm = &map_main;

  if (mm->is_ce)
    {
      //Note that the destination address is within the domain
      //while the source address is the one outside the domain
      ip4_map_t_embedded_address (ctx->d, &ip6->src_address,
				  &ip4->src_address);
      ip6->dst_address.as_u64[0] =
	map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
      ip6->dst_address.as_u64[1] =
	map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->id);
    }
  else
    {
      //Note that the source address is within the domain
      //while the destination address is the one outside the domain
      ip4_map_t_embedded_address (ctx->d, &ip6->dst_address,
				  &ip4->dst_address);
      ip6->src_address.as_u64[0] =
	map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
      ip6->src_address.as_u64[1] =
	map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->id);
    }

  return 0;
}
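
/*
 * Illustrative sketch of the address derivation above (hypothetical
 * values, not from the source): for a domain whose MAP rule prefix is
 * 2001:db8:ab::/48, map_get_pfx_net()/map_get_sfx_net() compute the
 * two network-order 64-bit halves of the in-domain IPv6 address from
 * the IPv4 address and the port-derived id, while
 * ip4_map_t_embedded_address() embeds the out-of-domain IPv4 address
 * into the domain's IPv6 source prefix, in the spirit of RFC 6052.
 */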

static uword
ip4_map_t_icmp (vlib_main_t * vm,
		vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vlib_get_thread_index ();

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip4_mapt_icmp_next_t next0;
	  u8 error0;
	  map_domain_t *d0;
	  u16 len0;
	  icmp_to_icmp6_ctx_t ctx0;
	  ip4_header_t *ip40;
	  icmp46_header_t *icmp0;

	  next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  error0 = MAP_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t));	//The pseudo-header is not used
	  len0 =
	    clib_net_to_host_u16 (((ip4_header_t *)
				   vlib_buffer_get_current (p0))->length);
	  d0 =
	    pool_elt_at_index (map_main.domains,
			       vnet_buffer (p0)->map_t.map_domain_index);

	  ip40 = vlib_buffer_get_current (p0);
	  icmp0 = (icmp46_header_t *) (ip40 + 1);

	  ctx0.id = ip4_get_port (ip40, icmp0->type == ICMP6_echo_request);
	  ctx0.d = d0;
	  if (ctx0.id == 0)
	    {
	      // In case of 1:1 mapping, we don't care about the port
	      if (!(d0->ea_bits_len == 0 && d0->rules))
		{
		  error0 = MAP_ERROR_ICMP;
		  goto err0;
		}
	    }

	  if (icmp_to_icmp6
	      (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
	       ip4_to_ip6_set_inner_icmp_cb, &ctx0))
	    {
	      error0 = MAP_ERROR_ICMP;
	      goto err0;
	    }

	  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
	    {
	      vnet_buffer (p0)->ip_frag.header_offset = 0;
	      vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
	      vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
	      next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
	    }
	err0:
	  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       len0);
	    }
	  else
	    {
	      next0 = IP4_MAPT_ICMP_NEXT_DROP;
	    }

	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

static int
ip4_to_ip6_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *ctx)
{
  ip4_mapt_pseudo_header_t *pheader = ctx;

  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];

  return 0;
}
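
/*
 * Unlike the ICMP callbacks, this callback performs no address
 * computation of its own: the steering node (ip4_map_t below) has
 * already derived both IPv6 addresses and stashed them in the pseudo
 * header, so the TCP/UDP and fragmented paths merely copy them into
 * the freshly written IPv6 header.
 */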

static uword
ip4_map_t_fragmented (vlib_main_t * vm,
		      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip4_mapt_pseudo_header_t *pheader0;
	  ip4_mapt_fragmented_next_t next0;

	  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  p0 = vlib_get_buffer (vm, pi0);

	  //Accessing pseudo header
	  pheader0 = vlib_buffer_get_current (p0);
	  vlib_buffer_advance (p0, sizeof (*pheader0));

	  if (ip4_to_ip6_fragmented (p0, ip4_to_ip6_set_cb, pheader0))
	    {
	      p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
	      next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  vnet_buffer (p0)->ip_frag.header_offset = 0;
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP6_FRAG_NEXT_IP6_LOOKUP;
		  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
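
/*
 * All three translation nodes share the MTU pattern seen above: when
 * the translated packet would exceed the domain MTU, the buffer's
 * ip_frag metadata (header offset, MTU, post-fragmentation next node)
 * is filled in and the packet is steered to IP6_FRAG_NODE_NAME
 * instead of ip6-lookup.
 */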

static uword
ip4_map_t_tcp_udp (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip4_mapt_pseudo_header_t *pheader0, *pheader1;
	  ip4_mapt_tcp_udp_next_t next0, next1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
	  next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  //Accessing pseudo header
	  pheader0 = vlib_buffer_get_current (p0);
	  pheader1 = vlib_buffer_get_current (p1);
	  vlib_buffer_advance (p0, sizeof (*pheader0));
	  vlib_buffer_advance (p1, sizeof (*pheader1));

	  if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.header_offset = 0;
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP6_FRAG_NEXT_IP6_LOOKUP;
		  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
		}
	    }

	  if (ip4_to_ip6_tcp_udp (p1, ip4_to_ip6_set_cb, pheader1))
	    {
	      p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next1 = IP4_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p1)->ip_frag.header_offset = 0;
		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
		  vnet_buffer (p1)->ip_frag.next_index =
		    IP6_FRAG_NEXT_IP6_LOOKUP;
		  next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next, pi0, pi1,
					   next0, next1);
	}
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip4_mapt_pseudo_header_t *pheader0;
	  ip4_mapt_tcp_udp_next_t next0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
	  p0 = vlib_get_buffer (vm, pi0);

	  //Accessing pseudo header
	  pheader0 = vlib_buffer_get_current (p0);
	  vlib_buffer_advance (p0, sizeof (*pheader0));

	  if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP4_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.header_offset = 0;
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP6_FRAG_NEXT_IP6_LOOKUP;
		  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
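
/*
 * Note on IP4_MAP_T_DUAL_LOOP: the guarded loop above is the usual
 * vlib dual-loop optimization, handling two buffers per iteration to
 * amortize per-packet overhead; the scalar loop that follows drains
 * the remainder (and processes everything when the macro is unset).
 */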

static_always_inline void
ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
		    ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
		    u8 * error0, ip4_mapt_next_t * next0)
{
  map_main_t *mm = &map_main;
  u32 port_offset;

  //The port of interest is the source port in CE mode,
  //the destination port otherwise (u16 offset within the L4 header)
  if (mm->is_ce)
    port_offset = 0;
  else
    port_offset = 2;

  if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
    {
      *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
      if (d0->ea_bits_len == 0 && d0->rules)
	{
	  *dst_port0 = 0;
	}
      else
	{
	  *dst_port0 = ip4_map_fragment_get_port (ip40);
	  *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
	}
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
    {
      //L4 checksum to update: 20-byte IPv4 header + TCP checksum at 16
      vnet_buffer (p0)->map_t.checksum_offset = 36;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 =
	(i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + port_offset));
    }
  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
    {
      //L4 checksum to update: 20-byte IPv4 header + UDP checksum at 6
      vnet_buffer (p0)->map_t.checksum_offset = 26;
      *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
      *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
      *dst_port0 =
	(i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + port_offset));
    }
  else if (ip40->protocol == IP_PROTOCOL_ICMP)
    {
      *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
      if (d0->ea_bits_len == 0 && d0->rules)
	*dst_port0 = 0;
      else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
	       == ICMP4_echo_reply
	       || ((icmp46_header_t *)
		   u8_ptr_add (ip40,
			       sizeof (*ip40)))->code == ICMP4_echo_request)
	*dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
    }
  else
    {
      *error0 = MAP_ERROR_BAD_PROTOCOL;
    }
}
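
/*
 * Classification summary: TCP and UDP carry the MAP port inline and
 * get a checksum_offset of 36 (20-byte IPv4 header + 16 bytes into
 * TCP) or 26 (20 + 6 bytes into UDP); ICMP echo packets use a 16-bit
 * field of the echo header as port surrogate; non-first fragments
 * consult the reassembly cache; everything else is flagged
 * MAP_ERROR_BAD_PROTOCOL.
 */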

static uword
ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_map_t_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  map_main_t *mm = &map_main;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vlib_get_thread_index ();

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP4_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip4_header_t *ip40, *ip41;
	  map_domain_t *d0, *d1;
	  ip4_mapt_next_t next0 = 0, next1 = 0;
	  u16 ip4_len0, ip4_len1;
	  u8 error0, error1;
	  i32 map_port0, map_port1;
	  ip4_mapt_pseudo_header_t *pheader0, *pheader1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  error0 = MAP_ERROR_NONE;
	  error1 = MAP_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);
	  ip40 = vlib_buffer_get_current (p0);
	  ip41 = vlib_buffer_get_current (p1);
	  ip4_len0 = clib_host_to_net_u16 (ip40->length);
	  ip4_len1 = clib_host_to_net_u16 (ip41->length);

	  //Check IPv4 header validity
	  if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
			     ip40->ip_version_and_header_length != 0x45))
	    {
	      error0 = MAP_ERROR_UNKNOWN;
	      next0 = IP4_MAPT_NEXT_DROP;
	    }

	  if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
			     ip41->ip_version_and_header_length != 0x45))
	    {
	      error1 = MAP_ERROR_UNKNOWN;
	      next1 = IP4_MAPT_NEXT_DROP;
	    }

	  vnet_buffer (p0)->map_t.map_domain_index =
	    vnet_buffer (p0)->ip.adj_index[VLIB_TX];
	  d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
	  vnet_buffer (p1)->map_t.map_domain_index =
	    vnet_buffer (p1)->ip.adj_index[VLIB_TX];
	  d1 = ip4_map_get_domain (vnet_buffer (p1)->map_t.map_domain_index);

	  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
	  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;

	  map_port0 = -1;
	  map_port1 = -1;

	  ip4_map_t_classify (p0, d0, ip40, ip4_len0, &map_port0, &error0,
			      &next0);
	  ip4_map_t_classify (p1, d1, ip41, ip4_len1, &map_port1, &error1,
			      &next1);

	  //Add MAP-T pseudo header in front of the packet
	  vlib_buffer_advance (p0, -sizeof (*pheader0));
	  vlib_buffer_advance (p1, -sizeof (*pheader1));
	  pheader0 = vlib_buffer_get_current (p0);
	  pheader1 = vlib_buffer_get_current (p1);

	  //Save addresses within the packet
	  if (mm->is_ce)
	    {
	      ip4_map_t_embedded_address (d0, &pheader0->daddr,
					  &ip40->dst_address);
	      ip4_map_t_embedded_address (d1, &pheader1->daddr,
					  &ip41->dst_address);
	      pheader0->saddr.as_u64[0] =
		map_get_pfx_net (d0, ip40->src_address.as_u32,
				 (u16) map_port0);
	      pheader0->saddr.as_u64[1] =
		map_get_sfx_net (d0, ip40->src_address.as_u32,
				 (u16) map_port0);
	      pheader1->saddr.as_u64[0] =
		map_get_pfx_net (d1, ip41->src_address.as_u32,
				 (u16) map_port1);
	      pheader1->saddr.as_u64[1] =
		map_get_sfx_net (d1, ip41->src_address.as_u32,
				 (u16) map_port1);
	    }
	  else
	    {
	      ip4_map_t_embedded_address (d0, &pheader0->saddr,
					  &ip40->src_address);
	      ip4_map_t_embedded_address (d1, &pheader1->saddr,
					  &ip41->src_address);
	      pheader0->daddr.as_u64[0] =
		map_get_pfx_net (d0, ip40->dst_address.as_u32,
				 (u16) map_port0);
	      pheader0->daddr.as_u64[1] =
		map_get_sfx_net (d0, ip40->dst_address.as_u32,
				 (u16) map_port0);
	      pheader1->daddr.as_u64[0] =
		map_get_pfx_net (d1, ip41->dst_address.as_u32,
				 (u16) map_port1);
	      pheader1->daddr.as_u64[1] =
		map_get_sfx_net (d1, ip41->dst_address.as_u32,
				 (u16) map_port1);
	    }

	  if (PREDICT_FALSE
	      (ip4_is_first_fragment (ip40) && (map_port0 != -1)
	       && (d0->ea_bits_len != 0 || !d0->rules)
	       && ip4_map_fragment_cache (ip40, map_port0)))
	    {
	      error0 = MAP_ERROR_FRAGMENT_MEMORY;
	    }

	  if (PREDICT_FALSE
	      (ip4_is_first_fragment (ip41) && (map_port1 != -1)
	       && (d1->ea_bits_len != 0 || !d1->rules)
	       && ip4_map_fragment_cache (ip41, map_port1)))
	    {
	      error1 = MAP_ERROR_FRAGMENT_MEMORY;
	    }

	  if (PREDICT_TRUE
	      (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       clib_net_to_host_u16
					       (ip40->length));
	    }

	  if (PREDICT_TRUE
	      (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
					       thread_index,
					       vnet_buffer (p1)->
					       map_t.map_domain_index, 1,
					       clib_net_to_host_u16
					       (ip41->length));
	    }

	  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
	  next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
	  p0->error = error_node->errors[error0];
	  p1->error = error_node->errors[error1];
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip4_header_t *ip40;
	  map_domain_t *d0;
	  ip4_mapt_next_t next0;
	  u16 ip4_len0;
	  u8 error0;
	  i32 map_port0;
	  ip4_mapt_pseudo_header_t *pheader0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  error0 = MAP_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip40 = vlib_buffer_get_current (p0);
	  ip4_len0 = clib_host_to_net_u16 (ip40->length);

	  //Check IPv4 header validity
	  if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
			     ip40->ip_version_and_header_length != 0x45))
	    {
	      error0 = MAP_ERROR_UNKNOWN;
	      next0 = IP4_MAPT_NEXT_DROP;
	    }

	  vnet_buffer (p0)->map_t.map_domain_index =
	    vnet_buffer (p0)->ip.adj_index[VLIB_TX];
	  d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);

	  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

	  map_port0 = -1;
	  ip4_map_t_classify (p0, d0, ip40, ip4_len0, &map_port0, &error0,
			      &next0);

	  //Add MAP-T pseudo header in front of the packet
	  vlib_buffer_advance (p0, -sizeof (*pheader0));
	  pheader0 = vlib_buffer_get_current (p0);

	  //Save addresses within the packet
	  if (mm->is_ce)
	    {
	      ip4_map_t_embedded_address (d0, &pheader0->daddr,
					  &ip40->dst_address);
	      pheader0->saddr.as_u64[0] =
		map_get_pfx_net (d0, ip40->src_address.as_u32,
				 (u16) map_port0);
	      pheader0->saddr.as_u64[1] =
		map_get_sfx_net (d0, ip40->src_address.as_u32,
				 (u16) map_port0);
	    }
	  else
	    {
	      ip4_map_t_embedded_address (d0, &pheader0->saddr,
					  &ip40->src_address);
	      pheader0->daddr.as_u64[0] =
		map_get_pfx_net (d0, ip40->dst_address.as_u32,
				 (u16) map_port0);
	      pheader0->daddr.as_u64[1] =
		map_get_sfx_net (d0, ip40->dst_address.as_u32,
				 (u16) map_port0);
	    }

	  //It is important to cache at this stage because the result
	  //may be needed by other fragments within the same vector.
	  //As a side effect, this also gives limited support for
	  //out-of-order fragments.
	  if (PREDICT_FALSE
	      (ip4_is_first_fragment (ip40) && (map_port0 != -1)
	       && (d0->ea_bits_len != 0 || !d0->rules)
	       && ip4_map_fragment_cache (ip40, map_port0)))
	    {
	      error0 = MAP_ERROR_UNKNOWN;
	    }

	  if (PREDICT_TRUE
	      (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       clib_net_to_host_u16
					       (ip40->length));
	    }

	  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}
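
/*
 * Pipeline overview: ip4-map-t is the steering node. It validates the
 * IPv4 header, classifies the packet, prepends the pseudo header
 * carrying the precomputed IPv6 addresses, and dispatches to one of
 * the three translation nodes registered below; each of those consumes
 * the pseudo header and enqueues to ip6-lookup, or to ip6-frag when
 * the translated packet exceeds the domain MTU.
 */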

static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
  .function = ip4_map_t_fragmented,
  .name = "ip4-map-t-fragmented",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
  .function = ip4_map_t_icmp,
  .name = "ip4-map-t-icmp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
  .function = ip4_map_t_tcp_udp,
  .name = "ip4-map-t-tcp-udp",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
      [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
      [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE(ip4_map_t_node) = {
  .function = ip4_map_t,
  .name = "ip4-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_N_NEXT,
  .next_nodes = {
      [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
      [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
      [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
      [IP4_MAPT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */