2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include "../ip/ip_frag.h"
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
21 #define IP6_MAP_T_DUAL_LOOP
25 IP6_MAPT_NEXT_MAPT_TCP_UDP,
26 IP6_MAPT_NEXT_MAPT_ICMP,
27 IP6_MAPT_NEXT_MAPT_FRAGMENTED,
34 IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
35 IP6_MAPT_ICMP_NEXT_IP4_FRAG,
36 IP6_MAPT_ICMP_NEXT_DROP,
38 } ip6_mapt_icmp_next_t;
42 IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
43 IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
44 IP6_MAPT_TCP_UDP_NEXT_DROP,
45 IP6_MAPT_TCP_UDP_N_NEXT
46 } ip6_mapt_tcp_udp_next_t;
50 IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
51 IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
52 IP6_MAPT_FRAGMENTED_NEXT_DROP,
53 IP6_MAPT_FRAGMENTED_N_NEXT
54 } ip6_mapt_fragmented_next_t;
/*
 * Cache the port mapping learned from the FIRST fragment of an IPv6
 * packet, keyed on (embedded src/dst IPv4, translated fragment id,
 * protocol), so later non-first fragments — which carry no L4 header —
 * can still be translated and steered to the right port.
 * NOTE(review): this view of the file is truncated (interior lines are
 * missing); the return value and error path are not visible here.
 */
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58 map_domain_t * d, u16 port)
/* Global reassembly-table lock is taken for the duration of the lookup. */
61   map_ip4_reass_lock ();
62   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
64 					  ip6_map_t_embedded_address (d,
/* frag_id_6to4() folds the 32-bit IPv6 fragment id into a 16-bit IPv4 id. */
67 					  frag_id_6to4 (frag->identification),
/* ICMPv6 is recorded as IPv4 ICMP so the reass key matches on lookup. */
70 					  IP_PROTOCOL_ICMP : ip6->protocol,
75   map_ip4_reass_unlock ();
/*
 * Look up the destination port previously cached by
 * ip6_map_fragment_cache() for a non-first fragment.  Uses the same
 * (embedded IPv4 addresses, frag id, protocol) key as the cache path.
 * Returns the cached port, or -1 when no reassembly state exists.
 * NOTE(review): truncated view — the trailing `return ret;` is not
 * visible in this chunk.
 */
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
85   map_ip4_reass_lock ();
86   map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
88 					  ip6_map_t_embedded_address (d,
91 					  frag_id_6to4 (frag->identification),
94 					  IP_PROTOCOL_ICMP : ip6->protocol,
/* Read the port under the lock; -1 signals "no state" to the caller. */
96   i32 ret = r ? r->port : -1;
97   map_ip4_reass_unlock ();
105 } icmp6_to_icmp_ctx_t;
/*
 * Callback used by the generic ICMPv6->ICMPv4 translator to fill in the
 * OUTER IPv4 header addresses from the MAP domain context (ctx->d).
 * Performs the MAP security check: the IPv6 address must embed the IPv4
 * address/port consistent with the domain's prefix (pfx) and suffix (sfx)
 * rules; a mismatch is rejected (the failure return is in lines missing
 * from this truncated view).
 * NOTE(review): the two halves below appear to be CE vs. non-CE branches
 * (mm->is_ce), mapping dst vs. src respectively — confirm against the
 * full file.
 */
108 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
110   icmp6_to_icmp_ctx_t *ctx = arg;
111   map_main_t *mm = &map_main;
/* Rejecting non-matching sources prevents spoofed/off-path ICMP replies. */
118       //Note that this prevents an intermediate IPv6 router from answering the request
119       ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
120       if (ip6->dst_address.as_u64[0] !=
121 	  map_get_pfx_net (ctx->d, ip4_dadr, ctx->id)
122 	  || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_dadr,
/* Check passed: source comes from the embedded address, dest is the
 * extracted MAP IPv4 address. */
126       ip4->src_address.as_u32 =
127 	ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
128       ip4->dst_address.as_u32 = ip4_dadr;
/* Mirror branch: validate the IPv6 SOURCE against the domain instead. */
135       //Note that this prevents an intermediate IPv6 router from answering the request
136       ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
137       if (ip6->src_address.as_u64[0] !=
138 	  map_get_pfx_net (ctx->d, ip4_sadr, ctx->id)
139 	  || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
143       ip4->dst_address.as_u32 =
144 	ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
145       ip4->src_address.as_u32 = ip4_sadr;
/*
 * Callback for the INNER packet carried inside an ICMPv6 error message.
 * The inner packet's direction is reversed relative to the outer one,
 * so the address that gets security-checked is swapped versus
 * ip6_to_ip4_set_icmp_cb() above.  A failed check rejects the packet
 * (failure return lines are missing from this truncated view).
 */
152 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
155   icmp6_to_icmp_ctx_t *ctx = arg;
156   map_main_t *mm = &map_main;
/* Validate that the inner IPv6 source embeds the expected IPv4 address
 * per the domain's prefix/suffix rules. */
162       //Security check of inner packet
163       inner_ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
164       if (ip6->src_address.as_u64[0] !=
165 	  map_get_pfx_net (ctx->d, inner_ip4_sadr, ctx->id)
166 	  || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d,
171       ip4->src_address.as_u32 = inner_ip4_sadr;
172       ip4->dst_address.as_u32 =
173 	ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
/* Mirror branch (presumably the CE side): validate the inner destination
 * instead — TODO confirm the mm->is_ce condition in the full file. */
179       //Security check of inner packet
180       inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
181       if (ip6->dst_address.as_u64[0] !=
182 	  map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->id)
183 	  || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
188       ip4->dst_address.as_u32 = inner_ip4_dadr;
189       ip4->src_address.as_u32 =
190 	ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
/*
 * Graph-node function: translate ICMPv6 packets to ICMPv4 (single-buffer
 * loop).  For each packet: look up the MAP domain, extract the port from
 * the ICMP payload, run the translator with the two callbacks above,
 * then either forward to ip4-lookup, hand off to ip4-frag when the
 * result exceeds the domain MTU, or drop on error.
 * NOTE(review): truncated view — scope-opening/closing braces and some
 * statements are missing between the visible lines.
 */
197 ip6_map_t_icmp (vlib_main_t * vm,
198 		vlib_node_runtime_t * node, vlib_frame_t * frame)
200   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
201   vlib_node_runtime_t *error_node =
202     vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
203   from = vlib_frame_vector_args (frame);
204   n_left_from = frame->n_vectors;
205   next_index = node->cached_next_index;
206   vlib_combined_counter_main_t *cm = map_main.domain_counters;
207   u32 thread_index = vlib_get_thread_index ();
/* Standard VPP dispatch loop: drain the input frame into next frames. */
209   while (n_left_from > 0)
211       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
213       while (n_left_from > 0 && n_left_to_next > 0)
218 	  ip6_mapt_icmp_next_t next0;
221 	  icmp6_to_icmp_ctx_t ctx0;
223 	  icmp46_header_t *icmp0;
225 	  pi0 = to_next[0] = from[0];
/* Optimistic defaults; overwritten below on any failure. */
230 	  error0 = MAP_ERROR_NONE;
231 	  next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
233 	  p0 = vlib_get_buffer (vm, pi0);
234 	  ip60 = vlib_buffer_get_current (p0);
/* len0 is the IPv6 payload length (used for the RX byte counter). */
235 	  len0 = clib_net_to_host_u16 (ip60->payload_length);
236 	  icmp0 = (icmp46_header_t *) (ip60 + 1);
/* Domain was resolved upstream; index is stashed in buffer metadata. */
238 	    pool_elt_at_index (map_main.domains,
239 			       vnet_buffer (p0)->map_t.map_domain_index);
/* Echo requests carry the identifying port in a different field than
 * echo replies/errors — ip6_get_port handles both. */
242 	    ip6_get_port (ip60, icmp0->type == ICMP6_echo_request,
247 	      // In case of 1:1 mapping, we don't care about the port
248 	      if (!(d0->ea_bits_len == 0 && d0->rules))
250 		  error0 = MAP_ERROR_ICMP;
/* Run the actual ICMPv6->ICMPv4 translation with outer+inner callbacks. */
256 	      (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
257 	       ip6_to_ip4_set_inner_icmp_cb, &ctx0))
259 	      error0 = MAP_ERROR_ICMP;
263 	  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
265 	      //Send to fragmentation node if necessary
266 	      vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
267 	      vnet_buffer (p0)->ip_frag.header_offset = 0;
268 	      vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
269 	      next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
/* Only count successfully translated packets in the per-domain RX
 * counters; errored buffers go to the drop next with an error tag. */
272 	  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
274 	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
277 					       map_t.map_domain_index, 1,
282 	      next0 = IP6_MAPT_ICMP_NEXT_DROP;
285 	  p0->error = error_node->errors[error0];
286 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
287 					   to_next, n_left_to_next, pi0,
290       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
292   return frame->n_vectors;
/*
 * Trivial address-fixup callback for the TCP/UDP and fragmented
 * translation paths: the IPv4 source/destination were already computed
 * by the classifier node and stashed in the buffer's map_t metadata, so
 * just copy them into the new IPv4 header.  `ctx` is the vlib buffer.
 */
296 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
298   vlib_buffer_t *p = ctx;
300   ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
301   ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
/*
 * Graph-node function: translate non-first IPv6 fragments to IPv4.
 * Dual-buffer fast loop (under IP6_MAP_T_DUAL_LOOP) followed by a
 * single-buffer cleanup loop.  Each buffer is translated via
 * ip6_to_ip4_fragmented(); on failure it is dropped with
 * MAP_ERROR_FRAGMENT_DROPPED, and oversized results are diverted to the
 * ip4-frag node.
 * NOTE(review): next0/next1 are initialized with
 * IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP although this is the FRAGMENTED
 * node; the enum values coincide, but this should read
 * IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP — flagged only, not changed,
 * because this view of the file is truncated.
 */
307 ip6_map_t_fragmented (vlib_main_t * vm,
308 		      vlib_node_runtime_t * node, vlib_frame_t * frame)
310   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
311   from = vlib_frame_vector_args (frame);
312   n_left_from = frame->n_vectors;
313   next_index = node->cached_next_index;
314   vlib_node_runtime_t *error_node =
315     vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
317   while (n_left_from > 0)
319       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
321 #ifdef IP6_MAP_T_DUAL_LOOP
/* Dual loop: process two buffers per iteration while >=4 remain. */
322       while (n_left_from >= 4 && n_left_to_next >= 2)
325 	  vlib_buffer_t *p0, *p1;
328 	  pi0 = to_next[0] = from[0];
329 	  pi1 = to_next[1] = from[1];
/* See NOTE(review) above about this enum choice. */
335 	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
336 	  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
337 	  p0 = vlib_get_buffer (vm, pi0);
338 	  p1 = vlib_get_buffer (vm, pi1);
340 	  if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
342 	      p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
343 	      next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
347 	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
349 		  //Send to fragmentation node if necessary
350 		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
351 		  vnet_buffer (p0)->ip_frag.header_offset = 0;
352 		  vnet_buffer (p0)->ip_frag.next_index =
353 		    IP4_FRAG_NEXT_IP4_LOOKUP;
354 		  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
/* Second buffer of the pair: identical handling. */
358 	  if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
360 	      p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
361 	      next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
365 	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
367 		  //Send to fragmentation node if necessary
368 		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
369 		  vnet_buffer (p1)->ip_frag.header_offset = 0;
370 		  vnet_buffer (p1)->ip_frag.next_index =
371 		    IP4_FRAG_NEXT_IP4_LOOKUP;
372 		  next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
376 	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
377 					   to_next, n_left_to_next, pi0, pi1,
/* Single-buffer cleanup loop for the remainder. */
382       while (n_left_from > 0 && n_left_to_next > 0)
388 	  pi0 = to_next[0] = from[0];
394 	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
395 	  p0 = vlib_get_buffer (vm, pi0);
397 	  if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
399 	      p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
400 	      next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
404 	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
406 		  //Send to fragmentation node if necessary
407 		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
408 		  vnet_buffer (p0)->ip_frag.header_offset = 0;
409 		  vnet_buffer (p0)->ip_frag.next_index =
410 		    IP4_FRAG_NEXT_IP4_LOOKUP;
411 		  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
415 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
416 					   to_next, n_left_to_next, pi0,
419       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
421   return frame->n_vectors;
/*
 * Graph-node function: translate IPv6 TCP/UDP packets to IPv4.
 * Mirrors ip6_map_t_fragmented's structure: dual-buffer loop then a
 * single-buffer loop.  ip6_to_ip4_tcp_udp() does the header rewrite and
 * checksum fixup (last arg 1 — presumably "update checksum", confirm in
 * ip6_to_ip4.h); failures drop with MAP_ERROR_UNKNOWN, oversized
 * results go to ip4-frag.
 * NOTE(review): truncated view — braces/locals between visible lines
 * are missing.
 */
425 ip6_map_t_tcp_udp (vlib_main_t * vm,
426 		   vlib_node_runtime_t * node, vlib_frame_t * frame)
428   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
429   vlib_node_runtime_t *error_node =
430     vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);
432   from = vlib_frame_vector_args (frame);
433   n_left_from = frame->n_vectors;
434   next_index = node->cached_next_index;
435   while (n_left_from > 0)
437       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
439 #ifdef IP6_MAP_T_DUAL_LOOP
440       while (n_left_from >= 4 && n_left_to_next >= 2)
443 	  vlib_buffer_t *p0, *p1;
444 	  ip6_mapt_tcp_udp_next_t next0, next1;
446 	  pi0 = to_next[0] = from[0];
447 	  pi1 = to_next[1] = from[1];
452 	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
453 	  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
455 	  p0 = vlib_get_buffer (vm, pi0);
456 	  p1 = vlib_get_buffer (vm, pi1);
458 	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
460 	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
461 	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
465 	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
467 		  //Send to fragmentation node if necessary
468 		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
469 		  vnet_buffer (p0)->ip_frag.header_offset = 0;
470 		  vnet_buffer (p0)->ip_frag.next_index =
471 		    IP4_FRAG_NEXT_IP4_LOOKUP;
472 		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
/* Second buffer of the pair: identical handling. */
476 	  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
478 	      p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
479 	      next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
483 	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
485 		  //Send to fragmentation node if necessary
486 		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
487 		  vnet_buffer (p1)->ip_frag.header_offset = 0;
488 		  vnet_buffer (p1)->ip_frag.next_index =
489 		    IP4_FRAG_NEXT_IP4_LOOKUP;
490 		  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
494 	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
495 					   n_left_to_next, pi0, pi1, next0,
/* Single-buffer cleanup loop for the remainder. */
500       while (n_left_from > 0 && n_left_to_next > 0)
504 	  ip6_mapt_tcp_udp_next_t next0;
506 	  pi0 = to_next[0] = from[0];
511 	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
513 	  p0 = vlib_get_buffer (vm, pi0);
515 	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
517 	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
518 	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
522 	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
524 		  //Send to fragmentation node if necessary
525 		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
526 		  vnet_buffer (p0)->ip_frag.header_offset = 0;
527 		  vnet_buffer (p0)->ip_frag.next_index =
528 		    IP4_FRAG_NEXT_IP4_LOOKUP;
529 		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
533 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
534 					   to_next, n_left_to_next, pi0,
537       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
539   return frame->n_vectors;
/*
 * Classify one IPv6 packet for translation and pick the next node:
 *  - non-first fragment  -> ip6-map-t-fragmented (port from reass cache)
 *  - TCP / UDP           -> ip6-map-t-tcp-udp (records checksum offset)
 *  - ICMPv6              -> ip6-map-t-icmp (port from echo id when 1:1)
 *  - anything else       -> MAP_ERROR_BAD_PROTOCOL
 * Outputs are written through map_port0/error0/next0.  l4_len0 is the
 * L4 payload length, frag0 points at the fragment header (if any).
 * NOTE(review): truncated view — several condition/assignment lines are
 * missing between the visible ones.
 */
542 static_always_inline void
543 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
544 		    map_domain_t * d0, i32 * map_port0,
545 		    u8 * error0, ip6_mapt_next_t * next0,
546 		    u32 l4_len0, ip6_frag_hdr_t * frag0)
548   map_main_t *mm = &map_main;
/* Non-zero fragment offset => not the first fragment: no L4 header
 * present, so the port must come from the fragment cache. */
556   if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
557 		     ip6_frag_hdr_offset (frag0)))
559       *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
/* ea_bits_len==0 with rules => 1:1 mapping, port irrelevant. */
560       if (d0->ea_bits_len == 0 && d0->rules)
566 	  *map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
567 	  *error0 = (*map_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
572 	   (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
/* TCP: checksum field sits 16 bytes into the TCP header. */
575 	l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
576       vnet_buffer (p0)->map_t.checksum_offset =
577 	vnet_buffer (p0)->map_t.v6.l4_offset + 16;
578       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
583 		     vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
587 	   (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
/* UDP: checksum field sits 6 bytes into the UDP header. */
590 	l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
591       vnet_buffer (p0)->map_t.checksum_offset =
592 	vnet_buffer (p0)->map_t.v6.l4_offset + 6;
593       *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
598 		     vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
600   else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
603 	l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
604       *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
605       if (d0->ea_bits_len == 0 && d0->rules)
/* For echo request/reply the identifier (offset 6 into the ICMP header)
 * plays the role of the port. */
610       else if (((icmp46_header_t *)
612 			    vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
614 	       || ((icmp46_header_t *)
616 			       vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
622 	     u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
627       //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
628       *error0 = MAP_ERROR_BAD_PROTOCOL;
/*
 * Entry graph node for MAP-T IPv6->IPv4: resolves the MAP domain for
 * each packet, pre-computes the translated IPv4 src/dst addresses into
 * buffer metadata (consumed later by ip6_to_ip4_set_cb), parses the
 * IPv6 header chain, classifies via ip6_map_t_classify(), runs the MAP
 * security check on the source address, caches first-fragment state,
 * bumps per-domain RX counters, and dispatches to the per-protocol
 * translation nodes.  Dual-buffer loop plus single-buffer remainder.
 * NOTE(review): truncated view — many interior lines (braces, some
 * locals, the is_ce branch conditions) are missing; interior comments
 * below are hedged accordingly.
 */
633 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
635   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
636   vlib_node_runtime_t *error_node =
637     vlib_node_get_runtime (vm, ip6_map_t_node.index);
638   map_main_t *mm = &map_main;
639   vlib_combined_counter_main_t *cm = map_main.domain_counters;
640   u32 thread_index = vlib_get_thread_index ();
642   from = vlib_frame_vector_args (frame);
643   n_left_from = frame->n_vectors;
644   next_index = node->cached_next_index;
645   while (n_left_from > 0)
647       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
649 #ifdef IP6_MAP_T_DUAL_LOOP
/* Dual loop: two buffers per iteration while >=4 remain. */
650       while (n_left_from >= 4 && n_left_to_next >= 2)
653 	  vlib_buffer_t *p0, *p1;
654 	  ip6_header_t *ip60, *ip61;
656 	  ip6_mapt_next_t next0, next1;
657 	  u32 l4_len0, l4_len1;
658 	  i32 map_port0, map_port1;
659 	  map_domain_t *d0, *d1;
660 	  ip6_frag_hdr_t *frag0, *frag1;
661 	  next0 = next1 = 0;	//Because compiler whines
663 	  pi0 = to_next[0] = from[0];
664 	  pi1 = to_next[1] = from[1];
670 	  error0 = MAP_ERROR_NONE;
671 	  error1 = MAP_ERROR_NONE;
673 	  p0 = vlib_get_buffer (vm, pi0);
674 	  p1 = vlib_get_buffer (vm, pi1);
675 	  ip60 = vlib_buffer_get_current (p0);
676 	  ip61 = vlib_buffer_get_current (p1);
/* First of two address-derivation branches (presumably !is_ce vs is_ce
 * — the selecting condition is in lines missing from this view):
 * dst address embeds the IPv4 destination, src comes from the domain's
 * embedded-address rule. */
681 	  daddr0 = 0;		/* TODO */
682 	  daddr1 = 0;		/* TODO */
683 	  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
685 	  daddr0 = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
686 	  daddr1 = map_get_ip4 (&ip61->dst_address, 0 /*TODO*/);
688 	    ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
689 				(ip4_address_t *) & daddr0,
690 				&vnet_buffer (p0)->map_t.map_domain_index,
693 	    ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
694 				(ip4_address_t *) & daddr1,
695 				&vnet_buffer (p1)->map_t.map_domain_index,
/* Re-extract with the real domain flags now that d0/d1 are known. */
698 	  daddr0 = map_get_ip4 (&ip60->dst_address, d0->flags);
699 	  daddr1 = map_get_ip4 (&ip61->dst_address, d1->flags);
701 	  vnet_buffer (p0)->map_t.v6.daddr = daddr0;
702 	  vnet_buffer (p1)->map_t.v6.daddr = daddr1;
703 	  vnet_buffer (p0)->map_t.v6.saddr =
704 	    ip6_map_t_embedded_address (d0, &ip60->src_address);
705 	  vnet_buffer (p1)->map_t.v6.saddr =
706 	    ip6_map_t_embedded_address (d1, &ip61->src_address);
/* Mirror branch: src address embeds the IPv4 source instead. */
711 	  saddr0 = 0;		/* TODO */
712 	  saddr1 = 0;		/* TODO */
713 	  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
715 	  saddr0 = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
716 	  saddr1 = map_get_ip4 (&ip61->src_address, 0 /*TODO*/);
718 	    ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
719 				(ip4_address_t *) & saddr0,
720 				&vnet_buffer (p0)->map_t.map_domain_index,
723 	    ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
724 				(ip4_address_t *) & saddr1,
725 				&vnet_buffer (p1)->map_t.map_domain_index,
728 	  saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
729 	  saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
731 	  vnet_buffer (p0)->map_t.v6.saddr = saddr0;
732 	  vnet_buffer (p1)->map_t.v6.saddr = saddr1;
733 	  vnet_buffer (p0)->map_t.v6.daddr =
734 	    ip6_map_t_embedded_address (d0, &ip60->dst_address);
735 	  vnet_buffer (p1)->map_t.v6.daddr =
736 	    ip6_map_t_embedded_address (d1, &ip61->dst_address);
/* ~0 means "no MTU limit" downstream. */
739 	  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
740 	  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
/* Parse the extension-header chain; fills l4_offset / l4_protocol /
 * frag_offset in buffer metadata.  Failure => malformed drop. */
742 	  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
743 					&(vnet_buffer (p0)->map_t.
745 					&(vnet_buffer (p0)->map_t.
747 					&(vnet_buffer (p0)->map_t.
750 	      error0 = MAP_ERROR_MALFORMED;
751 	      next0 = IP6_MAPT_NEXT_DROP;
754 	  if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
755 					&(vnet_buffer (p1)->map_t.
757 					&(vnet_buffer (p1)->map_t.
759 					&(vnet_buffer (p1)->map_t.
762 	      error1 = MAP_ERROR_MALFORMED;
763 	      next1 = IP6_MAPT_NEXT_DROP;
766 	  map_port0 = map_port1 = -1;
767 	  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
768 	    sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
769 	  l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
770 	    sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
772 	    (ip6_frag_hdr_t *) u8_ptr_add (ip60,
773 					   vnet_buffer (p0)->map_t.
776 	    (ip6_frag_hdr_t *) u8_ptr_add (ip61,
777 					   vnet_buffer (p1)->map_t.
780 	  ip6_map_t_classify (p0, ip60, d0, &map_port0, &error0, &next0,
782 	  ip6_map_t_classify (p1, ip61, d1, &map_port1, &error1, &next1,
/* MAP security check: the IPv6 source must embed the translated IPv4
 * source per the domain pfx/sfx rules, else MAP_ERROR_SEC_CHECK. */
787 	      && (ip60->src_address.as_u64[0] !=
788 		  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
790 		  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
792 								     (p0)->map_t.v6.saddr,
795 	      error0 = MAP_ERROR_SEC_CHECK;
800 	      && (ip61->src_address.as_u64[0] !=
801 		  map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
803 		  || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
805 								     (p1)->map_t.v6.saddr,
808 	      error1 = MAP_ERROR_SEC_CHECK;
/* First fragment (offset 0) of a fragmented packet: cache its port so
 * later fragments of the same packet can be translated. */
811 	  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
812 			     !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
817 	      && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
818 	      && (error0 == MAP_ERROR_NONE))
820 	      ip6_map_fragment_cache (ip60,
821 				      (ip6_frag_hdr_t *) u8_ptr_add (ip60,
828 	  if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
829 			     !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
834 	      && (map_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
835 	      && (error1 == MAP_ERROR_NONE))
837 	      ip6_map_fragment_cache (ip61,
838 				      (ip6_frag_hdr_t *) u8_ptr_add (ip61,
/* RX accounting happens here for everything except ICMP, which is
 * counted in the ICMP node after translation succeeds. */
846 	      (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
848 	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
851 					       map_t.map_domain_index, 1,
853 					       (ip60->payload_length));
857 	      (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
859 	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
862 					       map_t.map_domain_index, 1,
864 					       (ip61->payload_length));
867 	  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
868 	  next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
869 	  p0->error = error_node->errors[error0];
870 	  p1->error = error_node->errors[error1];
871 	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
872 					   n_left_to_next, pi0, pi1, next0,
/* Single-buffer loop: same pipeline, inlined classification. */
877       while (n_left_from > 0 && n_left_to_next > 0)
886 	  ip6_frag_hdr_t *frag0;
888 	  ip6_mapt_next_t next0 = 0;
890 	  pi0 = to_next[0] = from[0];
895 	  error0 = MAP_ERROR_NONE;
897 	  p0 = vlib_get_buffer (vm, pi0);
898 	  ip60 = vlib_buffer_get_current (p0);
903 	      //Save daddr in a different variable to not overwrite ip.adj_index
904 	      daddr = 0;	/* TODO */
905 	      /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
907 	      daddr = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
909 		ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
910 				    (ip4_address_t *) & daddr,
911 				    &vnet_buffer (p0)->map_t.map_domain_index,
914 	      daddr = map_get_ip4 (&ip60->dst_address, d0->flags);
916 	      //FIXME: What if d0 is null
917 	      vnet_buffer (p0)->map_t.v6.daddr = daddr;
918 	      vnet_buffer (p0)->map_t.v6.saddr =
919 		ip6_map_t_embedded_address (d0, &ip60->src_address);
/* Mirror branch: derive from the source address instead. */
926 	      //Save saddr in a different variable to not overwrite ip.adj_index
927 	      saddr = 0;	/* TODO */
928 	      /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
930 	      saddr = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
932 		ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
933 				    (ip4_address_t *) & saddr,
934 				    &vnet_buffer (p0)->map_t.map_domain_index,
937 	      saddr = map_get_ip4 (&ip60->src_address, d0->flags);
939 	      //FIXME: What if d0 is null
940 	      vnet_buffer (p0)->map_t.v6.saddr = saddr;
941 	      vnet_buffer (p0)->map_t.v6.daddr =
942 		ip6_map_t_embedded_address (d0, &ip60->dst_address);
947 	  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
949 	  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
950 					&(vnet_buffer (p0)->map_t.
952 					&(vnet_buffer (p0)->map_t.
954 					&(vnet_buffer (p0)->map_t.
957 	      error0 = MAP_ERROR_MALFORMED;
958 	      next0 = IP6_MAPT_NEXT_DROP;
962 	  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
963 	    sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
965 	    (ip6_frag_hdr_t *) u8_ptr_add (ip60,
966 					   vnet_buffer (p0)->map_t.
/* Inline classification (same decision tree as ip6_map_t_classify,
 * except a cache miss here reports MAP_ERROR_FRAGMENT_MEMORY). */
970 	  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
971 			     ip6_frag_hdr_offset (frag0)))
973 	      map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
974 	      error0 = (map_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
975 	      next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
979 		   (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
983 		sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
984 	      vnet_buffer (p0)->map_t.checksum_offset =
985 		vnet_buffer (p0)->map_t.v6.l4_offset + 16;
986 	      next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
991 				 vnet_buffer (p0)->map_t.v6.l4_offset +
996 		   (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
1000 		sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
1001 	      vnet_buffer (p0)->map_t.checksum_offset =
1002 		vnet_buffer (p0)->map_t.v6.l4_offset + 6;
1003 	      next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1008 				 vnet_buffer (p0)->map_t.v6.l4_offset +
1011 	  else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
1016 		sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
1017 	      next0 = IP6_MAPT_NEXT_MAPT_ICMP;
1018 	      if (((icmp46_header_t *)
1020 			       vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1022 		  || ((icmp46_header_t *)
1024 				  vnet_buffer (p0)->map_t.v6.
1025 				  l4_offset))->code == ICMP6_echo_request)
/* Echo id at ICMP header offset 6 acts as the port. */
1030 			 vnet_buffer (p0)->map_t.v6.l4_offset + 6));
1034 	      //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1035 	      error0 = MAP_ERROR_BAD_PROTOCOL;
/* MAP security check on the source address (skipped on the CE side). */
1040 	      ((!mm->is_ce) && (map_port0 != -1)
1041 	       && (ip60->src_address.as_u64[0] !=
1042 		   map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
1044 		   || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
1046 								      (p0)->map_t.v6.saddr,
1049 	      //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
1050 	      error0 = MAP_ERROR_SEC_CHECK;
1053 	  //Fragmented first packet needs to be cached for following packets
1054 	  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1055 			     !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1060 	      && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
1061 	      && (error0 == MAP_ERROR_NONE))
1063 	      ip6_map_fragment_cache (ip60,
1064 				      (ip6_frag_hdr_t *) u8_ptr_add (ip60,
/* RX accounting (ICMP counted later in its own node). */
1072 	      (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
1074 	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1077 					       map_t.map_domain_index, 1,
1078 					       clib_net_to_host_u16
1079 					       (ip60->payload_length));
1082 	  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1083 	  p0->error = error_node->errors[error0];
1084 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1085 					   to_next, n_left_to_next, pi0,
1088       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1090   return frame->n_vectors;
1093 static char *map_t_error_strings[] = {
1094 #define _(sym,string) string,
/* Node registration: translates non-first IPv6 fragments; next nodes
 * are ip4-lookup (forward), ip4-frag (MTU exceeded) and error-drop.
 * (Closing brace is outside this truncated view.) */
1100 VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
1101   .function = ip6_map_t_fragmented,
1102   .name = "ip6-map-t-fragmented",
1103   .vector_size = sizeof (u32),
1104   .format_trace = format_map_trace,
1105   .type = VLIB_NODE_TYPE_INTERNAL,
1107   .n_errors = MAP_N_ERROR,
1108   .error_strings = map_t_error_strings,
1110   .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
1112     [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
1113     [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1114     [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
/* Node registration: ICMPv6->ICMPv4 translation node; same next-node
 * set as the fragmented node. (Closing brace outside this view.) */
1120 VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
1121   .function = ip6_map_t_icmp,
1122   .name = "ip6-map-t-icmp",
1123   .vector_size = sizeof (u32),
1124   .format_trace = format_map_trace,
1125   .type = VLIB_NODE_TYPE_INTERNAL,
1127   .n_errors = MAP_N_ERROR,
1128   .error_strings = map_t_error_strings,
1130   .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
1132     [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1133     [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1134     [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
/* Node registration: TCP/UDP translation node; same next-node set as
 * the fragmented node. (Closing brace outside this view.) */
1140 VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
1141   .function = ip6_map_t_tcp_udp,
1142   .name = "ip6-map-t-tcp-udp",
1143   .vector_size = sizeof (u32),
1144   .format_trace = format_map_trace,
1145   .type = VLIB_NODE_TYPE_INTERNAL,
1147   .n_errors = MAP_N_ERROR,
1148   .error_strings = map_t_error_strings,
1150   .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1152     [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1153     [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1154     [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
/* Node registration: MAP-T entry node; fans out to the per-protocol
 * translation nodes registered above. (Closing brace outside view.) */
1160 VLIB_REGISTER_NODE(ip6_map_t_node) = {
1161   .function = ip6_map_t,
1162   .name = "ip6-map-t",
1163   .vector_size = sizeof(u32),
1164   .format_trace = format_map_trace,
1165   .type = VLIB_NODE_TYPE_INTERNAL,
1167   .n_errors = MAP_N_ERROR,
1168   .error_strings = map_t_error_strings,
1170   .n_next_nodes = IP6_MAPT_N_NEXT,
1172     [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1173     [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1174     [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1175     [IP6_MAPT_NEXT_DROP] = "error-drop",
1181 * fd.io coding-style-patch-verification: ON
1184 * eval: (c-set-style "gnu")