2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip6_to_ip4.h>
19 #include <vnet/ip/ip4_to_ip6.h>
21 #define IP6_MAP_T_DUAL_LOOP
/* Next-node index enums for the MAP-T IPv6->IPv4 graph nodes.
 * NOTE(review): this listing is an elided excerpt -- the leading numbers are
 * the original file's line numbers and intermediate lines (including the
 * "typedef enum {" openers) are missing. */
/* ip6_mapt_next_t (opener elided): dispatch targets of the ip6-map-t
 * classifier node. */
25 IP6_MAPT_NEXT_MAPT_TCP_UDP,
26 IP6_MAPT_NEXT_MAPT_ICMP,
27 IP6_MAPT_NEXT_MAPT_FRAGMENTED,
/* ip6_mapt_icmp_next_t: next nodes after ICMPv6 translation. */
34 IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
35 IP6_MAPT_ICMP_NEXT_IP4_FRAG,
36 IP6_MAPT_ICMP_NEXT_DROP,
38 } ip6_mapt_icmp_next_t;
/* ip6_mapt_tcp_udp_next_t: next nodes after TCP/UDP translation. */
42 IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
43 IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
44 IP6_MAPT_TCP_UDP_NEXT_DROP,
45 IP6_MAPT_TCP_UDP_N_NEXT
46 } ip6_mapt_tcp_udp_next_t;
/* ip6_mapt_fragmented_next_t: next nodes after non-first-fragment
 * translation. */
50 IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
51 IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
52 IP6_MAPT_FRAGMENTED_NEXT_DROP,
53 IP6_MAPT_FRAGMENTED_N_NEXT
54 } ip6_mapt_fragmented_next_t;
/* Record the destination port of a first fragment so that subsequent
 * (non-first) fragments of the same IPv6 datagram can be translated to the
 * same IPv4 port.  Keyed on (src, dst, frag id, protocol) -- see the
 * map_ip4_reass_get() call below.
 * NOTE(review): elided excerpt; the lines that store 'port' into the reass
 * entry (between the get and the unlock) and the return statement are not
 * visible here. */
56 static_always_inline int
57 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
58 map_domain_t * d, u16 port)
/* All reassembly-state access happens under the global reass lock. */
61 map_ip4_reass_lock ();
62 map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
64 ip6_map_t_embedded_address (d,
/* frag_id_6to4(): fold the 32-bit IPv6 fragment id into the 16-bit IPv4 id. */
67 frag_id_6to4 (frag->identification),
/* ICMPv6 fragments are tracked under IP_PROTOCOL_ICMP (the translated
 * protocol); condition selecting this is elided. */
70 IP_PROTOCOL_ICMP : ip6->protocol,
75 map_ip4_reass_unlock ();
/* Counterpart of ip6_map_fragment_cache(): look up the port cached by the
 * first fragment for a non-first fragment of the same datagram. */
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
/* Same (src, dst, frag id, protocol) key as the cache side, under the same
 * global reass lock. */
85 map_ip4_reass_lock ();
86 map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
88 ip6_map_t_embedded_address (d,
91 frag_id_6to4 (frag->identification),
94 IP_PROTOCOL_ICMP : ip6->protocol,
/* -1 means no first-fragment state exists (yet) for this datagram. */
96 i32 ret = r ? r->port : -1;
97 map_ip4_reass_unlock ();
105 } icmp6_to_icmp_ctx_t;
/* Callback used by icmp6_to_icmp(): fills the outer IPv4 src/dst addresses
 * for a translated ICMP packet and performs the MAP security check (the IPv6
 * address must match the prefix+suffix derived from the embedded IPv4
 * address and PSID).  Two symmetric branches below -- presumably selected by
 * mm->is_ce (CE vs BR direction); the branch condition itself is elided from
 * this excerpt -- TODO confirm against full source. */
108 ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
110 icmp6_to_icmp_ctx_t *ctx = arg;
111 map_main_t *mm = &map_main;
/* Branch A: validate the *destination* address against the MAP domain. */
118 //Note that this prevents an intermediate IPv6 router from answering the request
119 ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
120 if (ip6->dst_address.as_u64[0] !=
121 map_get_pfx_net (ctx->d, ip4_dadr, ctx->id)
122 || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_dadr,
/* Check passed: IPv4 source comes from the embedded address, destination is
 * the extracted MAP address. */
126 ip4->src_address.as_u32 =
127 ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
128 ip4->dst_address.as_u32 = ip4_dadr;
/* Branch B: mirror image -- validate the *source* address instead. */
135 //Note that this prevents an intermediate IPv6 router from answering the request
136 ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
137 if (ip6->src_address.as_u64[0] !=
138 map_get_pfx_net (ctx->d, ip4_sadr, ctx->id)
139 || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
143 ip4->dst_address.as_u32 =
144 ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
145 ip4->src_address.as_u32 = ip4_sadr;
/* Callback used by icmp6_to_icmp() for the *inner* packet carried inside an
 * ICMP error: sets the inner IPv4 addresses and security-checks the inner
 * IPv6 address against the MAP domain.  Note the address roles are swapped
 * relative to ip6_to_ip4_set_icmp_cb() because the inner packet travels in
 * the opposite direction.  Two branches, presumably mm->is_ce selected --
 * condition elided from this excerpt. */
152 ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
155 icmp6_to_icmp_ctx_t *ctx = arg;
156 map_main_t *mm = &map_main;
/* Branch A: validate inner source address. */
162 //Security check of inner packet
163 inner_ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
164 if (ip6->src_address.as_u64[0] !=
165 map_get_pfx_net (ctx->d, inner_ip4_sadr, ctx->id)
166 || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d,
171 ip4->src_address.as_u32 = inner_ip4_sadr;
172 ip4->dst_address.as_u32 =
173 ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
/* Branch B: validate inner destination address. */
179 //Security check of inner packet
180 inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
181 if (ip6->dst_address.as_u64[0] !=
182 map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->id)
183 || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
188 ip4->dst_address.as_u32 = inner_ip4_dadr;
189 ip4->src_address.as_u32 =
190 ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
/* ip6-map-t-icmp node function: translates ICMPv6 packets to ICMPv4 using
 * the two callbacks above, then forwards to ip4-lookup (or ip4-frag when the
 * result exceeds the domain MTU).  Standard single-buffer VPP dispatch loop.
 * NOTE(review): elided excerpt -- several statements (e.g. the icmp6_to_icmp
 * call opener, counter arguments) are missing between the numbered lines. */
197 ip6_map_t_icmp (vlib_main_t * vm,
198 vlib_node_runtime_t * node, vlib_frame_t * frame)
200 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
201 vlib_node_runtime_t *error_node =
202 vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
203 from = vlib_frame_vector_args (frame);
204 n_left_from = frame->n_vectors;
205 next_index = node->cached_next_index;
206 vlib_combined_counter_main_t *cm = map_main.domain_counters;
207 u32 thread_index = vm->thread_index;
209 while (n_left_from > 0)
211 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
213 while (n_left_from > 0 && n_left_to_next > 0)
218 ip6_mapt_icmp_next_t next0;
221 icmp6_to_icmp_ctx_t ctx0;
223 icmp46_header_t *icmp0;
225 pi0 = to_next[0] = from[0];
230 error0 = MAP_ERROR_NONE;
231 next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
233 p0 = vlib_get_buffer (vm, pi0);
234 ip60 = vlib_buffer_get_current (p0);
235 len0 = clib_net_to_host_u16 (ip60->payload_length);
236 icmp0 = (icmp46_header_t *) (ip60 + 1);
/* Recover the MAP domain selected by the classifier node. */
238 pool_elt_at_index (map_main.domains,
239 vnet_buffer (p0)->map_t.map_domain_index);
/* Echo request/reply use the ICMP id as the "port" for MAP purposes. */
242 ip6_get_port (ip60, icmp0->type == ICMP6_echo_request,
247 // In case of 1:1 mapping, we don't care about the port
248 if (!(d0->ea_bits_len == 0 && d0->rules))
250 error0 = MAP_ERROR_ICMP;
/* Perform the actual ICMPv6->ICMPv4 translation (call opener elided). */
256 (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
257 ip6_to_ip4_set_inner_icmp_cb, &ctx0))
259 error0 = MAP_ERROR_ICMP;
263 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
265 //Send to fragmentation node if necessary
266 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
267 vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
268 next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
/* RX counters are accounted here (per-domain, per-thread) on success. */
271 if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
273 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
276 map_t.map_domain_index, 1,
281 next0 = IP6_MAPT_ICMP_NEXT_DROP;
284 p0->error = error_node->errors[error0];
285 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
286 to_next, n_left_to_next, pi0,
289 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
291 return frame->n_vectors;
/* Trivial address-setting callback for ip6_to_ip4_tcp_udp()/_fragmented():
 * copies the IPv4 addresses pre-computed by the classifier node out of the
 * buffer metadata (vnet_buffer(p)->map_t.v6).  ctx is the vlib buffer. */
295 ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
297 vlib_buffer_t *p = ctx;
299 ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
300 ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
/* ip6-map-t-fragmented node function: translates non-first IPv6 fragments to
 * IPv4 (dual-loop + scalar tail).  NOTE(review): the initial next0/next1
 * values below use IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP although this node's own
 * enum is ip6_mapt_fragmented_next_t -- it only works because both
 * enumerators have the same value.  Should be
 * IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP; not changed here because this excerpt
 * has elided lines. */
306 ip6_map_t_fragmented (vlib_main_t * vm,
307 vlib_node_runtime_t * node, vlib_frame_t * frame)
309 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
310 from = vlib_frame_vector_args (frame);
311 n_left_from = frame->n_vectors;
312 next_index = node->cached_next_index;
313 vlib_node_runtime_t *error_node =
314 vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
316 while (n_left_from > 0)
318 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
320 #ifdef IP6_MAP_T_DUAL_LOOP
/* Dual loop: process two buffers per iteration while enough remain. */
321 while (n_left_from >= 4 && n_left_to_next >= 2)
324 vlib_buffer_t *p0, *p1;
327 pi0 = to_next[0] = from[0];
328 pi1 = to_next[1] = from[1];
/* See NOTE above: wrong-enum initializers (values coincide). */
334 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
335 next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
336 p0 = vlib_get_buffer (vm, pi0);
337 p1 = vlib_get_buffer (vm, pi1);
/* Nonzero return from ip6_to_ip4_fragmented() means translation failed. */
339 if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
341 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
342 next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
346 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
348 //Send to fragmentation node if necessary
349 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
350 vnet_buffer (p0)->ip_frag.next_index =
351 IP4_FRAG_NEXT_IP4_LOOKUP;
352 next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
/* Same processing for the second buffer. */
356 if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
358 p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
359 next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
363 if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
365 //Send to fragmentation node if necessary
366 vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
367 vnet_buffer (p1)->ip_frag.next_index =
368 IP4_FRAG_NEXT_IP4_LOOKUP;
369 next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
373 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
374 to_next, n_left_to_next, pi0, pi1,
/* Scalar tail: one buffer at a time. */
379 while (n_left_from > 0 && n_left_to_next > 0)
385 pi0 = to_next[0] = from[0];
391 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
392 p0 = vlib_get_buffer (vm, pi0);
394 if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
396 p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
397 next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
401 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
403 //Send to fragmentation node if necessary
404 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
405 vnet_buffer (p0)->ip_frag.next_index =
406 IP4_FRAG_NEXT_IP4_LOOKUP;
407 next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
411 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
412 to_next, n_left_to_next, pi0,
415 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
417 return frame->n_vectors;
/* ip6-map-t-tcp-udp node function: translates TCP/UDP-carrying IPv6 packets
 * to IPv4 via ip6_to_ip4_tcp_udp() (last arg 1 -- presumably "update
 * checksum"; confirm against ip6_to_ip4.h).  Dual loop + scalar tail, same
 * shape as ip6_map_t_fragmented(). */
421 ip6_map_t_tcp_udp (vlib_main_t * vm,
422 vlib_node_runtime_t * node, vlib_frame_t * frame)
424 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
425 vlib_node_runtime_t *error_node =
426 vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);
428 from = vlib_frame_vector_args (frame);
429 n_left_from = frame->n_vectors;
430 next_index = node->cached_next_index;
431 while (n_left_from > 0)
433 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
435 #ifdef IP6_MAP_T_DUAL_LOOP
436 while (n_left_from >= 4 && n_left_to_next >= 2)
439 vlib_buffer_t *p0, *p1;
440 ip6_mapt_tcp_udp_next_t next0, next1;
442 pi0 = to_next[0] = from[0];
443 pi1 = to_next[1] = from[1];
448 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
449 next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
451 p0 = vlib_get_buffer (vm, pi0);
452 p1 = vlib_get_buffer (vm, pi1);
/* Nonzero return means the translation failed -> drop. */
454 if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
456 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
457 next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
461 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
463 //Send to fragmentation node if necessary
464 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
465 vnet_buffer (p0)->ip_frag.next_index =
466 IP4_FRAG_NEXT_IP4_LOOKUP;
467 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
/* Second buffer: identical treatment. */
471 if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
473 p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
474 next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
478 if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
480 //Send to fragmentation node if necessary
481 vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
482 vnet_buffer (p1)->ip_frag.next_index =
483 IP4_FRAG_NEXT_IP4_LOOKUP;
484 next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
488 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
489 n_left_to_next, pi0, pi1, next0,
/* Scalar tail. */
494 while (n_left_from > 0 && n_left_to_next > 0)
498 ip6_mapt_tcp_udp_next_t next0;
500 pi0 = to_next[0] = from[0];
505 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
507 p0 = vlib_get_buffer (vm, pi0);
509 if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
511 p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
512 next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
516 if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
518 //Send to fragmentation node if necessary
519 vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
520 vnet_buffer (p0)->ip_frag.next_index =
521 IP4_FRAG_NEXT_IP4_LOOKUP;
522 next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
526 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
527 to_next, n_left_to_next, pi0,
530 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
532 return frame->n_vectors;
/* Classify one IPv6 packet for MAP-T: decide the next node (tcp-udp / icmp /
 * fragmented), extract the transport port (or ICMP id) into *map_port0, and
 * set the L4 checksum offset in the buffer metadata.  Outputs via pointers;
 * *error0 is only overwritten on failure paths.
 * NOTE(review): on a fragment-cache miss this path sets
 * MAP_ERROR_FRAGMENT_DROPPED, while the equivalent inline code in the
 * ip6_map_t() scalar loop uses MAP_ERROR_FRAGMENT_MEMORY -- likely an
 * inconsistency worth confirming against upstream. */
535 static_always_inline void
536 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
537 map_domain_t * d0, i32 * map_port0,
538 u8 * error0, ip6_mapt_next_t * next0,
539 u32 l4_len0, ip6_frag_hdr_t * frag0)
541 map_main_t *mm = &map_main;
/* Non-first fragment: port must come from the fragment cache. */
549 if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
550 ip6_frag_hdr_offset (frag0)))
552 *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
/* 1:1 domains (ea_bits_len == 0 with rules) don't need a port. */
553 if (d0->ea_bits_len == 0 && d0->rules)
559 *map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
560 *error0 = (*map_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
/* TCP: checksum lives at offset 16 in the TCP header. */
565 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
568 l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
569 vnet_buffer (p0)->map_t.checksum_offset =
570 vnet_buffer (p0)->map_t.v6.l4_offset + 16;
571 *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
/* Port is read at l4_offset + port_offset (src/dst selection elided). */
576 vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
/* UDP: checksum lives at offset 6 in the UDP header. */
580 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
583 l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
584 vnet_buffer (p0)->map_t.checksum_offset =
585 vnet_buffer (p0)->map_t.v6.l4_offset + 6;
586 *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
591 vnet_buffer (p0)->map_t.v6.l4_offset + port_offset));
/* ICMPv6: only echo request/reply carry a usable id ("port"). */
593 else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
596 l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
597 *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
598 if (d0->ea_bits_len == 0 && d0->rules)
603 if (((icmp46_header_t *)
605 vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
607 || ((icmp46_header_t *)
609 vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
/* ICMP echo id sits at l4_offset + 6. */
615 u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
/* Anything else is untranslatable. */
620 //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
621 *error0 = MAP_ERROR_BAD_PROTOCOL;
/* ip6-map-t node function: the MAP-T entry point for IPv6 packets.  Per
 * buffer it (1) resolves the MAP domain and computes the translated IPv4
 * src/dst into vnet_buffer(p)->map_t.v6, (2) parses the IPv6 header chain,
 * (3) classifies to tcp-udp / icmp / fragmented, (4) runs the MAP security
 * check on the source address, (5) caches first-fragment ports, and
 * (6) accounts RX counters (ICMP is counted in its own node).
 * Dual loop + scalar tail.
 * NOTE(review): elided excerpt -- branch conditions (e.g. the mm->is_ce
 * selection between the daddr- and saddr-based setup paths) and several
 * argument lines are missing between the numbered lines. */
626 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
628 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
629 vlib_node_runtime_t *error_node =
630 vlib_node_get_runtime (vm, ip6_map_t_node.index);
631 map_main_t *mm = &map_main;
632 vlib_combined_counter_main_t *cm = map_main.domain_counters;
633 u32 thread_index = vm->thread_index;
635 from = vlib_frame_vector_args (frame);
636 n_left_from = frame->n_vectors;
637 next_index = node->cached_next_index;
638 while (n_left_from > 0)
640 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
642 #ifdef IP6_MAP_T_DUAL_LOOP
/* Dual loop: two buffers per iteration. */
643 while (n_left_from >= 4 && n_left_to_next >= 2)
646 vlib_buffer_t *p0, *p1;
647 ip6_header_t *ip60, *ip61;
649 ip6_mapt_next_t next0, next1;
650 u32 l4_len0, l4_len1;
651 i32 map_port0, map_port1;
652 map_domain_t *d0, *d1;
653 ip6_frag_hdr_t *frag0, *frag1;
654 next0 = next1 = 0; //Because compiler whines
656 pi0 = to_next[0] = from[0];
657 pi1 = to_next[1] = from[1];
663 error0 = MAP_ERROR_NONE;
664 error1 = MAP_ERROR_NONE;
666 p0 = vlib_get_buffer (vm, pi0);
667 p1 = vlib_get_buffer (vm, pi1);
668 ip60 = vlib_buffer_get_current (p0);
669 ip61 = vlib_buffer_get_current (p1);
/* Path A (presumably CE mode -- condition elided): domain keyed on the
 * destination address; translated saddr is the embedded address. */
674 daddr0 = 0; /* TODO */
675 daddr1 = 0; /* TODO */
676 /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
678 daddr0 = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
679 daddr1 = map_get_ip4 (&ip61->dst_address, 0 /*TODO*/);
681 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
682 (ip4_address_t *) & daddr0,
683 &vnet_buffer (p0)->map_t.map_domain_index,
686 ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
687 (ip4_address_t *) & daddr1,
688 &vnet_buffer (p1)->map_t.map_domain_index,
/* Re-derive with the real domain flags now that d0/d1 are known. */
691 daddr0 = map_get_ip4 (&ip60->dst_address, d0->flags);
692 daddr1 = map_get_ip4 (&ip61->dst_address, d1->flags);
694 vnet_buffer (p0)->map_t.v6.daddr = daddr0;
695 vnet_buffer (p1)->map_t.v6.daddr = daddr1;
696 vnet_buffer (p0)->map_t.v6.saddr =
697 ip6_map_t_embedded_address (d0, &ip60->src_address);
698 vnet_buffer (p1)->map_t.v6.saddr =
699 ip6_map_t_embedded_address (d1, &ip61->src_address);
/* Path B (mirror): domain keyed on the source address. */
704 saddr0 = 0; /* TODO */
705 saddr1 = 0; /* TODO */
706 /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
708 saddr0 = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
709 saddr1 = map_get_ip4 (&ip61->src_address, 0 /*TODO*/);
711 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
712 (ip4_address_t *) & saddr0,
713 &vnet_buffer (p0)->map_t.map_domain_index,
716 ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
717 (ip4_address_t *) & saddr1,
718 &vnet_buffer (p1)->map_t.map_domain_index,
721 saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
722 saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
724 vnet_buffer (p0)->map_t.v6.saddr = saddr0;
725 vnet_buffer (p1)->map_t.v6.saddr = saddr1;
726 vnet_buffer (p0)->map_t.v6.daddr =
727 ip6_map_t_embedded_address (d0, &ip60->dst_address);
728 vnet_buffer (p1)->map_t.v6.daddr =
729 ip6_map_t_embedded_address (d1, &ip61->dst_address);
/* ~0 disables downstream MTU-triggered fragmentation. */
732 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
733 vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
/* Parse the extension-header chain; failure -> malformed drop. */
735 if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
736 &(vnet_buffer (p0)->map_t.
738 &(vnet_buffer (p0)->map_t.
740 &(vnet_buffer (p0)->map_t.
743 error0 = MAP_ERROR_MALFORMED;
744 next0 = IP6_MAPT_NEXT_DROP;
747 if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
748 &(vnet_buffer (p1)->map_t.
750 &(vnet_buffer (p1)->map_t.
752 &(vnet_buffer (p1)->map_t.
755 error1 = MAP_ERROR_MALFORMED;
756 next1 = IP6_MAPT_NEXT_DROP;
759 map_port0 = map_port1 = -1;
760 l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
761 sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
762 l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
763 sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
765 (ip6_frag_hdr_t *) u8_ptr_add (ip60,
766 vnet_buffer (p0)->map_t.
769 (ip6_frag_hdr_t *) u8_ptr_add (ip61,
770 vnet_buffer (p1)->map_t.
/* Classification fills map_port/error/next and checksum offsets. */
773 ip6_map_t_classify (p0, ip60, d0, &map_port0, &error0, &next0,
775 ip6_map_t_classify (p1, ip61, d1, &map_port1, &error1, &next1,
/* MAP security check: source must embed into the domain prefix+suffix. */
780 && (ip60->src_address.as_u64[0] !=
781 map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
783 || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
785 (p0)->map_t.v6.saddr,
788 error0 = MAP_ERROR_SEC_CHECK;
793 && (ip61->src_address.as_u64[0] !=
794 map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
796 || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
798 (p1)->map_t.v6.saddr,
801 error1 = MAP_ERROR_SEC_CHECK;
/* First fragment (offset 0, frag header present): cache its port for the
 * fragments that follow. */
804 if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
805 !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
810 && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
811 && (error0 == MAP_ERROR_NONE))
813 ip6_map_fragment_cache (ip60,
814 (ip6_frag_hdr_t *) u8_ptr_add (ip60,
821 if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
822 !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
827 && (map_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
828 && (error1 == MAP_ERROR_NONE))
830 ip6_map_fragment_cache (ip61,
831 (ip6_frag_hdr_t *) u8_ptr_add (ip61,
/* RX accounting; ICMP packets are counted by the ICMP node instead. */
839 (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
841 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
844 map_t.map_domain_index, 1,
846 (ip60->payload_length));
850 (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
852 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
855 map_t.map_domain_index, 1,
857 (ip61->payload_length));
860 next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
861 next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
862 p0->error = error_node->errors[error0];
863 p1->error = error_node->errors[error1];
864 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
865 n_left_to_next, pi0, pi1, next0,
/* Scalar tail: same pipeline, one buffer at a time, with the
 * classification inlined instead of calling ip6_map_t_classify(). */
870 while (n_left_from > 0 && n_left_to_next > 0)
879 ip6_frag_hdr_t *frag0;
881 ip6_mapt_next_t next0 = 0;
883 pi0 = to_next[0] = from[0];
888 error0 = MAP_ERROR_NONE;
890 p0 = vlib_get_buffer (vm, pi0);
891 ip60 = vlib_buffer_get_current (p0);
896 //Save daddr in a different variable to not overwrite ip.adj_index
897 daddr = 0; /* TODO */
898 /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
900 daddr = map_get_ip4 (&ip60->dst_address, 0 /*TODO*/);
902 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
903 (ip4_address_t *) & daddr,
904 &vnet_buffer (p0)->map_t.map_domain_index,
907 daddr = map_get_ip4 (&ip60->dst_address, d0->flags);
909 //FIXME: What if d0 is null
910 vnet_buffer (p0)->map_t.v6.daddr = daddr;
911 vnet_buffer (p0)->map_t.v6.saddr =
912 ip6_map_t_embedded_address (d0, &ip60->src_address);
919 //Save saddr in a different variable to not overwrite ip.adj_index
920 saddr = 0; /* TODO */
921 /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
923 saddr = map_get_ip4 (&ip60->src_address, 0 /*TODO*/);
925 ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
926 (ip4_address_t *) & saddr,
927 &vnet_buffer (p0)->map_t.map_domain_index,
930 saddr = map_get_ip4 (&ip60->src_address, d0->flags);
932 //FIXME: What if d0 is null
933 vnet_buffer (p0)->map_t.v6.saddr = saddr;
934 vnet_buffer (p0)->map_t.v6.daddr =
935 ip6_map_t_embedded_address (d0, &ip60->dst_address);
940 vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
942 if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
943 &(vnet_buffer (p0)->map_t.
945 &(vnet_buffer (p0)->map_t.
947 &(vnet_buffer (p0)->map_t.
950 error0 = MAP_ERROR_MALFORMED;
951 next0 = IP6_MAPT_NEXT_DROP;
955 l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
956 sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
958 (ip6_frag_hdr_t *) u8_ptr_add (ip60,
959 vnet_buffer (p0)->map_t.
/* Non-first fragment: port from the fragment cache.  Note the miss error
 * here is MAP_ERROR_FRAGMENT_MEMORY, but ip6_map_t_classify() uses
 * MAP_ERROR_FRAGMENT_DROPPED for the same miss -- see NOTE there. */
963 if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
964 ip6_frag_hdr_offset (frag0)))
966 map_port0 = ip6_map_fragment_get (ip60, frag0, d0);
967 error0 = (map_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
968 next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
972 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
976 sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
977 vnet_buffer (p0)->map_t.checksum_offset =
978 vnet_buffer (p0)->map_t.v6.l4_offset + 16;
979 next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
984 vnet_buffer (p0)->map_t.v6.l4_offset +
989 (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
993 sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
994 vnet_buffer (p0)->map_t.checksum_offset =
995 vnet_buffer (p0)->map_t.v6.l4_offset + 6;
996 next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1001 vnet_buffer (p0)->map_t.v6.l4_offset +
1004 else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
1009 sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
1010 next0 = IP6_MAPT_NEXT_MAPT_ICMP;
1011 if (((icmp46_header_t *)
1013 vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1015 || ((icmp46_header_t *)
1017 vnet_buffer (p0)->map_t.v6.
1018 l4_offset))->code == ICMP6_echo_request)
1023 vnet_buffer (p0)->map_t.v6.l4_offset + 6));
1027 //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1028 error0 = MAP_ERROR_BAD_PROTOCOL;
/* Security check (skipped in CE mode and when no port was recovered). */
1033 ((!mm->is_ce) && (map_port0 != -1)
1034 && (ip60->src_address.as_u64[0] !=
1035 map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
1037 || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
1039 (p0)->map_t.v6.saddr,
1042 //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
1043 error0 = MAP_ERROR_SEC_CHECK;
1046 //Fragmented first packet needs to be cached for following packets
1047 if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1048 !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1053 && (map_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
1054 && (error0 == MAP_ERROR_NONE))
1056 ip6_map_fragment_cache (ip60,
1057 (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1065 (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
1067 vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1070 map_t.map_domain_index, 1,
1071 clib_net_to_host_u16
1072 (ip60->payload_length));
1075 next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1076 p0->error = error_node->errors[error0];
1077 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1078 to_next, n_left_to_next, pi0,
1081 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1083 return frame->n_vectors;
1086 static char *map_t_error_strings[] = {
1087 #define _(sym,string) string,
/* Graph node registration: ip6-map-t-fragmented translates non-first IPv6
 * fragments and hands off to ip4-lookup / ip4-frag / error-drop. */
1093 VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
1094 .function = ip6_map_t_fragmented,
1095 .name = "ip6-map-t-fragmented",
1096 .vector_size = sizeof (u32),
1097 .format_trace = format_map_trace,
1098 .type = VLIB_NODE_TYPE_INTERNAL,
1100 .n_errors = MAP_N_ERROR,
1101 .error_strings = map_t_error_strings,
1103 .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
1105 [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
1106 [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1107 [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
/* Graph node registration: ip6-map-t-icmp translates ICMPv6 packets. */
1113 VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
1114 .function = ip6_map_t_icmp,
1115 .name = "ip6-map-t-icmp",
1116 .vector_size = sizeof (u32),
1117 .format_trace = format_map_trace,
1118 .type = VLIB_NODE_TYPE_INTERNAL,
1120 .n_errors = MAP_N_ERROR,
1121 .error_strings = map_t_error_strings,
1123 .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
1125 [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1126 [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1127 [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
/* Graph node registration: ip6-map-t-tcp-udp translates TCP/UDP packets. */
1133 VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
1134 .function = ip6_map_t_tcp_udp,
1135 .name = "ip6-map-t-tcp-udp",
1136 .vector_size = sizeof (u32),
1137 .format_trace = format_map_trace,
1138 .type = VLIB_NODE_TYPE_INTERNAL,
1140 .n_errors = MAP_N_ERROR,
1141 .error_strings = map_t_error_strings,
1143 .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1145 [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1146 [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1147 [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
/* Graph node registration: ip6-map-t is the classifier/entry node that
 * dispatches to the three translator nodes above. */
1153 VLIB_REGISTER_NODE(ip6_map_t_node) = {
1154 .function = ip6_map_t,
1155 .name = "ip6-map-t",
1156 .vector_size = sizeof(u32),
1157 .format_trace = format_map_trace,
1158 .type = VLIB_NODE_TYPE_INTERNAL,
1160 .n_errors = MAP_N_ERROR,
1161 .error_strings = map_t_error_strings,
1163 .n_next_nodes = IP6_MAPT_N_NEXT,
1165 [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1166 [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1167 [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1168 [IP6_MAPT_NEXT_DROP] = "error-drop",
1174 * fd.io coding-style-patch-verification: ON
1177 * eval: (c-set-style "gnu")