/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include <vnet/ip/ip_frag.h>
#include <vnet/ip/ip6_to_ip4.h>
#include <vnet/ip/ip4_to_ip6.h>

#define IP6_MAP_T_DUAL_LOOP
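/*
 * IP6_MAP_T_DUAL_LOOP enables the dual-loop variants below, which process
 * two buffers per iteration before falling back to the single-buffer loop.
 */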
typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,
  IP6_MAPT_NEXT_MAPT_ICMP,
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,
  IP6_MAPT_NEXT_DROP,
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;
typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;
typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;
typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;
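/*
 * Non-first IPv6 fragments carry no L4 header, so the MAP port cannot be
 * derived from them directly.  The two helpers below cache the port taken
 * from the first fragment (keyed on the translated addresses, fragment id
 * and protocol) and look it up again for the following fragments of the
 * same datagram.
 */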
static_always_inline int
ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
			map_domain_t * d, u16 port)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
							d->flags),
					  ip6_map_t_embedded_address (d,
								      &ip6->dst_address),
					  frag_id_6to4 (frag->identification),
					  (ip6->protocol ==
					   IP_PROTOCOL_ICMP6) ?
					  IP_PROTOCOL_ICMP : ip6->protocol,
					  &ignore);
  if (r)
    r->port = port;

  map_ip4_reass_unlock ();
  return !r;
}

/* Returns the associated port or -1 */
static_always_inline i32
ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
		      map_domain_t * d)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock ();
  map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
							d->flags),
					  ip6_map_t_embedded_address (d,
								      &ip6->dst_address),
					  frag_id_6to4 (frag->identification),
					  (ip6->protocol ==
					   IP_PROTOCOL_ICMP6) ?
					  IP_PROTOCOL_ICMP : ip6->protocol,
					  &ignore);
  i32 ret = r ? r->port : -1;
  map_ip4_reass_unlock ();
  return ret;
}

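/*
 * Context handed to the ICMP translation callbacks: the MAP domain of the
 * packet and the sender port used for the MAP security checks.
 */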
typedef struct
{
  map_domain_t *d;
  u16 sender_port;
} icmp6_to_icmp_ctx_t;

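/*
 * Callback used by icmp6_to_icmp() to fill the outer IPv4 header of a
 * translated ICMP message.  It also verifies that the IPv6 source address
 * matches what the MAP rule produces for the embedded IPv4 address and
 * port, and rejects the packet otherwise.
 */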
static int
ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
{
  icmp6_to_icmp_ctx_t *ctx = arg;
  u32 ip4_sadr;

  //Security check
  //Note that this prevents an intermediate IPv6 router from answering the request
  ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
  if (ip6->src_address.as_u64[0] !=
      map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port)
      || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
							ctx->sender_port))
    return -1;

  ip4->dst_address.as_u32 =
    ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
  ip4->src_address.as_u32 = ip4_sadr;

  return 0;
}

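/*
 * Callback used by icmp6_to_icmp() for the packet embedded in an ICMP
 * error message.  The inner packet travels in the opposite direction, so
 * the security check is applied to its destination address instead.
 */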
static int
ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
			      void *arg)
{
  icmp6_to_icmp_ctx_t *ctx = arg;
  u32 inner_ip4_dadr;

  //Security check of inner packet
  inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
  if (ip6->dst_address.as_u64[0] !=
      map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port)
      || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
							inner_ip4_dadr,
							ctx->sender_port))
    return -1;

  ip4->dst_address.as_u32 = inner_ip4_dadr;
  ip4->src_address.as_u32 =
    ip6_map_t_embedded_address (ctx->d, &ip6->src_address);

  return 0;
}

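/*
 * Node translating ICMPv6 messages to ICMP.  Packets that exceed the
 * domain MTU after translation are handed to the IPv4 fragmentation node;
 * translation failures are dropped and counted.
 */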
static uword
ip6_map_t_icmp (vlib_main_t * vm,
		vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  u8 error0;
	  ip6_mapt_icmp_next_t next0;
	  map_domain_t *d0;
	  u16 len0;
	  icmp6_to_icmp_ctx_t ctx0;
	  ip6_header_t *ip60;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  error0 = MAP_ERROR_NONE;
	  next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);
	  len0 = clib_net_to_host_u16 (ip60->payload_length);
	  d0 =
	    pool_elt_at_index (map_main.domains,
			       vnet_buffer (p0)->map_t.map_domain_index);
	  ctx0.sender_port = ip6_get_port (ip60, 0, p0->current_length);
	  ctx0.d = d0;
	  if (ctx0.sender_port == 0)
	    {
	      // In case of 1:1 mapping, we don't care about the port
	      if (!(d0->ea_bits_len == 0 && d0->rules))
		{
		  error0 = MAP_ERROR_ICMP;
		  goto err0;
		}
	    }

	  if (icmp6_to_icmp
	      (p0, ip6_to_ip4_set_icmp_cb, &ctx0,
	       ip6_to_ip4_set_inner_icmp_cb, &ctx0))
	    {
	      error0 = MAP_ERROR_ICMP;
	      goto err0;
	    }

	  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
	    {
	      //Send to fragmentation node if necessary
	      vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
	      vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
	      next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
	    }
	err0:
	  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       len0);
	    }
	  else
	    {
	      next0 = IP6_MAPT_ICMP_NEXT_DROP;
	    }
	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

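/*
 * Generic callback for the ip6_to_ip4 helpers: the translated IPv4
 * addresses were computed earlier and stashed in the buffer metadata, so
 * they are simply copied into the new IPv4 header.
 */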
static int
ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
{
  vlib_buffer_t *p = ctx;

  ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
  ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;

  return 0;
}

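/*
 * Node handling non-first fragments.  The source port was recovered from
 * the fragment cache by the classifier, so the packet can be translated
 * like any other and, if needed, re-fragmented to the domain MTU.
 */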
static uword
ip6_map_t_fragmented (vlib_main_t * vm,
		      vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip6_mapt_fragmented_next_t next0, next1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
	  next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
	    {
	      p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
	      next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
		}
	    }

	  if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
	    {
	      p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
	      next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
		  vnet_buffer (p1)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next, pi0, pi1,
					   next0, next1);
	}
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip6_mapt_fragmented_next_t next0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
	  p0 = vlib_get_buffer (vm, pi0);

	  if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
	    {
	      p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
	      next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

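/*
 * Node translating TCP and UDP packets.  ip6_to_ip4_tcp_udp() rewrites the
 * headers and adjusts the L4 checksum; results larger than the domain MTU
 * are sent to the IPv4 fragmentation node.
 */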
static uword
ip6_map_t_tcp_udp (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip6_mapt_tcp_udp_next_t next0, next1;

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
	  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
	    {
	      p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
		  vnet_buffer (p1)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}
#endif

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip6_mapt_tcp_udp_next_t next0;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

	  p0 = vlib_get_buffer (vm, pi0);

	  if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
	    {
	      p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
	      next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
	    }
	  else
	    {
	      if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
		{
		  //Send to fragmentation node if necessary
		  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
		  vnet_buffer (p0)->ip_frag.next_index =
		    IP4_FRAG_NEXT_IP4_LOOKUP;
		  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
		}
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

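/*
 * Per-packet classification shared by the dual and single loops of the
 * ip6-map-t node: picks the next node (TCP/UDP, ICMP or fragmented),
 * extracts the source port when one is available and flags malformed or
 * unsupported packets.
 */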
static_always_inline void
ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
		    map_domain_t * d0, i32 * src_port0,
		    u8 * error0, ip6_mapt_next_t * next0,
		    u32 l4_len0, ip6_frag_hdr_t * frag0)
{
  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
		     ip6_frag_hdr_offset (frag0)))
    {
      *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
      if (d0->ea_bits_len == 0 && d0->rules)
	{
	  *src_port0 = 0;
	}
      else
	{
	  *src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
	  *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
	}
    }
  else
    if (PREDICT_TRUE
	(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
    {
      *error0 =
	l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
      vnet_buffer (p0)->map_t.checksum_offset =
	vnet_buffer (p0)->map_t.v6.l4_offset + 16;
      *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
      *src_port0 =
	(i32) *
	((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
    }
  else
    if (PREDICT_TRUE
	(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
    {
      *error0 =
	l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
      vnet_buffer (p0)->map_t.checksum_offset =
	vnet_buffer (p0)->map_t.v6.l4_offset + 6;
      *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
      *src_port0 =
	(i32) *
	((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
    }
  else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
    {
      *error0 =
	l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
      *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
      if (d0->ea_bits_len == 0 && d0->rules)
	{
	  *src_port0 = 0;
	}
      else
	if (((icmp46_header_t *)
	     u8_ptr_add (ip60,
			 vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
	    ICMP6_echo_reply
	    || ((icmp46_header_t *)
		u8_ptr_add (ip60,
			    vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
	    ICMP6_echo_request)
	{
	  *src_port0 =
	    (i32) *
	    ((u16 *)
	     u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
	}
    }
  else
    {
      //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
      *error0 = MAP_ERROR_BAD_PROTOCOL;
    }
}

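/*
 * Entry node for IPv6-to-IPv4 MAP-T translation: resolves the MAP domain,
 * pre-computes the translated IPv4 addresses, parses the IPv6 header chain,
 * classifies the packet, applies the MAP security check and caches the
 * port of first fragments before dispatching to the per-protocol nodes.
 */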
static uword
ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip6_map_t_node.index);
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 pi0, pi1;
	  vlib_buffer_t *p0, *p1;
	  ip6_header_t *ip60, *ip61;
	  u8 error0, error1;
	  ip6_mapt_next_t next0, next1;
	  u32 l4_len0, l4_len1;
	  i32 src_port0, src_port1;
	  map_domain_t *d0, *d1;
	  ip6_frag_hdr_t *frag0, *frag1;
	  u32 saddr0, saddr1;
	  next0 = next1 = 0;	//Because compiler whines

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  error0 = MAP_ERROR_NONE;
	  error1 = MAP_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);
	  ip60 = vlib_buffer_get_current (p0);
	  ip61 = vlib_buffer_get_current (p1);

	  saddr0 = 0;		/* TODO */
	  saddr1 = 0;		/* TODO */
	  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */

	  d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
				   (ip4_address_t *) & saddr0,
				   &vnet_buffer (p0)->map_t.map_domain_index,
				   &error0);
	  d1 =
	    ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
				(ip4_address_t *) & saddr1,
				&vnet_buffer (p1)->map_t.map_domain_index,
				&error1);

	  saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
	  saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);

	  vnet_buffer (p0)->map_t.v6.saddr = saddr0;
	  vnet_buffer (p1)->map_t.v6.saddr = saddr1;
	  vnet_buffer (p0)->map_t.v6.daddr =
	    ip6_map_t_embedded_address (d0, &ip60->dst_address);
	  vnet_buffer (p1)->map_t.v6.daddr =
	    ip6_map_t_embedded_address (d1, &ip61->dst_address);
	  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
	  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;

	  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
					&(vnet_buffer (p0)->map_t.
					  v6.l4_protocol),
					&(vnet_buffer (p0)->map_t.
					  v6.l4_offset),
					&(vnet_buffer (p0)->map_t.
					  v6.frag_offset))))
	    {
	      error0 = MAP_ERROR_MALFORMED;
	      next0 = IP6_MAPT_NEXT_DROP;
	    }

	  if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
					&(vnet_buffer (p1)->map_t.
					  v6.l4_protocol),
					&(vnet_buffer (p1)->map_t.
					  v6.l4_offset),
					&(vnet_buffer (p1)->map_t.
					  v6.frag_offset))))
	    {
	      error1 = MAP_ERROR_MALFORMED;
	      next1 = IP6_MAPT_NEXT_DROP;
	    }

	  src_port0 = src_port1 = -1;
	  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
	    sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
	  l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
	    sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
	  frag0 =
	    (ip6_frag_hdr_t *) u8_ptr_add (ip60,
					   vnet_buffer (p0)->map_t.
					   v6.frag_offset);
	  frag1 =
	    (ip6_frag_hdr_t *) u8_ptr_add (ip61,
					   vnet_buffer (p1)->map_t.
					   v6.frag_offset);

	  ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
			      l4_len0, frag0);
	  ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
			      l4_len1, frag1);

	  if (PREDICT_FALSE
	      ((src_port0 != -1)
	       && (ip60->src_address.as_u64[0] !=
		   map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
				    src_port0)
		   || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
								      vnet_buffer
								      (p0)->map_t.v6.saddr,
								      src_port0))))
	    {
	      error0 = MAP_ERROR_SEC_CHECK;
	    }

	  if (PREDICT_FALSE
	      ((src_port1 != -1)
	       && (ip61->src_address.as_u64[0] !=
		   map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
				    src_port1)
		   || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
								      vnet_buffer
								      (p1)->map_t.v6.saddr,
								      src_port1))))
	    {
	      error1 = MAP_ERROR_SEC_CHECK;
	    }

	  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
			     !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
						   u8_ptr_add (ip60,
							       vnet_buffer
							       (p0)->map_t.
							       v6.frag_offset))
			     && (src_port0 != -1)
			     && (d0->ea_bits_len != 0 || !d0->rules)
			     && (error0 == MAP_ERROR_NONE)))
	    {
	      ip6_map_fragment_cache (ip60,
				      (ip6_frag_hdr_t *) u8_ptr_add (ip60,
								     vnet_buffer
								     (p0)->map_t.
								     v6.frag_offset),
				      d0, src_port0);
	    }

	  if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
			     !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
						   u8_ptr_add (ip61,
							       vnet_buffer
							       (p1)->map_t.
							       v6.frag_offset))
			     && (src_port1 != -1)
			     && (d1->ea_bits_len != 0 || !d1->rules)
			     && (error1 == MAP_ERROR_NONE)))
	    {
	      ip6_map_fragment_cache (ip61,
				      (ip6_frag_hdr_t *) u8_ptr_add (ip61,
								     vnet_buffer
								     (p1)->map_t.
								     v6.frag_offset),
				      d1, src_port1);
	    }

	  if (PREDICT_TRUE
	      (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       clib_net_to_host_u16
					       (ip60->payload_length));
	    }

	  if (PREDICT_TRUE
	      (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p1)->
					       map_t.map_domain_index, 1,
					       clib_net_to_host_u16
					       (ip61->payload_length));
	    }

	  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
	  next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
	  p0->error = error_node->errors[error0];
	  p1->error = error_node->errors[error1];
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, pi0, pi1, next0,
					   next1);
	}
#endif
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 pi0;
	  vlib_buffer_t *p0;
	  ip6_header_t *ip60;
	  u8 error0;
	  u32 l4_len0;
	  i32 src_port0;
	  map_domain_t *d0;
	  ip6_frag_hdr_t *frag0;
	  ip6_mapt_next_t next0 = 0;
	  u32 saddr;

	  pi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  error0 = MAP_ERROR_NONE;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip60 = vlib_buffer_get_current (p0);

	  //Save saddr in a different variable to not overwrite ip.adj_index
	  saddr = 0;		/* TODO */
	  /* NOTE: ip6_map_get_domain currently doesn't utilize second argument */

	  d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
				   (ip4_address_t *) & saddr,
				   &vnet_buffer (p0)->map_t.map_domain_index,
				   &error0);

	  saddr = map_get_ip4 (&ip60->src_address, d0->flags);

	  //FIXME: What if d0 is null
	  vnet_buffer (p0)->map_t.v6.saddr = saddr;
	  vnet_buffer (p0)->map_t.v6.daddr =
	    ip6_map_t_embedded_address (d0, &ip60->dst_address);
	  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

	  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
					&(vnet_buffer (p0)->map_t.
					  v6.l4_protocol),
					&(vnet_buffer (p0)->map_t.
					  v6.l4_offset),
					&(vnet_buffer (p0)->map_t.
					  v6.frag_offset))))
	    {
	      error0 = MAP_ERROR_MALFORMED;
	      next0 = IP6_MAPT_NEXT_DROP;
	    }

	  src_port0 = -1;
	  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
	    sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
	  frag0 =
	    (ip6_frag_hdr_t *) u8_ptr_add (ip60,
					   vnet_buffer (p0)->map_t.
					   v6.frag_offset);

	  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
			     ip6_frag_hdr_offset (frag0)))
	    {
	      src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
	      error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
	      next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
	    }
	  else
	    if (PREDICT_TRUE
		(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
	    {
	      error0 =
		l4_len0 <
		sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
	      vnet_buffer (p0)->map_t.checksum_offset =
		vnet_buffer (p0)->map_t.v6.l4_offset + 16;
	      next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
	      src_port0 =
		(i32) *
		((u16 *)
		 u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
	    }
	  else
	    if (PREDICT_TRUE
		(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
	    {
	      error0 =
		l4_len0 <
		sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
	      vnet_buffer (p0)->map_t.checksum_offset =
		vnet_buffer (p0)->map_t.v6.l4_offset + 6;
	      next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
	      src_port0 =
		(i32) *
		((u16 *)
		 u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
	    }
	  else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
		   IP_PROTOCOL_ICMP6)
	    {
	      error0 =
		l4_len0 <
		sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
	      next0 = IP6_MAPT_NEXT_MAPT_ICMP;
	      if (((icmp46_header_t *)
		   u8_ptr_add (ip60,
			       vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
		  ICMP6_echo_reply
		  || ((icmp46_header_t *)
		      u8_ptr_add (ip60,
				  vnet_buffer (p0)->map_t.v6.
				  l4_offset))->code == ICMP6_echo_request)
		{
		  src_port0 =
		    (i32) *
		    ((u16 *)
		     u8_ptr_add (ip60,
				 vnet_buffer (p0)->map_t.v6.l4_offset + 6));
		}
	    }
	  else
	    {
	      //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
	      error0 = MAP_ERROR_BAD_PROTOCOL;
	    }

	  if (PREDICT_FALSE
	      ((src_port0 != -1)
	       && (ip60->src_address.as_u64[0] !=
		   map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
				    src_port0)
		   || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
								      vnet_buffer
								      (p0)->map_t.v6.saddr,
								      src_port0))))
	    {
	      //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
	      error0 = MAP_ERROR_SEC_CHECK;
	    }

	  //Fragmented first packet needs to be cached for following packets
	  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
			     !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
						   u8_ptr_add (ip60,
							       vnet_buffer
							       (p0)->map_t.
							       v6.frag_offset))
			     && (src_port0 != -1)
			     && (d0->ea_bits_len != 0 || !d0->rules)
			     && (error0 == MAP_ERROR_NONE)))
	    {
	      ip6_map_fragment_cache (ip60,
				      (ip6_frag_hdr_t *) u8_ptr_add (ip60,
								     vnet_buffer
								     (p0)->map_t.
								     v6.frag_offset),
				      d0, src_port0);
	    }

	  if (PREDICT_TRUE
	      (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
	    {
	      vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
					       thread_index,
					       vnet_buffer (p0)->
					       map_t.map_domain_index, 1,
					       clib_net_to_host_u16
					       (ip60->payload_length));
	    }

	  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
	  p0->error = error_node->errors[error0];
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, pi0,
					   next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

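/*
 * Error strings and graph node registrations for the four MAP-T
 * IPv6-to-IPv4 nodes defined above.
 */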
static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};

VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
  .function = ip6_map_t_fragmented,
  .name = "ip6-map-t-fragmented",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
  .function = ip6_map_t_icmp,
  .name = "ip6-map-t-icmp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
  .function = ip6_map_t_tcp_udp,
  .name = "ip6-map-t-tcp-udp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE(ip6_map_t_node) = {
  .function = ip6_map_t,
  .name = "ip6-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
      [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
      [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
      [IP6_MAPT_NEXT_DROP] = "error-drop",
  },
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */