/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include "../ip/ip_frag.h"
#include <vnet/ip/ip6_to_ip4.h>
#include <vnet/ip/ip4_to_ip6.h>

#define IP6_MAP_T_DUAL_LOOP
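
/*
 * With IP6_MAP_T_DUAL_LOOP defined, the dispatch functions below run a
 * dual-buffer loop (two packets per iteration) before falling back to
 * the single-buffer loop.
 */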
typedef enum
{
  IP6_MAPT_NEXT_MAPT_TCP_UDP,
  IP6_MAPT_NEXT_MAPT_ICMP,
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,
  IP6_MAPT_NEXT_DROP,
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;

typedef enum
{
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;
typedef enum
{
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;
typedef enum
{
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;
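
/*
 * Only the first fragment of a datagram carries the L4 header and thus
 * the port.  ip6_map_fragment_cache() records the port in the MAP IPv4
 * reassembly structures, keyed (roughly) on the translated addresses,
 * the 6-to-4 fragment id and the protocol, so that the following
 * fragments of the same datagram can still be translated and checked.
 */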
static_always_inline int
ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
map_domain_t * d, u16 port)
map_ip4_reass_lock ();
map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
ip6_map_t_embedded_address (d,
frag_id_6to4 (frag->identification),
IP_PROTOCOL_ICMP : ip6->protocol,
map_ip4_reass_unlock ();
79 /* Returns the associated port or -1 */
80 static_always_inline i32
81 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
85 map_ip4_reass_lock ();
86 map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address,
88 ip6_map_t_embedded_address (d,
91 frag_id_6to4 (frag->identification),
94 IP_PROTOCOL_ICMP : ip6->protocol,
96 i32 ret = r ? r->port : -1;
97 map_ip4_reass_unlock ();
105 } icmp6_to_icmp_ctx_t;
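
/*
 * Outer-header callback for ICMP translation: verify that the IPv6
 * source address is consistent with the MAP domain and sender port
 * (security check), then write the translated IPv4 source and the
 * IPv4 address embedded in the IPv6 destination.
 */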
ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
icmp6_to_icmp_ctx_t *ctx = arg;
//Note that this prevents an intermediate IPv6 router from answering the request
ip4_sadr = map_get_ip4 (&ip6->src_address, ctx->d->flags);
if (ip6->src_address.as_u64[0] !=
map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port)
|| ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr,
ip4->dst_address.as_u32 =
ip6_map_t_embedded_address (ctx->d, &ip6->dst_address);
ip4->src_address.as_u32 = ip4_sadr;
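
/*
 * Inner-header callback for ICMP errors: the same security check is
 * applied to the destination address of the embedded packet, and the
 * inner IPv4 addresses are derived the opposite way around from the
 * outer header.
 */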
ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4,
icmp6_to_icmp_ctx_t *ctx = arg;
//Security check of inner packet
inner_ip4_dadr = map_get_ip4 (&ip6->dst_address, ctx->d->flags);
if (ip6->dst_address.as_u64[0] !=
map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port)
|| ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d,
ip4->dst_address.as_u32 = inner_ip4_dadr;
ip4->src_address.as_u32 =
ip6_map_t_embedded_address (ctx->d, &ip6->src_address);
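
/*
 * ip6-map-t-icmp node: translates ICMPv6 to ICMPv4 using the two
 * callbacks above, updates the RX domain counters, and forwards to
 * ip4-lookup, or to ip4-frag when the result exceeds the domain MTU.
 */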
ip6_map_t_icmp (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_combined_counter_main_t *cm = map_main.domain_counters;
u32 thread_index = vlib_get_thread_index ();
while (n_left_from > 0)
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
ip6_mapt_icmp_next_t next0;
icmp6_to_icmp_ctx_t ctx0;
pi0 = to_next[0] = from[0];
error0 = MAP_ERROR_NONE;
next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
p0 = vlib_get_buffer (vm, pi0);
ip60 = vlib_buffer_get_current (p0);
len0 = clib_net_to_host_u16 (ip60->payload_length);
pool_elt_at_index (map_main.domains,
vnet_buffer (p0)->map_t.map_domain_index);
ctx0.sender_port = ip6_get_port (ip60, 0, p0->current_length);
if (ctx0.sender_port == 0)
// In case of 1:1 mapping, we don't care about the port
if (!(d0->ea_bits_len == 0 && d0->rules))
error0 = MAP_ERROR_ICMP;
(p0, ip6_to_ip4_set_icmp_cb, &ctx0,
ip6_to_ip4_set_inner_icmp_cb, &ctx0))
error0 = MAP_ERROR_ICMP;
if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
//Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
map_t.map_domain_index, 1,
next0 = IP6_MAPT_ICMP_NEXT_DROP;
p0->error = error_node->errors[error0];
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, pi0,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
return frame->n_vectors;
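
/*
 * Callback shared by the TCP/UDP and fragmented paths: the translated
 * addresses were already computed in ip6-map-t and stashed in the
 * buffer metadata (vnet_buffer()->map_t.v6), so they are simply copied
 * into the new IPv4 header.
 */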
ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx)
vlib_buffer_t *p = ctx;
ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr;
ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr;
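
/*
 * ip6-map-t-fragmented node: translates non-first fragments (which
 * carry no L4 header) with ip6_to_ip4_fragmented(), then forwards to
 * ip4-lookup or to ip4-frag depending on the domain MTU.
 */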
ip6_map_t_fragmented (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index);
while (n_left_from > 0)
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
#ifdef IP6_MAP_T_DUAL_LOOP
while (n_left_from >= 4 && n_left_to_next >= 2)
vlib_buffer_t *p0, *p1;
pi0 = to_next[0] = from[0];
pi1 = to_next[1] = from[1];
next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
p0 = vlib_get_buffer (vm, pi0);
p1 = vlib_get_buffer (vm, pi1);
if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
//Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1))
p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
//Send to fragmentation node if necessary
vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
vnet_buffer (p1)->ip_frag.header_offset = 0;
vnet_buffer (p1)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
to_next, n_left_to_next, pi0, pi1,
while (n_left_from > 0 && n_left_to_next > 0)
pi0 = to_next[0] = from[0];
next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
p0 = vlib_get_buffer (vm, pi0);
if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0))
p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP;
if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
//Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, pi0,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
return frame->n_vectors;
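
/*
 * ip6-map-t-tcp-udp node: translates TCP and UDP packets with
 * ip6_to_ip4_tcp_udp(), then forwards to ip4-lookup or to ip4-frag
 * depending on the domain MTU.
 */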
ip6_map_t_tcp_udp (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index);
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
while (n_left_from > 0)
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
#ifdef IP6_MAP_T_DUAL_LOOP
while (n_left_from >= 4 && n_left_to_next >= 2)
vlib_buffer_t *p0, *p1;
ip6_mapt_tcp_udp_next_t next0, next1;
pi0 = to_next[0] = from[0];
pi1 = to_next[1] = from[1];
next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
p0 = vlib_get_buffer (vm, pi0);
p1 = vlib_get_buffer (vm, pi1);
if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
//Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1))
p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
next1 = IP6_MAPT_TCP_UDP_NEXT_DROP;
if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
//Send to fragmentation node if necessary
vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
vnet_buffer (p1)->ip_frag.header_offset = 0;
vnet_buffer (p1)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
n_left_to_next, pi0, pi1, next0,
while (n_left_from > 0 && n_left_to_next > 0)
ip6_mapt_tcp_udp_next_t next0;
pi0 = to_next[0] = from[0];
next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
p0 = vlib_get_buffer (vm, pi0);
if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1))
p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
next0 = IP6_MAPT_TCP_UDP_NEXT_DROP;
if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
//Send to fragmentation node if necessary
vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
vnet_buffer (p0)->ip_frag.header_offset = 0;
vnet_buffer (p0)->ip_frag.next_index =
IP4_FRAG_NEXT_IP4_LOOKUP;
next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, pi0,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
return frame->n_vectors;
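
/*
 * Per-packet classification: selects the next node (TCP/UDP, ICMP or
 * fragmented), records the L4 checksum offset (l4_offset + 16 for TCP,
 * l4_offset + 6 for UDP), extracts the source port, and flags
 * malformed packets or unsupported protocols.
 */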
static_always_inline void
ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
map_domain_t * d0, i32 * src_port0,
u8 * error0, ip6_mapt_next_t * next0,
u32 l4_len0, ip6_frag_hdr_t * frag0)
if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
ip6_frag_hdr_offset (frag0)))
*next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
if (d0->ea_bits_len == 0 && d0->rules)
*src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
*error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
vnet_buffer (p0)->map_t.checksum_offset =
vnet_buffer (p0)->map_t.v6.l4_offset + 16;
*next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
vnet_buffer (p0)->map_t.checksum_offset =
vnet_buffer (p0)->map_t.v6.l4_offset + 6;
*next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
*next0 = IP6_MAPT_NEXT_MAPT_ICMP;
if (d0->ea_bits_len == 0 && d0->rules)
if (((icmp46_header_t *)
vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
|| ((icmp46_header_t *)
vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
//TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
*error0 = MAP_ERROR_BAD_PROTOCOL;
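
/*
 * ip6-map-t node: entry point of the translator.  It looks up the MAP
 * domain, precomputes the translated IPv4 addresses into the buffer
 * metadata, parses the IPv6 header chain, classifies the packet,
 * performs the source-address security check, caches the port of first
 * fragments, updates RX counters and dispatches to the nodes above.
 */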
ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip6_map_t_node.index);
vlib_combined_counter_main_t *cm = map_main.domain_counters;
u32 thread_index = vlib_get_thread_index ();
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
while (n_left_from > 0)
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
#ifdef IP6_MAP_T_DUAL_LOOP
while (n_left_from >= 4 && n_left_to_next >= 2)
vlib_buffer_t *p0, *p1;
ip6_header_t *ip60, *ip61;
ip6_mapt_next_t next0, next1;
u32 l4_len0, l4_len1;
i32 src_port0, src_port1;
map_domain_t *d0, *d1;
ip6_frag_hdr_t *frag0, *frag1;
next0 = next1 = 0;	//Initialized only to silence a compiler warning
pi0 = to_next[0] = from[0];
pi1 = to_next[1] = from[1];
error0 = MAP_ERROR_NONE;
error1 = MAP_ERROR_NONE;
p0 = vlib_get_buffer (vm, pi0);
p1 = vlib_get_buffer (vm, pi1);
ip60 = vlib_buffer_get_current (p0);
ip61 = vlib_buffer_get_current (p1);
saddr0 = 0; /* TODO */
saddr1 = 0; /* TODO */
/* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
(ip4_address_t *) & saddr0,
&vnet_buffer (p0)->map_t.map_domain_index,
ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
(ip4_address_t *) & saddr1,
&vnet_buffer (p1)->map_t.map_domain_index,
saddr0 = map_get_ip4 (&ip60->src_address, d0->flags);
saddr1 = map_get_ip4 (&ip61->src_address, d1->flags);
vnet_buffer (p0)->map_t.v6.saddr = saddr0;
vnet_buffer (p1)->map_t.v6.saddr = saddr1;
vnet_buffer (p0)->map_t.v6.daddr =
ip6_map_t_embedded_address (d0, &ip60->dst_address);
vnet_buffer (p1)->map_t.v6.daddr =
ip6_map_t_embedded_address (d1, &ip61->dst_address);
vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
&(vnet_buffer (p0)->map_t.
&(vnet_buffer (p0)->map_t.
&(vnet_buffer (p0)->map_t.
error0 = MAP_ERROR_MALFORMED;
next0 = IP6_MAPT_NEXT_DROP;
if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
&(vnet_buffer (p1)->map_t.
&(vnet_buffer (p1)->map_t.
&(vnet_buffer (p1)->map_t.
error1 = MAP_ERROR_MALFORMED;
next1 = IP6_MAPT_NEXT_DROP;
src_port0 = src_port1 = -1;
l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
(ip6_frag_hdr_t *) u8_ptr_add (ip60,
vnet_buffer (p0)->map_t.
(ip6_frag_hdr_t *) u8_ptr_add (ip61,
vnet_buffer (p1)->map_t.
ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
&& (ip60->src_address.as_u64[0] !=
map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
|| ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
(p0)->map_t.v6.saddr,
error0 = MAP_ERROR_SEC_CHECK;
&& (ip61->src_address.as_u64[0] !=
map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
|| ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
(p1)->map_t.v6.saddr,
error1 = MAP_ERROR_SEC_CHECK;
if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
!ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
&& (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
&& (error0 == MAP_ERROR_NONE))
ip6_map_fragment_cache (ip60,
(ip6_frag_hdr_t *) u8_ptr_add (ip60,
if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
!ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
&& (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
&& (error1 == MAP_ERROR_NONE))
ip6_map_fragment_cache (ip61,
(ip6_frag_hdr_t *) u8_ptr_add (ip61,
(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
map_t.map_domain_index, 1,
(ip60->payload_length));
(error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
map_t.map_domain_index, 1,
(ip61->payload_length));
next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
p0->error = error_node->errors[error0];
p1->error = error_node->errors[error1];
vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
n_left_to_next, pi0, pi1, next0,
while (n_left_from > 0 && n_left_to_next > 0)
ip6_frag_hdr_t *frag0;
ip6_mapt_next_t next0 = 0;
pi0 = to_next[0] = from[0];
error0 = MAP_ERROR_NONE;
p0 = vlib_get_buffer (vm, pi0);
ip60 = vlib_buffer_get_current (p0);
//Save saddr in a separate variable so ip.adj_index is not overwritten
saddr = 0; /* TODO */
/* NOTE: ip6_map_get_domain currently doesn't utilize second argument */
d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
(ip4_address_t *) & saddr,
&vnet_buffer (p0)->map_t.map_domain_index,
saddr = map_get_ip4 (&ip60->src_address, d0->flags);
//FIXME: handle the case where d0 is NULL
vnet_buffer (p0)->map_t.v6.saddr = saddr;
vnet_buffer (p0)->map_t.v6.daddr =
ip6_map_t_embedded_address (d0, &ip60->dst_address);
vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
&(vnet_buffer (p0)->map_t.
&(vnet_buffer (p0)->map_t.
&(vnet_buffer (p0)->map_t.
error0 = MAP_ERROR_MALFORMED;
next0 = IP6_MAPT_NEXT_DROP;
l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
(ip6_frag_hdr_t *) u8_ptr_add (ip60,
vnet_buffer (p0)->map_t.
if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
ip6_frag_hdr_offset (frag0)))
src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
vnet_buffer (p0)->map_t.checksum_offset =
vnet_buffer (p0)->map_t.v6.l4_offset + 16;
next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
(vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
vnet_buffer (p0)->map_t.checksum_offset =
vnet_buffer (p0)->map_t.v6.l4_offset + 6;
next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
next0 = IP6_MAPT_NEXT_MAPT_ICMP;
if (((icmp46_header_t *)
vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
|| ((icmp46_header_t *)
vnet_buffer (p0)->map_t.v6.
l4_offset))->code == ICMP6_echo_request)
vnet_buffer (p0)->map_t.v6.l4_offset + 6));
//TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
error0 = MAP_ERROR_BAD_PROTOCOL;
&& (ip60->src_address.as_u64[0] !=
map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
|| ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
(p0)->map_t.v6.saddr,
//Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
error0 = MAP_ERROR_SEC_CHECK;
//The first fragment must be cached so the port can be recovered for subsequent fragments
if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
!ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
&& (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
&& (error0 == MAP_ERROR_NONE))
ip6_map_fragment_cache (ip60,
(ip6_frag_hdr_t *) u8_ptr_add (ip60,
(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
map_t.map_domain_index, 1,
(ip60->payload_length));
next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
p0->error = error_node->errors[error0];
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, pi0,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
return frame->n_vectors;
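
/*
 * Error strings are generated from the MAP error definitions (the
 * foreach_map_error list) and shared by the four nodes registered
 * below.
 */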
static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};
VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
  .function = ip6_map_t_fragmented,
  .name = "ip6-map-t-fragmented",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
  .function = ip6_map_t_icmp,
  .name = "ip6-map-t-icmp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
  .function = ip6_map_t_tcp_udp,
  .name = "ip6-map-t-tcp-udp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
      [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
      [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};
VLIB_REGISTER_NODE(ip6_map_t_node) = {
  .function = ip6_map_t,
  .name = "ip6-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_N_NEXT,
  .next_nodes = {
      [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
      [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
      [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
      [IP6_MAPT_NEXT_DROP] = "error-drop",
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */