/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT64 IPv6 to IPv4 translation (inside to outside network)
 */
20 #include <nat/nat64.h>
21 #include <nat/nat_reass.h>
22 #include <nat/nat_inlines.h>
23 #include <vnet/ip/ip6_to_ip4.h>
24 #include <vnet/fib/fib_table.h>
31 } nat64_in2out_trace_t;
34 format_nat64_in2out_trace (u8 * s, va_list * args)
36 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38 nat64_in2out_trace_t *t = va_arg (*args, nat64_in2out_trace_t *);
41 tag = t->is_slow_path ? "NAT64-in2out-slowpath" : "NAT64-in2out";
44 format (s, "%s: sw_if_index %d, next index %d", tag, t->sw_if_index,
55 } nat64_in2out_reass_trace_t;
58 format_nat64_in2out_reass_trace (u8 * s, va_list * args)
60 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
61 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
62 nat64_in2out_reass_trace_t *t =
63 va_arg (*args, nat64_in2out_reass_trace_t *);
66 format (s, "NAT64-in2out-reass: sw_if_index %d, next index %d, status %s",
67 t->sw_if_index, t->next_index,
68 t->cached ? "cached" : "translated");
73 vlib_node_registration_t nat64_in2out_node;
74 vlib_node_registration_t nat64_in2out_slowpath_node;
75 vlib_node_registration_t nat64_in2out_reass_node;
76 vlib_node_registration_t nat64_in2out_handoff_node;
78 #define foreach_nat64_in2out_error \
79 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
80 _(IN2OUT_PACKETS, "good in2out packets processed") \
81 _(NO_TRANSLATION, "no translation") \
82 _(UNKNOWN, "unknown") \
83 _(DROP_FRAGMENT, "Drop fragment") \
84 _(MAX_REASS, "Maximum reassemblies exceeded") \
85 _(MAX_FRAG, "Maximum fragments per reassembly exceeded")
90 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym,
91 foreach_nat64_in2out_error
94 } nat64_in2out_error_t;
96 static char *nat64_in2out_error_strings[] = {
97 #define _(sym,string) string,
98 foreach_nat64_in2out_error
104 NAT64_IN2OUT_NEXT_IP4_LOOKUP,
105 NAT64_IN2OUT_NEXT_IP6_LOOKUP,
106 NAT64_IN2OUT_NEXT_DROP,
107 NAT64_IN2OUT_NEXT_SLOWPATH,
108 NAT64_IN2OUT_NEXT_REASS,
110 } nat64_in2out_next_t;
112 typedef struct nat64_in2out_set_ctx_t_
117 } nat64_in2out_set_ctx_t;
120 nat64_not_translate (u32 sw_if_index, ip6_address_t ip6_addr)
123 ip6_main_t *im6 = &ip6_main;
124 ip_lookup_main_t *lm6 = &im6->lookup_main;
125 ip_interface_address_t *ia = 0;
128 foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
130 addr = ip_interface_address_get_address (lm6, ia);
131 if (0 == ip6_address_compare (addr, &ip6_addr))
140 * @brief Check whether is a hairpinning.
142 * If the destination IP address of the packet is an IPv4 address assigned to
143 * the NAT64 itself, then the packet is a hairpin packet.
145 * param dst_addr Destination address of the packet.
147 * @returns 1 if hairpinning, otherwise 0.
149 static_always_inline int
150 is_hairpinning (ip6_address_t * dst_addr)
152 nat64_main_t *nm = &nat64_main;
155 for (i = 0; i < vec_len (nm->addr_pool); i++)
157 if (nm->addr_pool[i].addr.as_u32 == dst_addr->as_u32[3])
165 nat64_in2out_tcp_udp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
168 nat64_main_t *nm = &nat64_main;
169 nat64_in2out_set_ctx_t *ctx = arg;
170 nat64_db_bib_entry_t *bibe;
171 nat64_db_st_entry_t *ste;
172 ip46_address_t saddr, daddr;
173 u32 sw_if_index, fib_index;
174 udp_header_t *udp = ip6_next_header (ip6);
175 u8 proto = ip6->protocol;
176 u16 sport = udp->src_port;
177 u16 dport = udp->dst_port;
178 nat64_db_t *db = &nm->db[ctx->thread_index];
180 sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
182 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
184 saddr.as_u64[0] = ip6->src_address.as_u64[0];
185 saddr.as_u64[1] = ip6->src_address.as_u64[1];
186 daddr.as_u64[0] = ip6->dst_address.as_u64[0];
187 daddr.as_u64[1] = ip6->dst_address.as_u64[1];
190 nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
195 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
201 bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
206 ip4_address_t out_addr;
207 if (nat64_alloc_out_addr_and_port
208 (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
209 &out_port, ctx->thread_index))
213 nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
214 sport, out_port, fib_index, proto, 0);
219 nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
221 nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
227 ip4->src_address.as_u32 = bibe->out_addr.as_u32;
228 udp->src_port = bibe->out_port;
230 ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
232 if (proto == IP_PROTOCOL_TCP)
236 tcp_header_t *tcp = ip6_next_header (ip6);
238 nat64_tcp_session_set_state (ste, tcp, 1);
239 checksum = &tcp->checksum;
240 csum = ip_csum_sub_even (*checksum, sport);
241 csum = ip_csum_add_even (csum, udp->src_port);
242 mss_clamping (nm->sm, tcp, &csum);
243 *checksum = ip_csum_fold (csum);
246 nat64_session_reset_timeout (ste, ctx->vm);
252 nat64_in2out_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
254 nat64_main_t *nm = &nat64_main;
255 nat64_in2out_set_ctx_t *ctx = arg;
256 nat64_db_bib_entry_t *bibe;
257 nat64_db_st_entry_t *ste;
258 ip46_address_t saddr, daddr;
259 u32 sw_if_index, fib_index;
260 icmp46_header_t *icmp = ip6_next_header (ip6);
261 nat64_db_t *db = &nm->db[ctx->thread_index];
263 sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
265 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
267 saddr.as_u64[0] = ip6->src_address.as_u64[0];
268 saddr.as_u64[1] = ip6->src_address.as_u64[1];
269 daddr.as_u64[0] = ip6->dst_address.as_u64[0];
270 daddr.as_u64[1] = ip6->dst_address.as_u64[1];
272 if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
274 u16 in_id = ((u16 *) (icmp))[2];
276 nat64_db_st_entry_find (db, &saddr, &daddr, in_id, 0,
277 IP_PROTOCOL_ICMP, fib_index, 1);
282 nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
290 nat64_db_bib_entry_find (db, &saddr, in_id,
291 IP_PROTOCOL_ICMP, fib_index, 1);
296 ip4_address_t out_addr;
297 if (nat64_alloc_out_addr_and_port
298 (fib_index, SNAT_PROTOCOL_ICMP, &out_addr, &out_id,
303 nat64_db_bib_entry_create (db, &ip6->src_address,
304 &out_addr, in_id, out_id,
305 fib_index, IP_PROTOCOL_ICMP, 0);
310 nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
312 nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
318 nat64_session_reset_timeout (ste, ctx->vm);
320 ip4->src_address.as_u32 = bibe->out_addr.as_u32;
321 ((u16 *) (icmp))[2] = bibe->out_port;
323 ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
327 if (!vec_len (nm->addr_pool))
330 ip4->src_address.as_u32 = nm->addr_pool[0].addr.as_u32;
331 nat64_extract_ip4 (&ip6->dst_address, &ip4->dst_address, fib_index);
338 nat64_in2out_inner_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
341 nat64_main_t *nm = &nat64_main;
342 nat64_in2out_set_ctx_t *ctx = arg;
343 nat64_db_st_entry_t *ste;
344 nat64_db_bib_entry_t *bibe;
345 ip46_address_t saddr, daddr;
346 u32 sw_if_index, fib_index;
347 u8 proto = ip6->protocol;
348 nat64_db_t *db = &nm->db[ctx->thread_index];
350 sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
352 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
354 saddr.as_u64[0] = ip6->src_address.as_u64[0];
355 saddr.as_u64[1] = ip6->src_address.as_u64[1];
356 daddr.as_u64[0] = ip6->dst_address.as_u64[0];
357 daddr.as_u64[1] = ip6->dst_address.as_u64[1];
359 if (proto == IP_PROTOCOL_ICMP6)
361 icmp46_header_t *icmp = ip6_next_header (ip6);
362 u16 in_id = ((u16 *) (icmp))[2];
363 proto = IP_PROTOCOL_ICMP;
366 (icmp->type == ICMP4_echo_request
367 || icmp->type == ICMP4_echo_reply))
371 nat64_db_st_entry_find (db, &daddr, &saddr, in_id, 0, proto,
376 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
380 ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
381 ((u16 *) (icmp))[2] = bibe->out_port;
382 ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
386 udp_header_t *udp = ip6_next_header (ip6);
387 tcp_header_t *tcp = ip6_next_header (ip6);
391 u16 sport = udp->src_port;
392 u16 dport = udp->dst_port;
395 nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
400 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
404 ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
405 udp->dst_port = bibe->out_port;
406 ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
408 if (proto == IP_PROTOCOL_TCP)
409 checksum = &tcp->checksum;
411 checksum = &udp->checksum;
412 csum = ip_csum_sub_even (*checksum, dport);
413 csum = ip_csum_add_even (csum, udp->dst_port);
414 *checksum = ip_csum_fold (csum);
420 typedef struct unk_proto_st_walk_ctx_t_
422 ip6_address_t src_addr;
423 ip6_address_t dst_addr;
424 ip4_address_t out_addr;
428 } unk_proto_st_walk_ctx_t;
431 unk_proto_st_walk (nat64_db_st_entry_t * ste, void *arg)
433 nat64_main_t *nm = &nat64_main;
434 unk_proto_st_walk_ctx_t *ctx = arg;
435 nat64_db_bib_entry_t *bibe;
436 ip46_address_t saddr, daddr;
437 nat64_db_t *db = &nm->db[ctx->thread_index];
439 if (ip46_address_is_equal (&ste->in_r_addr, &ctx->dst_addr))
441 bibe = nat64_db_bib_entry_by_index (db, ste->proto, ste->bibe_index);
445 if (ip46_address_is_equal (&bibe->in_addr, &ctx->src_addr)
446 && bibe->fib_index == ctx->fib_index)
448 memset (&saddr, 0, sizeof (saddr));
449 saddr.ip4.as_u32 = bibe->out_addr.as_u32;
450 memset (&daddr, 0, sizeof (daddr));
451 nat64_extract_ip4 (&ctx->dst_addr, &daddr.ip4, ctx->fib_index);
453 if (nat64_db_st_entry_find
454 (db, &daddr, &saddr, 0, 0, ctx->proto, ctx->fib_index, 0))
457 ctx->out_addr.as_u32 = bibe->out_addr.as_u32;
466 nat64_in2out_unk_proto_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
469 nat64_main_t *nm = &nat64_main;
470 nat64_in2out_set_ctx_t *s_ctx = arg;
471 nat64_db_bib_entry_t *bibe;
472 nat64_db_st_entry_t *ste;
473 ip46_address_t saddr, daddr, addr;
474 u32 sw_if_index, fib_index;
475 u8 proto = ip6->protocol;
477 nat64_db_t *db = &nm->db[s_ctx->thread_index];
479 sw_if_index = vnet_buffer (s_ctx->b)->sw_if_index[VLIB_RX];
481 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
483 saddr.as_u64[0] = ip6->src_address.as_u64[0];
484 saddr.as_u64[1] = ip6->src_address.as_u64[1];
485 daddr.as_u64[0] = ip6->dst_address.as_u64[0];
486 daddr.as_u64[1] = ip6->dst_address.as_u64[1];
489 nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
493 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
499 bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
503 /* Choose same out address as for TCP/UDP session to same dst */
504 unk_proto_st_walk_ctx_t ctx = {
505 .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
506 .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
507 .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
508 .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
509 .out_addr.as_u32 = 0,
510 .fib_index = fib_index,
512 .thread_index = s_ctx->thread_index,
515 nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
517 if (!ctx.out_addr.as_u32)
518 nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
520 /* Verify if out address is not already in use for protocol */
521 memset (&addr, 0, sizeof (addr));
522 addr.ip4.as_u32 = ctx.out_addr.as_u32;
523 if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
524 ctx.out_addr.as_u32 = 0;
526 if (!ctx.out_addr.as_u32)
528 for (i = 0; i < vec_len (nm->addr_pool); i++)
530 addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
531 if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
536 if (!ctx.out_addr.as_u32)
540 nat64_db_bib_entry_create (db, &ip6->src_address,
541 &ctx.out_addr, 0, 0, fib_index, proto,
547 nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
549 nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
554 nat64_session_reset_timeout (ste, s_ctx->vm);
556 ip4->src_address.as_u32 = bibe->out_addr.as_u32;
557 ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
565 nat64_in2out_tcp_udp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
566 ip6_header_t * ip6, u32 thread_index)
568 nat64_main_t *nm = &nat64_main;
569 nat64_db_bib_entry_t *bibe;
570 nat64_db_st_entry_t *ste;
571 ip46_address_t saddr, daddr;
572 u32 sw_if_index, fib_index;
573 udp_header_t *udp = ip6_next_header (ip6);
574 tcp_header_t *tcp = ip6_next_header (ip6);
575 u8 proto = ip6->protocol;
576 u16 sport = udp->src_port;
577 u16 dport = udp->dst_port;
580 nat64_db_t *db = &nm->db[thread_index];
582 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
584 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
586 saddr.as_u64[0] = ip6->src_address.as_u64[0];
587 saddr.as_u64[1] = ip6->src_address.as_u64[1];
588 daddr.as_u64[0] = ip6->dst_address.as_u64[0];
589 daddr.as_u64[1] = ip6->dst_address.as_u64[1];
591 if (proto == IP_PROTOCOL_UDP)
592 checksum = &udp->checksum;
594 checksum = &tcp->checksum;
596 csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
597 csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
598 csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
599 csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
600 csum = ip_csum_sub_even (csum, sport);
601 csum = ip_csum_sub_even (csum, dport);
604 nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
609 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
615 bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
620 ip4_address_t out_addr;
621 if (nat64_alloc_out_addr_and_port
622 (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
623 &out_port, thread_index))
627 nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
628 sport, out_port, fib_index, proto, 0);
633 nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
635 nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
641 if (proto == IP_PROTOCOL_TCP)
642 nat64_tcp_session_set_state (ste, tcp, 1);
644 nat64_session_reset_timeout (ste, vm);
646 sport = udp->src_port = bibe->out_port;
647 nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
649 memset (&daddr, 0, sizeof (daddr));
650 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
654 vec_foreach (db, nm->db)
656 bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, 0, 0);
666 ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
667 ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
668 udp->dst_port = bibe->in_port;
670 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
671 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
672 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
673 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
674 csum = ip_csum_add_even (csum, udp->src_port);
675 csum = ip_csum_add_even (csum, udp->dst_port);
676 *checksum = ip_csum_fold (csum);
682 nat64_in2out_icmp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
683 ip6_header_t * ip6, u32 thread_index)
685 nat64_main_t *nm = &nat64_main;
686 nat64_db_bib_entry_t *bibe;
687 nat64_db_st_entry_t *ste;
688 icmp46_header_t *icmp = ip6_next_header (ip6);
689 ip6_header_t *inner_ip6;
690 ip46_address_t saddr, daddr;
691 u32 sw_if_index, fib_index;
695 u16 *checksum, sport, dport;
697 nat64_db_t *db = &nm->db[thread_index];
699 if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
702 inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
704 proto = inner_ip6->protocol;
706 if (proto == IP_PROTOCOL_ICMP6)
709 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
711 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
713 saddr.as_u64[0] = inner_ip6->src_address.as_u64[0];
714 saddr.as_u64[1] = inner_ip6->src_address.as_u64[1];
715 daddr.as_u64[0] = inner_ip6->dst_address.as_u64[0];
716 daddr.as_u64[1] = inner_ip6->dst_address.as_u64[1];
718 udp = ip6_next_header (inner_ip6);
719 tcp = ip6_next_header (inner_ip6);
721 sport = udp->src_port;
722 dport = udp->dst_port;
724 if (proto == IP_PROTOCOL_UDP)
725 checksum = &udp->checksum;
727 checksum = &tcp->checksum;
729 csum = ip_csum_sub_even (*checksum, inner_ip6->src_address.as_u64[0]);
730 csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
731 csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
732 csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
733 csum = ip_csum_sub_even (csum, sport);
734 csum = ip_csum_sub_even (csum, dport);
737 nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
742 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
746 dport = udp->dst_port = bibe->out_port;
747 nat64_compose_ip6 (&inner_ip6->dst_address, &bibe->out_addr, fib_index);
749 memset (&saddr, 0, sizeof (saddr));
750 memset (&daddr, 0, sizeof (daddr));
751 saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
752 daddr.ip4.as_u32 = bibe->out_addr.as_u32;
756 vec_foreach (db, nm->db)
758 ste = nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
769 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
773 inner_ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
774 inner_ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
775 udp->src_port = bibe->in_port;
777 csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
778 csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
779 csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
780 csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
781 csum = ip_csum_add_even (csum, udp->src_port);
782 csum = ip_csum_add_even (csum, udp->dst_port);
783 *checksum = ip_csum_fold (csum);
785 if (!vec_len (nm->addr_pool))
788 nat64_compose_ip6 (&ip6->src_address, &nm->addr_pool[0].addr, fib_index);
789 ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
790 ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
793 csum = ip_csum_with_carry (0, ip6->payload_length);
794 csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
795 csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
796 csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
797 csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
798 csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
800 ip_incremental_checksum (csum, icmp,
801 clib_net_to_host_u16 (ip6->payload_length));
802 icmp->checksum = ~ip_csum_fold (csum);
808 nat64_in2out_unk_proto_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
809 ip6_header_t * ip6, u32 thread_index)
811 nat64_main_t *nm = &nat64_main;
812 nat64_db_bib_entry_t *bibe;
813 nat64_db_st_entry_t *ste;
814 ip46_address_t saddr, daddr, addr;
815 u32 sw_if_index, fib_index;
816 u8 proto = ip6->protocol;
818 nat64_db_t *db = &nm->db[thread_index];
820 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
822 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
824 saddr.as_u64[0] = ip6->src_address.as_u64[0];
825 saddr.as_u64[1] = ip6->src_address.as_u64[1];
826 daddr.as_u64[0] = ip6->dst_address.as_u64[0];
827 daddr.as_u64[1] = ip6->dst_address.as_u64[1];
830 nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
834 bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
840 bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
844 /* Choose same out address as for TCP/UDP session to same dst */
845 unk_proto_st_walk_ctx_t ctx = {
846 .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
847 .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
848 .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
849 .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
850 .out_addr.as_u32 = 0,
851 .fib_index = fib_index,
853 .thread_index = thread_index,
856 nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
858 if (!ctx.out_addr.as_u32)
859 nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
861 /* Verify if out address is not already in use for protocol */
862 memset (&addr, 0, sizeof (addr));
863 addr.ip4.as_u32 = ctx.out_addr.as_u32;
864 if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
865 ctx.out_addr.as_u32 = 0;
867 if (!ctx.out_addr.as_u32)
869 for (i = 0; i < vec_len (nm->addr_pool); i++)
871 addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
872 if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
877 if (!ctx.out_addr.as_u32)
881 nat64_db_bib_entry_create (db, &ip6->src_address,
882 &ctx.out_addr, 0, 0, fib_index, proto,
888 nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
890 nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
895 nat64_session_reset_timeout (ste, vm);
897 nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
899 memset (&daddr, 0, sizeof (daddr));
900 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
904 vec_foreach (db, nm->db)
906 bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, 0, 0);
916 ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
917 ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
923 nat64_in2out_node_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
924 vlib_frame_t * frame, u8 is_slow_path)
926 u32 n_left_from, *from, *to_next;
927 nat64_in2out_next_t next_index;
928 u32 pkts_processed = 0;
929 u32 stats_node_index;
930 u32 thread_index = vm->thread_index;
933 is_slow_path ? nat64_in2out_slowpath_node.index : nat64_in2out_node.index;
935 from = vlib_frame_vector_args (frame);
936 n_left_from = frame->n_vectors;
937 next_index = node->cached_next_index;
939 while (n_left_from > 0)
943 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
945 while (n_left_from > 0 && n_left_to_next > 0)
951 u16 l4_offset0, frag_offset0;
954 nat64_in2out_set_ctx_t ctx0;
957 /* speculatively enqueue b0 to the current next frame */
965 b0 = vlib_get_buffer (vm, bi0);
966 ip60 = vlib_buffer_get_current (b0);
970 ctx0.thread_index = thread_index;
972 next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
976 (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
979 next0 = NAT64_IN2OUT_NEXT_DROP;
980 b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
984 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
986 if (nat64_not_translate (sw_if_index0, ip60->dst_address))
988 next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
992 proto0 = ip_proto_to_snat_proto (l4_protocol0);
996 if (PREDICT_TRUE (proto0 == ~0))
998 if (is_hairpinning (&ip60->dst_address))
1000 next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1001 if (nat64_in2out_unk_proto_hairpinning
1002 (vm, b0, ip60, thread_index))
1004 next0 = NAT64_IN2OUT_NEXT_DROP;
1006 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1011 if (ip6_to_ip4 (b0, nat64_in2out_unk_proto_set_cb, &ctx0))
1013 next0 = NAT64_IN2OUT_NEXT_DROP;
1015 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1023 if (PREDICT_FALSE (proto0 == ~0))
1025 next0 = NAT64_IN2OUT_NEXT_SLOWPATH;
1031 (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION))
1033 next0 = NAT64_IN2OUT_NEXT_REASS;
1037 if (proto0 == SNAT_PROTOCOL_ICMP)
1039 if (is_hairpinning (&ip60->dst_address))
1041 next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1042 if (nat64_in2out_icmp_hairpinning
1043 (vm, b0, ip60, thread_index))
1045 next0 = NAT64_IN2OUT_NEXT_DROP;
1047 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1053 (b0, nat64_in2out_icmp_set_cb, &ctx0,
1054 nat64_in2out_inner_icmp_set_cb, &ctx0))
1056 next0 = NAT64_IN2OUT_NEXT_DROP;
1057 b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1061 else if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
1063 if (is_hairpinning (&ip60->dst_address))
1065 next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1066 if (nat64_in2out_tcp_udp_hairpinning
1067 (vm, b0, ip60, thread_index))
1069 next0 = NAT64_IN2OUT_NEXT_DROP;
1071 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1076 if (ip6_to_ip4_tcp_udp
1077 (b0, nat64_in2out_tcp_udp_set_cb, &ctx0, 0))
1079 next0 = NAT64_IN2OUT_NEXT_DROP;
1080 b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1086 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
1087 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1089 nat64_in2out_trace_t *t =
1090 vlib_add_trace (vm, node, b0, sizeof (*t));
1091 t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1092 t->next_index = next0;
1093 t->is_slow_path = is_slow_path;
1096 pkts_processed += next0 != NAT64_IN2OUT_NEXT_DROP;
1098 /* verify speculative enqueue, maybe switch current next frame */
1099 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1100 n_left_to_next, bi0, next0);
1102 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1104 vlib_node_increment_counter (vm, stats_node_index,
1105 NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1107 return frame->n_vectors;
1111 nat64_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1112 vlib_frame_t * frame)
1114 return nat64_in2out_node_fn_inline (vm, node, frame, 0);
1118 VLIB_REGISTER_NODE (nat64_in2out_node) = {
1119 .function = nat64_in2out_node_fn,
1120 .name = "nat64-in2out",
1121 .vector_size = sizeof (u32),
1122 .format_trace = format_nat64_in2out_trace,
1123 .type = VLIB_NODE_TYPE_INTERNAL,
1124 .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1125 .error_strings = nat64_in2out_error_strings,
1126 .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1127 /* edit / add dispositions here */
1129 [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1130 [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1131 [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1132 [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1133 [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1138 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_node, nat64_in2out_node_fn);
1141 nat64_in2out_slowpath_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1142 vlib_frame_t * frame)
1144 return nat64_in2out_node_fn_inline (vm, node, frame, 1);
1148 VLIB_REGISTER_NODE (nat64_in2out_slowpath_node) = {
1149 .function = nat64_in2out_slowpath_node_fn,
1150 .name = "nat64-in2out-slowpath",
1151 .vector_size = sizeof (u32),
1152 .format_trace = format_nat64_in2out_trace,
1153 .type = VLIB_NODE_TYPE_INTERNAL,
1154 .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1155 .error_strings = nat64_in2out_error_strings,
1156 .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1157 /* edit / add dispositions here */
1159 [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1160 [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1161 [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1162 [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1163 [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1168 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_slowpath_node,
1169 nat64_in2out_slowpath_node_fn);
1171 typedef struct nat64_in2out_frag_set_ctx_t_
1179 } nat64_in2out_frag_set_ctx_t;
1182 nat64_in2out_frag_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
1184 nat64_main_t *nm = &nat64_main;
1185 nat64_in2out_frag_set_ctx_t *ctx = arg;
1186 nat64_db_st_entry_t *ste;
1187 nat64_db_bib_entry_t *bibe;
1189 nat64_db_t *db = &nm->db[ctx->thread_index];
1191 ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1195 bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1199 nat64_session_reset_timeout (ste, ctx->vm);
1201 if (ctx->first_frag)
1203 udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1205 if (ctx->proto == IP_PROTOCOL_TCP)
1209 tcp_header_t *tcp = (tcp_header_t *) udp;
1211 nat64_tcp_session_set_state (ste, tcp, 1);
1212 checksum = &tcp->checksum;
1213 csum = ip_csum_sub_even (*checksum, tcp->src_port);
1214 csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[0]);
1215 csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1216 csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1217 csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1218 csum = ip_csum_add_even (csum, bibe->out_port);
1219 csum = ip_csum_add_even (csum, bibe->out_addr.as_u32);
1220 csum = ip_csum_add_even (csum, ste->out_r_addr.as_u32);
1221 *checksum = ip_csum_fold (csum);
1224 udp->src_port = bibe->out_port;
1227 ip4->src_address.as_u32 = bibe->out_addr.as_u32;
1228 ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
1234 nat64_in2out_frag_hairpinning (vlib_buffer_t * b, ip6_header_t * ip6,
1235 nat64_in2out_frag_set_ctx_t * ctx)
1237 nat64_main_t *nm = &nat64_main;
1238 nat64_db_st_entry_t *ste;
1239 nat64_db_bib_entry_t *bibe;
1240 udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1241 tcp_header_t *tcp = (tcp_header_t *) udp;
1242 u16 sport = udp->src_port;
1243 u16 dport = udp->dst_port;
1246 ip46_address_t daddr;
1247 nat64_db_t *db = &nm->db[ctx->thread_index];
1249 if (ctx->first_frag)
1251 if (ctx->proto == IP_PROTOCOL_UDP)
1252 checksum = &udp->checksum;
1254 checksum = &tcp->checksum;
1256 csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
1257 csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1258 csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1259 csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1260 csum = ip_csum_sub_even (csum, sport);
1261 csum = ip_csum_sub_even (csum, dport);
1264 ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1268 bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1272 if (ctx->proto == IP_PROTOCOL_TCP)
1273 nat64_tcp_session_set_state (ste, tcp, 1);
1275 nat64_session_reset_timeout (ste, ctx->vm);
1277 sport = bibe->out_port;
1278 dport = ste->r_port;
1280 nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, bibe->fib_index);
1282 memset (&daddr, 0, sizeof (daddr));
1283 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
1287 vec_foreach (db, nm->db)
1289 bibe = nat64_db_bib_entry_find (db, &daddr, dport, ctx->proto, 0, 0);
1299 ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
1300 ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
1302 if (ctx->first_frag)
1304 udp->dst_port = bibe->in_port;
1305 udp->src_port = sport;
1306 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
1307 csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
1308 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
1309 csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
1310 csum = ip_csum_add_even (csum, udp->src_port);
1311 csum = ip_csum_add_even (csum, udp->dst_port);
1312 *checksum = ip_csum_fold (csum);
/**
 * @brief NAT64 in2out fragmentation node function.
 *
 * Per-frame worker for IPv6->IPv4 translation of fragmented packets.
 * The first fragment of a datagram carries the L4 header, so it is used
 * to find or create the NAT64 BIB/session state and is translated in
 * place; non-first fragments that arrive before the session exists are
 * cached on the reassembly context and looped back through this node
 * once the first fragment has established the session.
 *
 * NOTE(review): this listing is elided -- interior source lines are
 * missing between the visible ones, so the comments below describe only
 * what the visible code demonstrates.
 */
1319 nat64_in2out_reass_node_fn (vlib_main_t * vm,
1320 			    vlib_node_runtime_t * node, vlib_frame_t * frame)
1322 u32 n_left_from, *from, *to_next;
1323 nat64_in2out_next_t next_index;
1324 u32 pkts_processed = 0;
/* vectors of buffer indices: to drop, and to re-feed into this node */
1325 u32 *fragments_to_drop = 0;
1326 u32 *fragments_to_loopback = 0;
1327 nat64_main_t *nm = &nat64_main;
1328 u32 thread_index = vm->thread_index;
/* standard vlib dual-loop frame setup */
1330 from = vlib_frame_vector_args (frame);
1331 n_left_from = frame->n_vectors;
1332 next_index = node->cached_next_index;
/* outer loop: one iteration per next-frame refill */
1334 while (n_left_from > 0)
1338 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1340 while (n_left_from > 0 && n_left_to_next > 0)
1347 u16 l4_offset0, frag_offset0;
1349 nat_reass_ip6_t *reass0;
1350 ip6_frag_hdr_t *frag0;
1351 nat64_db_bib_entry_t *bibe0;
1352 nat64_db_st_entry_t *ste0;
1354 snat_protocol_t proto0;
1355 u32 sw_if_index0, fib_index0;
1356 ip46_address_t saddr0, daddr0;
1357 nat64_in2out_frag_set_ctx_t ctx0;
/* per-worker NAT64 session database */
1358 nat64_db_t *db = &nm->db[thread_index];
1360 /* speculatively enqueue b0 to the current next frame */
1366 n_left_to_next -= 1;
1368 b0 = vlib_get_buffer (vm, bi0);
/* default disposition; overridden below on error/hairpinning */
1369 next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1371 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1373 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
1376 ctx0.thread_index = thread_index;
/* global "drop all fragments" configuration knob */
1378 if (PREDICT_FALSE (nat_reass_is_drop_frag (1)))
1380 next0 = NAT64_IN2OUT_NEXT_DROP;
1381 b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
/* parse IPv6 + extension headers to locate L4 and fragment headers
   (parse call partially elided in this listing) */
1385 ip60 = (ip6_header_t *) vlib_buffer_get_current (b0);
1389 (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
1392 next0 = NAT64_IN2OUT_NEXT_DROP;
1393 b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
/* only TCP and UDP fragments are handled by this node */
1398 (!(l4_protocol0 == IP_PROTOCOL_TCP
1399 || l4_protocol0 == IP_PROTOCOL_UDP)))
1401 next0 = NAT64_IN2OUT_NEXT_DROP;
1402 b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1406 udp0 = (udp_header_t *) u8_ptr_add (ip60, l4_offset0);
1407 frag0 = (ip6_frag_hdr_t *) u8_ptr_add (ip60, frag_offset0);
1408 proto0 = ip_proto_to_snat_proto (l4_protocol0);
/* find or create the reassembly context keyed (visibly) by the
   source address and the fragment-header identification */
1410 reass0 = nat_ip6_reass_find_or_create (ip60->src_address,
1412 frag0->identification,
1414 1, &fragments_to_drop);
1416 if (PREDICT_FALSE (!reass0))
1418 next0 = NAT64_IN2OUT_NEXT_DROP;
1419 b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_REASS];
/* non-zero fragment offset => not the first fragment */
1423 if (PREDICT_TRUE (ip6_frag_hdr_offset (frag0)))
1425 ctx0.first_frag = 0;
/* session unknown yet (~0 sentinel): cache this fragment until the
   first fragment creates the session */
1426 if (PREDICT_FALSE (reass0->sess_index == (u32) ~ 0))
1428 if (nat_ip6_reass_add_fragment
1429 (reass0, bi0, &fragments_to_drop))
1431 b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_FRAG];
1432 next0 = NAT64_IN2OUT_NEXT_DROP;
/* first fragment: look up or build the NAT64 session state */
1441 ctx0.first_frag = 1;
1443 saddr0.as_u64[0] = ip60->src_address.as_u64[0];
1444 saddr0.as_u64[1] = ip60->src_address.as_u64[1];
1445 daddr0.as_u64[0] = ip60->dst_address.as_u64[0];
1446 daddr0.as_u64[1] = ip60->dst_address.as_u64[1];
1449 nat64_db_st_entry_find (db, &saddr0, &daddr0,
1450 udp0->src_port, udp0->dst_port,
1451 l4_protocol0, fib_index0, 1);
1455 nat64_db_bib_entry_find (db, &saddr0, udp0->src_port,
1456 l4_protocol0, fib_index0, 1);
/* no BIB entry: allocate an outside address/port pair */
1460 ip4_address_t out_addr0;
1461 if (nat64_alloc_out_addr_and_port
1462 (fib_index0, proto0, &out_addr0, &out_port0,
1465 next0 = NAT64_IN2OUT_NEXT_DROP;
1467 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1472 nat64_db_bib_entry_create (db,
1474 &out_addr0, udp0->src_port,
1475 out_port0, fib_index0,
1479 next0 = NAT64_IN2OUT_NEXT_DROP;
1481 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
/* derive the IPv4 destination from the NAT64-prefixed IPv6 dest */
1485 nat64_extract_ip4 (&ip60->dst_address, &daddr0.ip4,
1488 nat64_db_st_entry_create (db, bibe0,
1489 &ip60->dst_address, &daddr0.ip4,
1493 next0 = NAT64_IN2OUT_NEXT_DROP;
1495 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
/* publish the session on the reassembly context and release any
   cached fragments back through this node */
1499 reass0->sess_index = nat64_db_st_entry_get_index (db, ste0);
1501 nat_ip6_reass_get_frags (reass0, &fragments_to_loopback);
1504 ctx0.sess_index = reass0->sess_index;
1505 ctx0.proto = l4_protocol0;
1507 ctx0.l4_offset = l4_offset0;
/* hairpinned traffic stays in the IPv6 path */
1509 if (PREDICT_FALSE (is_hairpinning (&ip60->dst_address)))
1511 next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1512 if (nat64_in2out_frag_hairpinning (b0, ip60, &ctx0))
1514 next0 = NAT64_IN2OUT_NEXT_DROP;
1515 b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
/* in-place IPv6->IPv4 translation via the fragment callback */
1521 if (ip6_to_ip4_fragmented (b0, nat64_in2out_frag_set_cb, &ctx0))
1523 next0 = NAT64_IN2OUT_NEXT_DROP;
1524 b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
/* packet tracing */
1531 ((node->flags & VLIB_NODE_FLAG_TRACE)
1532 && (b0->flags & VLIB_BUFFER_IS_TRACED))
1534 nat64_in2out_reass_trace_t *t =
1535 vlib_add_trace (vm, node, b0, sizeof (*t));
1536 t->cached = cached0;
1537 t->sw_if_index = sw_if_index0;
1538 t->next_index = next0;
1548 pkts_processed += next0 != NAT64_IN2OUT_NEXT_DROP;
1550 /* verify speculative enqueue, maybe switch current next frame */
1551 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1552 to_next, n_left_to_next,
/* input frame drained: re-feed cached fragments into this node's
   input, at most VLIB_FRAME_SIZE at a time */
1556 if (n_left_from == 0 && vec_len (fragments_to_loopback))
1558 from = vlib_frame_vector_args (frame);
1559 u32 len = vec_len (fragments_to_loopback);
1560 if (len <= VLIB_FRAME_SIZE)
1562 clib_memcpy (from, fragments_to_loopback,
1563 sizeof (u32) * len);
1565 vec_reset_length (fragments_to_loopback);
/* more cached fragments than one frame: take the tail, keep the rest */
1570 fragments_to_loopback + (len -
1572 sizeof (u32) * VLIB_FRAME_SIZE);
1573 n_left_from = VLIB_FRAME_SIZE;
1574 _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
1579 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* bump counters, flush accumulated drops, free work vectors */
1582 vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
1583 NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1586 nat_send_all_to_node (vm, fragments_to_drop, node,
1587 &node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT],
1588 NAT64_IN2OUT_NEXT_DROP);
1590 vec_free (fragments_to_drop);
1591 vec_free (fragments_to_loopback);
1592 return frame->n_vectors;
/* Graph-node registration for the NAT64 in2out fragment node:
   wires nat64_in2out_reass_node_fn into the vlib graph with the
   shared in2out error strings and next-node dispositions. */
1596 VLIB_REGISTER_NODE (nat64_in2out_reass_node) = {
1597 .function = nat64_in2out_reass_node_fn,
1598 .name = "nat64-in2out-reass",
1599 .vector_size = sizeof (u32),
1600 .format_trace = format_nat64_in2out_reass_trace,
1601 .type = VLIB_NODE_TYPE_INTERNAL,
1602 .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1603 .error_strings = nat64_in2out_error_strings,
1604 .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1605 /* edit / add dispositions here */
1607 [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1608 [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1609 [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1610 [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
/* loopbacked cached fragments re-enter this very node */
1611 [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
/* generate per-CPU-architecture variants of the node function */
1616 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_reass_node,
1617 nat64_in2out_reass_node_fn);
/* Errors specific to the in2out worker hand-off node (X-macro list). */
1619 #define foreach_nat64_in2out_handoff_error \
1620 _(CONGESTION_DROP, "congestion drop")
/* Expand the list into enum values NAT64_IN2OUT_HANDOFF_ERROR_* */
1624 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
1625 foreach_nat64_in2out_handoff_error
1627 NAT64_IN2OUT_HANDOFF_N_ERROR,
1628 } nat64_in2out_handoff_error_t;
/* Matching human-readable counter strings, same X-macro expansion */
1630 static char *nat64_in2out_handoff_error_strings[] = {
1631 #define _(sym,string) string,
1632 foreach_nat64_in2out_handoff_error
/* Per-packet trace payload for the hand-off node: the worker thread
   the packet was handed to. */
1638 u32 next_worker_index;
1639 } nat64_in2out_handoff_trace_t;
/**
 * @brief Format a hand-off trace record for "show trace" output.
 * Standard vlib trace formatter: consumes (vm, node, trace) varargs
 * and appends the chosen worker index to the format string s.
 */
1642 format_nat64_in2out_handoff_trace (u8 * s, va_list * args)
1644 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1645 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1646 nat64_in2out_handoff_trace_t *t =
1647 va_arg (*args, nat64_in2out_handoff_trace_t *);
1650 format (s, "NAT64-IN2OUT-HANDOFF: next-worker %d", t->next_worker_index);
/**
 * @brief NAT64 in2out worker hand-off node function.
 *
 * For every buffer in the frame, picks the owning worker thread from
 * the IPv6 source address and enqueues the buffer to that worker's
 * in2out frame queue; buffers that cannot be enqueued are counted as
 * congestion drops.
 *
 * NOTE(review): listing is elided -- some interior lines (e.g. the
 * assignment of n_enq and loop-advance statements) are not visible.
 */
1656 nat64_in2out_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1657 vlib_frame_t * frame)
1659 nat64_main_t *nm = &nat64_main;
/* full-frame scratch arrays for buffer pointers and target threads */
1660 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1661 u32 n_enq, n_left_from, *from;
1662 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1665 from = vlib_frame_vector_args (frame);
1666 n_left_from = frame->n_vectors;
/* translate buffer indices to buffer pointers in one pass */
1667 vlib_get_buffers (vm, from, bufs, n_left_from);
1670 ti = thread_indices;
/* frame queue carrying packets to the chosen worker's in2out path */
1672 fq_index = nm->fq_in2out_index;
1674 while (n_left_from > 0)
1678 ip0 = vlib_buffer_get_current (b[0]);
/* worker selection is keyed on the IPv6 source address */
1679 ti[0] = nat64_get_worker_in2out (&ip0->src_address);
1682 ((node->flags & VLIB_NODE_FLAG_TRACE)
1683 && (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1685 nat64_in2out_handoff_trace_t *t =
1686 vlib_add_trace (vm, node, b[0], sizeof (*t));
1687 t->next_worker_index = ti[0];
/* hand the whole frame off to the per-thread frame queues */
1696 vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1697 frame->n_vectors, 1);
/* anything not enqueued was dropped due to congestion */
1699 if (n_enq < frame->n_vectors)
1700 vlib_node_increment_counter (vm, node->node_index,
1701 NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1702 frame->n_vectors - n_enq);
1703 return frame->n_vectors;
/* Graph-node registration for the NAT64 in2out hand-off node, using
   the hand-off-specific error strings and trace formatter. */
1707 VLIB_REGISTER_NODE (nat64_in2out_handoff_node) = {
1708 .function = nat64_in2out_handoff_node_fn,
1709 .name = "nat64-in2out-handoff",
1710 .vector_size = sizeof (u32),
1711 .format_trace = format_nat64_in2out_handoff_trace,
1712 .type = VLIB_NODE_TYPE_INTERNAL,
1713 .n_errors = ARRAY_LEN(nat64_in2out_handoff_error_strings),
1714 .error_strings = nat64_in2out_handoff_error_strings,
/* generate per-CPU-architecture variants of the node function */
1724 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_handoff_node,
1725 nat64_in2out_handoff_node_fn);
1728 * fd.io coding-style-patch-verification: ON
1731 * eval: (c-set-style "gnu")