2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * @brief NAT64 IPv6 to IPv4 translation (inside to outside network)
20 #include <nat/nat64.h>
21 #include <nat/nat_reass.h>
22 #include <nat/nat_inlines.h>
23 #include <vnet/ip/ip6_to_ip4.h>
24 #include <vnet/fib/fib_table.h>
31 } nat64_in2out_trace_t;
/* Format a NAT64 in2out packet-trace record: emits the node tag
 * (fast path vs. slow path), the RX sw_if_index and the chosen next index.
 * NOTE(review): interior lines of this chunk are missing; the full
 * format string tail is not visible here. */
34 format_nat64_in2out_trace (u8 * s, va_list * args)
36   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38   nat64_in2out_trace_t *t = va_arg (*args, nat64_in2out_trace_t *);
  /* slow-path records get a distinct tag so traces show which node ran */
41   tag = t->is_slow_path ? "NAT64-in2out-slowpath" : "NAT64-in2out";
44   format (s, "%s: sw_if_index %d, next index %d", tag, t->sw_if_index,
55 } nat64_in2out_reass_trace_t;
/* Format a trace record for the NAT64 in2out reassembly node.
 * Reports whether the fragment was cached (waiting for the first
 * fragment's session) or translated in place. */
58 format_nat64_in2out_reass_trace (u8 * s, va_list * args)
60   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
61   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
62   nat64_in2out_reass_trace_t *t =
63     va_arg (*args, nat64_in2out_reass_trace_t *);
66   format (s, "NAT64-in2out-reass: sw_if_index %d, next index %s, status %s",
67 	  t->sw_if_index, t->next_index,
68 	  t->cached ? "cached" : "translated");
73 vlib_node_registration_t nat64_in2out_node;
74 vlib_node_registration_t nat64_in2out_slowpath_node;
75 vlib_node_registration_t nat64_in2out_reass_node;
76 vlib_node_registration_t nat64_in2out_handoff_node;
/* Error/counter definitions for the NAT64 in2out nodes.  Each
 * _(SYM, "description") entry expands into a NAT64_IN2OUT_ERROR_##SYM
 * enum value and its display string (see the enum and the string table
 * below).  Several entries double as packet counters (TCP/UDP/ICMP/other,
 * fragment statistics), not only as drop reasons. */
78 #define foreach_nat64_in2out_error \
79 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
80 _(IN2OUT_PACKETS, "good in2out packets processed") \
81 _(NO_TRANSLATION, "no translation") \
82 _(UNKNOWN, "unknown") \
83 _(DROP_FRAGMENT, "drop fragment") \
84 _(MAX_REASS, "maximum reassemblies exceeded") \
85 _(MAX_FRAG, "maximum fragments per reassembly exceeded") \
86 _(TCP_PACKETS, "TCP packets") \
87 _(UDP_PACKETS, "UDP packets") \
88 _(ICMP_PACKETS, "ICMP packets") \
89 _(OTHER_PACKETS, "other protocol packets") \
90 _(FRAGMENTS, "fragments") \
91 _(CACHED_FRAGMENTS, "cached fragments") \
92 _(PROCESSED_FRAGMENTS, "processed fragments")
97 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym,
98 foreach_nat64_in2out_error
100 NAT64_IN2OUT_N_ERROR,
101 } nat64_in2out_error_t;
103 static char *nat64_in2out_error_strings[] = {
104 #define _(sym,string) string,
105 foreach_nat64_in2out_error
111 NAT64_IN2OUT_NEXT_IP4_LOOKUP,
112 NAT64_IN2OUT_NEXT_IP6_LOOKUP,
113 NAT64_IN2OUT_NEXT_DROP,
114 NAT64_IN2OUT_NEXT_SLOWPATH,
115 NAT64_IN2OUT_NEXT_REASS,
117 } nat64_in2out_next_t;
119 typedef struct nat64_in2out_set_ctx_t_
124 } nat64_in2out_set_ctx_t;
/* Decide whether a packet must bypass NAT64 translation because its IPv6
 * destination is an address assigned to the RX interface itself (i.e. the
 * packet is addressed to this box).  Walks the interface's IPv6 addresses
 * and compares each against ip6_addr.
 * NOTE(review): return statements are on lines not visible in this chunk;
 * presumably returns non-zero on a match — confirm against full source. */
127 nat64_not_translate (u32 sw_if_index, ip6_address_t ip6_addr)
130   ip6_main_t *im6 = &ip6_main;
131   ip_lookup_main_t *lm6 = &im6->lookup_main;
132   ip_interface_address_t *ia = 0;
135   foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
137     addr = ip_interface_address_get_address (lm6, ia);
138     if (0 == ip6_address_compare (addr, &ip6_addr))
147 * @brief Check whether is a hairpinning.
149 * If the destination IP address of the packet is an IPv4 address assigned to
150 * the NAT64 itself, then the packet is a hairpin packet.
152  * @param dst_addr Destination address of the packet.
154 * @returns 1 if hairpinning, otherwise 0.
156 static_always_inline int
157 is_hairpinning (ip6_address_t * dst_addr)
159   nat64_main_t *nm = &nat64_main;
  /* The embedded IPv4 address lives in the last 32 bits of the
   * NAT64 prefix-mapped IPv6 destination (as_u32[3]); compare it
   * against every address in the NAT64 output address pool. */
162   for (i = 0; i < vec_len (nm->addr_pool); i++)
164       if (nm->addr_pool[i].addr.as_u32 == dst_addr->as_u32[3])
/* ip6_to_ip4 translation callback for TCP/UDP: rewrites the outgoing IPv4
 * header addresses and the L4 source port from the NAT64 BIB/session state,
 * creating BIB and session-table entries on first contact.
 * NOTE(review): interior lines (error returns, st_entry_create arguments)
 * are missing from this chunk. */
172 nat64_in2out_tcp_udp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
175   nat64_main_t *nm = &nat64_main;
176   nat64_in2out_set_ctx_t *ctx = arg;
177   nat64_db_bib_entry_t *bibe;
178   nat64_db_st_entry_t *ste;
179   ip46_address_t saddr, daddr;
180   u32 sw_if_index, fib_index;
181   udp_header_t *udp = ip6_next_header (ip6);
182   u8 proto = ip6->protocol;
183   u16 sport = udp->src_port;
184   u16 dport = udp->dst_port;
185   nat64_db_t *db = &nm->db[ctx->thread_index];
187   sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
189     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
191   saddr.as_u64[0] = ip6->src_address.as_u64[0];
192   saddr.as_u64[1] = ip6->src_address.as_u64[1];
193   daddr.as_u64[0] = ip6->dst_address.as_u64[0];
194   daddr.as_u64[1] = ip6->dst_address.as_u64[1];
  /* look up an existing session (in2out direction) for this 5-tuple */
197     nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
202       bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
  /* no session yet: find or create the BIB entry, allocating an
   * outside address and port from the pool when needed */
208       bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
213 	  ip4_address_t out_addr;
214 	  if (nat64_alloc_out_addr_and_port
215 	      (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
216 	       &out_port, ctx->thread_index))
220 	    nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
221 				       sport, out_port, fib_index, proto, 0);
226       nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
228 	nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
  /* rewrite the IPv4 header and source port from BIB/session state */
234   ip4->src_address.as_u32 = bibe->out_addr.as_u32;
235   udp->src_port = bibe->out_port;
237   ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
239   if (proto == IP_PROTOCOL_TCP)
243       tcp_header_t *tcp = ip6_next_header (ip6);
245       nat64_tcp_session_set_state (ste, tcp, 1);
  /* incremental checksum update: swap old source port for the new one,
   * then apply MSS clamping (which also folds into csum) */
246       checksum = &tcp->checksum;
247       csum = ip_csum_sub_even (*checksum, sport);
248       csum = ip_csum_add_even (csum, udp->src_port);
249       mss_clamping (nm->sm, tcp, &csum);
250       *checksum = ip_csum_fold (csum);
253   nat64_session_reset_timeout (ste, ctx->vm);
/* ip6_to_ip4 translation callback for ICMP: echo request/reply packets get
 * a BIB/session entry keyed on the ICMP identifier (stored where a port
 * would be); other ICMP types fall through to a pool-address rewrite only.
 * NOTE(review): interior lines (error returns, entry-create tails) are
 * missing from this chunk. */
259 nat64_in2out_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
261   nat64_main_t *nm = &nat64_main;
262   nat64_in2out_set_ctx_t *ctx = arg;
263   nat64_db_bib_entry_t *bibe;
264   nat64_db_st_entry_t *ste;
265   ip46_address_t saddr, daddr;
266   u32 sw_if_index, fib_index;
267   icmp46_header_t *icmp = ip6_next_header (ip6);
268   nat64_db_t *db = &nm->db[ctx->thread_index];
270   sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
272     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
274   saddr.as_u64[0] = ip6->src_address.as_u64[0];
275   saddr.as_u64[1] = ip6->src_address.as_u64[1];
276   daddr.as_u64[0] = ip6->dst_address.as_u64[0];
277   daddr.as_u64[1] = ip6->dst_address.as_u64[1];
279   if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
  /* the ICMP identifier sits 4 bytes into the header; it plays the
   * role of a port in the BIB/session lookup */
281       u16 in_id = ((u16 *) (icmp))[2];
283 	nat64_db_st_entry_find (db, &saddr, &daddr, in_id, 0,
284 				IP_PROTOCOL_ICMP, fib_index, 1);
289 	    nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
297 	    nat64_db_bib_entry_find (db, &saddr, in_id,
298 				     IP_PROTOCOL_ICMP, fib_index, 1);
303 	      ip4_address_t out_addr;
304 	      if (nat64_alloc_out_addr_and_port
305 		  (fib_index, SNAT_PROTOCOL_ICMP, &out_addr, &out_id,
310 		nat64_db_bib_entry_create (db, &ip6->src_address,
311 					   &out_addr, in_id, out_id,
312 					   fib_index, IP_PROTOCOL_ICMP, 0);
317 	  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
319 	    nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
325       nat64_session_reset_timeout (ste, ctx->vm);
  /* rewrite IPv4 addresses and the outgoing ICMP identifier */
327       ip4->src_address.as_u32 = bibe->out_addr.as_u32;
328       ((u16 *) (icmp))[2] = bibe->out_port;
330       ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
  /* non-echo ICMP: no session state; use the first pool address as the
   * IPv4 source and extract the embedded IPv4 destination */
334       if (!vec_len (nm->addr_pool))
337       ip4->src_address.as_u32 = nm->addr_pool[0].addr.as_u32;
338       nat64_extract_ip4 (&ip6->dst_address, &ip4->dst_address, fib_index);
/* Translation callback for the inner (embedded) packet of an ICMPv6 error
 * message.  The inner packet travels in the reverse direction, so session
 * lookups use (daddr, saddr) and the DESTINATION port/identifier is
 * rewritten rather than the source.
 * NOTE(review): interior lines (error returns) are missing from this chunk. */
345 nat64_in2out_inner_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
348   nat64_main_t *nm = &nat64_main;
349   nat64_in2out_set_ctx_t *ctx = arg;
350   nat64_db_st_entry_t *ste;
351   nat64_db_bib_entry_t *bibe;
352   ip46_address_t saddr, daddr;
353   u32 sw_if_index, fib_index;
354   u8 proto = ip6->protocol;
355   nat64_db_t *db = &nm->db[ctx->thread_index];
357   sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
359     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
361   saddr.as_u64[0] = ip6->src_address.as_u64[0];
362   saddr.as_u64[1] = ip6->src_address.as_u64[1];
363   daddr.as_u64[0] = ip6->dst_address.as_u64[0];
364   daddr.as_u64[1] = ip6->dst_address.as_u64[1];
366   if (proto == IP_PROTOCOL_ICMP6)
368       icmp46_header_t *icmp = ip6_next_header (ip6);
369       u16 in_id = ((u16 *) (icmp))[2];
  /* inner ICMPv6 is rewritten to IP_PROTOCOL_ICMP for the v4 side */
370       proto = IP_PROTOCOL_ICMP;
373 	  (icmp->type == ICMP4_echo_request
374 	   || icmp->type == ICMP4_echo_reply))
  /* reverse-direction lookup: (daddr, saddr) with the inner id */
378 	nat64_db_st_entry_find (db, &daddr, &saddr, in_id, 0, proto,
383       bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
387       ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
388       ((u16 *) (icmp))[2] = bibe->out_port;
389       ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
  /* inner TCP/UDP: same header position, so udp/tcp share the pointer */
393       udp_header_t *udp = ip6_next_header (ip6);
394       tcp_header_t *tcp = ip6_next_header (ip6);
398       u16 sport = udp->src_port;
399       u16 dport = udp->dst_port;
402 	nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
407       bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
411       ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
412       udp->dst_port = bibe->out_port;
413       ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
  /* incremental checksum update for the rewritten destination port */
415       if (proto == IP_PROTOCOL_TCP)
416 	checksum = &tcp->checksum;
418 	checksum = &udp->checksum;
419       csum = ip_csum_sub_even (*checksum, dport);
420       csum = ip_csum_add_even (csum, udp->dst_port);
421       *checksum = ip_csum_fold (csum);
/* Walk context for unk_proto_st_walk(): carries the inner IPv6 flow
 * endpoints and receives (in out_addr) the outside IPv4 address of an
 * existing TCP/UDP session to the same destination, if one is found. */
427 typedef struct unk_proto_st_walk_ctx_t_
429   ip6_address_t src_addr;
430   ip6_address_t dst_addr;
431   ip4_address_t out_addr;
435 } unk_proto_st_walk_ctx_t;
/* Session-table walk callback used by the unknown-protocol paths: when a
 * session of the walked protocol matches the same inside source and remote
 * destination (and fib), reuse its outside address — but only if no
 * conflicting session already exists for that outside address pair.
 * NOTE(review): return statements are on lines missing from this chunk. */
438 unk_proto_st_walk (nat64_db_st_entry_t * ste, void *arg)
440   nat64_main_t *nm = &nat64_main;
441   unk_proto_st_walk_ctx_t *ctx = arg;
442   nat64_db_bib_entry_t *bibe;
443   ip46_address_t saddr, daddr;
444   nat64_db_t *db = &nm->db[ctx->thread_index];
446   if (ip46_address_is_equal (&ste->in_r_addr, &ctx->dst_addr))
448       bibe = nat64_db_bib_entry_by_index (db, ste->proto, ste->bibe_index);
452       if (ip46_address_is_equal (&bibe->in_addr, &ctx->src_addr)
453 	  && bibe->fib_index == ctx->fib_index)
455 	  clib_memset (&saddr, 0, sizeof (saddr));
456 	  saddr.ip4.as_u32 = bibe->out_addr.as_u32;
457 	  clib_memset (&daddr, 0, sizeof (daddr));
458 	  nat64_extract_ip4 (&ctx->dst_addr, &daddr.ip4, ctx->fib_index);
  /* skip this candidate if the outside address pair is already taken
   * for the walked protocol */
460 	  if (nat64_db_st_entry_find
461 	      (db, &daddr, &saddr, 0, 0, ctx->proto, ctx->fib_index, 0))
464 	  ctx->out_addr.as_u32 = bibe->out_addr.as_u32;
/* ip6_to_ip4 translation callback for protocols other than TCP/UDP/ICMP.
 * Sessions are keyed on addresses only (ports 0).  For a new flow the
 * outside address is preferentially the one already used by a TCP or UDP
 * session to the same destination; otherwise the first free pool address.
 * NOTE(review): interior lines (error returns, loop break) are missing
 * from this chunk. */
473 nat64_in2out_unk_proto_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
476   nat64_main_t *nm = &nat64_main;
477   nat64_in2out_set_ctx_t *s_ctx = arg;
478   nat64_db_bib_entry_t *bibe;
479   nat64_db_st_entry_t *ste;
480   ip46_address_t saddr, daddr, addr;
481   u32 sw_if_index, fib_index;
482   u8 proto = ip6->protocol;
484   nat64_db_t *db = &nm->db[s_ctx->thread_index];
486   sw_if_index = vnet_buffer (s_ctx->b)->sw_if_index[VLIB_RX];
488     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
490   saddr.as_u64[0] = ip6->src_address.as_u64[0];
491   saddr.as_u64[1] = ip6->src_address.as_u64[1];
492   daddr.as_u64[0] = ip6->dst_address.as_u64[0];
493   daddr.as_u64[1] = ip6->dst_address.as_u64[1];
496     nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
500       bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
506       bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
510 	  /* Choose same out address as for TCP/UDP session to same dst */
511 	  unk_proto_st_walk_ctx_t ctx = {
512 	    .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
513 	    .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
514 	    .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
515 	    .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
516 	    .out_addr.as_u32 = 0,
517 	    .fib_index = fib_index,
519 	    .thread_index = s_ctx->thread_index,
  /* try TCP sessions first, then UDP, for address affinity */
522 	  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
524 	  if (!ctx.out_addr.as_u32)
525 	    nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
527 	  /* Verify if out address is not already in use for protocol */
528 	  clib_memset (&addr, 0, sizeof (addr));
529 	  addr.ip4.as_u32 = ctx.out_addr.as_u32;
530 	  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
531 	    ctx.out_addr.as_u32 = 0;
  /* fall back to scanning the pool for an address unused by proto */
533 	  if (!ctx.out_addr.as_u32)
535 	      for (i = 0; i < vec_len (nm->addr_pool); i++)
537 		  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
538 		  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
543 	  if (!ctx.out_addr.as_u32)
547 	    nat64_db_bib_entry_create (db, &ip6->src_address,
548 				       &ctx.out_addr, 0, 0, fib_index, proto,
554       nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
556 	nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
561   nat64_session_reset_timeout (ste, s_ctx->vm);
563   ip4->src_address.as_u32 = bibe->out_addr.as_u32;
564   ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
/* Hairpinning for TCP/UDP: the IPv6 destination embeds one of our own pool
 * addresses, so the packet loops back to another inside host.  The packet
 * stays IPv6: source becomes the NAT64-composed address of this flow's
 * outside mapping, destination becomes the target host's inside address
 * found via its BIB entry.  The L4 checksum is updated incrementally by
 * subtracting the old pseudo-header/ports and adding the new ones.
 * NOTE(review): interior lines (error returns, db loop break) are missing
 * from this chunk. */
572 nat64_in2out_tcp_udp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
573 				  ip6_header_t * ip6, u32 thread_index)
575   nat64_main_t *nm = &nat64_main;
576   nat64_db_bib_entry_t *bibe;
577   nat64_db_st_entry_t *ste;
578   ip46_address_t saddr, daddr;
579   u32 sw_if_index, fib_index;
580   udp_header_t *udp = ip6_next_header (ip6);
581   tcp_header_t *tcp = ip6_next_header (ip6);
582   u8 proto = ip6->protocol;
583   u16 sport = udp->src_port;
584   u16 dport = udp->dst_port;
587   nat64_db_t *db = &nm->db[thread_index];
589   sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
591     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
593   saddr.as_u64[0] = ip6->src_address.as_u64[0];
594   saddr.as_u64[1] = ip6->src_address.as_u64[1];
595   daddr.as_u64[0] = ip6->dst_address.as_u64[0];
596   daddr.as_u64[1] = ip6->dst_address.as_u64[1];
598   if (proto == IP_PROTOCOL_UDP)
599     checksum = &udp->checksum;
601     checksum = &tcp->checksum;
  /* remove the old addresses and ports from the checksum up front;
   * the new values are added back after the rewrite below */
603   csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
604   csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
605   csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
606   csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
607   csum = ip_csum_sub_even (csum, sport);
608   csum = ip_csum_sub_even (csum, dport);
611     nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
616       bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
622       bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
627 	  ip4_address_t out_addr;
628 	  if (nat64_alloc_out_addr_and_port
629 	      (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
630 	       &out_port, thread_index))
634 	    nat64_db_bib_entry_create (db, &ip6->src_address, &out_addr,
635 				       sport, out_port, fib_index, proto, 0);
640       nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
642 	nat64_db_st_entry_create (db, bibe, &ip6->dst_address,
648   if (proto == IP_PROTOCOL_TCP)
649     nat64_tcp_session_set_state (ste, tcp, 1);
651   nat64_session_reset_timeout (ste, vm);
653   sport = udp->src_port = bibe->out_port;
654   nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
656   clib_memset (&daddr, 0, sizeof (daddr));
657   daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
  /* find the target host's BIB entry across all worker databases */
661   vec_foreach (db, nm->db)
663       bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, 0, 0);
673   ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
674   ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
675   udp->dst_port = bibe->in_port;
677   csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
678   csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
679   csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
680   csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
681   csum = ip_csum_add_even (csum, udp->src_port);
682   csum = ip_csum_add_even (csum, udp->dst_port);
683   *checksum = ip_csum_fold (csum);
/* Hairpinning for ICMPv6 error messages: translates the embedded inner
 * IPv6 packet (which travels in the reverse direction) and rewrites the
 * outer header accordingly, then recomputes the ICMPv6 checksum over the
 * pseudo-header and payload from scratch.
 * NOTE(review): interior lines (error returns, db loop break) are missing
 * from this chunk. */
689 nat64_in2out_icmp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
690 			       ip6_header_t * ip6, u32 thread_index)
692   nat64_main_t *nm = &nat64_main;
693   nat64_db_bib_entry_t *bibe;
694   nat64_db_st_entry_t *ste;
695   icmp46_header_t *icmp = ip6_next_header (ip6);
696   ip6_header_t *inner_ip6;
697   ip46_address_t saddr, daddr;
698   u32 sw_if_index, fib_index;
702   u16 *checksum, sport, dport;
704   nat64_db_t *db = &nm->db[thread_index];
  /* echo request/reply have no inner packet; handled elsewhere */
706   if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
  /* inner IPv6 header starts right after the 8-byte ICMP header */
709   inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
711   proto = inner_ip6->protocol;
713   if (proto == IP_PROTOCOL_ICMP6)
716   sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
718     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
720   saddr.as_u64[0] = inner_ip6->src_address.as_u64[0];
721   saddr.as_u64[1] = inner_ip6->src_address.as_u64[1];
722   daddr.as_u64[0] = inner_ip6->dst_address.as_u64[0];
723   daddr.as_u64[1] = inner_ip6->dst_address.as_u64[1];
725   udp = ip6_next_header (inner_ip6);
726   tcp = ip6_next_header (inner_ip6);
728   sport = udp->src_port;
729   dport = udp->dst_port;
731   if (proto == IP_PROTOCOL_UDP)
732     checksum = &udp->checksum;
734     checksum = &tcp->checksum;
  /* strip old inner addresses/ports from the inner L4 checksum */
736   csum = ip_csum_sub_even (*checksum, inner_ip6->src_address.as_u64[0]);
737   csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
738   csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
739   csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
740   csum = ip_csum_sub_even (csum, sport);
741   csum = ip_csum_sub_even (csum, dport);
  /* inner packet is reverse direction: look up with (daddr, saddr) */
744     nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
749   bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
753   dport = udp->dst_port = bibe->out_port;
754   nat64_compose_ip6 (&inner_ip6->dst_address, &bibe->out_addr, fib_index);
756   clib_memset (&saddr, 0, sizeof (saddr));
757   clib_memset (&daddr, 0, sizeof (daddr));
758   saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
759   daddr.ip4.as_u32 = bibe->out_addr.as_u32;
  /* locate the matching session across all worker databases */
763   vec_foreach (db, nm->db)
765       ste = nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
776   bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
780   inner_ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
781   inner_ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
782   udp->src_port = bibe->in_port;
784   csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
785   csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
786   csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
787   csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
788   csum = ip_csum_add_even (csum, udp->src_port);
789   csum = ip_csum_add_even (csum, udp->dst_port);
790   *checksum = ip_csum_fold (csum);
792   if (!vec_len (nm->addr_pool))
  /* outer header: source from the first pool address, destination is the
   * inner source (the host that originated the offending packet) */
795   nat64_compose_ip6 (&ip6->src_address, &nm->addr_pool[0].addr, fib_index);
796   ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
797   ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
  /* recompute ICMPv6 checksum over pseudo-header + payload from scratch */
800   csum = ip_csum_with_carry (0, ip6->payload_length);
801   csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
802   csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
803   csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
804   csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
805   csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
807     ip_incremental_checksum (csum, icmp,
808 			     clib_net_to_host_u16 (ip6->payload_length));
809   icmp->checksum = ~ip_csum_fold (csum);
/* Hairpinning for protocols other than TCP/UDP/ICMP.  Mirrors
 * nat64_in2out_unk_proto_set_cb for session/BIB creation (address-only
 * keys, TCP/UDP address affinity via unk_proto_st_walk), but the packet
 * stays IPv6: source is composed from the outside mapping and destination
 * is the target host's inside address from its BIB entry.
 * NOTE(review): interior lines (error returns, loop breaks) are missing
 * from this chunk. */
815 nat64_in2out_unk_proto_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
816 				    ip6_header_t * ip6, u32 thread_index)
818   nat64_main_t *nm = &nat64_main;
819   nat64_db_bib_entry_t *bibe;
820   nat64_db_st_entry_t *ste;
821   ip46_address_t saddr, daddr, addr;
822   u32 sw_if_index, fib_index;
823   u8 proto = ip6->protocol;
825   nat64_db_t *db = &nm->db[thread_index];
827   sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
829     fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
831   saddr.as_u64[0] = ip6->src_address.as_u64[0];
832   saddr.as_u64[1] = ip6->src_address.as_u64[1];
833   daddr.as_u64[0] = ip6->dst_address.as_u64[0];
834   daddr.as_u64[1] = ip6->dst_address.as_u64[1];
837     nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
841       bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
847       bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
851 	  /* Choose same out address as for TCP/UDP session to same dst */
852 	  unk_proto_st_walk_ctx_t ctx = {
853 	    .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
854 	    .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
855 	    .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
856 	    .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
857 	    .out_addr.as_u32 = 0,
858 	    .fib_index = fib_index,
860 	    .thread_index = thread_index,
863 	  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
865 	  if (!ctx.out_addr.as_u32)
866 	    nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
868 	  /* Verify if out address is not already in use for protocol */
869 	  clib_memset (&addr, 0, sizeof (addr));
870 	  addr.ip4.as_u32 = ctx.out_addr.as_u32;
871 	  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
872 	    ctx.out_addr.as_u32 = 0;
874 	  if (!ctx.out_addr.as_u32)
876 	      for (i = 0; i < vec_len (nm->addr_pool); i++)
878 		  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
879 		  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
884 	  if (!ctx.out_addr.as_u32)
888 	    nat64_db_bib_entry_create (db, &ip6->src_address,
889 				       &ctx.out_addr, 0, 0, fib_index, proto,
895       nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
897 	nat64_db_st_entry_create (db, bibe, &ip6->dst_address, &daddr.ip4, 0);
902   nat64_session_reset_timeout (ste, vm);
904   nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
906   clib_memset (&daddr, 0, sizeof (daddr));
907   daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
  /* find the target host's BIB entry across all worker databases */
911   vec_foreach (db, nm->db)
913       bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, 0, 0);
923   ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
924   ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
/* Main NAT64 in2out dispatch loop, shared by the fast-path and slow-path
 * nodes (is_slow_path selects behavior and the stats node).  For each
 * buffer: parse the IPv6 header, bypass translation for packets addressed
 * to us, hand unknown protocols to the slow path (where they are
 * translated), punt fragments to the reassembly node, and translate
 * TCP/UDP/ICMP — with hairpinned destinations handled in IPv6.
 * Per-protocol packet counters are flushed at the end.
 * NOTE(review): this chunk is a partial dump (interior lines missing);
 * only the visible lines are documented/changed here.
 * FIX: the UDP_PACKETS counter was incremented with tcp_packets
 * (copy-paste from the TCP counter two lines above); it now uses
 * udp_packets. */
930 nat64_in2out_node_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
931 			     vlib_frame_t * frame, u8 is_slow_path)
933   u32 n_left_from, *from, *to_next;
934   nat64_in2out_next_t next_index;
935   u32 pkts_processed = 0;
936   u32 stats_node_index;
937   u32 thread_index = vm->thread_index;
938   u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
942     is_slow_path ? nat64_in2out_slowpath_node.index : nat64_in2out_node.index;
944   from = vlib_frame_vector_args (frame);
945   n_left_from = frame->n_vectors;
946   next_index = node->cached_next_index;
948   while (n_left_from > 0)
952       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
954       while (n_left_from > 0 && n_left_to_next > 0)
960 	  u16 l4_offset0, frag_offset0;
963 	  nat64_in2out_set_ctx_t ctx0;
966 	  /* speculatively enqueue b0 to the current next frame */
974 	  b0 = vlib_get_buffer (vm, bi0);
975 	  ip60 = vlib_buffer_get_current (b0);
979 	  ctx0.thread_index = thread_index;
981 	  next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
	  /* parse the IPv6 header chain; drop on malformed packets */
985 	      (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
988 	      next0 = NAT64_IN2OUT_NEXT_DROP;
989 	      b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
993 	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	  /* packets addressed to one of our own IPv6 addresses bypass NAT64 */
995 	  if (nat64_not_translate (sw_if_index0, ip60->dst_address))
997 	      next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1001 	  proto0 = ip_proto_to_snat_proto (l4_protocol0);
	  /* slow path: translate protocols without port/ID mappings */
1005 	      if (PREDICT_TRUE (proto0 == ~0))
1008 		  if (is_hairpinning (&ip60->dst_address))
1010 		      next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1011 		      if (nat64_in2out_unk_proto_hairpinning
1012 			  (vm, b0, ip60, thread_index))
1014 			  next0 = NAT64_IN2OUT_NEXT_DROP;
1016 			    node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1021 		  if (ip6_to_ip4 (b0, nat64_in2out_unk_proto_set_cb, &ctx0))
1023 		      next0 = NAT64_IN2OUT_NEXT_DROP;
1025 			node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
	  /* fast path: unknown protocols are punted to the slow-path node */
1033 	      if (PREDICT_FALSE (proto0 == ~0))
1035 		  next0 = NAT64_IN2OUT_NEXT_SLOWPATH;
	  /* fragments go to the reassembly node for virtual reassembly */
1041 	      (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION))
1043 	      next0 = NAT64_IN2OUT_NEXT_REASS;
1048 	  if (proto0 == SNAT_PROTOCOL_ICMP)
1051 	      if (is_hairpinning (&ip60->dst_address))
1053 		  next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1054 		  if (nat64_in2out_icmp_hairpinning
1055 		      (vm, b0, ip60, thread_index))
1057 		      next0 = NAT64_IN2OUT_NEXT_DROP;
1059 			node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1065 		  (b0, nat64_in2out_icmp_set_cb, &ctx0,
1066 		   nat64_in2out_inner_icmp_set_cb, &ctx0))
1068 		  next0 = NAT64_IN2OUT_NEXT_DROP;
1069 		  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1073 	  else if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
1075 	      if (proto0 == SNAT_PROTOCOL_TCP)
1080 	      if (is_hairpinning (&ip60->dst_address))
1082 		  next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1083 		  if (nat64_in2out_tcp_udp_hairpinning
1084 		      (vm, b0, ip60, thread_index))
1086 		      next0 = NAT64_IN2OUT_NEXT_DROP;
1088 			node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1093 	      if (ip6_to_ip4_tcp_udp
1094 		  (b0, nat64_in2out_tcp_udp_set_cb, &ctx0, 0))
1096 		  next0 = NAT64_IN2OUT_NEXT_DROP;
1097 		  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1103 	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
1104 			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1106 	      nat64_in2out_trace_t *t =
1107 		vlib_add_trace (vm, node, b0, sizeof (*t));
1108 	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1109 	      t->next_index = next0;
1110 	      t->is_slow_path = is_slow_path;
1113 	  pkts_processed += next0 == NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1115 	  /* verify speculative enqueue, maybe switch current next frame */
1116 	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1117 					   n_left_to_next, bi0, next0);
1119       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1121   vlib_node_increment_counter (vm, stats_node_index,
1122 			       NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1124   vlib_node_increment_counter (vm, stats_node_index,
1125 			       NAT64_IN2OUT_ERROR_TCP_PACKETS, tcp_packets);
1126   vlib_node_increment_counter (vm, stats_node_index,
1127 			       NAT64_IN2OUT_ERROR_UDP_PACKETS, udp_packets);
1128   vlib_node_increment_counter (vm, stats_node_index,
1129 			       NAT64_IN2OUT_ERROR_ICMP_PACKETS, icmp_packets);
1130   vlib_node_increment_counter (vm, stats_node_index,
1131 			       NAT64_IN2OUT_ERROR_OTHER_PACKETS,
1133   vlib_node_increment_counter (vm, stats_node_index,
1134 			       NAT64_IN2OUT_ERROR_FRAGMENTS, fragments);
1136   return frame->n_vectors;
/* Fast-path node entry point: delegates to the shared inline with
 * is_slow_path = 0. */
1140 nat64_in2out_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1141 		      vlib_frame_t * frame)
1143   return nat64_in2out_node_fn_inline (vm, node, frame, 0);
/* Graph-node registration for the NAT64 in2out fast path.  Wires the error
 * strings defined above and the next-node dispositions used by the inline
 * dispatch loop (drop, v4/v6 lookup, slow path, reassembly). */
1147 VLIB_REGISTER_NODE (nat64_in2out_node) = {
1148   .function = nat64_in2out_node_fn,
1149   .name = "nat64-in2out",
1150   .vector_size = sizeof (u32),
1151   .format_trace = format_nat64_in2out_trace,
1152   .type = VLIB_NODE_TYPE_INTERNAL,
1153   .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1154   .error_strings = nat64_in2out_error_strings,
1155   .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1156   /* edit / add dispositions here */
1158     [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1159     [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1160     [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1161     [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1162     [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1167 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_node, nat64_in2out_node_fn);
/* Slow-path node entry point: delegates to the shared inline with
 * is_slow_path = 1 (handles protocols without port mappings). */
1170 nat64_in2out_slowpath_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1171 			       vlib_frame_t * frame)
1173   return nat64_in2out_node_fn_inline (vm, node, frame, 1);
/* Graph-node registration for the NAT64 in2out slow path; shares the error
 * table, trace formatter and next-node dispositions with the fast path. */
1177 VLIB_REGISTER_NODE (nat64_in2out_slowpath_node) = {
1178   .function = nat64_in2out_slowpath_node_fn,
1179   .name = "nat64-in2out-slowpath",
1180   .vector_size = sizeof (u32),
1181   .format_trace = format_nat64_in2out_trace,
1182   .type = VLIB_NODE_TYPE_INTERNAL,
1183   .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1184   .error_strings = nat64_in2out_error_strings,
1185   .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1186   /* edit / add dispositions here */
1188     [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1189     [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1190     [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1191     [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1192     [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1197 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_slowpath_node,
1198 nat64_in2out_slowpath_node_fn);
/* Per-fragment translation context passed to the reassembly callbacks.
 * NOTE(review): member declarations are on lines missing from this chunk;
 * the callbacks below read vm, l4_offset, proto, sess_index, thread_index
 * and first_frag from it. */
1200 typedef struct nat64_in2out_frag_set_ctx_t_
1208 } nat64_in2out_frag_set_ctx_t;
/* Translation callback for a fragment whose session was already resolved
 * (ctx->sess_index): rewrites the IPv4 addresses on every fragment, and on
 * the first fragment (which carries the L4 header) also rewrites the source
 * port and incrementally updates the TCP checksum for the IPv6→IPv4
 * pseudo-header change.
 * NOTE(review): error-return lines are missing from this chunk. */
1211 nat64_in2out_frag_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
1213   nat64_main_t *nm = &nat64_main;
1214   nat64_in2out_frag_set_ctx_t *ctx = arg;
1215   nat64_db_st_entry_t *ste;
1216   nat64_db_bib_entry_t *bibe;
1218   nat64_db_t *db = &nm->db[ctx->thread_index];
1220   ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1224   bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1228   nat64_session_reset_timeout (ste, ctx->vm);
  /* only the first fragment contains the transport header */
1230   if (ctx->first_frag)
1232       udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1234       if (ctx->proto == IP_PROTOCOL_TCP)
1238 	  tcp_header_t *tcp = (tcp_header_t *) udp;
1240 	  nat64_tcp_session_set_state (ste, tcp, 1);
  /* swap v6 pseudo-header + old port for v4 pseudo-header + new port */
1241 	  checksum = &tcp->checksum;
1242 	  csum = ip_csum_sub_even (*checksum, tcp->src_port);
1243 	  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[0]);
1244 	  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1245 	  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1246 	  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1247 	  csum = ip_csum_add_even (csum, bibe->out_port);
1248 	  csum = ip_csum_add_even (csum, bibe->out_addr.as_u32);
1249 	  csum = ip_csum_add_even (csum, ste->out_r_addr.as_u32);
1250 	  *checksum = ip_csum_fold (csum);
1253       udp->src_port = bibe->out_port;
1256   ip4->src_address.as_u32 = bibe->out_addr.as_u32;
1257   ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
/* Hairpinning variant of fragment translation: the packet stays IPv6.
 * Source is composed from the flow's outside mapping, destination is the
 * target host's inside address found via its BIB entry across all worker
 * databases; the first fragment additionally gets its ports rewritten and
 * its L4 checksum updated incrementally.
 * NOTE(review): error-return and loop-break lines are missing from this
 * chunk. */
1263 nat64_in2out_frag_hairpinning (vlib_buffer_t * b, ip6_header_t * ip6,
1264 			       nat64_in2out_frag_set_ctx_t * ctx)
1266   nat64_main_t *nm = &nat64_main;
1267   nat64_db_st_entry_t *ste;
1268   nat64_db_bib_entry_t *bibe;
1269   udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1270   tcp_header_t *tcp = (tcp_header_t *) udp;
1271   u16 sport = udp->src_port;
1272   u16 dport = udp->dst_port;
1275   ip46_address_t daddr;
1276   nat64_db_t *db = &nm->db[ctx->thread_index];
1278   if (ctx->first_frag)
1280       if (ctx->proto == IP_PROTOCOL_UDP)
1281 	checksum = &udp->checksum;
1283 	checksum = &tcp->checksum;
  /* strip old addresses/ports; new values are added back below */
1285       csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
1286       csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1287       csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1288       csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1289       csum = ip_csum_sub_even (csum, sport);
1290       csum = ip_csum_sub_even (csum, dport);
1293   ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1297   bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1301   if (ctx->proto == IP_PROTOCOL_TCP)
1302     nat64_tcp_session_set_state (ste, tcp, 1);
1304   nat64_session_reset_timeout (ste, ctx->vm);
1306   sport = bibe->out_port;
1307   dport = ste->r_port;
1309   nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, bibe->fib_index);
1311   clib_memset (&daddr, 0, sizeof (daddr));
1312   daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
  /* find the target host's BIB entry across all worker databases */
1316   vec_foreach (db, nm->db)
1318       bibe = nat64_db_bib_entry_find (db, &daddr, dport, ctx->proto, 0, 0);
1328   ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
1329   ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
1331   if (ctx->first_frag)
1333       udp->dst_port = bibe->in_port;
1334       udp->src_port = sport;
1335       csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
1336       csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
1337       csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
1338       csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
1339       csum = ip_csum_add_even (csum, udp->src_port);
1340       csum = ip_csum_add_even (csum, udp->dst_port);
1341       *checksum = ip_csum_fold (csum);
/*
 * nat64_in2out_reass_node_fn:
 * Dispatch function for the "nat64-in2out-reass" graph node — translates
 * fragmented IPv6 packets travelling inside->outside.  The first fragment
 * (offset 0) drives the BIB/session lookup (creating entries and
 * allocating an outside address/port when needed) and records the session
 * index in the virtual-reassembly state; non-first fragments that arrive
 * before the session is known are cached and replayed once it is.
 *
 * NOTE(review): this listing is elided — braces and several statements
 * between the numbered lines are missing; comments below describe only
 * what the visible lines establish.
 */
1348 nat64_in2out_reass_node_fn (vlib_main_t * vm,
1349 vlib_node_runtime_t * node, vlib_frame_t * frame)
1351 u32 n_left_from, *from, *to_next;
1352 nat64_in2out_next_t next_index;
1353 u32 pkts_processed = 0, cached_fragments = 0;
/* vectors of buffer indices queued for dropping, and for re-injection
   ("loopback") after the first fragment establishes the session */
1354 u32 *fragments_to_drop = 0;
1355 u32 *fragments_to_loopback = 0;
1356 nat64_main_t *nm = &nat64_main;
1357 u32 thread_index = vm->thread_index;
1359 from = vlib_frame_vector_args (frame);
1360 n_left_from = frame->n_vectors;
1361 next_index = node->cached_next_index;
/* outer loop: one iteration per next-frame refill */
1363 while (n_left_from > 0)
1367 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* per-buffer processing */
1369 while (n_left_from > 0 && n_left_to_next > 0)
1376 u16 l4_offset0, frag_offset0;
1378 nat_reass_ip6_t *reass0;
1379 ip6_frag_hdr_t *frag0;
1380 nat64_db_bib_entry_t *bibe0;
1381 nat64_db_st_entry_t *ste0;
1383 snat_protocol_t proto0;
1384 u32 sw_if_index0, fib_index0;
1385 ip46_address_t saddr0, daddr0;
1386 nat64_in2out_frag_set_ctx_t ctx0;
/* per-thread NAT64 database */
1387 nat64_db_t *db = &nm->db[thread_index];
1389 /* speculatively enqueue b0 to the current next frame */
1395 n_left_to_next -= 1;
1397 b0 = vlib_get_buffer (vm, bi0);
1398 next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1400 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1402 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
1405 ctx0.thread_index = thread_index;
/* global "drop all fragments" policy knob (argument 1 presumably
   selects IPv6 — TODO confirm against nat_reass API) */
1407 if (PREDICT_FALSE (nat_reass_is_drop_frag (1)))
1409 next0 = NAT64_IN2OUT_NEXT_DROP;
1410 b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
/* walk the IPv6 header chain for the L4 protocol, its offset, and the
   fragment-header offset (parser name elided from this listing) */
1414 ip60 = (ip6_header_t *) vlib_buffer_get_current (b0);
1418 (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
1421 next0 = NAT64_IN2OUT_NEXT_DROP;
1422 b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
/* only TCP and UDP fragments are translated here; others are dropped */
1427 (!(l4_protocol0 == IP_PROTOCOL_TCP
1428 || l4_protocol0 == IP_PROTOCOL_UDP)))
1430 next0 = NAT64_IN2OUT_NEXT_DROP;
1431 b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1435 udp0 = (udp_header_t *) u8_ptr_add (ip60, l4_offset0);
1436 frag0 = (ip6_frag_hdr_t *) u8_ptr_add (ip60, frag_offset0);
1437 proto0 = ip_proto_to_snat_proto (l4_protocol0);
/* find or create per-flow virtual-reassembly state keyed (in part)
   by source address and fragment identification */
1439 reass0 = nat_ip6_reass_find_or_create (ip60->src_address,
1441 frag0->identification,
1443 1, &fragments_to_drop);
1445 if (PREDICT_FALSE (!reass0))
1447 next0 = NAT64_IN2OUT_NEXT_DROP;
1448 b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_REASS];
/* non-zero fragment offset => this is NOT the first fragment */
1452 if (PREDICT_TRUE (ip6_frag_hdr_offset (frag0)))
1454 ctx0.first_frag = 0;
/* session not yet established: cache this fragment until the
   first fragment arrives and creates the session */
1455 if (PREDICT_FALSE (reass0->sess_index == (u32) ~ 0))
1457 if (nat_ip6_reass_add_fragment
1458 (reass0, bi0, &fragments_to_drop))
1460 b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_FRAG];
1461 next0 = NAT64_IN2OUT_NEXT_DROP;
/* first fragment: resolve (or build) the NAT64 session */
1470 ctx0.first_frag = 1;
1472 saddr0.as_u64[0] = ip60->src_address.as_u64[0];
1473 saddr0.as_u64[1] = ip60->src_address.as_u64[1];
1474 daddr0.as_u64[0] = ip60->dst_address.as_u64[0];
1475 daddr0.as_u64[1] = ip60->dst_address.as_u64[1];
1478 nat64_db_st_entry_find (db, &saddr0, &daddr0,
1479 udp0->src_port, udp0->dst_port,
1480 l4_protocol0, fib_index0, 1);
1484 nat64_db_bib_entry_find (db, &saddr0, udp0->src_port,
1485 l4_protocol0, fib_index0, 1);
/* no BIB entry yet: allocate a dynamic outside address/port */
1489 ip4_address_t out_addr0;
1490 if (nat64_alloc_out_addr_and_port
1491 (fib_index0, proto0, &out_addr0, &out_port0,
1494 next0 = NAT64_IN2OUT_NEXT_DROP;
1496 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1501 nat64_db_bib_entry_create (db,
1503 &out_addr0, udp0->src_port,
1504 out_port0, fib_index0,
1508 next0 = NAT64_IN2OUT_NEXT_DROP;
1510 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
/* derive the outside IPv4 destination from the NAT64 prefix
   embedded in the IPv6 destination, then create the session */
1514 nat64_extract_ip4 (&ip60->dst_address, &daddr0.ip4,
1517 nat64_db_st_entry_create (db, bibe0,
1518 &ip60->dst_address, &daddr0.ip4,
1522 next0 = NAT64_IN2OUT_NEXT_DROP;
1524 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
/* publish the session index so cached/later fragments reuse it */
1528 reass0->sess_index = nat64_db_st_entry_get_index (db, ste0);
/* replay any fragments cached while the session was unknown */
1530 nat_ip6_reass_get_frags (reass0, &fragments_to_loopback);
1533 ctx0.sess_index = reass0->sess_index;
1534 ctx0.proto = l4_protocol0;
1536 ctx0.l4_offset = l4_offset0;
/* destination matches a local NAT64 mapping => hairpinning: the
   packet stays IPv6 and is translated by the fragment hairpin path */
1538 if (PREDICT_FALSE (is_hairpinning (&ip60->dst_address)))
1540 next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1541 if (nat64_in2out_frag_hairpinning (b0, ip60, &ctx0))
1543 next0 = NAT64_IN2OUT_NEXT_DROP;
1544 b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
/* normal path: in-place IPv6->IPv4 translation of this fragment */
1550 if (ip6_to_ip4_fragmented (b0, nat64_in2out_frag_set_cb, &ctx0))
1552 next0 = NAT64_IN2OUT_NEXT_DROP;
1553 b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
/* packet tracing */
1560 ((node->flags & VLIB_NODE_FLAG_TRACE)
1561 && (b0->flags & VLIB_BUFFER_IS_TRACED))
1563 nat64_in2out_reass_trace_t *t =
1564 vlib_add_trace (vm, node, b0, sizeof (*t));
1565 t->cached = cached0;
1566 t->sw_if_index = sw_if_index0;
1567 t->next_index = next0;
1578 pkts_processed += next0 != NAT64_IN2OUT_NEXT_DROP;
1580 /* verify speculative enqueue, maybe switch current next frame */
1581 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1582 to_next, n_left_to_next,
/* frame exhausted: feed looped-back fragments into this node's input,
   at most VLIB_FRAME_SIZE at a time (remainder stays queued) */
1586 if (n_left_from == 0 && vec_len (fragments_to_loopback))
1588 from = vlib_frame_vector_args (frame);
1589 u32 len = vec_len (fragments_to_loopback);
1590 if (len <= VLIB_FRAME_SIZE)
1592 clib_memcpy_fast (from, fragments_to_loopback,
1593 sizeof (u32) * len);
1595 vec_reset_length (fragments_to_loopback);
1599 clib_memcpy_fast (from, fragments_to_loopback +
1600 (len - VLIB_FRAME_SIZE),
1601 sizeof (u32) * VLIB_FRAME_SIZE);
1602 n_left_from = VLIB_FRAME_SIZE;
1603 _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
1608 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* update node counters */
1611 vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
1612 NAT64_IN2OUT_ERROR_PROCESSED_FRAGMENTS,
1614 vlib_node_increment_counter (vm, nat64_in2out_reass_node.index,
1615 NAT64_IN2OUT_ERROR_CACHED_FRAGMENTS,
/* hand every fragment queued for dropping to error-drop */
1618 nat_send_all_to_node (vm, fragments_to_drop, node,
1619 &node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT],
1620 NAT64_IN2OUT_NEXT_DROP);
1622 vec_free (fragments_to_drop);
1623 vec_free (fragments_to_loopback);
1624 return frame->n_vectors;
/*
 * Graph-node registration for "nat64-in2out-reass".  Declares the
 * dispatch function, trace formatter, shared in2out error strings and
 * the next-node dispositions used by nat64_in2out_reass_node_fn above.
 */
1628 VLIB_REGISTER_NODE (nat64_in2out_reass_node) = {
1629 .function = nat64_in2out_reass_node_fn,
1630 .name = "nat64-in2out-reass",
1631 .vector_size = sizeof (u32),
1632 .format_trace = format_nat64_in2out_reass_trace,
1633 .type = VLIB_NODE_TYPE_INTERNAL,
1634 .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1635 .error_strings = nat64_in2out_error_strings,
1636 .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1637 /* edit / add dispositions here */
1639 [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1640 [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1641 [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1642 [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1643 [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
/* generate per-CPU-architecture variants of the dispatch function */
1648 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_reass_node,
1649 nat64_in2out_reass_node_fn);
/*
 * Error taxonomy for the in2out worker hand-off node.  The X-macro below
 * expands once into enum values (NAT64_IN2OUT_HANDOFF_ERROR_*) and once
 * into the matching counter/error strings.
 */
1651 #define foreach_nat64_in2out_handoff_error \
1652 _(CONGESTION_DROP, "congestion drop") \
1653 _(SAME_WORKER, "same worker") \
1654 _(DO_HANDOFF, "do handoff")
/* enum expansion (enclosing typedef begins in elided lines above) */
1658 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
1659 foreach_nat64_in2out_handoff_error
1661 NAT64_IN2OUT_HANDOFF_N_ERROR,
1662 } nat64_in2out_handoff_error_t;
/* string-table expansion, index-aligned with the enum above */
1664 static char *nat64_in2out_handoff_error_strings[] = {
1665 #define _(sym,string) string,
1666 foreach_nat64_in2out_handoff_error
/* trace record for the hand-off node: the worker the buffer was sent to
   (typedef struct opener is in elided lines above) */
1672 u32 next_worker_index;
1673 } nat64_in2out_handoff_trace_t;
/*
 * Trace formatter for "nat64-in2out-handoff": prints the chosen worker
 * index.  Standard vlib format callback signature (vm, node, trace).
 */
1676 format_nat64_in2out_handoff_trace (u8 * s, va_list * args)
1678 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1679 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1680 nat64_in2out_handoff_trace_t *t =
1681 va_arg (*args, nat64_in2out_handoff_trace_t *);
1684 format (s, "NAT64-IN2OUT-HANDOFF: next-worker %d", t->next_worker_index);
/*
 * nat64_in2out_handoff_node_fn:
 * Worker hand-off node — classifies each buffer by its IPv6 source
 * address and enqueues it to the owning worker thread's in2out frame
 * queue; packets already on the right worker are counted separately.
 * NOTE(review): listing is elided — per-buffer advance (b++/ti++) and
 * the same_worker/do_handoff increments sit in the missing lines.
 */
1690 nat64_in2out_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
1691 vlib_frame_t * frame)
1693 nat64_main_t *nm = &nat64_main;
1694 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1695 u32 n_enq, n_left_from, *from;
/* per-buffer destination worker indices, parallel to bufs[] */
1696 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1698 u32 thread_index = vm->thread_index;
1699 u32 do_handoff = 0, same_worker = 0;
1701 from = vlib_frame_vector_args (frame);
1702 n_left_from = frame->n_vectors;
1703 vlib_get_buffers (vm, from, bufs, n_left_from);
1706 ti = thread_indices;
/* in2out frame queue shared by all workers */
1708 fq_index = nm->fq_in2out_index;
/* pick the target worker for every packet in the frame */
1710 while (n_left_from > 0)
1714 ip0 = vlib_buffer_get_current (b[0]);
1715 ti[0] = nat64_get_worker_in2out (&ip0->src_address);
1717 if (ti[0] != thread_index)
/* packet tracing */
1723 ((node->flags & VLIB_NODE_FLAG_TRACE)
1724 && (b[0]->flags & VLIB_BUFFER_IS_TRACED))
1726 nat64_in2out_handoff_trace_t *t =
1727 vlib_add_trace (vm, node, b[0], sizeof (*t));
1728 t->next_worker_index = ti[0];
/* bulk enqueue to the computed workers; result (assigned to n_enq in
   an elided line — confirm) is how many buffers were accepted */
1737 vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1738 frame->n_vectors, 1);
/* anything not enqueued was dropped due to congestion */
1740 if (n_enq < frame->n_vectors)
1741 vlib_node_increment_counter (vm, node->node_index,
1742 NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1743 frame->n_vectors - n_enq);
1744 vlib_node_increment_counter (vm, node->node_index,
1745 NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
1747 vlib_node_increment_counter (vm, node->node_index,
1748 NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
1751 return frame->n_vectors;
/*
 * Graph-node registration for "nat64-in2out-handoff" with its dedicated
 * error strings; next-node dispositions are in elided lines below the
 * visible fields.
 */
1755 VLIB_REGISTER_NODE (nat64_in2out_handoff_node) = {
1756 .function = nat64_in2out_handoff_node_fn,
1757 .name = "nat64-in2out-handoff",
1758 .vector_size = sizeof (u32),
1759 .format_trace = format_nat64_in2out_handoff_trace,
1760 .type = VLIB_NODE_TYPE_INTERNAL,
1761 .n_errors = ARRAY_LEN(nat64_in2out_handoff_error_strings),
1762 .error_strings = nat64_in2out_handoff_error_strings,
/* generate per-CPU-architecture variants of the dispatch function */
1772 VLIB_NODE_FUNCTION_MULTIARCH (nat64_in2out_handoff_node,
1773 nat64_in2out_handoff_node_fn);
1776 * fd.io coding-style-patch-verification: ON
1779 * eval: (c-set-style "gnu")