/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT64 implementation
 */
20 #include <nat/nat64.h>
21 #include <nat/nat64_db.h>
22 #include <nat/nat_reass.h>
23 #include <nat/nat_inlines.h>
24 #include <vnet/fib/ip4_fib.h>
25 #include <vppinfra/crc32.h>
28 nat64_main_t nat64_main;
32 /* Hook up input features */
33 VNET_FEATURE_INIT (nat64_in2out, static) = {
34 .arc_name = "ip6-unicast",
35 .node_name = "nat64-in2out",
36 .runs_before = VNET_FEATURES ("ip6-lookup"),
38 VNET_FEATURE_INIT (nat64_out2in, static) = {
39 .arc_name = "ip4-unicast",
40 .node_name = "nat64-out2in",
41 .runs_before = VNET_FEATURES ("ip4-lookup"),
43 VNET_FEATURE_INIT (nat64_in2out_handoff, static) = {
44 .arc_name = "ip6-unicast",
45 .node_name = "nat64-in2out-handoff",
46 .runs_before = VNET_FEATURES ("ip6-lookup"),
48 VNET_FEATURE_INIT (nat64_out2in_handoff, static) = {
49 .arc_name = "ip4-unicast",
50 .node_name = "nat64-out2in-handoff",
51 .runs_before = VNET_FEATURES ("ip4-lookup"),
55 static u8 well_known_prefix[] = {
56 0x00, 0x64, 0xff, 0x9b,
57 0x00, 0x00, 0x00, 0x00,
58 0x00, 0x00, 0x00, 0x00,
59 0x00, 0x00, 0x00, 0x00
65 nat64_ip4_add_del_interface_address_cb (ip4_main_t * im, uword opaque,
67 ip4_address_t * address,
69 u32 if_address_index, u32 is_delete)
71 nat64_main_t *nm = &nat64_main;
74 for (i = 0; i < vec_len (nm->auto_add_sw_if_indices); i++)
76 if (sw_if_index == nm->auto_add_sw_if_indices[i])
80 /* Don't trip over lease renewal, static config */
81 for (j = 0; j < vec_len (nm->addr_pool); j++)
82 if (nm->addr_pool[j].addr.as_u32 == address->as_u32)
85 (void) nat64_add_del_pool_addr (address, ~0, 1);
90 (void) nat64_add_del_pool_addr (address, ~0, 0);
98 nat64_get_worker_in2out (ip6_address_t * addr)
100 nat64_main_t *nm = &nat64_main;
101 snat_main_t *sm = nm->sm;
102 u32 next_worker_index = nm->sm->first_worker_index;
105 #ifdef clib_crc32c_uses_intrinsics
106 hash = clib_crc32c ((u8 *) addr->as_u32, 16);
108 u64 tmp = addr->as_u64[0] ^ addr->as_u64[1];
109 hash = clib_xxhash (tmp);
112 if (PREDICT_TRUE (is_pow2 (_vec_len (sm->workers))))
113 next_worker_index += sm->workers[hash & (_vec_len (sm->workers) - 1)];
115 next_worker_index += sm->workers[hash % _vec_len (sm->workers)];
117 return next_worker_index;
121 nat64_get_worker_out2in (ip4_header_t * ip)
123 nat64_main_t *nm = &nat64_main;
124 snat_main_t *sm = nm->sm;
129 proto = ip_proto_to_snat_proto (ip->protocol);
130 udp = ip4_next_header (ip);
131 port = udp->dst_port;
134 if (PREDICT_FALSE (ip4_is_fragment (ip)))
136 if (PREDICT_FALSE (nat_reass_is_drop_frag (0)))
137 return vlib_get_thread_index ();
139 if (PREDICT_TRUE (!ip4_is_first_fragment (ip)))
141 nat_reass_ip4_t *reass;
143 reass = nat_ip4_reass_find (ip->src_address, ip->dst_address,
144 ip->fragment_id, ip->protocol);
146 if (reass && (reass->thread_index != (u32) ~ 0))
147 return reass->thread_index;
149 return vlib_get_thread_index ();
153 /* unknown protocol */
154 if (PREDICT_FALSE (proto == ~0))
157 ip46_address_t daddr;
158 nat64_db_bib_entry_t *bibe;
160 memset (&daddr, 0, sizeof (daddr));
161 daddr.ip4.as_u32 = ip->dst_address.as_u32;
164 vec_foreach (db, nm->db)
166 bibe = nat64_db_bib_entry_find (db, &daddr, 0, ip->protocol, 0, 0);
168 return (u32) (db - nm->db);
171 return vlib_get_thread_index ();
175 if (PREDICT_FALSE (ip->protocol == IP_PROTOCOL_ICMP))
177 icmp46_header_t *icmp = (icmp46_header_t *) udp;
178 icmp_echo_header_t *echo = (icmp_echo_header_t *) (icmp + 1);
179 if (!icmp_is_error_message (icmp))
180 port = echo->identifier;
183 ip4_header_t *inner_ip = (ip4_header_t *) (echo + 1);
184 proto = ip_proto_to_snat_proto (inner_ip->protocol);
185 void *l4_header = ip4_next_header (inner_ip);
188 case SNAT_PROTOCOL_ICMP:
189 icmp = (icmp46_header_t *) l4_header;
190 echo = (icmp_echo_header_t *) (icmp + 1);
191 port = echo->identifier;
193 case SNAT_PROTOCOL_UDP:
194 case SNAT_PROTOCOL_TCP:
195 port = ((tcp_udp_header_t *) l4_header)->src_port;
198 return vlib_get_thread_index ();
203 /* worker by outside port (TCP/UDP) */
204 port = clib_net_to_host_u16 (port);
206 return nm->sm->first_worker_index + ((port - 1024) / sm->port_per_thread);
208 return vlib_get_thread_index ();
212 nat64_init (vlib_main_t * vm)
214 nat64_main_t *nm = &nat64_main;
215 vlib_thread_main_t *tm = vlib_get_thread_main ();
216 ip4_add_del_interface_address_callback_t cb4;
217 ip4_main_t *im = &ip4_main;
218 vlib_node_t *error_drop_node =
219 vlib_get_node_by_name (vm, (u8 *) "error-drop");
221 vec_validate (nm->db, tm->n_vlib_mains - 1);
225 nm->fq_in2out_index = ~0;
226 nm->fq_out2in_index = ~0;
227 nm->error_node_index = error_drop_node->index;
229 /* set session timeouts to default values */
230 nm->udp_timeout = SNAT_UDP_TIMEOUT;
231 nm->icmp_timeout = SNAT_ICMP_TIMEOUT;
232 nm->tcp_trans_timeout = SNAT_TCP_TRANSITORY_TIMEOUT;
233 nm->tcp_est_timeout = SNAT_TCP_ESTABLISHED_TIMEOUT;
234 nm->tcp_incoming_syn_timeout = SNAT_TCP_INCOMING_SYN;
236 nm->total_enabled_count = 0;
238 /* Set up the interface address add/del callback */
239 cb4.function = nat64_ip4_add_del_interface_address_cb;
240 cb4.function_opaque = 0;
241 vec_add1 (im->add_del_interface_address_callbacks, cb4);
247 static void nat64_free_out_addr_and_port (struct nat64_db_s *db,
248 ip4_address_t * addr, u16 port,
252 nat64_set_hash (u32 bib_buckets, u32 bib_memory_size, u32 st_buckets,
255 nat64_main_t *nm = &nat64_main;
258 nm->bib_buckets = bib_buckets;
259 nm->bib_memory_size = bib_memory_size;
260 nm->st_buckets = st_buckets;
261 nm->st_memory_size = st_memory_size;
264 vec_foreach (db, nm->db)
266 if (nat64_db_init (db, bib_buckets, bib_memory_size, st_buckets,
267 st_memory_size, nat64_free_out_addr_and_port))
268 clib_warning ("NAT64 DB init failed");
274 nat64_add_del_pool_addr (ip4_address_t * addr, u32 vrf_id, u8 is_add)
276 nat64_main_t *nm = &nat64_main;
277 snat_address_t *a = 0;
278 snat_interface_t *interface;
281 vlib_thread_main_t *tm = vlib_get_thread_main ();
283 /* Check if address already exists */
284 for (i = 0; i < vec_len (nm->addr_pool); i++)
286 if (nm->addr_pool[i].addr.as_u32 == addr->as_u32)
288 a = nm->addr_pool + i;
296 return VNET_API_ERROR_VALUE_EXIST;
298 vec_add2 (nm->addr_pool, a, 1);
303 fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id,
304 FIB_SOURCE_PLUGIN_HI);
305 #define _(N, id, n, s) \
306 clib_bitmap_alloc (a->busy_##n##_port_bitmap, 65535); \
307 a->busy_##n##_ports = 0; \
308 vec_validate_init_empty (a->busy_##n##_ports_per_thread, tm->n_vlib_mains - 1, 0);
309 foreach_snat_protocol
315 return VNET_API_ERROR_NO_SUCH_ENTRY;
317 if (a->fib_index != ~0)
318 fib_table_unlock (a->fib_index, FIB_PROTOCOL_IP6,
319 FIB_SOURCE_PLUGIN_HI);
320 /* Delete sessions using address */
322 vec_foreach (db, nm->db)
323 nat64_db_free_out_addr (db, &a->addr);
324 #define _(N, id, n, s) \
325 clib_bitmap_free (a->busy_##n##_port_bitmap);
326 foreach_snat_protocol
329 vec_del1 (nm->addr_pool, i);
332 /* Add/del external address to FIB */
334 pool_foreach (interface, nm->interfaces,
336 if (nat_interface_is_inside(interface))
339 snat_add_del_addr_to_fib (addr, 32, interface->sw_if_index, is_add);
348 nat64_pool_addr_walk (nat64_pool_addr_walk_fn_t fn, void *ctx)
350 nat64_main_t *nm = &nat64_main;
351 snat_address_t *a = 0;
354 vec_foreach (a, nm->addr_pool)
363 nat64_add_interface_address (u32 sw_if_index, int is_add)
365 nat64_main_t *nm = &nat64_main;
366 ip4_main_t *ip4_main = nm->ip4_main;
367 ip4_address_t *first_int_addr;
370 first_int_addr = ip4_interface_first_address (ip4_main, sw_if_index, 0);
372 for (i = 0; i < vec_len (nm->auto_add_sw_if_indices); i++)
374 if (nm->auto_add_sw_if_indices[i] == sw_if_index)
377 return VNET_API_ERROR_VALUE_EXIST;
380 /* if have address remove it */
382 (void) nat64_add_del_pool_addr (first_int_addr, ~0, 0);
384 vec_del1 (nm->auto_add_sw_if_indices, i);
391 return VNET_API_ERROR_NO_SUCH_ENTRY;
393 /* add to the auto-address list */
394 vec_add1 (nm->auto_add_sw_if_indices, sw_if_index);
396 /* If the address is already bound - or static - add it now */
398 (void) nat64_add_del_pool_addr (first_int_addr, ~0, 1);
404 nat64_add_del_interface (u32 sw_if_index, u8 is_inside, u8 is_add)
406 nat64_main_t *nm = &nat64_main;
407 snat_interface_t *interface = 0, *i;
409 const char *feature_name, *arc_name;
411 /* Check if interface already exists */
413 pool_foreach (i, nm->interfaces,
415 if (i->sw_if_index == sw_if_index)
428 pool_get (nm->interfaces, interface);
429 interface->sw_if_index = sw_if_index;
430 interface->flags = 0;
433 interface->flags |= NAT_INTERFACE_FLAG_IS_INSIDE;
435 interface->flags |= NAT_INTERFACE_FLAG_IS_OUTSIDE;
437 nm->total_enabled_count++;
438 vlib_process_signal_event (nm->sm->vlib_main,
439 nm->nat64_expire_walk_node_index,
440 NAT64_CLEANER_RESCHEDULE, 0);
446 return VNET_API_ERROR_NO_SUCH_ENTRY;
448 if ((nat_interface_is_inside (interface)
449 && nat_interface_is_outside (interface)))
451 is_inside ? ~NAT_INTERFACE_FLAG_IS_INSIDE :
452 ~NAT_INTERFACE_FLAG_IS_OUTSIDE;
454 pool_put (nm->interfaces, interface);
456 nm->total_enabled_count--;
462 vec_foreach (ap, nm->addr_pool)
463 snat_add_del_addr_to_fib(&ap->addr, 32, sw_if_index, is_add);
467 if (nm->sm->num_workers > 1)
470 is_inside ? "nat64-in2out-handoff" : "nat64-out2in-handoff";
471 if (nm->fq_in2out_index == ~0)
472 nm->fq_in2out_index =
473 vlib_frame_queue_main_init (nat64_in2out_node.index, 0);
474 if (nm->fq_out2in_index == ~0)
475 nm->fq_out2in_index =
476 vlib_frame_queue_main_init (nat64_out2in_node.index, 0);
479 feature_name = is_inside ? "nat64-in2out" : "nat64-out2in";
481 arc_name = is_inside ? "ip6-unicast" : "ip4-unicast";
483 return vnet_feature_enable_disable (arc_name, feature_name, sw_if_index,
488 nat64_interfaces_walk (nat64_interface_walk_fn_t fn, void *ctx)
490 nat64_main_t *nm = &nat64_main;
491 snat_interface_t *i = 0;
494 pool_foreach (i, nm->interfaces,
503 nat64_alloc_out_addr_and_port (u32 fib_index, snat_protocol_t proto,
504 ip4_address_t * addr, u16 * port,
507 nat64_main_t *nm = &nat64_main;
508 snat_main_t *sm = nm->sm;
509 snat_session_key_t k;
511 u32 worker_index = 0;
516 if (sm->num_workers > 1)
517 worker_index = thread_index - sm->first_worker_index;
520 sm->alloc_addr_and_port (nm->addr_pool, fib_index, thread_index, &k, &ai,
521 sm->port_per_thread, worker_index);
526 addr->as_u32 = k.addr.as_u32;
533 nat64_free_out_addr_and_port (struct nat64_db_s *db, ip4_address_t * addr,
534 u16 port, u8 protocol)
536 nat64_main_t *nm = &nat64_main;
539 u32 thread_index = db - nm->db;
540 snat_protocol_t proto = ip_proto_to_snat_proto (protocol);
541 u16 port_host_byte_order = clib_net_to_host_u16 (port);
543 for (i = 0; i < vec_len (nm->addr_pool); i++)
545 a = nm->addr_pool + i;
546 if (addr->as_u32 != a->addr.as_u32)
550 #define _(N, j, n, s) \
551 case SNAT_PROTOCOL_##N: \
552 ASSERT (clib_bitmap_get_no_check (a->busy_##n##_port_bitmap, \
553 port_host_byte_order) == 1); \
554 clib_bitmap_set_no_check (a->busy_##n##_port_bitmap, port, 0); \
555 a->busy_##n##_ports--; \
556 a->busy_##n##_ports_per_thread[thread_index]--; \
558 foreach_snat_protocol
561 clib_warning ("unknown protocol");
569 * @brief Add/delete static BIB entry in worker thread.
572 nat64_static_bib_worker_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
575 nat64_main_t *nm = &nat64_main;
576 u32 thread_index = vlib_get_thread_index ();
577 nat64_db_t *db = &nm->db[thread_index];
578 nat64_static_bib_to_update_t *static_bib;
579 nat64_db_bib_entry_t *bibe;
583 pool_foreach (static_bib, nm->static_bibs,
585 if ((static_bib->thread_index != thread_index) || (static_bib->done))
588 if (static_bib->is_add)
589 (void) nat64_db_bib_entry_create (db, &static_bib->in_addr,
590 &static_bib->out_addr,
592 static_bib->out_port,
593 static_bib->fib_index,
594 static_bib->proto, 1);
597 addr.as_u64[0] = static_bib->in_addr.as_u64[0];
598 addr.as_u64[1] = static_bib->in_addr.as_u64[1];
599 bibe = nat64_db_bib_entry_find (db, &addr, static_bib->in_port,
601 static_bib->fib_index, 1);
603 nat64_db_bib_entry_free (db, bibe);
606 static_bib->done = 1;
613 static vlib_node_registration_t nat64_static_bib_worker_node;
616 VLIB_REGISTER_NODE (nat64_static_bib_worker_node, static) = {
617 .function = nat64_static_bib_worker_fn,
618 .type = VLIB_NODE_TYPE_INPUT,
619 .state = VLIB_NODE_STATE_INTERRUPT,
620 .name = "nat64-static-bib-worker",
625 nat64_add_del_static_bib_entry (ip6_address_t * in_addr,
626 ip4_address_t * out_addr, u16 in_port,
627 u16 out_port, u8 proto, u32 vrf_id, u8 is_add)
629 nat64_main_t *nm = &nat64_main;
630 nat64_db_bib_entry_t *bibe;
631 u32 fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id,
632 FIB_SOURCE_PLUGIN_HI);
633 snat_protocol_t p = ip_proto_to_snat_proto (proto);
637 u32 thread_index = 0;
639 nat64_static_bib_to_update_t *static_bib;
640 vlib_main_t *worker_vm;
641 u32 *to_be_free = 0, *index;
643 if (nm->sm->num_workers > 1)
645 thread_index = nat64_get_worker_in2out (in_addr);
646 db = &nm->db[thread_index];
649 db = &nm->db[nm->sm->num_workers];
651 addr.as_u64[0] = in_addr->as_u64[0];
652 addr.as_u64[1] = in_addr->as_u64[1];
654 nat64_db_bib_entry_find (db, &addr, clib_host_to_net_u16 (in_port),
655 proto, fib_index, 1);
660 return VNET_API_ERROR_VALUE_EXIST;
662 /* outside port must be assigned to same thread as internall address */
663 if ((out_port > 1024) && (nm->sm->num_workers > 1))
665 if (thread_index != ((out_port - 1024) / nm->sm->port_per_thread))
666 return VNET_API_ERROR_INVALID_VALUE_2;
669 for (i = 0; i < vec_len (nm->addr_pool); i++)
671 a = nm->addr_pool + i;
672 if (out_addr->as_u32 != a->addr.as_u32)
676 #define _(N, j, n, s) \
677 case SNAT_PROTOCOL_##N: \
678 if (clib_bitmap_get_no_check (a->busy_##n##_port_bitmap, \
680 return VNET_API_ERROR_INVALID_VALUE; \
681 clib_bitmap_set_no_check (a->busy_##n##_port_bitmap, \
683 if (out_port > 1024) \
685 a->busy_##n##_ports++; \
686 a->busy_##n##_ports_per_thread[thread_index]++; \
689 foreach_snat_protocol
692 memset (&addr, 0, sizeof (addr));
693 addr.ip4.as_u32 = out_addr->as_u32;
694 if (nat64_db_bib_entry_find (db, &addr, 0, proto, fib_index, 0))
695 return VNET_API_ERROR_INVALID_VALUE;
699 if (!nm->sm->num_workers)
702 nat64_db_bib_entry_create (db, in_addr, out_addr,
703 clib_host_to_net_u16 (in_port),
704 clib_host_to_net_u16 (out_port),
705 fib_index, proto, 1);
707 return VNET_API_ERROR_UNSPECIFIED;
713 return VNET_API_ERROR_NO_SUCH_ENTRY;
715 if (!nm->sm->num_workers)
716 nat64_db_bib_entry_free (db, bibe);
719 if (nm->sm->num_workers)
722 pool_foreach (static_bib, nm->static_bibs,
724 if (static_bib->done)
725 vec_add1 (to_be_free, static_bib - nm->static_bibs);
727 vec_foreach (index, to_be_free)
728 pool_put_index (nm->static_bibs, index[0]);
730 vec_free (to_be_free);
731 pool_get (nm->static_bibs, static_bib);
732 static_bib->in_addr.as_u64[0] = in_addr->as_u64[0];
733 static_bib->in_addr.as_u64[1] = in_addr->as_u64[1];
734 static_bib->in_port = clib_host_to_net_u16 (in_port);
735 static_bib->out_addr.as_u32 = out_addr->as_u32;
736 static_bib->out_port = clib_host_to_net_u16 (out_port);
737 static_bib->fib_index = fib_index;
738 static_bib->proto = proto;
739 static_bib->is_add = is_add;
740 static_bib->thread_index = thread_index;
741 static_bib->done = 0;
742 worker_vm = vlib_mains[thread_index];
744 vlib_node_set_interrupt_pending (worker_vm,
745 nat64_static_bib_worker_node.index);
747 return VNET_API_ERROR_UNSPECIFIED;
754 nat64_set_udp_timeout (u32 timeout)
756 nat64_main_t *nm = &nat64_main;
759 nm->udp_timeout = SNAT_UDP_TIMEOUT;
760 else if (timeout < SNAT_UDP_TIMEOUT_MIN)
761 return VNET_API_ERROR_INVALID_VALUE;
763 nm->udp_timeout = timeout;
769 nat64_get_udp_timeout (void)
771 nat64_main_t *nm = &nat64_main;
773 return nm->udp_timeout;
777 nat64_set_icmp_timeout (u32 timeout)
779 nat64_main_t *nm = &nat64_main;
782 nm->icmp_timeout = SNAT_ICMP_TIMEOUT;
784 nm->icmp_timeout = timeout;
790 nat64_get_icmp_timeout (void)
792 nat64_main_t *nm = &nat64_main;
794 return nm->icmp_timeout;
798 nat64_set_tcp_timeouts (u32 trans, u32 est, u32 incoming_syn)
800 nat64_main_t *nm = &nat64_main;
803 nm->tcp_trans_timeout = SNAT_TCP_TRANSITORY_TIMEOUT;
805 nm->tcp_trans_timeout = trans;
808 nm->tcp_est_timeout = SNAT_TCP_ESTABLISHED_TIMEOUT;
810 nm->tcp_est_timeout = est;
812 if (incoming_syn == 0)
813 nm->tcp_incoming_syn_timeout = SNAT_TCP_INCOMING_SYN;
815 nm->tcp_incoming_syn_timeout = incoming_syn;
821 nat64_get_tcp_trans_timeout (void)
823 nat64_main_t *nm = &nat64_main;
825 return nm->tcp_trans_timeout;
829 nat64_get_tcp_est_timeout (void)
831 nat64_main_t *nm = &nat64_main;
833 return nm->tcp_est_timeout;
837 nat64_get_tcp_incoming_syn_timeout (void)
839 nat64_main_t *nm = &nat64_main;
841 return nm->tcp_incoming_syn_timeout;
845 nat64_session_reset_timeout (nat64_db_st_entry_t * ste, vlib_main_t * vm)
847 nat64_main_t *nm = &nat64_main;
848 u32 now = (u32) vlib_time_now (vm);
850 switch (ip_proto_to_snat_proto (ste->proto))
852 case SNAT_PROTOCOL_ICMP:
853 ste->expire = now + nm->icmp_timeout;
855 case SNAT_PROTOCOL_TCP:
857 switch (ste->tcp_state)
859 case NAT64_TCP_STATE_V4_INIT:
860 case NAT64_TCP_STATE_V6_INIT:
861 case NAT64_TCP_STATE_V4_FIN_RCV:
862 case NAT64_TCP_STATE_V6_FIN_RCV:
863 case NAT64_TCP_STATE_V6_FIN_V4_FIN_RCV:
864 case NAT64_TCP_STATE_TRANS:
865 ste->expire = now + nm->tcp_trans_timeout;
867 case NAT64_TCP_STATE_ESTABLISHED:
868 ste->expire = now + nm->tcp_est_timeout;
874 case SNAT_PROTOCOL_UDP:
875 ste->expire = now + nm->udp_timeout;
878 ste->expire = now + nm->udp_timeout;
884 nat64_tcp_session_set_state (nat64_db_st_entry_t * ste, tcp_header_t * tcp,
887 switch (ste->tcp_state)
889 case NAT64_TCP_STATE_CLOSED:
891 if (tcp->flags & TCP_FLAG_SYN)
894 ste->tcp_state = NAT64_TCP_STATE_V6_INIT;
896 ste->tcp_state = NAT64_TCP_STATE_V4_INIT;
900 case NAT64_TCP_STATE_V4_INIT:
902 if (is_ip6 && (tcp->flags & TCP_FLAG_SYN))
903 ste->tcp_state = NAT64_TCP_STATE_ESTABLISHED;
906 case NAT64_TCP_STATE_V6_INIT:
908 if (!is_ip6 && (tcp->flags & TCP_FLAG_SYN))
909 ste->tcp_state = NAT64_TCP_STATE_ESTABLISHED;
912 case NAT64_TCP_STATE_ESTABLISHED:
914 if (tcp->flags & TCP_FLAG_FIN)
917 ste->tcp_state = NAT64_TCP_STATE_V6_FIN_RCV;
919 ste->tcp_state = NAT64_TCP_STATE_V4_FIN_RCV;
921 else if (tcp->flags & TCP_FLAG_RST)
923 ste->tcp_state = NAT64_TCP_STATE_TRANS;
927 case NAT64_TCP_STATE_V4_FIN_RCV:
929 if (is_ip6 && (tcp->flags & TCP_FLAG_FIN))
930 ste->tcp_state = NAT64_TCP_STATE_V6_FIN_V4_FIN_RCV;
933 case NAT64_TCP_STATE_V6_FIN_RCV:
935 if (!is_ip6 && (tcp->flags & TCP_FLAG_FIN))
936 ste->tcp_state = NAT64_TCP_STATE_V6_FIN_V4_FIN_RCV;
939 case NAT64_TCP_STATE_TRANS:
941 if (!(tcp->flags & TCP_FLAG_RST))
942 ste->tcp_state = NAT64_TCP_STATE_ESTABLISHED;
951 nat64_add_del_prefix (ip6_address_t * prefix, u8 plen, u32 vrf_id, u8 is_add)
953 nat64_main_t *nm = &nat64_main;
954 nat64_prefix_t *p = 0;
957 /* Verify prefix length */
958 if (plen != 32 && plen != 40 && plen != 48 && plen != 56 && plen != 64
960 return VNET_API_ERROR_INVALID_VALUE;
962 /* Check if tenant already have prefix */
963 for (i = 0; i < vec_len (nm->pref64); i++)
965 if (nm->pref64[i].vrf_id == vrf_id)
976 vec_add2 (nm->pref64, p, 1);
978 fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id,
979 FIB_SOURCE_PLUGIN_HI);
983 p->prefix.as_u64[0] = prefix->as_u64[0];
984 p->prefix.as_u64[1] = prefix->as_u64[1];
990 return VNET_API_ERROR_NO_SUCH_ENTRY;
992 vec_del1 (nm->pref64, i);
999 nat64_prefix_walk (nat64_prefix_walk_fn_t fn, void *ctx)
1001 nat64_main_t *nm = &nat64_main;
1002 nat64_prefix_t *p = 0;
1005 vec_foreach (p, nm->pref64)
1014 nat64_compose_ip6 (ip6_address_t * ip6, ip4_address_t * ip4, u32 fib_index)
1016 nat64_main_t *nm = &nat64_main;
1017 nat64_prefix_t *p, *gp = 0, *prefix = 0;
1020 vec_foreach (p, nm->pref64)
1022 if (p->fib_index == fib_index)
1028 if (p->fib_index == 0)
1038 clib_memcpy (ip6, &p->prefix, sizeof (ip6_address_t));
1042 ip6->as_u32[1] = ip4->as_u32;
1045 ip6->as_u8[5] = ip4->as_u8[0];
1046 ip6->as_u8[6] = ip4->as_u8[1];
1047 ip6->as_u8[7] = ip4->as_u8[2];
1048 ip6->as_u8[9] = ip4->as_u8[3];
1051 ip6->as_u8[6] = ip4->as_u8[0];
1052 ip6->as_u8[7] = ip4->as_u8[1];
1053 ip6->as_u8[9] = ip4->as_u8[2];
1054 ip6->as_u8[10] = ip4->as_u8[3];
1057 ip6->as_u8[7] = ip4->as_u8[0];
1058 ip6->as_u8[9] = ip4->as_u8[1];
1059 ip6->as_u8[10] = ip4->as_u8[2];
1060 ip6->as_u8[11] = ip4->as_u8[3];
1063 ip6->as_u8[9] = ip4->as_u8[0];
1064 ip6->as_u8[10] = ip4->as_u8[1];
1065 ip6->as_u8[11] = ip4->as_u8[2];
1066 ip6->as_u8[12] = ip4->as_u8[3];
1069 ip6->as_u32[3] = ip4->as_u32;
1072 clib_warning ("invalid prefix length");
1078 clib_memcpy (ip6, well_known_prefix, sizeof (ip6_address_t));
1079 ip6->as_u32[3] = ip4->as_u32;
1084 nat64_extract_ip4 (ip6_address_t * ip6, ip4_address_t * ip4, u32 fib_index)
1086 nat64_main_t *nm = &nat64_main;
1087 nat64_prefix_t *p, *gp = 0;
1091 vec_foreach (p, nm->pref64)
1093 if (p->fib_index == fib_index)
1115 ip4->as_u32 = ip6->as_u32[1];
1118 ip4->as_u8[0] = ip6->as_u8[5];
1119 ip4->as_u8[1] = ip6->as_u8[6];
1120 ip4->as_u8[2] = ip6->as_u8[7];
1121 ip4->as_u8[3] = ip6->as_u8[9];
1124 ip4->as_u8[0] = ip6->as_u8[6];
1125 ip4->as_u8[1] = ip6->as_u8[7];
1126 ip4->as_u8[2] = ip6->as_u8[9];
1127 ip4->as_u8[3] = ip6->as_u8[10];
1130 ip4->as_u8[0] = ip6->as_u8[7];
1131 ip4->as_u8[1] = ip6->as_u8[9];
1132 ip4->as_u8[2] = ip6->as_u8[10];
1133 ip4->as_u8[3] = ip6->as_u8[11];
1136 ip4->as_u8[0] = ip6->as_u8[9];
1137 ip4->as_u8[1] = ip6->as_u8[10];
1138 ip4->as_u8[2] = ip6->as_u8[11];
1139 ip4->as_u8[3] = ip6->as_u8[12];
1142 ip4->as_u32 = ip6->as_u32[3];
1145 clib_warning ("invalid prefix length");
1151 * @brief Per worker process checking expire time for NAT64 sessions.
1154 nat64_expire_worker_walk_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
1157 nat64_main_t *nm = &nat64_main;
1158 u32 thread_index = vlib_get_thread_index ();
1159 nat64_db_t *db = &nm->db[thread_index];
1160 u32 now = (u32) vlib_time_now (vm);
1162 nad64_db_st_free_expired (db, now);
1167 static vlib_node_registration_t nat64_expire_worker_walk_node;
1170 VLIB_REGISTER_NODE (nat64_expire_worker_walk_node, static) = {
1171 .function = nat64_expire_worker_walk_fn,
1172 .type = VLIB_NODE_TYPE_INPUT,
1173 .state = VLIB_NODE_STATE_INTERRUPT,
1174 .name = "nat64-expire-worker-walk",
1178 static vlib_node_registration_t nat64_expire_walk_node;
1181 * @brief Centralized process to drive per worker expire walk.
1184 nat64_expire_walk_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
1187 nat64_main_t *nm = &nat64_main;
1188 vlib_main_t **worker_vms = 0, *worker_vm;
1190 uword event_type, *event_data = 0;
1192 nm->nat64_expire_walk_node_index = nat64_expire_walk_node.index;
1194 if (vec_len (vlib_mains) == 0)
1195 vec_add1 (worker_vms, vm);
1198 for (i = 0; i < vec_len (vlib_mains); i++)
1200 worker_vm = vlib_mains[i];
1202 vec_add1 (worker_vms, worker_vm);
1208 if (nm->total_enabled_count)
1210 vlib_process_wait_for_event_or_clock (vm, 10.0);
1211 event_type = vlib_process_get_events (vm, &event_data);
1215 vlib_process_wait_for_event (vm);
1216 event_type = vlib_process_get_events (vm, &event_data);
1223 case NAT64_CLEANER_RESCHEDULE:
1226 clib_warning ("unknown event %u", event_type);
1230 for (i = 0; i < vec_len (worker_vms); i++)
1232 worker_vm = worker_vms[i];
1233 vlib_node_set_interrupt_pending (worker_vm,
1234 nat64_expire_worker_walk_node.index);
1242 VLIB_REGISTER_NODE (nat64_expire_walk_node, static) = {
1243 .function = nat64_expire_walk_fn,
1244 .type = VLIB_NODE_TYPE_PROCESS,
1245 .name = "nat64-expire-walk",
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */