2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * @brief NAT64 implementation
20 #include <nat/nat64.h>
21 #include <nat/nat64_db.h>
22 #include <nat/nat_inlines.h>
23 #include <vnet/fib/ip4_fib.h>
24 #include <vppinfra/crc32.h>
25 #include <vnet/ip/reass/ip4_sv_reass.h>
26 #include <vnet/ip/reass/ip6_sv_reass.h>
/* Global singleton holding all NAT64 plugin state (pools, DBs, timeouts). */
29 nat64_main_t nat64_main;
33 /* Hook up input features */
/* IPv6->IPv4 translation feature: runs after shallow-virtual reassembly
 * (so L4 ports are available in vnet_buffer metadata) and before lookup. */
34 VNET_FEATURE_INIT (nat64_in2out, static) = {
35 .arc_name = "ip6-unicast",
36 .node_name = "nat64-in2out",
37 .runs_before = VNET_FEATURES ("ip6-lookup"),
38 .runs_after = VNET_FEATURES ("ip6-sv-reassembly-feature"),
/* IPv4->IPv6 translation feature, mirror of the in2out arc placement. */
40 VNET_FEATURE_INIT (nat64_out2in, static) = {
41 .arc_name = "ip4-unicast",
42 .node_name = "nat64-out2in",
43 .runs_before = VNET_FEATURES ("ip4-lookup"),
44 .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
/* Multi-worker variant: hands packets off to the owning worker thread. */
46 VNET_FEATURE_INIT (nat64_in2out_handoff, static) = {
47 .arc_name = "ip6-unicast",
48 .node_name = "nat64-in2out-handoff",
49 .runs_before = VNET_FEATURES ("ip6-lookup"),
50 .runs_after = VNET_FEATURES ("ip6-sv-reassembly-feature"),
/* Multi-worker variant of the out2in feature. */
52 VNET_FEATURE_INIT (nat64_out2in_handoff, static) = {
53 .arc_name = "ip4-unicast",
54 .node_name = "nat64-out2in-handoff",
55 .runs_before = VNET_FEATURES ("ip4-lookup"),
56 .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
/* RFC 6052 well-known NAT64 prefix 64:ff9b::/96, used as the fallback
 * when no per-VRF prefix has been configured. */
60 static u8 well_known_prefix[] = {
61 0x00, 0x64, 0xff, 0x9b,
62 0x00, 0x00, 0x00, 0x00,
63 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00
/**
 * @brief Callback invoked whenever an IPv4 address is added to or removed
 * from an interface. If the interface is registered for automatic pool
 * management (auto_add_sw_if_indices), the address is mirrored into or out
 * of the NAT64 outside address pool.
 */
70 nat64_ip4_add_del_interface_address_cb (ip4_main_t * im, uword opaque,
72 ip4_address_t * address,
74 u32 if_address_index, u32 is_delete)
76 nat64_main_t *nm = &nat64_main;
/* Only act on interfaces enrolled for automatic address management. */
79 for (i = 0; i < vec_len (nm->auto_add_sw_if_indices); i++)
81 if (sw_if_index == nm->auto_add_sw_if_indices[i])
85 /* Don't trip over lease renewal, static config */
86 for (j = 0; j < vec_len (nm->addr_pool); j++)
87 if (nm->addr_pool[j].addr.as_u32 == address->as_u32)
/* Add the new interface address to the NAT64 pool ... */
90 (void) nat64_add_del_pool_addr (vlib_get_thread_index (),
/* ... or remove it on delete (return value intentionally ignored). */
96 (void) nat64_add_del_pool_addr (vlib_get_thread_index (),
/**
 * @brief Pick the worker thread that owns an inside (IPv6) address.
 * Hashes the full 128-bit address and maps the hash onto the workers
 * vector, offset by the first worker index.
 */
105 nat64_get_worker_in2out (ip6_address_t * addr)
107 nat64_main_t *nm = &nat64_main;
108 snat_main_t *sm = nm->sm;
109 u32 next_worker_index = nm->sm->first_worker_index;
/* Prefer hardware CRC32C over the xxhash fallback when available. */
112 #ifdef clib_crc32c_uses_intrinsics
113 hash = clib_crc32c ((u8 *) addr->as_u32, 16);
/* Fallback: fold the two 64-bit halves together, then xxhash. */
115 u64 tmp = addr->as_u64[0] ^ addr->as_u64[1];
116 hash = clib_xxhash (tmp);
/* Power-of-two worker count lets us mask instead of the slower modulo. */
119 if (PREDICT_TRUE (is_pow2 (_vec_len (sm->workers))))
120 next_worker_index += sm->workers[hash & (_vec_len (sm->workers) - 1)];
122 next_worker_index += sm->workers[hash % _vec_len (sm->workers)];
124 return next_worker_index;
/**
 * @brief Pick the worker thread that owns an outside (IPv4) packet.
 * TCP/UDP traffic is distributed by destination port range; unknown
 * protocols are resolved by searching the per-worker BIB databases;
 * ICMP uses the echo identifier (or the embedded inner packet's port
 * for error messages).
 */
128 nat64_get_worker_out2in (vlib_buffer_t * b, ip4_header_t * ip)
130 nat64_main_t *nm = &nat64_main;
131 snat_main_t *sm = nm->sm;
136 proto = ip_proto_to_nat_proto (ip->protocol);
137 udp = ip4_next_header (ip);
138 port = udp->dst_port;
140 /* unknown protocol */
141 if (PREDICT_FALSE (proto == NAT_PROTOCOL_OTHER))
144 ip46_address_t daddr;
145 nat64_db_bib_entry_t *bibe;
147 clib_memset (&daddr, 0, sizeof (daddr));
148 daddr.ip4.as_u32 = ip->dst_address.as_u32;
/* Scan every per-worker DB; the DB index doubles as the thread index. */
151 vec_foreach (db, nm->db)
153 bibe = nat64_db_bib_entry_find (db, &daddr, 0, ip->protocol, 0, 0);
155 return (u32) (db - nm->db);
/* No BIB entry anywhere: keep the packet on the current thread. */
158 return vlib_get_thread_index ();
162 if (PREDICT_FALSE (ip->protocol == IP_PROTOCOL_ICMP))
164 icmp46_header_t *icmp = (icmp46_header_t *) udp;
165 icmp_echo_header_t *echo = (icmp_echo_header_t *) (icmp + 1);
/* Echo request/reply: use the identifier stashed by sv-reassembly. */
166 if (!icmp_type_is_error_message
167 (vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))
168 port = vnet_buffer (b)->ip.reass.l4_src_port;
171 /* if error message, then it's not fragmented and we can access it */
172 ip4_header_t *inner_ip = (ip4_header_t *) (echo + 1);
173 proto = ip_proto_to_nat_proto (inner_ip->protocol);
174 void *l4_header = ip4_next_header (inner_ip);
177 case NAT_PROTOCOL_ICMP:
178 icmp = (icmp46_header_t *) l4_header;
179 echo = (icmp_echo_header_t *) (icmp + 1);
180 port = echo->identifier;
182 case NAT_PROTOCOL_UDP:
183 case NAT_PROTOCOL_TCP:
/* Inner source port == outer flow's outside port. */
184 port = ((tcp_udp_header_t *) l4_header)->src_port;
187 return vlib_get_thread_index ();
192 /* worker by outside port (TCP/UDP) */
193 port = clib_net_to_host_u16 (port);
/* Ports >= 1024 are partitioned evenly across workers. */
195 return nm->sm->first_worker_index + ((port - 1024) / sm->port_per_thread);
197 return vlib_get_thread_index ();
/**
 * @brief Plugin init: allocates the per-thread DB vector, caches node
 * indices, sets default session timeouts, registers the interface-address
 * callback and sets up statistics counters.
 */
201 nat64_init (vlib_main_t * vm)
203 nat64_main_t *nm = &nat64_main;
204 vlib_thread_main_t *tm = vlib_get_thread_main ();
205 ip4_add_del_interface_address_callback_t cb4;
206 ip4_main_t *im = &ip4_main;
/* One NAT64 DB per vlib main (main thread + workers). */
210 vec_validate (nm->db, tm->n_vlib_mains - 1);
/* Frame queues are created lazily on first multi-worker interface add. */
212 nm->fq_in2out_index = ~0;
213 nm->fq_out2in_index = ~0;
/* Cache node indices used on the data path. */
215 node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
216 nm->error_node_index = node->index;
218 node = vlib_get_node_by_name (vm, (u8 *) "nat64-in2out");
219 nm->in2out_node_index = node->index;
221 node = vlib_get_node_by_name (vm, (u8 *) "nat64-in2out-slowpath");
222 nm->in2out_slowpath_node_index = node->index;
224 node = vlib_get_node_by_name (vm, (u8 *) "nat64-out2in");
225 nm->out2in_node_index = node->index;
227 /* set session timeouts to default values */
228 nm->udp_timeout = SNAT_UDP_TIMEOUT;
229 nm->icmp_timeout = SNAT_ICMP_TIMEOUT;
230 nm->tcp_trans_timeout = SNAT_TCP_TRANSITORY_TIMEOUT;
231 nm->tcp_est_timeout = SNAT_TCP_ESTABLISHED_TIMEOUT;
233 nm->total_enabled_count = 0;
235 /* Set up the interface address add/del callback */
236 cb4.function = nat64_ip4_add_del_interface_address_cb;
237 cb4.function_opaque = 0;
238 vec_add1 (im->add_del_interface_address_callbacks, cb4);
/* Stats-segment counters for total BIB and session entries. */
242 nm->total_bibs.name = "total-bibs";
243 nm->total_bibs.stat_segment_name = "/nat64/total-bibs";
244 vlib_validate_simple_counter (&nm->total_bibs, 0);
245 vlib_zero_simple_counter (&nm->total_bibs, 0);
246 nm->total_sessions.name = "total-sessions";
247 nm->total_sessions.stat_segment_name = "/nat64/total-sessions";
248 vlib_validate_simple_counter (&nm->total_sessions, 0);
249 vlib_zero_simple_counter (&nm->total_sessions, 0);
/* Per-direction error counters, one set per counter name in the list. */
252 nm->counters.in2out.x.name = #x; \
253 nm->counters.in2out.x.stat_segment_name = "/nat64/in2out/" #x; \
254 nm->counters.out2in.x.name = #x; \
255 nm->counters.out2in.x.stat_segment_name = "/nat64/out2in/" #x;
/* Forward declaration: passed to nat64_db_init as the address/port
 * release callback (definition appears later in this file). */
261 static void nat64_free_out_addr_and_port (struct nat64_db_s *db,
262 ip4_address_t * addr, u16 port,
/**
 * @brief Configure the BIB and session-table hash sizes, then (re)init
 * every per-worker DB with those parameters.
 */
266 nat64_set_hash (u32 bib_buckets, uword bib_memory_size, u32 st_buckets,
267 uword st_memory_size)
269 nat64_main_t *nm = &nat64_main;
/* Remember the settings so later DB creations use the same sizing. */
272 nm->bib_buckets = bib_buckets;
273 nm->bib_memory_size = bib_memory_size;
274 nm->st_buckets = st_buckets;
275 nm->st_memory_size = st_memory_size;
278 vec_foreach (db, nm->db)
280 if (nat64_db_init (db, bib_buckets, bib_memory_size, st_buckets,
281 st_memory_size, nat64_free_out_addr_and_port))
282 nat_elog_err ("NAT64 DB init failed");
/**
 * @brief Add or delete an outside IPv4 address in the NAT64 pool.
 *
 * On add: rejects duplicates, locks the IPv6 FIB for the VRF and resets
 * the per-protocol port bookkeeping. On delete: unlocks the FIB, frees
 * all sessions using the address and updates the counters. In both
 * cases the address route is pushed to / pulled from the FIB of every
 * NAT64 outside interface.
 *
 * @return 0 on success, VNET_API_ERROR_* otherwise.
 */
288 nat64_add_del_pool_addr (u32 thread_index,
289 ip4_address_t * addr, u32 vrf_id, u8 is_add)
291 nat64_main_t *nm = &nat64_main;
292 snat_address_t *a = 0;
293 snat_interface_t *interface;
296 vlib_thread_main_t *tm = vlib_get_thread_main ();
298 /* Check if address already exists */
299 for (i = 0; i < vec_len (nm->addr_pool); i++)
301 if (nm->addr_pool[i].addr.as_u32 == addr->as_u32)
303 a = nm->addr_pool + i;
/* Adding an address that is already present is an error. */
311 return VNET_API_ERROR_VALUE_EXIST;
313 vec_add2 (nm->addr_pool, a, 1);
/* Pin the VRF's IPv6 FIB while the address is in the pool. */
318 fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id,
/* Reset per-protocol busy-port tracking for the new address. */
320 #define _(N, id, n, s) \
321 clib_memset (a->busy_##n##_port_refcounts, 0, sizeof(a->busy_##n##_port_refcounts)); \
322 a->busy_##n##_ports = 0; \
323 vec_validate_init_empty (a->busy_##n##_ports_per_thread, tm->n_vlib_mains - 1, 0);
/* Deleting an address that was never added. */
330 return VNET_API_ERROR_NO_SUCH_ENTRY;
332 if (a->fib_index != ~0)
333 fib_table_unlock (a->fib_index, FIB_PROTOCOL_IP6, nat_fib_src_hi);
334 /* Delete sessions using address */
336 vec_foreach (db, nm->db)
338 nat64_db_free_out_addr (thread_index, db, &a->addr);
339 vlib_set_simple_counter (&nm->total_bibs, db - nm->db, 0,
340 db->bib.bib_entries_num);
341 vlib_set_simple_counter (&nm->total_sessions, db - nm->db, 0,
342 db->st.st_entries_num);
345 vec_del1 (nm->addr_pool, i);
348 /* Add/del external address to FIB */
350 pool_foreach (interface, nm->interfaces,
352 if (nat_interface_is_inside(interface))
/* Only outside interfaces receive the /32 route. */
355 snat_add_del_addr_to_fib (addr, 32, interface->sw_if_index, is_add);
/**
 * @brief Walk the outside address pool, invoking @a fn with @a ctx for
 * each entry.
 */
364 nat64_pool_addr_walk (nat64_pool_addr_walk_fn_t fn, void *ctx)
366 nat64_main_t *nm = &nat64_main;
367 snat_address_t *a = 0;
370 vec_foreach (a, nm->addr_pool)
/**
 * @brief Enroll or remove an interface for automatic outside-address
 * management. When enrolled, the interface's first IPv4 address (if any)
 * is immediately added to the pool; address changes are then tracked by
 * nat64_ip4_add_del_interface_address_cb.
 *
 * @return 0 on success, VNET_API_ERROR_* on duplicate add / missing del.
 */
379 nat64_add_interface_address (u32 sw_if_index, int is_add)
381 nat64_main_t *nm = &nat64_main;
382 ip4_main_t *ip4_main = nm->ip4_main;
383 ip4_address_t *first_int_addr;
386 first_int_addr = ip4_interface_first_address (ip4_main, sw_if_index, 0);
388 for (i = 0; i < vec_len (nm->auto_add_sw_if_indices); i++)
390 if (nm->auto_add_sw_if_indices[i] == sw_if_index)
/* Already enrolled: adding again is an error. */
393 return VNET_API_ERROR_VALUE_EXIST;
396 /* if have address remove it */
398 (void) nat64_add_del_pool_addr (vlib_get_thread_index (),
399 first_int_addr, ~0, 0);
400 vec_del1 (nm->auto_add_sw_if_indices, i);
/* Delete requested but interface was never enrolled. */
407 return VNET_API_ERROR_NO_SUCH_ENTRY;
409 /* add to the auto-address list */
410 vec_add1 (nm->auto_add_sw_if_indices, sw_if_index);
412 /* If the address is already bound - or static - add it now */
414 (void) nat64_add_del_pool_addr (vlib_get_thread_index (),
415 first_int_addr, ~0, 1);
/* Size and zero the per-interface in2out/out2in counters so that
 * sw_if_index is a valid index into each counter vector. */
421 nat64_validate_counters (nat64_main_t * nm, u32 sw_if_index)
424 vlib_validate_simple_counter (&nm->counters.in2out.x, sw_if_index); \
425 vlib_zero_simple_counter (&nm->counters.in2out.x, sw_if_index); \
426 vlib_validate_simple_counter (&nm->counters.out2in.x, sw_if_index); \
427 vlib_zero_simple_counter (&nm->counters.out2in.x, sw_if_index);
/**
 * @brief Enable or disable NAT64 on an interface.
 *
 * Tracks inside/outside flags per interface, maintains the enabled-count
 * used by the expiry process, pushes pool addresses into the FIB of
 * outside interfaces, enables shallow-virtual reassembly, and finally
 * toggles the appropriate feature (handoff variant when multi-worker).
 *
 * @return 0 or a VNET_API_ERROR_* code.
 */
433 nat64_add_del_interface (u32 sw_if_index, u8 is_inside, u8 is_add)
435 vlib_main_t *vm = vlib_get_main ();
436 nat64_main_t *nm = &nat64_main;
437 snat_interface_t *interface = 0, *i;
439 const char *feature_name, *arc_name;
441 /* Check if interface already exists */
443 pool_foreach (i, nm->interfaces,
445 if (i->sw_if_index == sw_if_index)
/* First time this interface is configured: allocate and init. */
458 pool_get (nm->interfaces, interface);
459 interface->sw_if_index = sw_if_index;
460 interface->flags = 0;
461 nat64_validate_counters (nm, sw_if_index);
464 interface->flags |= NAT_INTERFACE_FLAG_IS_INSIDE;
466 interface->flags |= NAT_INTERFACE_FLAG_IS_OUTSIDE;
/* Wake the expiry process now that at least one interface is enabled. */
468 nm->total_enabled_count++;
469 vlib_process_signal_event (vm,
470 nm->nat64_expire_walk_node_index,
471 NAT64_CLEANER_RESCHEDULE, 0);
/* Delete requested but interface was never configured. */
477 return VNET_API_ERROR_NO_SUCH_ENTRY;
/* If interface was both inside and outside, only clear one role ... */
479 if ((nat_interface_is_inside (interface)
480 && nat_interface_is_outside (interface)))
482 is_inside ? ~NAT_INTERFACE_FLAG_IS_INSIDE :
483 ~NAT_INTERFACE_FLAG_IS_OUTSIDE;
/* ... otherwise remove the interface entirely. */
485 pool_put (nm->interfaces, interface);
487 nm->total_enabled_count--;
/* Sync the pool's /32 routes with this (outside) interface's FIB. */
493 vec_foreach (ap, nm->addr_pool)
494 snat_add_del_addr_to_fib(&ap->addr, 32, sw_if_index, is_add);
498 if (nm->sm->num_workers > 1)
501 is_inside ? "nat64-in2out-handoff" : "nat64-out2in-handoff";
/* Lazily create the worker hand-off frame queues. */
502 if (nm->fq_in2out_index == ~0)
503 nm->fq_in2out_index =
504 vlib_frame_queue_main_init (nat64_in2out_node.index, 0);
505 if (nm->fq_out2in_index == ~0)
506 nm->fq_out2in_index =
507 vlib_frame_queue_main_init (nat64_out2in_node.index, 0);
510 feature_name = is_inside ? "nat64-in2out" : "nat64-out2in";
512 arc_name = is_inside ? "ip6-unicast" : "ip4-unicast";
/* NAT64 requires shallow-virtual reassembly on the relevant arc. */
516 int rv = ip6_sv_reass_enable_disable_with_refcnt (sw_if_index, is_add);
522 int rv = ip4_sv_reass_enable_disable_with_refcnt (sw_if_index, is_add);
527 return vnet_feature_enable_disable (arc_name, feature_name, sw_if_index,
/**
 * @brief Walk all NAT64-enabled interfaces, invoking @a fn with @a ctx
 * for each.
 */
532 nat64_interfaces_walk (nat64_interface_walk_fn_t fn, void *ctx)
534 nat64_main_t *nm = &nat64_main;
535 snat_interface_t *i = 0;
538 pool_foreach (i, nm->interfaces,
/**
 * @brief Allocate an outside address and port for a new dynamic mapping,
 * delegating to the configured snat_main allocation strategy.
 */
547 nat64_alloc_out_addr_and_port (u32 fib_index, nat_protocol_t proto,
548 ip4_address_t * addr, u16 * port,
551 nat64_main_t *nm = &nat64_main;
552 snat_main_t *sm = nm->sm;
553 u32 worker_index = 0;
/* Convert the absolute thread index to a zero-based worker index. */
556 if (sm->num_workers > 1)
557 worker_index = thread_index - sm->first_worker_index;
560 sm->alloc_addr_and_port (nm->addr_pool, fib_index, thread_index,
561 proto, addr, port, sm->port_per_thread,
/**
 * @brief DB callback: release an outside address/port back into the pool,
 * decrementing the per-protocol and per-thread busy-port bookkeeping.
 * The owning thread index is recovered from the DB's position in nm->db.
 */
568 nat64_free_out_addr_and_port (struct nat64_db_s *db, ip4_address_t * addr,
569 u16 port, u8 protocol)
571 nat64_main_t *nm = &nat64_main;
/* DBs are laid out one per thread, so pointer arithmetic yields it. */
574 u32 thread_index = db - nm->db;
575 nat_protocol_t proto = ip_proto_to_nat_proto (protocol);
576 u16 port_host_byte_order = clib_net_to_host_u16 (port);
/* Locate the pool entry for this outside address. */
578 for (i = 0; i < vec_len (nm->addr_pool); i++)
580 a = nm->addr_pool + i;
581 if (addr->as_u32 != a->addr.as_u32)
/* Per-protocol port release, expanded for each NAT protocol. */
585 #define _(N, j, n, s) \
586 case NAT_PROTOCOL_##N: \
587 ASSERT (a->busy_##n##_port_refcounts[port_host_byte_order] >= 1); \
588 --a->busy_##n##_port_refcounts[port_host_byte_order]; \
589 a->busy_##n##_ports--; \
590 a->busy_##n##_ports_per_thread[thread_index]--; \
595 nat_elog_notice ("unknown protocol");
603 * @brief Add/delete static BIB entry in worker thread.
/* Interrupt-driven input node body: processes pending static BIB
 * add/delete requests that were queued for this worker by
 * nat64_add_del_static_bib_entry, then marks them done. */
606 nat64_static_bib_worker_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
609 nat64_main_t *nm = &nat64_main;
610 u32 thread_index = vm->thread_index;
611 nat64_db_t *db = &nm->db[thread_index];
612 nat64_static_bib_to_update_t *static_bib;
613 nat64_db_bib_entry_t *bibe;
617 pool_foreach (static_bib, nm->static_bibs,
/* Skip entries owned by other workers or already processed. */
619 if ((static_bib->thread_index != thread_index) || (static_bib->done))
622 if (static_bib->is_add)
624 (void) nat64_db_bib_entry_create (thread_index, db,
625 &static_bib->in_addr,
626 &static_bib->out_addr,
628 static_bib->out_port,
629 static_bib->fib_index,
630 static_bib->proto, 1);
631 vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
632 db->bib.bib_entries_num);
/* Delete path: look the entry up in this worker's DB and free it. */
636 addr.as_u64[0] = static_bib->in_addr.as_u64[0];
637 addr.as_u64[1] = static_bib->in_addr.as_u64[1];
638 bibe = nat64_db_bib_entry_find (db, &addr, static_bib->in_port,
640 static_bib->fib_index, 1);
643 nat64_db_bib_entry_free (thread_index, db, bibe);
644 vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
645 db->bib.bib_entries_num);
646 vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
647 db->st.st_entries_num);
/* Mark processed; the main thread reclaims done entries later. */
651 static_bib->done = 1;
658 static vlib_node_registration_t nat64_static_bib_worker_node;
/* Interrupt-state input node: runs only when the main thread raises an
 * interrupt after queuing a static BIB update for this worker. */
661 VLIB_REGISTER_NODE (nat64_static_bib_worker_node, static) = {
662 .function = nat64_static_bib_worker_fn,
663 .type = VLIB_NODE_TYPE_INPUT,
664 .state = VLIB_NODE_STATE_INTERRUPT,
665 .name = "nat64-static-bib-worker",
/**
 * @brief Add or delete a static BIB (binding) entry.
 *
 * Validates the request against the existing DB state and the outside
 * address pool, then either applies it directly (single-worker) or
 * queues it for the owning worker thread and raises an interrupt on
 * nat64-static-bib-worker (multi-worker).
 *
 * @return 0 or a VNET_API_ERROR_* code.
 */
670 nat64_add_del_static_bib_entry (ip6_address_t * in_addr,
671 ip4_address_t * out_addr, u16 in_port,
672 u16 out_port, u8 proto, u32 vrf_id, u8 is_add)
674 nat64_main_t *nm = &nat64_main;
675 nat64_db_bib_entry_t *bibe;
676 u32 fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id,
678 nat_protocol_t p = ip_proto_to_nat_proto (proto);
682 u32 thread_index = 0;
684 nat64_static_bib_to_update_t *static_bib;
685 vlib_main_t *worker_vm;
686 u32 *to_be_free = 0, *index;
/* Determine which worker (and thus which DB) owns the inside address. */
688 if (nm->sm->num_workers > 1)
690 thread_index = nat64_get_worker_in2out (in_addr);
691 db = &nm->db[thread_index];
694 db = &nm->db[nm->sm->num_workers];
696 addr.as_u64[0] = in_addr->as_u64[0];
697 addr.as_u64[1] = in_addr->as_u64[1];
699 nat64_db_bib_entry_find (db, &addr, clib_host_to_net_u16 (in_port),
700 proto, fib_index, 1);
/* Adding a mapping that already exists. */
705 return VNET_API_ERROR_VALUE_EXIST;
707 /* outside port must be assigned to same thread as internal address */
708 if ((out_port > 1024) && (nm->sm->num_workers > 1))
710 if (thread_index != ((out_port - 1024) / nm->sm->port_per_thread))
711 return VNET_API_ERROR_INVALID_VALUE_2;
/* Reserve the outside port on the matching pool address. */
714 for (i = 0; i < vec_len (nm->addr_pool); i++)
716 a = nm->addr_pool + i;
717 if (out_addr->as_u32 != a->addr.as_u32)
721 #define _(N, j, n, s) \
722 case NAT_PROTOCOL_##N: \
723 if (a->busy_##n##_port_refcounts[out_port]) \
724 return VNET_API_ERROR_INVALID_VALUE; \
725 ++a->busy_##n##_port_refcounts[out_port]; \
726 if (out_port > 1024) \
728 a->busy_##n##_ports++; \
729 a->busy_##n##_ports_per_thread[thread_index]++; \
/* Outside address must not already be used by another binding. */
735 clib_memset (&addr, 0, sizeof (addr));
736 addr.ip4.as_u32 = out_addr->as_u32;
737 if (nat64_db_bib_entry_find (db, &addr, 0, proto, fib_index, 0))
738 return VNET_API_ERROR_INVALID_VALUE;
/* Single-worker: create the entry synchronously. */
742 if (!nm->sm->num_workers)
745 nat64_db_bib_entry_create (thread_index, db, in_addr, out_addr,
746 clib_host_to_net_u16 (in_port),
747 clib_host_to_net_u16 (out_port),
748 fib_index, proto, 1);
750 return VNET_API_ERROR_UNSPECIFIED;
752 vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
753 db->bib.bib_entries_num);
/* Deleting a mapping that does not exist. */
759 return VNET_API_ERROR_NO_SUCH_ENTRY;
761 if (!nm->sm->num_workers)
763 nat64_db_bib_entry_free (thread_index, db, bibe);
764 vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
765 db->bib.bib_entries_num);
/* Multi-worker: queue the update and interrupt the owning worker. */
769 if (nm->sm->num_workers)
/* First garbage-collect updates the workers have finished. */
772 pool_foreach (static_bib, nm->static_bibs,
774 if (static_bib->done)
775 vec_add1 (to_be_free, static_bib - nm->static_bibs);
777 vec_foreach (index, to_be_free)
778 pool_put_index (nm->static_bibs, index[0]);
780 vec_free (to_be_free);
781 pool_get (nm->static_bibs, static_bib);
782 static_bib->in_addr.as_u64[0] = in_addr->as_u64[0];
783 static_bib->in_addr.as_u64[1] = in_addr->as_u64[1];
784 static_bib->in_port = clib_host_to_net_u16 (in_port);
785 static_bib->out_addr.as_u32 = out_addr->as_u32;
786 static_bib->out_port = clib_host_to_net_u16 (out_port);
787 static_bib->fib_index = fib_index;
788 static_bib->proto = proto;
789 static_bib->is_add = is_add;
790 static_bib->thread_index = thread_index;
791 static_bib->done = 0;
792 worker_vm = vlib_mains[thread_index];
794 vlib_node_set_interrupt_pending (worker_vm,
795 nat64_static_bib_worker_node.index);
797 return VNET_API_ERROR_UNSPECIFIED;
/**
 * @brief Set the UDP session timeout; 0 restores the default
 * (SNAT_UDP_TIMEOUT).
 */
804 nat64_set_udp_timeout (u32 timeout)
806 nat64_main_t *nm = &nat64_main;
809 nm->udp_timeout = SNAT_UDP_TIMEOUT;
811 nm->udp_timeout = timeout;
/** @brief Get the configured UDP session timeout (seconds). */
817 nat64_get_udp_timeout (void)
819 nat64_main_t *nm = &nat64_main;
821 return nm->udp_timeout;
/**
 * @brief Set the ICMP session timeout; 0 restores the default
 * (SNAT_ICMP_TIMEOUT).
 */
825 nat64_set_icmp_timeout (u32 timeout)
827 nat64_main_t *nm = &nat64_main;
830 nm->icmp_timeout = SNAT_ICMP_TIMEOUT;
832 nm->icmp_timeout = timeout;
/** @brief Get the configured ICMP session timeout (seconds). */
838 nat64_get_icmp_timeout (void)
840 nat64_main_t *nm = &nat64_main;
842 return nm->icmp_timeout;
/**
 * @brief Set the TCP transitory and established timeouts; 0 for either
 * restores its default (SNAT_TCP_TRANSITORY_TIMEOUT /
 * SNAT_TCP_ESTABLISHED_TIMEOUT).
 */
846 nat64_set_tcp_timeouts (u32 trans, u32 est)
848 nat64_main_t *nm = &nat64_main;
851 nm->tcp_trans_timeout = SNAT_TCP_TRANSITORY_TIMEOUT;
853 nm->tcp_trans_timeout = trans;
856 nm->tcp_est_timeout = SNAT_TCP_ESTABLISHED_TIMEOUT;
858 nm->tcp_est_timeout = est;
/** @brief Get the TCP transitory-state timeout (seconds). */
864 nat64_get_tcp_trans_timeout (void)
866 nat64_main_t *nm = &nat64_main;
868 return nm->tcp_trans_timeout;
/** @brief Get the TCP established-state timeout (seconds). */
872 nat64_get_tcp_est_timeout (void)
874 nat64_main_t *nm = &nat64_main;
876 return nm->tcp_est_timeout;
/**
 * @brief Refresh a session's expiration time based on its protocol and
 * (for TCP) its current connection state.
 */
880 nat64_session_reset_timeout (nat64_db_st_entry_t * ste, vlib_main_t * vm)
882 nat64_main_t *nm = &nat64_main;
883 u32 now = (u32) vlib_time_now (vm);
885 switch (ip_proto_to_nat_proto (ste->proto))
887 case NAT_PROTOCOL_ICMP:
888 ste->expire = now + nm->icmp_timeout;
890 case NAT_PROTOCOL_TCP:
892 switch (ste->tcp_state)
/* Any non-established TCP state uses the shorter transitory timeout. */
894 case NAT64_TCP_STATE_V4_INIT:
895 case NAT64_TCP_STATE_V6_INIT:
896 case NAT64_TCP_STATE_V4_FIN_RCV:
897 case NAT64_TCP_STATE_V6_FIN_RCV:
898 case NAT64_TCP_STATE_V6_FIN_V4_FIN_RCV:
899 case NAT64_TCP_STATE_TRANS:
900 ste->expire = now + nm->tcp_trans_timeout;
902 case NAT64_TCP_STATE_ESTABLISHED:
903 ste->expire = now + nm->tcp_est_timeout;
909 case NAT_PROTOCOL_UDP:
910 ste->expire = now + nm->udp_timeout;
/* Unknown protocols fall back to the UDP timeout. */
913 ste->expire = now + nm->udp_timeout;
/**
 * @brief Advance the TCP state machine of a NAT64 session based on the
 * observed TCP flags and the packet direction (is_ip6 = packet arrived
 * on the IPv6 side). Follows the simplified RFC 6146 state diagram.
 */
919 nat64_tcp_session_set_state (nat64_db_st_entry_t * ste, tcp_header_t * tcp,
922 switch (ste->tcp_state)
924 case NAT64_TCP_STATE_CLOSED:
/* First SYN opens the connection from whichever side sent it. */
926 if (tcp->flags & TCP_FLAG_SYN)
929 ste->tcp_state = NAT64_TCP_STATE_V6_INIT;
931 ste->tcp_state = NAT64_TCP_STATE_V4_INIT;
935 case NAT64_TCP_STATE_V4_INIT:
/* SYN from the opposite (IPv6) side completes establishment. */
937 if (is_ip6 && (tcp->flags & TCP_FLAG_SYN))
938 ste->tcp_state = NAT64_TCP_STATE_ESTABLISHED;
941 case NAT64_TCP_STATE_V6_INIT:
943 if (!is_ip6 && (tcp->flags & TCP_FLAG_SYN))
944 ste->tcp_state = NAT64_TCP_STATE_ESTABLISHED;
947 case NAT64_TCP_STATE_ESTABLISHED:
949 if (tcp->flags & TCP_FLAG_FIN)
952 ste->tcp_state = NAT64_TCP_STATE_V6_FIN_RCV;
954 ste->tcp_state = NAT64_TCP_STATE_V4_FIN_RCV;
/* RST moves the session to the short transitory timeout. */
956 else if (tcp->flags & TCP_FLAG_RST)
958 ste->tcp_state = NAT64_TCP_STATE_TRANS;
962 case NAT64_TCP_STATE_V4_FIN_RCV:
/* FIN from the other side: both directions now closing. */
964 if (is_ip6 && (tcp->flags & TCP_FLAG_FIN))
965 ste->tcp_state = NAT64_TCP_STATE_V6_FIN_V4_FIN_RCV;
968 case NAT64_TCP_STATE_V6_FIN_RCV:
970 if (!is_ip6 && (tcp->flags & TCP_FLAG_FIN))
971 ste->tcp_state = NAT64_TCP_STATE_V6_FIN_V4_FIN_RCV;
974 case NAT64_TCP_STATE_TRANS:
/* Any non-RST traffic after an RST revives the session. */
976 if (!(tcp->flags & TCP_FLAG_RST))
977 ste->tcp_state = NAT64_TCP_STATE_ESTABLISHED;
/**
 * @brief Add or delete the NAT64 translation prefix for a VRF.
 * Only the prefix lengths defined by RFC 6052 are accepted.
 *
 * @return 0 or a VNET_API_ERROR_* code.
 */
986 nat64_add_del_prefix (ip6_address_t * prefix, u8 plen, u32 vrf_id, u8 is_add)
988 nat64_main_t *nm = &nat64_main;
989 nat64_prefix_t *p = 0;
992 /* Verify prefix length */
993 if (plen != 32 && plen != 40 && plen != 48 && plen != 56 && plen != 64
995 return VNET_API_ERROR_INVALID_VALUE;
997 /* Check if tenant already have prefix */
998 for (i = 0; i < vec_len (nm->pref64); i++)
1000 if (nm->pref64[i].vrf_id == vrf_id)
/* New VRF: allocate an entry and lock its IPv6 FIB. */
1011 vec_add2 (nm->pref64, p, 1);
1013 fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id,
1018 p->prefix.as_u64[0] = prefix->as_u64[0];
1019 p->prefix.as_u64[1] = prefix->as_u64[1];
/* Delete requested but no prefix configured for this VRF. */
1025 return VNET_API_ERROR_NO_SUCH_ENTRY;
1027 vec_del1 (nm->pref64, i);
/**
 * @brief Walk all configured NAT64 prefixes, invoking @a fn with @a ctx
 * for each.
 */
1034 nat64_prefix_walk (nat64_prefix_walk_fn_t fn, void *ctx)
1036 nat64_main_t *nm = &nat64_main;
1037 nat64_prefix_t *p = 0;
1040 vec_foreach (p, nm->pref64)
/**
 * @brief Synthesize an IPv6 address from an IPv4 address per RFC 6052:
 * embed the IPv4 bytes at the offset dictated by the prefix length of
 * the fib's configured prefix (fib 0's prefix serves as a global
 * fallback, then the well-known 64:ff9b::/96 prefix).
 * Note byte 8 ("u" octet) is skipped when embedding straddles it.
 */
1049 nat64_compose_ip6 (ip6_address_t * ip6, ip4_address_t * ip4, u32 fib_index)
1051 nat64_main_t *nm = &nat64_main;
1052 nat64_prefix_t *p, *gp = 0, *prefix = 0;
1055 vec_foreach (p, nm->pref64)
1057 if (p->fib_index == fib_index)
/* Remember fib 0's prefix as the global fallback. */
1063 if (p->fib_index == 0)
/* Start from the configured prefix, then embed the IPv4 address. */
1073 clib_memcpy_fast (ip6, &p->prefix, sizeof (ip6_address_t));
/* /32: IPv4 address occupies bytes 4-7. */
1077 ip6->as_u32[1] = ip4->as_u32;
/* /40: bytes 5-7 and 9 (byte 8 is the reserved "u" octet). */
1080 ip6->as_u8[5] = ip4->as_u8[0];
1081 ip6->as_u8[6] = ip4->as_u8[1];
1082 ip6->as_u8[7] = ip4->as_u8[2];
1083 ip6->as_u8[9] = ip4->as_u8[3];
/* /48: bytes 6-7 and 9-10. */
1086 ip6->as_u8[6] = ip4->as_u8[0];
1087 ip6->as_u8[7] = ip4->as_u8[1];
1088 ip6->as_u8[9] = ip4->as_u8[2];
1089 ip6->as_u8[10] = ip4->as_u8[3];
/* /56: byte 7 and bytes 9-11. */
1092 ip6->as_u8[7] = ip4->as_u8[0];
1093 ip6->as_u8[9] = ip4->as_u8[1];
1094 ip6->as_u8[10] = ip4->as_u8[2];
1095 ip6->as_u8[11] = ip4->as_u8[3];
/* /64: bytes 9-12. */
1098 ip6->as_u8[9] = ip4->as_u8[0];
1099 ip6->as_u8[10] = ip4->as_u8[1];
1100 ip6->as_u8[11] = ip4->as_u8[2];
1101 ip6->as_u8[12] = ip4->as_u8[3];
/* /96: IPv4 address is the last 32 bits. */
1104 ip6->as_u32[3] = ip4->as_u32;
1107 nat_elog_notice ("invalid prefix length");
/* No prefix configured anywhere: use the RFC 6052 well-known prefix. */
1113 clib_memcpy_fast (ip6, well_known_prefix, sizeof (ip6_address_t));
1114 ip6->as_u32[3] = ip4->as_u32;
/**
 * @brief Extract the embedded IPv4 address from an RFC 6052 IPv6
 * address — the inverse of nat64_compose_ip6. The byte offsets depend
 * on the prefix length configured for the fib (with fib 0 and the
 * well-known prefix as fallbacks), again skipping reserved byte 8.
 */
1119 nat64_extract_ip4 (ip6_address_t * ip6, ip4_address_t * ip4, u32 fib_index)
1121 nat64_main_t *nm = &nat64_main;
1122 nat64_prefix_t *p, *gp = 0;
1126 vec_foreach (p, nm->pref64)
1128 if (p->fib_index == fib_index)
/* /32: IPv4 address is bytes 4-7. */
1150 ip4->as_u32 = ip6->as_u32[1];
/* /40: bytes 5-7 and 9. */
1153 ip4->as_u8[0] = ip6->as_u8[5];
1154 ip4->as_u8[1] = ip6->as_u8[6];
1155 ip4->as_u8[2] = ip6->as_u8[7];
1156 ip4->as_u8[3] = ip6->as_u8[9];
/* /48: bytes 6-7 and 9-10. */
1159 ip4->as_u8[0] = ip6->as_u8[6];
1160 ip4->as_u8[1] = ip6->as_u8[7];
1161 ip4->as_u8[2] = ip6->as_u8[9];
1162 ip4->as_u8[3] = ip6->as_u8[10];
/* /56: byte 7 and bytes 9-11. */
1165 ip4->as_u8[0] = ip6->as_u8[7];
1166 ip4->as_u8[1] = ip6->as_u8[9];
1167 ip4->as_u8[2] = ip6->as_u8[10];
1168 ip4->as_u8[3] = ip6->as_u8[11];
/* /64: bytes 9-12. */
1171 ip4->as_u8[0] = ip6->as_u8[9];
1172 ip4->as_u8[1] = ip6->as_u8[10];
1173 ip4->as_u8[2] = ip6->as_u8[11];
1174 ip4->as_u8[3] = ip6->as_u8[12];
/* /96: IPv4 address is the last 32 bits. */
1177 ip4->as_u32 = ip6->as_u32[3];
1180 nat_elog_notice ("invalid prefix length");
1186 * @brief Per worker process checking expire time for NAT64 sessions.
/* Interrupt-driven input node body: frees expired session-table entries
 * in this thread's DB and refreshes the BIB/session counters. */
1189 nat64_expire_worker_walk_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
1192 nat64_main_t *nm = &nat64_main;
1193 u32 thread_index = vm->thread_index;
1194 nat64_db_t *db = &nm->db[thread_index];
1195 u32 now = (u32) vlib_time_now (vm);
/* ("nad64" is the historical spelling of this DB API function.) */
1197 nad64_db_st_free_expired (thread_index, db, now);
1198 vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
1199 db->bib.bib_entries_num);
1200 vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
1201 db->st.st_entries_num);
1206 static vlib_node_registration_t nat64_expire_worker_walk_node;
/* Interrupt-state input node: triggered per worker by the central
 * nat64-expire-walk process to purge expired sessions. */
1209 VLIB_REGISTER_NODE (nat64_expire_worker_walk_node, static) = {
1210 .function = nat64_expire_worker_walk_fn,
1211 .type = VLIB_NODE_TYPE_INPUT,
1212 .state = VLIB_NODE_STATE_INTERRUPT,
1213 .name = "nat64-expire-worker-walk",
/* Forward declaration for the central expiry process node below. */
1217 static vlib_node_registration_t nat64_expire_walk_node;
1220 * @brief Centralized process to drive per worker expire walk.
/* Process node body: loops forever, waking every 10 s while NAT64 is
 * enabled on at least one interface (or sleeping until signaled), and
 * raises the expire-walk interrupt on every worker each iteration. */
1223 nat64_expire_walk_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
1226 nat64_main_t *nm = &nat64_main;
1227 vlib_main_t **worker_vms = 0, *worker_vm;
1229 uword event_type, *event_data = 0;
/* Publish our node index so interface add/del can signal us. */
1231 nm->nat64_expire_walk_node_index = nat64_expire_walk_node.index;
/* Build the list of vlib mains to interrupt (just us if single-threaded). */
1233 if (vec_len (vlib_mains) == 0)
1234 vec_add1 (worker_vms, vm);
1237 for (i = 0; i < vec_len (vlib_mains); i++)
1239 worker_vm = vlib_mains[i];
1241 vec_add1 (worker_vms, worker_vm);
/* Active: poll on a 10-second timer. Idle: block until signaled. */
1247 if (nm->total_enabled_count)
1249 vlib_process_wait_for_event_or_clock (vm, 10.0);
1250 event_type = vlib_process_get_events (vm, &event_data);
1254 vlib_process_wait_for_event (vm);
1255 event_type = vlib_process_get_events (vm, &event_data);
1262 case NAT64_CLEANER_RESCHEDULE:
1265 nat_elog_notice_X1 ("unknown event %d", "i4", event_type);
/* Kick the per-worker expire walk on every thread. */
1269 for (i = 0; i < vec_len (worker_vms); i++)
1271 worker_vm = worker_vms[i];
1272 vlib_node_set_interrupt_pending (worker_vm,
1273 nat64_expire_worker_walk_node.index);
/* Cooperative process node hosting the centralized expiry scheduler. */
1281 VLIB_REGISTER_NODE (nat64_expire_walk_node, static) = {
1282 .function = nat64_expire_walk_fn,
1283 .type = VLIB_NODE_TYPE_PROCESS,
1284 .name = "nat64-expire-walk",
1289 * fd.io coding-style-patch-verification: ON
1292 * eval: (c-set-style "gnu")