/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief The NAT inline functions
 */
19 #ifndef __included_nat_inlines_h__
20 #define __included_nat_inlines_h__
22 #include <vnet/fib/ip4_fib.h>
24 #include <nat/nat_ha.h>
27 calc_nat_key (ip4_address_t addr, u16 port, u32 fib_index, u8 proto)
29 ASSERT (fib_index <= (1 << 14) - 1);
30 ASSERT (proto <= (1 << 3) - 1);
31 return (u64) addr.as_u32 << 32 | (u64) port << 16 | fib_index << 3 |
36 split_nat_key (u64 key, ip4_address_t * addr, u16 * port,
37 u32 * fib_index, nat_protocol_t * proto)
41 addr->as_u32 = key >> 32;
45 *port = (key >> 16) & (u16) ~ 0;
49 *fib_index = key >> 3 & ((1 << 13) - 1);
58 init_nat_k (clib_bihash_kv_8_8_t * kv, ip4_address_t addr, u16 port,
59 u32 fib_index, nat_protocol_t proto)
61 kv->key = calc_nat_key (addr, port, fib_index, proto);
66 init_nat_kv (clib_bihash_kv_8_8_t * kv, ip4_address_t addr, u16 port,
67 u32 fib_index, nat_protocol_t proto, u64 value)
69 init_nat_k (kv, addr, port, fib_index, proto);
74 init_nat_i2o_k (clib_bihash_kv_8_8_t * kv, snat_session_t * s)
76 return init_nat_k (kv, s->in2out.addr, s->in2out.port, s->in2out.fib_index,
81 init_nat_i2o_kv (clib_bihash_kv_8_8_t * kv, snat_session_t * s, u64 value)
83 init_nat_k (kv, s->in2out.addr, s->in2out.port, s->in2out.fib_index,
89 init_nat_o2i_k (clib_bihash_kv_8_8_t * kv, snat_session_t * s)
91 return init_nat_k (kv, s->out2in.addr, s->out2in.port, s->out2in.fib_index,
96 init_nat_o2i_kv (clib_bihash_kv_8_8_t * kv, snat_session_t * s, u64 value)
98 init_nat_k (kv, s->out2in.addr, s->out2in.port, s->out2in.fib_index,
104 nat_pre_node_fn_inline (vlib_main_t * vm,
105 vlib_node_runtime_t * node,
106 vlib_frame_t * frame, u32 def_next)
108 u32 n_left_from, *from, *to_next;
111 from = vlib_frame_vector_args (frame);
112 n_left_from = frame->n_vectors;
113 next_index = node->cached_next_index;
115 while (n_left_from > 0)
119 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
121 while (n_left_from >= 4 && n_left_to_next >= 2)
124 u32 arc_next0, arc_next1;
126 vlib_buffer_t *b0, *b1;
128 /* Prefetch next iteration. */
130 vlib_buffer_t *p2, *p3;
132 p2 = vlib_get_buffer (vm, from[2]);
133 p3 = vlib_get_buffer (vm, from[3]);
135 vlib_prefetch_buffer_header (p2, LOAD);
136 vlib_prefetch_buffer_header (p3, LOAD);
138 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
139 CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, LOAD);
142 /* speculatively enqueue b0 and b1 to the current next frame */
143 to_next[0] = bi0 = from[0];
144 to_next[1] = bi1 = from[1];
150 b0 = vlib_get_buffer (vm, bi0);
151 b1 = vlib_get_buffer (vm, bi1);
156 vnet_feature_next (&arc_next0, b0);
157 vnet_feature_next (&arc_next1, b1);
159 vnet_buffer2 (b0)->nat.arc_next = arc_next0;
160 vnet_buffer2 (b1)->nat.arc_next = arc_next1;
162 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
164 if (b0->flags & VLIB_BUFFER_IS_TRACED)
167 vlib_add_trace (vm, node, b0, sizeof (*t));
168 t->next_index = next0;
169 t->arc_next_index = arc_next0;
171 if (b1->flags & VLIB_BUFFER_IS_TRACED)
174 vlib_add_trace (vm, node, b0, sizeof (*t));
175 t->next_index = next1;
176 t->arc_next_index = arc_next1;
180 /* verify speculative enqueues, maybe switch current next frame */
181 vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
182 to_next, n_left_to_next,
183 bi0, bi1, next0, next1);
186 while (n_left_from > 0 && n_left_to_next > 0)
193 /* speculatively enqueue b0 to the current next frame */
201 b0 = vlib_get_buffer (vm, bi0);
203 vnet_feature_next (&arc_next0, b0);
204 vnet_buffer2 (b0)->nat.arc_next = arc_next0;
206 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
207 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
209 nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
210 t->next_index = next0;
211 t->arc_next_index = arc_next0;
214 /* verify speculative enqueue, maybe switch current next frame */
215 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
216 to_next, n_left_to_next,
220 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
223 return frame->n_vectors;
227 is_interface_addr (snat_main_t * sm, vlib_node_runtime_t * node,
228 u32 sw_if_index0, u32 ip4_addr)
230 snat_runtime_t *rt = (snat_runtime_t *) node->runtime_data;
231 ip4_address_t *first_int_addr;
233 if (PREDICT_FALSE (rt->cached_sw_if_index != sw_if_index0))
236 ip4_interface_first_address (sm->ip4_main, sw_if_index0,
237 0 /* just want the address */ );
238 rt->cached_sw_if_index = sw_if_index0;
240 rt->cached_ip4_address = first_int_addr->as_u32;
242 rt->cached_ip4_address = 0;
245 if (PREDICT_FALSE (ip4_addr == rt->cached_ip4_address))
252 maximum_sessions_exceeded (snat_main_t * sm, u32 thread_index)
254 if (pool_elts (sm->per_thread_data[thread_index].sessions) >=
255 sm->max_translations_per_thread)
262 nat_send_all_to_node (vlib_main_t * vm, u32 * bi_vector,
263 vlib_node_runtime_t * node, vlib_error_t * error,
266 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
269 n_left_from = vec_len (bi_vector);
270 next_index = node->cached_next_index;
271 while (n_left_from > 0)
273 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
274 while (n_left_from > 0 && n_left_to_next > 0)
276 u32 bi0 = to_next[0] = from[0];
281 vlib_buffer_t *p0 = vlib_get_buffer (vm, bi0);
284 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
285 n_left_to_next, bi0, next);
287 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
292 user_session_increment (snat_main_t * sm, snat_user_t * u, u8 is_static)
294 if (u->nsessions + u->nstaticsessions < sm->max_translations_per_user)
297 u->nstaticsessions++;
304 nat44_delete_user_with_no_session (snat_main_t * sm, snat_user_t * u,
307 clib_bihash_kv_8_8_t kv;
308 snat_user_key_t u_key;
309 snat_main_per_thread_data_t *tsm = vec_elt_at_index (sm->per_thread_data,
312 if (u->nstaticsessions == 0 && u->nsessions == 0)
314 u_key.addr.as_u32 = u->addr.as_u32;
315 u_key.fib_index = u->fib_index;
316 kv.key = u_key.as_u64;
317 pool_put_index (tsm->list_pool, u->sessions_per_user_list_head_index);
318 pool_put (tsm->users, u);
319 clib_bihash_add_del_8_8 (&tsm->user_hash, &kv, 0);
320 vlib_set_simple_counter (&sm->total_users, thread_index, 0,
321 pool_elts (tsm->users));
326 nat44_delete_session (snat_main_t * sm, snat_session_t * ses,
329 snat_main_per_thread_data_t *tsm = vec_elt_at_index (sm->per_thread_data,
331 clib_bihash_kv_8_8_t kv, value;
333 const snat_user_key_t u_key = {
334 .addr = ses->in2out.addr,
335 .fib_index = ses->in2out.fib_index
337 const u8 u_static = snat_is_session_static (ses);
339 clib_dlist_remove (tsm->list_pool, ses->per_user_index);
340 pool_put_index (tsm->list_pool, ses->per_user_index);
341 if (sm->endpoint_dependent)
343 clib_dlist_remove (tsm->lru_pool, ses->lru_index);
344 pool_put_index (tsm->lru_pool, ses->lru_index);
346 pool_put (tsm->sessions, ses);
347 vlib_set_simple_counter (&sm->total_sessions, thread_index, 0,
348 pool_elts (tsm->sessions));
350 kv.key = u_key.as_u64;
351 if (!clib_bihash_search_8_8 (&tsm->user_hash, &kv, &value))
353 u = pool_elt_at_index (tsm->users, value.value);
355 u->nstaticsessions--;
359 nat44_delete_user_with_no_session (sm, u, thread_index);
363 /** \brief Set TCP session state.
364 @return 1 if session was closed, otherwise 0
367 nat44_set_tcp_session_state_i2o (snat_main_t * sm, f64 now,
368 snat_session_t * ses, vlib_buffer_t * b,
371 snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
372 u8 tcp_flags = vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags;
373 u32 tcp_ack_number = vnet_buffer (b)->ip.reass.tcp_ack_number;
374 u32 tcp_seq_number = vnet_buffer (b)->ip.reass.tcp_seq_number;
375 if ((ses->state == 0) && (tcp_flags & TCP_FLAG_RST))
376 ses->state = NAT44_SES_RST;
377 if ((ses->state == NAT44_SES_RST) && !(tcp_flags & TCP_FLAG_RST))
379 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_SYN) &&
380 (ses->state & NAT44_SES_O2I_SYN))
382 if (tcp_flags & TCP_FLAG_SYN)
383 ses->state |= NAT44_SES_I2O_SYN;
384 if (tcp_flags & TCP_FLAG_FIN)
386 ses->i2o_fin_seq = clib_net_to_host_u32 (tcp_seq_number);
387 ses->state |= NAT44_SES_I2O_FIN;
389 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_O2I_FIN))
391 if (clib_net_to_host_u32 (tcp_ack_number) > ses->o2i_fin_seq)
393 ses->state |= NAT44_SES_O2I_FIN_ACK;
394 if (nat44_is_ses_closed (ses))
395 { // if session is now closed, save the timestamp
396 ses->tcp_closed_timestamp = now + sm->tcp_transitory_timeout;
397 ses->last_lru_update = now;
402 // move the session to proper LRU
405 ses->lru_head_index = tsm->tcp_trans_lru_head_index;
409 ses->lru_head_index = tsm->tcp_estab_lru_head_index;
411 clib_dlist_remove (tsm->lru_pool, ses->lru_index);
412 clib_dlist_addtail (tsm->lru_pool, ses->lru_head_index, ses->lru_index);
417 nat44_set_tcp_session_state_o2i (snat_main_t * sm, f64 now,
418 snat_session_t * ses, u8 tcp_flags,
419 u32 tcp_ack_number, u32 tcp_seq_number,
422 snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
423 if ((ses->state == 0) && (tcp_flags & TCP_FLAG_RST))
424 ses->state = NAT44_SES_RST;
425 if ((ses->state == NAT44_SES_RST) && !(tcp_flags & TCP_FLAG_RST))
427 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_SYN) &&
428 (ses->state & NAT44_SES_O2I_SYN))
430 if (tcp_flags & TCP_FLAG_SYN)
431 ses->state |= NAT44_SES_O2I_SYN;
432 if (tcp_flags & TCP_FLAG_FIN)
434 ses->o2i_fin_seq = clib_net_to_host_u32 (tcp_seq_number);
435 ses->state |= NAT44_SES_O2I_FIN;
437 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_FIN))
439 if (clib_net_to_host_u32 (tcp_ack_number) > ses->i2o_fin_seq)
440 ses->state |= NAT44_SES_I2O_FIN_ACK;
441 if (nat44_is_ses_closed (ses))
442 { // if session is now closed, save the timestamp
443 ses->tcp_closed_timestamp = now + sm->tcp_transitory_timeout;
444 ses->last_lru_update = now;
447 // move the session to proper LRU
450 ses->lru_head_index = tsm->tcp_trans_lru_head_index;
454 ses->lru_head_index = tsm->tcp_estab_lru_head_index;
456 clib_dlist_remove (tsm->lru_pool, ses->lru_index);
457 clib_dlist_addtail (tsm->lru_pool, ses->lru_head_index, ses->lru_index);
462 nat44_session_get_timeout (snat_main_t * sm, snat_session_t * s)
464 switch (s->nat_proto)
466 case NAT_PROTOCOL_ICMP:
467 return sm->icmp_timeout;
468 case NAT_PROTOCOL_UDP:
469 return sm->udp_timeout;
470 case NAT_PROTOCOL_TCP:
473 return sm->tcp_transitory_timeout;
475 return sm->tcp_established_timeout;
478 return sm->udp_timeout;
485 nat44_session_update_counters (snat_session_t * s, f64 now, uword bytes,
490 s->total_bytes += bytes;
491 nat_ha_sref (&s->out2in.addr, s->out2in.port, &s->ext_host_addr,
492 s->ext_host_port, s->nat_proto, s->out2in.fib_index,
493 s->total_pkts, s->total_bytes, thread_index,
494 &s->ha_last_refreshed, now);
497 /** \brief Per-user LRU list maintenance */
499 nat44_session_update_lru (snat_main_t * sm, snat_session_t * s,
502 /* don't update too often - timeout is in magnitude of seconds anyway */
503 if (s->last_heard > s->last_lru_update + 1)
505 if (!sm->endpoint_dependent)
507 clib_dlist_remove (sm->per_thread_data[thread_index].list_pool,
509 clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool,
510 s->per_user_list_head_index, s->per_user_index);
514 clib_dlist_remove (sm->per_thread_data[thread_index].lru_pool,
516 clib_dlist_addtail (sm->per_thread_data[thread_index].lru_pool,
517 s->lru_head_index, s->lru_index);
519 s->last_lru_update = s->last_heard;
524 init_ed_k (clib_bihash_kv_16_8_t * kv, ip4_address_t l_addr, u16 l_port,
525 ip4_address_t r_addr, u16 r_port, u32 fib_index, u8 proto)
527 kv->key[0] = (u64) r_addr.as_u32 << 32 | l_addr.as_u32;
529 (u64) r_port << 48 | (u64) l_port << 32 | fib_index << 8 | proto;
533 init_ed_kv (clib_bihash_kv_16_8_t * kv, ip4_address_t l_addr, u16 l_port,
534 ip4_address_t r_addr, u16 r_port, u32 fib_index, u8 proto,
535 u32 thread_index, u32 session_index)
537 init_ed_k (kv, l_addr, l_port, r_addr, r_port, fib_index, proto);
538 kv->value = (u64) thread_index << 32 | session_index;
542 ed_value_get_thread_index (clib_bihash_kv_16_8_t * value)
544 return value->value >> 32;
548 ed_value_get_session_index (clib_bihash_kv_16_8_t * value)
550 return value->value & ~(u32) 0;
554 split_ed_value (clib_bihash_kv_16_8_t * value, u32 * thread_index,
559 *thread_index = ed_value_get_thread_index (value);
563 *session_index = ed_value_get_session_index (value);
568 split_ed_kv (clib_bihash_kv_16_8_t * kv,
569 ip4_address_t * l_addr, ip4_address_t * r_addr, u8 * proto,
570 u32 * fib_index, u16 * l_port, u16 * r_port)
574 l_addr->as_u32 = kv->key[0] & (u32) ~ 0;
578 r_addr->as_u32 = kv->key[0] >> 32;
582 *r_port = kv->key[1] >> 48;
586 *l_port = (kv->key[1] >> 32) & (u16) ~ 0;
590 *fib_index = (kv->key[1] >> 8) & ((1 << 24) - 1);
594 *proto = kv->key[1] & (u8) ~ 0;
598 static_always_inline int
599 get_icmp_i2o_ed_key (vlib_buffer_t * b, ip4_header_t * ip0, u32 rx_fib_index,
600 u32 thread_index, u32 session_index,
601 nat_protocol_t * nat_proto, u16 * l_port, u16 * r_port,
602 clib_bihash_kv_16_8_t * kv)
605 u16 _l_port, _r_port;
606 ip4_address_t *l_addr, *r_addr;
608 icmp46_header_t *icmp0;
609 icmp_echo_header_t *echo0, *inner_echo0 = 0;
610 ip4_header_t *inner_ip0 = 0;
612 icmp46_header_t *inner_icmp0;
614 icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
615 echo0 = (icmp_echo_header_t *) (icmp0 + 1);
617 if (!icmp_type_is_error_message
618 (vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))
620 proto = IP_PROTOCOL_ICMP;
621 l_addr = &ip0->src_address;
622 r_addr = &ip0->dst_address;
623 _l_port = vnet_buffer (b)->ip.reass.l4_src_port;
628 inner_ip0 = (ip4_header_t *) (echo0 + 1);
629 l4_header = ip4_next_header (inner_ip0);
630 proto = inner_ip0->protocol;
631 r_addr = &inner_ip0->src_address;
632 l_addr = &inner_ip0->dst_address;
633 switch (ip_proto_to_nat_proto (inner_ip0->protocol))
635 case NAT_PROTOCOL_ICMP:
636 inner_icmp0 = (icmp46_header_t *) l4_header;
637 inner_echo0 = (icmp_echo_header_t *) (inner_icmp0 + 1);
639 _l_port = inner_echo0->identifier;
641 case NAT_PROTOCOL_UDP:
642 case NAT_PROTOCOL_TCP:
643 _l_port = ((tcp_udp_header_t *) l4_header)->dst_port;
644 _r_port = ((tcp_udp_header_t *) l4_header)->src_port;
647 return NAT_IN2OUT_ED_ERROR_UNSUPPORTED_PROTOCOL;
650 init_ed_kv (kv, *l_addr, _l_port, *r_addr, _r_port, rx_fib_index, proto,
651 thread_index, session_index);
654 *nat_proto = ip_proto_to_nat_proto (proto);
667 static_always_inline int
668 get_icmp_o2i_ed_key (vlib_buffer_t * b, ip4_header_t * ip0, u32 rx_fib_index,
669 u32 thread_index, u32 session_index,
670 nat_protocol_t * nat_proto, u16 * l_port, u16 * r_port,
671 clib_bihash_kv_16_8_t * kv)
673 icmp46_header_t *icmp0;
675 ip4_address_t *l_addr, *r_addr;
676 u16 _l_port, _r_port;
677 icmp_echo_header_t *echo0, *inner_echo0 = 0;
678 ip4_header_t *inner_ip0;
680 icmp46_header_t *inner_icmp0;
682 icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
683 echo0 = (icmp_echo_header_t *) (icmp0 + 1);
685 if (!icmp_type_is_error_message
686 (vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))
688 proto = IP_PROTOCOL_ICMP;
689 l_addr = &ip0->dst_address;
690 r_addr = &ip0->src_address;
691 _l_port = vnet_buffer (b)->ip.reass.l4_src_port;
696 inner_ip0 = (ip4_header_t *) (echo0 + 1);
697 l4_header = ip4_next_header (inner_ip0);
698 proto = inner_ip0->protocol;
699 l_addr = &inner_ip0->src_address;
700 r_addr = &inner_ip0->dst_address;
701 switch (ip_proto_to_nat_proto (inner_ip0->protocol))
703 case NAT_PROTOCOL_ICMP:
704 inner_icmp0 = (icmp46_header_t *) l4_header;
705 inner_echo0 = (icmp_echo_header_t *) (inner_icmp0 + 1);
706 _l_port = inner_echo0->identifier;
709 case NAT_PROTOCOL_UDP:
710 case NAT_PROTOCOL_TCP:
711 _l_port = ((tcp_udp_header_t *) l4_header)->src_port;
712 _r_port = ((tcp_udp_header_t *) l4_header)->dst_port;
718 init_ed_kv (kv, *l_addr, _l_port, *r_addr, _r_port, rx_fib_index, proto,
719 thread_index, session_index);
722 *nat_proto = ip_proto_to_nat_proto (proto);
736 * @brief Check if packet should be translated
738 * Packets aimed at outside interface and external address with active session
739 * should be translated.
742 * @param rt NAT runtime data
743 * @param sw_if_index0 index of the inside interface
744 * @param ip0 IPv4 header
745 * @param proto0 NAT protocol
746 * @param rx_fib_index0 RX FIB index
748 * @returns 0 if packet should be translated otherwise 1
751 snat_not_translate_fast (snat_main_t * sm, vlib_node_runtime_t * node,
752 u32 sw_if_index0, ip4_header_t * ip0, u32 proto0,
758 fib_node_index_t fei = FIB_NODE_INDEX_INVALID;
759 nat_outside_fib_t *outside_fib;
761 .fp_proto = FIB_PROTOCOL_IP4,
764 .ip4.as_u32 = ip0->dst_address.as_u32,
769 /* Don't NAT packet aimed at the intfc address */
770 if (PREDICT_FALSE (is_interface_addr (sm, node, sw_if_index0,
771 ip0->dst_address.as_u32)))
774 fei = fib_table_lookup (rx_fib_index0, &pfx);
775 if (FIB_NODE_INDEX_INVALID != fei)
777 u32 sw_if_index = fib_entry_get_resolving_interface (fei);
778 if (sw_if_index == ~0)
780 vec_foreach (outside_fib, sm->outside_fibs)
782 fei = fib_table_lookup (outside_fib->fib_index, &pfx);
783 if (FIB_NODE_INDEX_INVALID != fei)
785 sw_if_index = fib_entry_get_resolving_interface (fei);
786 if (sw_if_index != ~0)
791 if (sw_if_index == ~0)
796 pool_foreach (i, sm->interfaces, ({
797 /* NAT packet aimed at outside interface */
798 if ((nat_interface_is_outside (i)) && (sw_if_index == i->sw_if_index))
808 increment_v4_address (ip4_address_t * a)
812 v = clib_net_to_host_u32 (a->as_u32) + 1;
813 a->as_u32 = clib_host_to_net_u32 (v);
816 #endif /* __included_nat_inlines_h__ */
819 * fd.io coding-style-patch-verification: ON
822 * eval: (c-set-style "gnu")