/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @file
 * @brief The NAT inline functions
 */
19 #ifndef __included_nat44_ed_inlines_h__
20 #define __included_nat44_ed_inlines_h__
23 #include <vppinfra/clib.h>
24 #include <vnet/fib/ip4_fib.h>
26 #include <nat/lib/log.h>
27 #include <nat/nat44-ed/nat44_ed.h>
30 calc_nat_key (ip4_address_t addr, u16 port, u32 fib_index, u8 proto)
32 ASSERT (fib_index <= (1 << 14) - 1);
33 ASSERT (proto <= (1 << 3) - 1);
34 return (u64) addr.as_u32 << 32 | (u64) port << 16 | fib_index << 3 |
39 split_nat_key (u64 key, ip4_address_t *addr, u16 *port, u32 *fib_index,
40 nat_protocol_t *proto)
44 addr->as_u32 = key >> 32;
48 *port = (key >> 16) & (u16) ~0;
52 *fib_index = key >> 3 & ((1 << 13) - 1);
61 init_nat_k (clib_bihash_kv_8_8_t *kv, ip4_address_t addr, u16 port,
62 u32 fib_index, nat_protocol_t proto)
64 kv->key = calc_nat_key (addr, port, fib_index, proto);
69 init_nat_kv (clib_bihash_kv_8_8_t *kv, ip4_address_t addr, u16 port,
70 u32 fib_index, nat_protocol_t proto, u32 thread_index,
73 init_nat_k (kv, addr, port, fib_index, proto);
74 kv->value = (u64) thread_index << 32 | session_index;
78 init_nat_i2o_k (clib_bihash_kv_8_8_t *kv, snat_session_t *s)
80 return init_nat_k (kv, s->in2out.addr, s->in2out.port, s->in2out.fib_index,
85 init_nat_i2o_kv (clib_bihash_kv_8_8_t *kv, snat_session_t *s, u32 thread_index,
88 init_nat_k (kv, s->in2out.addr, s->in2out.port, s->in2out.fib_index,
90 kv->value = (u64) thread_index << 32 | session_index;
94 init_nat_o2i_k (clib_bihash_kv_8_8_t *kv, snat_session_t *s)
96 return init_nat_k (kv, s->out2in.addr, s->out2in.port, s->out2in.fib_index,
101 init_nat_o2i_kv (clib_bihash_kv_8_8_t *kv, snat_session_t *s, u32 thread_index,
104 init_nat_k (kv, s->out2in.addr, s->out2in.port, s->out2in.fib_index,
106 kv->value = (u64) thread_index << 32 | session_index;
110 nat_value_get_thread_index (clib_bihash_kv_8_8_t *value)
112 return value->value >> 32;
116 nat_value_get_session_index (clib_bihash_kv_8_8_t *value)
118 return value->value & ~(u32) 0;
122 init_ed_k (clib_bihash_kv_16_8_t *kv, ip4_address_t l_addr, u16 l_port,
123 ip4_address_t r_addr, u16 r_port, u32 fib_index, u8 proto)
125 kv->key[0] = (u64) r_addr.as_u32 << 32 | l_addr.as_u32;
127 (u64) r_port << 48 | (u64) l_port << 32 | fib_index << 8 | proto;
131 init_ed_kv (clib_bihash_kv_16_8_t *kv, ip4_address_t l_addr, u16 l_port,
132 ip4_address_t r_addr, u16 r_port, u32 fib_index, u8 proto,
133 u32 thread_index, u32 session_index)
135 init_ed_k (kv, l_addr, l_port, r_addr, r_port, fib_index, proto);
136 kv->value = (u64) thread_index << 32 | session_index;
140 ed_value_get_thread_index (clib_bihash_kv_16_8_t *value)
142 return value->value >> 32;
146 ed_value_get_session_index (clib_bihash_kv_16_8_t *value)
148 return value->value & ~(u32) 0;
152 split_ed_kv (clib_bihash_kv_16_8_t *kv, ip4_address_t *l_addr,
153 ip4_address_t *r_addr, u8 *proto, u32 *fib_index, u16 *l_port,
158 l_addr->as_u32 = kv->key[0] & (u32) ~0;
162 r_addr->as_u32 = kv->key[0] >> 32;
166 *r_port = kv->key[1] >> 48;
170 *l_port = (kv->key[1] >> 32) & (u16) ~0;
174 *fib_index = (kv->key[1] >> 8) & ((1 << 24) - 1);
178 *proto = kv->key[1] & (u8) ~0;
182 static_always_inline int
183 nat_get_icmp_session_lookup_values (vlib_buffer_t *b, ip4_header_t *ip0,
184 ip4_address_t *lookup_saddr,
186 ip4_address_t *lookup_daddr,
187 u16 *lookup_dport, u8 *lookup_protocol)
189 icmp46_header_t *icmp0;
190 icmp_echo_header_t *echo0, *inner_echo0 = 0;
191 ip4_header_t *inner_ip0 = 0;
193 icmp46_header_t *inner_icmp0;
195 icmp0 = (icmp46_header_t *) ip4_next_header (ip0);
196 echo0 = (icmp_echo_header_t *) (icmp0 + 1);
198 // avoid warning about unused variables in caller by setting to bogus values
202 if (!icmp_type_is_error_message (
203 vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))
205 *lookup_protocol = IP_PROTOCOL_ICMP;
206 lookup_saddr->as_u32 = ip0->src_address.as_u32;
207 *lookup_sport = vnet_buffer (b)->ip.reass.l4_src_port;
208 lookup_daddr->as_u32 = ip0->dst_address.as_u32;
209 *lookup_dport = vnet_buffer (b)->ip.reass.l4_dst_port;
213 inner_ip0 = (ip4_header_t *) (echo0 + 1);
214 l4_header = ip4_next_header (inner_ip0);
215 *lookup_protocol = inner_ip0->protocol;
216 lookup_saddr->as_u32 = inner_ip0->dst_address.as_u32;
217 lookup_daddr->as_u32 = inner_ip0->src_address.as_u32;
218 switch (ip_proto_to_nat_proto (inner_ip0->protocol))
220 case NAT_PROTOCOL_ICMP:
221 inner_icmp0 = (icmp46_header_t *) l4_header;
222 inner_echo0 = (icmp_echo_header_t *) (inner_icmp0 + 1);
223 *lookup_sport = inner_echo0->identifier;
224 *lookup_dport = inner_echo0->identifier;
226 case NAT_PROTOCOL_UDP:
227 case NAT_PROTOCOL_TCP:
228 *lookup_sport = ((tcp_udp_header_t *) l4_header)->dst_port;
229 *lookup_dport = ((tcp_udp_header_t *) l4_header)->src_port;
232 return NAT_IN2OUT_ED_ERROR_UNSUPPORTED_PROTOCOL;
239 nat44_session_get_timeout (snat_main_t *sm, snat_session_t *s)
241 switch (s->nat_proto)
243 case NAT_PROTOCOL_ICMP:
244 return sm->timeouts.icmp;
245 case NAT_PROTOCOL_UDP:
246 return sm->timeouts.udp;
247 case NAT_PROTOCOL_TCP:
250 return sm->timeouts.tcp.transitory;
252 return sm->timeouts.tcp.established;
255 return sm->timeouts.udp;
261 static_always_inline u8
262 nat44_ed_maximum_sessions_exceeded (snat_main_t *sm, u32 fib_index,
266 translations = pool_elts (sm->per_thread_data[thread_index].sessions);
267 if (vec_len (sm->max_translations_per_fib) <= fib_index)
269 return translations >= sm->max_translations_per_fib[fib_index];
272 static_always_inline int
273 nat_ed_lru_insert (snat_main_per_thread_data_t *tsm, snat_session_t *s,
276 dlist_elt_t *lru_list_elt;
277 pool_get (tsm->lru_pool, lru_list_elt);
278 s->lru_index = lru_list_elt - tsm->lru_pool;
281 case IP_PROTOCOL_UDP:
282 s->lru_head_index = tsm->udp_lru_head_index;
284 case IP_PROTOCOL_TCP:
285 s->lru_head_index = tsm->tcp_trans_lru_head_index;
287 case IP_PROTOCOL_ICMP:
288 s->lru_head_index = tsm->icmp_lru_head_index;
291 s->lru_head_index = tsm->unk_proto_lru_head_index;
294 clib_dlist_addtail (tsm->lru_pool, s->lru_head_index, s->lru_index);
295 lru_list_elt->value = s - tsm->sessions;
296 s->last_lru_update = now;
300 static_always_inline void
301 nat_6t_flow_to_ed_k (clib_bihash_kv_16_8_t *kv, nat_6t_flow_t *f)
303 init_ed_k (kv, f->match.saddr, f->match.sport, f->match.daddr,
304 f->match.dport, f->match.fib_index, f->match.proto);
307 static_always_inline void
308 nat_6t_flow_to_ed_kv (clib_bihash_kv_16_8_t *kv, nat_6t_flow_t *f,
309 u32 thread_idx, u32 session_idx)
311 init_ed_kv (kv, f->match.saddr, f->match.sport, f->match.daddr,
312 f->match.dport, f->match.fib_index, f->match.proto, thread_idx,
316 static_always_inline int
317 nat_ed_ses_i2o_flow_hash_add_del (snat_main_t *sm, u32 thread_idx,
318 snat_session_t *s, int is_add)
320 snat_main_per_thread_data_t *tsm =
321 vec_elt_at_index (sm->per_thread_data, thread_idx);
322 clib_bihash_kv_16_8_t kv;
325 nat_6t_flow_to_ed_k (&kv, &s->i2o);
329 nat_6t_flow_to_ed_kv (&kv, &s->i2o, thread_idx, s - tsm->sessions);
330 nat_6t_l3_l4_csum_calc (&s->i2o);
333 ASSERT (thread_idx == s->thread_index);
334 return clib_bihash_add_del_16_8 (&sm->flow_hash, &kv, is_add);
337 static_always_inline int
338 nat_ed_ses_o2i_flow_hash_add_del (snat_main_t *sm, u32 thread_idx,
339 snat_session_t *s, int is_add)
341 snat_main_per_thread_data_t *tsm =
342 vec_elt_at_index (sm->per_thread_data, thread_idx);
343 clib_bihash_kv_16_8_t kv;
346 nat_6t_flow_to_ed_k (&kv, &s->o2i);
350 nat_6t_flow_to_ed_kv (&kv, &s->o2i, thread_idx, s - tsm->sessions);
351 nat_6t_l3_l4_csum_calc (&s->o2i);
353 ASSERT (thread_idx == s->thread_index);
354 return clib_bihash_add_del_16_8 (&sm->flow_hash, &kv, is_add);
358 nat_ed_session_delete (snat_main_t *sm, snat_session_t *ses, u32 thread_index,
360 /* delete from global LRU list */)
362 snat_main_per_thread_data_t *tsm =
363 vec_elt_at_index (sm->per_thread_data, thread_index);
367 clib_dlist_remove (tsm->lru_pool, ses->lru_index);
369 pool_put_index (tsm->lru_pool, ses->lru_index);
370 if (nat_ed_ses_i2o_flow_hash_add_del (sm, thread_index, ses, 0))
371 nat_elog_warn (sm, "flow hash del failed");
372 if (nat_ed_ses_o2i_flow_hash_add_del (sm, thread_index, ses, 0))
373 nat_elog_warn (sm, "flow hash del failed");
374 pool_put (tsm->sessions, ses);
375 vlib_set_simple_counter (&sm->total_sessions, thread_index, 0,
376 pool_elts (tsm->sessions));
379 static_always_inline int
380 nat_lru_free_one_with_head (snat_main_t *sm, int thread_index, f64 now,
383 snat_session_t *s = NULL;
384 dlist_elt_t *oldest_elt;
385 f64 sess_timeout_time;
387 snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
388 oldest_index = clib_dlist_remove_head (tsm->lru_pool, head_index);
389 if (~0 != oldest_index)
391 oldest_elt = pool_elt_at_index (tsm->lru_pool, oldest_index);
392 s = pool_elt_at_index (tsm->sessions, oldest_elt->value);
395 s->last_heard + (f64) nat44_session_get_timeout (sm, s);
396 if (now >= sess_timeout_time ||
397 (s->tcp_closed_timestamp && now >= s->tcp_closed_timestamp))
399 nat_free_session_data (sm, s, thread_index, 0);
400 nat_ed_session_delete (sm, s, thread_index, 0);
405 clib_dlist_addhead (tsm->lru_pool, head_index, oldest_index);
411 static_always_inline int
412 nat_lru_free_one (snat_main_t *sm, int thread_index, f64 now)
414 snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
417 if ((rc = nat_lru_free_one_with_head (sm, thread_index, now, \
418 tsm->p##_lru_head_index))) \
431 static_always_inline snat_session_t *
432 nat_ed_session_alloc (snat_main_t *sm, u32 thread_index, f64 now, u8 proto)
435 snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
437 nat_lru_free_one (sm, thread_index, now);
439 pool_get (tsm->sessions, s);
440 clib_memset (s, 0, sizeof (*s));
442 nat_ed_lru_insert (tsm, s, now, proto);
444 s->ha_last_refreshed = now;
445 vlib_set_simple_counter (&sm->total_sessions, thread_index, 0,
446 pool_elts (tsm->sessions));
447 #if CLIB_ASSERT_ENABLE
448 s->thread_index = thread_index;
454 static_always_inline void
455 per_vrf_sessions_cleanup (u32 thread_index)
457 snat_main_t *sm = &snat_main;
458 snat_main_per_thread_data_t *tsm =
459 vec_elt_at_index (sm->per_thread_data, thread_index);
460 per_vrf_sessions_t *per_vrf_sessions;
461 u32 *to_free = 0, *i;
463 vec_foreach (per_vrf_sessions, tsm->per_vrf_sessions_vec)
465 if (per_vrf_sessions->expired)
467 if (per_vrf_sessions->ses_count == 0)
469 vec_add1 (to_free, per_vrf_sessions - tsm->per_vrf_sessions_vec);
474 if (vec_len (to_free))
476 vec_foreach (i, to_free)
478 vec_del1 (tsm->per_vrf_sessions_vec, *i);
486 static_always_inline void
487 per_vrf_sessions_register_session (snat_session_t *s, u32 thread_index)
489 snat_main_t *sm = &snat_main;
490 snat_main_per_thread_data_t *tsm =
491 vec_elt_at_index (sm->per_thread_data, thread_index);
492 per_vrf_sessions_t *per_vrf_sessions;
494 per_vrf_sessions_cleanup (thread_index);
496 // s->per_vrf_sessions_index == ~0 ... reuse of old session
498 vec_foreach (per_vrf_sessions, tsm->per_vrf_sessions_vec)
500 // ignore already expired registrations
501 if (per_vrf_sessions->expired)
504 if ((s->in2out.fib_index == per_vrf_sessions->rx_fib_index) &&
505 (s->out2in.fib_index == per_vrf_sessions->tx_fib_index))
509 if ((s->in2out.fib_index == per_vrf_sessions->tx_fib_index) &&
510 (s->out2in.fib_index == per_vrf_sessions->rx_fib_index))
516 // create a new registration
517 vec_add2 (tsm->per_vrf_sessions_vec, per_vrf_sessions, 1);
518 clib_memset (per_vrf_sessions, 0, sizeof (*per_vrf_sessions));
520 per_vrf_sessions->rx_fib_index = s->in2out.fib_index;
521 per_vrf_sessions->tx_fib_index = s->out2in.fib_index;
524 s->per_vrf_sessions_index = per_vrf_sessions - tsm->per_vrf_sessions_vec;
525 per_vrf_sessions->ses_count++;
529 static_always_inline void
530 per_vrf_sessions_unregister_session (snat_session_t *s, u32 thread_index)
532 snat_main_t *sm = &snat_main;
533 snat_main_per_thread_data_t *tsm;
534 per_vrf_sessions_t *per_vrf_sessions;
536 ASSERT (s->per_vrf_sessions_index != ~0);
538 tsm = vec_elt_at_index (sm->per_thread_data, thread_index);
540 vec_elt_at_index (tsm->per_vrf_sessions_vec, s->per_vrf_sessions_index);
542 ASSERT (per_vrf_sessions->ses_count != 0);
544 per_vrf_sessions->ses_count--;
545 s->per_vrf_sessions_index = ~0;
549 static_always_inline u8
550 per_vrf_sessions_is_expired (snat_session_t *s, u32 thread_index)
552 snat_main_t *sm = &snat_main;
553 snat_main_per_thread_data_t *tsm;
554 per_vrf_sessions_t *per_vrf_sessions;
556 ASSERT (s->per_vrf_sessions_index != ~0);
558 tsm = vec_elt_at_index (sm->per_thread_data, thread_index);
560 vec_elt_at_index (tsm->per_vrf_sessions_vec, s->per_vrf_sessions_index);
561 return per_vrf_sessions->expired;
564 static_always_inline void
565 nat_6t_flow_init (nat_6t_flow_t *f, u32 thread_idx, ip4_address_t saddr,
566 u16 sport, ip4_address_t daddr, u16 dport, u32 fib_index,
567 u8 proto, u32 session_idx)
569 clib_memset (f, 0, sizeof (*f));
570 f->match.saddr = saddr;
571 f->match.sport = sport;
572 f->match.daddr = daddr;
573 f->match.dport = dport;
574 f->match.proto = proto;
575 f->match.fib_index = fib_index;
578 static_always_inline void
579 nat_6t_i2o_flow_init (snat_main_t *sm, u32 thread_idx, snat_session_t *s,
580 ip4_address_t saddr, u16 sport, ip4_address_t daddr,
581 u16 dport, u32 fib_index, u8 proto)
583 snat_main_per_thread_data_t *tsm =
584 vec_elt_at_index (sm->per_thread_data, thread_idx);
585 nat_6t_flow_init (&s->i2o, thread_idx, saddr, sport, daddr, dport, fib_index,
586 proto, s - tsm->sessions);
589 static_always_inline void
590 nat_6t_o2i_flow_init (snat_main_t *sm, u32 thread_idx, snat_session_t *s,
591 ip4_address_t saddr, u16 sport, ip4_address_t daddr,
592 u16 dport, u32 fib_index, u8 proto)
594 snat_main_per_thread_data_t *tsm =
595 vec_elt_at_index (sm->per_thread_data, thread_idx);
596 nat_6t_flow_init (&s->o2i, thread_idx, saddr, sport, daddr, dport, fib_index,
597 proto, s - tsm->sessions);
600 static_always_inline int
601 nat_6t_t_eq (nat_6t_t *t1, nat_6t_t *t2)
603 return t1->as_u64[0] == t2->as_u64[0] && t1->as_u64[1] == t2->as_u64[1];
607 nat_pre_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
608 vlib_frame_t *frame, u32 def_next)
610 u32 n_left_from, *from;
612 from = vlib_frame_vector_args (frame);
613 n_left_from = frame->n_vectors;
615 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
616 u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
617 vlib_get_buffers (vm, from, b, n_left_from);
619 while (n_left_from >= 2)
622 u32 arc_next0, arc_next1;
623 vlib_buffer_t *b0, *b1;
630 /* Prefetch next iteration. */
631 if (PREDICT_TRUE (n_left_from >= 4))
633 vlib_buffer_t *p2, *p3;
638 vlib_prefetch_buffer_header (p2, LOAD);
639 vlib_prefetch_buffer_header (p3, LOAD);
641 clib_prefetch_load (p2->data);
642 clib_prefetch_load (p3->data);
648 vnet_feature_next (&arc_next0, b0);
649 vnet_feature_next (&arc_next1, b1);
651 vnet_buffer2 (b0)->nat.arc_next = arc_next0;
652 vnet_buffer2 (b1)->nat.arc_next = arc_next1;
654 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
656 if (b0->flags & VLIB_BUFFER_IS_TRACED)
658 nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
659 t->next_index = next0;
660 t->arc_next_index = arc_next0;
662 if (b1->flags & VLIB_BUFFER_IS_TRACED)
664 nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
665 t->next_index = next1;
666 t->arc_next_index = arc_next1;
676 while (n_left_from > 0)
686 vnet_feature_next (&arc_next0, b0);
687 vnet_buffer2 (b0)->nat.arc_next = arc_next0;
689 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
690 (b0->flags & VLIB_BUFFER_IS_TRACED)))
692 nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
693 t->next_index = next0;
694 t->arc_next_index = arc_next0;
701 vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
704 return frame->n_vectors;
707 static_always_inline u16
708 snat_random_port (u16 min, u16 max)
710 snat_main_t *sm = &snat_main;
714 rwide = random_u32 (&sm->random_seed);
716 if (r >= min && r <= max)
719 return min + (rwide % (max - min + 1));
723 is_interface_addr (snat_main_t *sm, vlib_node_runtime_t *node,
724 u32 sw_if_index0, u32 ip4_addr)
726 snat_runtime_t *rt = (snat_runtime_t *) node->runtime_data;
729 if (PREDICT_FALSE (rt->cached_sw_if_index != sw_if_index0))
731 ip_lookup_main_t *lm = &sm->ip4_main->lookup_main;
732 ip_interface_address_t *ia;
735 rt->cached_sw_if_index = ~0;
736 hash_free (rt->cached_presence_by_ip4_address);
738 foreach_ip_interface_address (
739 lm, ia, sw_if_index0, 1 /* honor unnumbered */, ({
740 a = ip_interface_address_get_address (lm, ia);
741 hash_set (rt->cached_presence_by_ip4_address, a->as_u32, 1);
742 rt->cached_sw_if_index = sw_if_index0;
745 if (rt->cached_sw_if_index == ~0)
749 ip4_addr_exists = !!hash_get (rt->cached_presence_by_ip4_address, ip4_addr);
750 if (PREDICT_FALSE (ip4_addr_exists))
757 nat44_set_tcp_session_state_i2o (snat_main_t *sm, f64 now, snat_session_t *ses,
758 vlib_buffer_t *b, u32 thread_index)
760 snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
761 u8 tcp_flags = vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags;
762 u32 tcp_ack_number = vnet_buffer (b)->ip.reass.tcp_ack_number;
763 u32 tcp_seq_number = vnet_buffer (b)->ip.reass.tcp_seq_number;
764 if ((ses->state == 0) && (tcp_flags & TCP_FLAG_RST))
765 ses->state = NAT44_SES_RST;
766 if ((ses->state == NAT44_SES_RST) && !(tcp_flags & TCP_FLAG_RST))
768 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_SYN) &&
769 (ses->state & NAT44_SES_O2I_SYN))
771 if (tcp_flags & TCP_FLAG_SYN)
772 ses->state |= NAT44_SES_I2O_SYN;
773 if (tcp_flags & TCP_FLAG_FIN)
775 ses->i2o_fin_seq = clib_net_to_host_u32 (tcp_seq_number);
776 ses->state |= NAT44_SES_I2O_FIN;
778 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_O2I_FIN))
780 if (clib_net_to_host_u32 (tcp_ack_number) > ses->o2i_fin_seq)
782 ses->state |= NAT44_SES_O2I_FIN_ACK;
783 if (nat44_is_ses_closed (ses))
784 { // if session is now closed, save the timestamp
785 ses->tcp_closed_timestamp = now + sm->timeouts.tcp.transitory;
786 ses->last_lru_update = now;
791 // move the session to proper LRU
794 ses->lru_head_index = tsm->tcp_trans_lru_head_index;
798 ses->lru_head_index = tsm->tcp_estab_lru_head_index;
800 clib_dlist_remove (tsm->lru_pool, ses->lru_index);
801 clib_dlist_addtail (tsm->lru_pool, ses->lru_head_index, ses->lru_index);
805 nat44_set_tcp_session_state_o2i (snat_main_t *sm, f64 now, snat_session_t *ses,
806 u8 tcp_flags, u32 tcp_ack_number,
807 u32 tcp_seq_number, u32 thread_index)
809 snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
810 if ((ses->state == 0) && (tcp_flags & TCP_FLAG_RST))
811 ses->state = NAT44_SES_RST;
812 if ((ses->state == NAT44_SES_RST) && !(tcp_flags & TCP_FLAG_RST))
814 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_SYN) &&
815 (ses->state & NAT44_SES_O2I_SYN))
817 if (tcp_flags & TCP_FLAG_SYN)
818 ses->state |= NAT44_SES_O2I_SYN;
819 if (tcp_flags & TCP_FLAG_FIN)
821 ses->o2i_fin_seq = clib_net_to_host_u32 (tcp_seq_number);
822 ses->state |= NAT44_SES_O2I_FIN;
824 if ((tcp_flags & TCP_FLAG_ACK) && (ses->state & NAT44_SES_I2O_FIN))
826 if (clib_net_to_host_u32 (tcp_ack_number) > ses->i2o_fin_seq)
827 ses->state |= NAT44_SES_I2O_FIN_ACK;
828 if (nat44_is_ses_closed (ses))
829 { // if session is now closed, save the timestamp
830 ses->tcp_closed_timestamp = now + sm->timeouts.tcp.transitory;
831 ses->last_lru_update = now;
834 // move the session to proper LRU
837 ses->lru_head_index = tsm->tcp_trans_lru_head_index;
841 ses->lru_head_index = tsm->tcp_estab_lru_head_index;
843 clib_dlist_remove (tsm->lru_pool, ses->lru_index);
844 clib_dlist_addtail (tsm->lru_pool, ses->lru_head_index, ses->lru_index);
848 nat44_session_update_counters (snat_session_t *s, f64 now, uword bytes,
853 s->total_bytes += bytes;
856 /** \brief Per-user LRU list maintenance */
858 nat44_session_update_lru (snat_main_t *sm, snat_session_t *s, u32 thread_index)
860 /* don't update too often - timeout is in magnitude of seconds anyway */
861 if (s->last_heard > s->last_lru_update + 1)
863 clib_dlist_remove (sm->per_thread_data[thread_index].lru_pool,
865 clib_dlist_addtail (sm->per_thread_data[thread_index].lru_pool,
866 s->lru_head_index, s->lru_index);
867 s->last_lru_update = s->last_heard;
871 #endif /* __included_nat44_ed_inlines_h__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */