nat44_delete_session (sm, s, thread_index);
}
+/* Delete every session belonging to NAT user 'u' on worker 'thread_index'.
+   Walks the per-user session dlist anchored at
+   u->sessions_per_user_list_head_index until the ~0 sentinel value. */
+static_always_inline void
+nat44_user_del_sessions (snat_user_t * u, u32 thread_index)
+{
+ dlist_elt_t *elt;
+ snat_session_t *s;
+
+ snat_main_t *sm = &snat_main;
+ snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
+
+ // get head
+ elt = pool_elt_at_index (tsm->list_pool,
+ u->sessions_per_user_list_head_index);
+ // get first element
+ elt = pool_elt_at_index (tsm->list_pool, elt->next);
+
+ while (elt->value != ~0)
+ {
+ s = pool_elt_at_index (tsm->sessions, elt->value);
+ // advance to the next list element before cleanup — presumably
+ // nat44_session_cleanup unlinks/frees the current element (TODO confirm)
+ elt = pool_elt_at_index (tsm->list_pool, elt->next);
+ nat44_session_cleanup (s, thread_index);
+ }
+}
+
+/* Delete the NAT user identified by (addr, fib_index) by cleaning up all
+   of its sessions.  With multiple workers, every worker's user hash is
+   searched; the sweep stops at the first worker whose hash contains the
+   key.  Returns 0 if the user was found, 1 otherwise. */
+static_always_inline int
+nat44_user_del (ip4_address_t * addr, u32 fib_index)
+{
+ int rv = 1;
+
+ snat_main_t *sm = &snat_main;
+ snat_main_per_thread_data_t *tsm;
+
+ snat_user_key_t user_key;
+ clib_bihash_kv_8_8_t kv, value;
+
+ user_key.addr.as_u32 = addr->as_u32;
+ user_key.fib_index = fib_index;
+ kv.key = user_key.as_u64;
+
+ if (sm->num_workers > 1)
+ {
+ /* *INDENT-OFF* */
+ vec_foreach (tsm, sm->per_thread_data)
+ {
+ if (!clib_bihash_search_8_8 (&tsm->user_hash, &kv, &value))
+ {
+ // value.value is the user's index in this worker's user pool
+ nat44_user_del_sessions (
+ pool_elt_at_index (tsm->users, value.value),
+ tsm->thread_index);
+ rv = 0;
+ break;
+ }
+ }
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ // NOTE(review): with <= 1 workers the thread data is taken at index
+ // sm->num_workers — matches the pattern used elsewhere in this file;
+ // verify against the rest of the NAT plugin
+ tsm = vec_elt_at_index (sm->per_thread_data, sm->num_workers);
+ if (!clib_bihash_search_8_8 (&tsm->user_hash, &kv, &value))
+ {
+ nat44_user_del_sessions (pool_elt_at_index
+ (tsm->users, value.value),
+ tsm->thread_index);
+ rv = 0;
+ }
+ }
+ return rv;
+}
+
static_always_inline void
nat44_user_try_cleanup (snat_user_t * u, u32 thread_index, f64 now)
{
(f64) nat44_session_get_timeout (sm, s);
if (now < sess_timeout_time)
- continue;
+ {
+ // session still live: remember the earliest pending expiry so a
+ // retry before that time can be skipped entirely
+ tsm->min_session_timeout =
+ clib_min (sess_timeout_time, tsm->min_session_timeout);
+ continue;
+ }
nat44_session_cleanup (s, thread_index);
}
if (PREDICT_TRUE (pool_elts (tsm->sessions) < sm->max_translations))
return;
- // there is no place so we try to cleanup all users in this thread
+ // only sweep when at least one session could have expired by now
+ if (now >= tsm->min_session_timeout)
+ {
+ tsm->min_session_timeout = ~0;
+ // there is no place so we try to cleanup all users in this thread
+ /* *INDENT-OFF* */
+ pool_foreach (u, tsm->users,
+ ({ nat44_user_try_cleanup (u, thread_index, now); }));
+ /* *INDENT-ON* */
+ // nothing recorded a new timeout during the sweep — reset to 0 so
+ // the next call attempts cleanup immediately
+ if (~0 == tsm->min_session_timeout)
+ {
+ tsm->min_session_timeout = 0;
+ }
+ }
+ return;
+ }
+
+ if (now >= tsm->min_session_timeout)
+ {
+ tsm->min_session_timeout = ~0;
+ // each time user creates a new session we try to cleanup expired sessions
+ nat44_user_try_cleanup (pool_elt_at_index (tsm->users, value.value),
+ thread_index, now);
+ // nothing recorded a new timeout — reset to 0 so the next call
+ // attempts cleanup immediately
+ if (~0 == tsm->min_session_timeout)
+ {
+ tsm->min_session_timeout = 0;
+ }
+ }
+}
+
+/* Unconditionally sweep every NAT user — on every worker thread when
+   multi-threaded, otherwise on the single data-plane thread — and attempt
+   to clean up each user's expired sessions via nat44_user_try_cleanup. */
+static_always_inline void
+nat44_force_session_cleanup (void)
+{
+ snat_user_t *u = 0;
+
+ snat_main_t *sm = &snat_main;
+ snat_main_per_thread_data_t *tsm;
+
+ vlib_main_t *vm = vlib_get_main ();
+ f64 now = vlib_time_now (vm);
+
+ // TODO: consider own timeouts
+
+ if (sm->num_workers > 1)
+ {
+ /* *INDENT-OFF* */
+ // multi-worker: sweep the user pool of every worker thread
+ vec_foreach (tsm, sm->per_thread_data)
+ {
+ pool_foreach (u, tsm->users,
+ ({
+ nat44_user_try_cleanup (u, tsm->thread_index, now);
+ }));
+ }
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ // NOTE(review): single-worker thread data taken at index
+ // sm->num_workers — consistent with nat44_user_del above; verify
+ tsm = vec_elt_at_index (sm->per_thread_data, sm->num_workers);
/* *INDENT-OFF* */
pool_foreach (u, tsm->users,
({
- nat44_user_try_cleanup (u, thread_index, now);
+ nat44_user_try_cleanup (u, tsm->thread_index, now);
}));
/* *INDENT-ON* */
- return;
}
-
- // each time user creates a new session we try to cleanup expired sessions
- nat44_user_try_cleanup (pool_elt_at_index (tsm->users, value.value),
- thread_index, now);
}
#endif /* included_nat44_inlines_h__ */