summary |
shortlog |
log |
commit | commitdiff |
review |
tree
raw |
patch |
inline | side by side (from parent 1:
b2d5ff3)
A bihash-per-interface is convenient, but turns out to be difficult from
the maintenance standpoint with the large number of interfaces.
This patch makes the sessions reside in a single hash table for all the interfaces,
adding the lower 16 bit of sw_if_index as part of the key into the previously
unused space.
There is a tradeoff: a session with an identical 5-tuple and the same
sw_if_index modulo 65536 will match on either of the interfaces.
The probability of that is deemed sufficiently small to not worry about it.
In case it still happens before the heat death of the universe,
there is a clib_warning and the colliding packet will be dropped,
at which point we will need to bump the hash key size by another u64,
but rather not pay the cost of doing that right now.
Change-Id: I2747839cfcceda73e597cbcafbe1e377fb8f1889
Signed-off-by: Andrew Yourtchenko <ayourtch@gmail.com>
u64 n_dels = sw_if_index < vec_len(am->fa_session_dels_by_sw_if_index) ? am->fa_session_dels_by_sw_if_index[sw_if_index] : 0;
out0 = format(out0, "sw_if_index %d: add %lu - del %lu = %lu\n", sw_if_index, n_adds, n_dels, n_adds - n_dels);
}));
u64 n_dels = sw_if_index < vec_len(am->fa_session_dels_by_sw_if_index) ? am->fa_session_dels_by_sw_if_index[sw_if_index] : 0;
out0 = format(out0, "sw_if_index %d: add %lu - del %lu = %lu\n", sw_if_index, n_adds, n_dels, n_adds - n_dels);
}));
+ {
+ u64 n_adds = am->fa_session_total_adds;
+ u64 n_dels = am->fa_session_total_dels;
+ out0 = format(out0, "TOTAL: add %lu - del %lu = %lu\n", n_adds, n_dels, n_adds - n_dels);
+ }
out0 = format(out0, "\n\nPer-worker data:\n");
for (wk = 0; wk < vec_len (am->per_worker_data); wk++) {
acl_fa_per_worker_data_t *pw = &am->per_worker_data[wk];
out0 = format(out0, "\n\nPer-worker data:\n");
for (wk = 0; wk < vec_len (am->per_worker_data); wk++) {
acl_fa_per_worker_data_t *pw = &am->per_worker_data[wk];
/* bitmaps: when set, the processing is enabled on the interface */
uword *fa_in_acl_on_sw_if_index;
uword *fa_out_acl_on_sw_if_index;
/* bitmaps: when set, the processing is enabled on the interface */
uword *fa_in_acl_on_sw_if_index;
uword *fa_out_acl_on_sw_if_index;
- /* bitmap, when set the hash is initialized */
- uword *fa_sessions_on_sw_if_index;
- clib_bihash_40_8_t *fa_sessions_by_sw_if_index;
+ /* bihash holding all of the sessions */
+ int fa_sessions_hash_is_initialized;
+ clib_bihash_40_8_t fa_sessions_hash;
/* The process node which orchestrates the cleanup */
u32 fa_cleaner_node_index;
/* FA session timeouts, in seconds */
/* The process node which orchestrates the cleanup */
u32 fa_cleaner_node_index;
/* FA session timeouts, in seconds */
/* session add/delete counters */
u64 *fa_session_adds_by_sw_if_index;
u64 *fa_session_dels_by_sw_if_index;
/* session add/delete counters */
u64 *fa_session_adds_by_sw_if_index;
u64 *fa_session_dels_by_sw_if_index;
+ /* total session adds/dels */
+ u64 fa_session_total_adds;
+ u64 fa_session_total_dels;
static int
acl_fa_ifc_has_sessions (acl_main_t * am, int sw_if_index0)
{
static int
acl_fa_ifc_has_sessions (acl_main_t * am, int sw_if_index0)
{
- int has_sessions =
- clib_bitmap_get (am->fa_sessions_on_sw_if_index, sw_if_index0);
- return has_sessions;
+ return am->fa_sessions_hash_is_initialized;
sw_if_index0, am->fa_conn_table_hash_num_buckets,
am->fa_conn_table_hash_memory_size);
#endif
sw_if_index0, am->fa_conn_table_hash_num_buckets,
am->fa_conn_table_hash_memory_size);
#endif
- vec_validate (am->fa_sessions_by_sw_if_index, sw_if_index0);
- BV (clib_bihash_init) (&am->fa_sessions_by_sw_if_index
- [sw_if_index0], "ACL plugin FA session bihash",
+ BV (clib_bihash_init) (&am->fa_sessions_hash,
+ "ACL plugin FA session bihash",
am->fa_conn_table_hash_num_buckets,
am->fa_conn_table_hash_memory_size);
am->fa_conn_table_hash_num_buckets,
am->fa_conn_table_hash_memory_size);
- am->fa_sessions_on_sw_if_index =
- clib_bitmap_set (am->fa_sessions_on_sw_if_index, sw_if_index0, 1);
+ am->fa_sessions_hash_is_initialized = 1;
}
static inline fa_session_t *get_session_ptr(acl_main_t *am, u16 thread_index, u32 session_index)
}
static inline fa_session_t *get_session_ptr(acl_main_t *am, u16 thread_index, u32 session_index)
{
fa_session_t *sess = get_session_ptr(am, sess_id.thread_index, sess_id.session_index);
ASSERT(sess->thread_index == os_get_thread_index ());
{
fa_session_t *sess = get_session_ptr(am, sess_id.thread_index, sess_id.session_index);
ASSERT(sess->thread_index == os_get_thread_index ());
- BV (clib_bihash_add_del) (&am->fa_sessions_by_sw_if_index[sw_if_index],
+ BV (clib_bihash_add_del) (&am->fa_sessions_hash,
&sess->info.kv, 0);
acl_fa_per_worker_data_t *pw = &am->per_worker_data[sess_id.thread_index];
pool_put_index (pw->fa_sessions_pool, sess_id.session_index);
&sess->info.kv, 0);
acl_fa_per_worker_data_t *pw = &am->per_worker_data[sess_id.thread_index];
pool_put_index (pw->fa_sessions_pool, sess_id.session_index);
as the caller must have dealt with the timers. */
vec_validate (am->fa_session_dels_by_sw_if_index, sw_if_index);
am->fa_session_dels_by_sw_if_index[sw_if_index]++;
as the caller must have dealt with the timers. */
vec_validate (am->fa_session_dels_by_sw_if_index, sw_if_index);
am->fa_session_dels_by_sw_if_index[sw_if_index]++;
+ clib_smp_atomic_add(&am->fa_session_total_dels, 1);
}
static int
acl_fa_can_add_session (acl_main_t * am, int is_input, u32 sw_if_index)
{
}
static int
acl_fa_can_add_session (acl_main_t * am, int is_input, u32 sw_if_index)
{
- u64 curr_sess;
- vec_validate (am->fa_session_adds_by_sw_if_index, sw_if_index);
- vec_validate (am->fa_session_dels_by_sw_if_index, sw_if_index);
- curr_sess =
- am->fa_session_adds_by_sw_if_index[sw_if_index] -
- am->fa_session_dels_by_sw_if_index[sw_if_index];
- return (curr_sess < am->fa_conn_table_max_entries);
+ u64 curr_sess_count;
+ curr_sess_count = am->fa_session_total_adds - am->fa_session_total_dels;
+ return (curr_sess_count < am->fa_conn_table_max_entries);
acl_fa_ifc_init_sessions (am, sw_if_index);
}
acl_fa_ifc_init_sessions (am, sw_if_index);
}
- BV (clib_bihash_add_del) (&am->fa_sessions_by_sw_if_index[sw_if_index],
+ BV (clib_bihash_add_del) (&am->fa_sessions_hash,
&kv, 1);
acl_fa_conn_list_add_session(am, f_sess_id, now);
vec_validate (am->fa_session_adds_by_sw_if_index, sw_if_index);
am->fa_session_adds_by_sw_if_index[sw_if_index]++;
&kv, 1);
acl_fa_conn_list_add_session(am, f_sess_id, now);
vec_validate (am->fa_session_adds_by_sw_if_index, sw_if_index);
am->fa_session_adds_by_sw_if_index[sw_if_index]++;
+ clib_smp_atomic_add(&am->fa_session_total_adds, 1);
clib_bihash_kv_40_8_t * pvalue_sess)
{
return (BV (clib_bihash_search)
clib_bihash_kv_40_8_t * pvalue_sess)
{
return (BV (clib_bihash_search)
- (&am->fa_sessions_by_sw_if_index[sw_if_index0], &p5tuple->kv,
+ (&am->fa_sessions_hash, &p5tuple->kv,
*/
acl_fill_5tuple (am, b0, is_ip6, is_input, is_l2_path, &fa_5tuple);
*/
acl_fill_5tuple (am, b0, is_ip6, is_input, is_l2_path, &fa_5tuple);
+ fa_5tuple.l4.lsb_of_sw_if_index = sw_if_index0 & 0xffff;
acl_make_5tuple_session_key (is_input, &fa_5tuple, &kv_sess);
#ifdef FA_NODE_VERBOSE_DEBUG
clib_warning
acl_make_5tuple_session_key (is_input, &fa_5tuple, &kv_sess);
#ifdef FA_NODE_VERBOSE_DEBUG
clib_warning
0x00010000 + ((0xff & old_timeout_type) << 8) +
(0xff & new_timeout_type);
}
0x00010000 + ((0xff & old_timeout_type) << 8) +
(0xff & new_timeout_type);
}
+ /*
+ * I estimate the likelihood to be very low - the VPP needs
+ * to have >64K interfaces to start with and then on
+ * exactly 64K indices apart needs to be exactly the same
+ * 5-tuple... Anyway, since this probability is nonzero -
+ * print an error and drop the unlucky packet.
+ * If this shows up in real world, we would need to bump
+ * the hash key length.
+ */
+ if (PREDICT_FALSE(sess->sw_if_index != sw_if_index0)) {
+ clib_warning("BUG: session LSB16(sw_if_index) and 5-tuple collision!");
+ acl_check_needed = 0;
+ action = 0;
+ }
struct {
u16 port[2];
u16 proto;
struct {
u16 port[2];
u16 proto;
+ u16 lsb_of_sw_if_index;
};
} fa_session_l4_key_t;
};
} fa_session_l4_key_t;