lb_main_t lb_main;
-#define lb_get_writer_lock() do {} while(__sync_lock_test_and_set (lb_main.writer_lock, 1))
-#define lb_put_writer_lock() lb_main.writer_lock[0] = 0
+/* Global writer spinlock for the LB state: acquire by spinning on an atomic
+ * test-and-set of lb_main.writer_lock, release by clearing it. Uses the clib
+ * atomic wrappers instead of the legacy __sync_* GCC builtins. */
+#define lb_get_writer_lock() do {} while(clib_atomic_test_and_set (lb_main.writer_lock))
+#define lb_put_writer_lock() clib_atomic_release (lb_main.writer_lock)
static void lb_as_stack (lb_as_t *as);
format_white_space, indent);
u32 i;
for (i=0; i<LB_N_VIP_COUNTERS; i++)
- s = format(s, "%U %s: %d\n",
+ s = format(s, "%U %s: %Lu\n",
format_white_space, indent,
lbm->vip_counters[i].name,
vlib_get_simple_counter(&lbm->vip_counters[i], vip - lbm->vips));
u32 *as_index;
pool_foreach(as_index, vip->as_indexes, {
as = &lbm->ass[*as_index];
- s = format(s, "%U %U %d buckets %d flows dpo:%u %s\n",
+ s = format(s, "%U %U %u buckets %Lu flows dpo:%u %s\n",
format_white_space, indent,
format_ip46_address, &as->address, IP46_TYPE_ANY,
count[as - lbm->ass],
lb_new_flow_entry_t *new_flow_table = 0;
lb_as_t *as;
lb_pseudorand_t *pr, *sort_arr = 0;
- u32 count;
ASSERT (lbm->writer_lock[0]); //We must have the lock
}
//First, let's sort the ASs
- sort_arr = 0;
vec_alloc(sort_arr, pool_elts(vip->as_indexes));
i = 0;
}
}
- vec_free(sort_arr);
-
finished:
-
-//Count number of changed entries
- count = 0;
- for (i=0; i<vec_len(new_flow_table); i++)
- if (vip->new_flow_table == 0 ||
- new_flow_table[i].as_index != vip->new_flow_table[i].as_index)
- count++;
+ vec_free(sort_arr);
old_table = vip->new_flow_table;
vip->new_flow_table = new_flow_table;
{
/* Add SNAT static mapping */
pool_get (lbm->snat_mappings, m);
- memset (m, 0, sizeof (*m));
+ clib_memset (m, 0, sizeof (*m));
if (lb_vip_is_nat4_port(vip)) {
lb_snat4_key_t m_key4;
clib_bihash_kv_8_8_t kv4;
return 0;
}
-int lb_vip_del_ass_withlock(u32 vip_index, ip46_address_t *addresses, u32 n)
+/**
+ * Flush sticky-session (flow) table entries on every worker thread.
+ *
+ * Selection rules, per entry:
+ *  - vip_index == ~0                       : flush everything and free each
+ *                                            per-cpu sticky hash table;
+ *  - as_index == ~0 (vip_index given)      : flush all entries of that VIP;
+ *  - both given                            : flush only entries mapping that
+ *                                            VIP to that specific AS.
+ *
+ * @param vip_index index of the VIP to flush, or ~0 for all VIPs.
+ * @param as_index  index of the AS to flush, or ~0 for all ASs of the VIP.
+ * @return always 0.
+ */
+int
+lb_flush_vip_as (u32 vip_index, u32 as_index)
+{
+  u32 thread_index;
+  vlib_thread_main_t *tm = vlib_get_thread_main();
+  lb_main_t *lbm = &lb_main;
+
+  /* Walk the per-worker sticky hash table of every vlib main. */
+  for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) {
+    lb_hash_t *h = lbm->per_cpu[thread_index].sticky_ht;
+    if (h != NULL) {
+      u32 i;
+      lb_hash_bucket_t *b;
+
+      lb_hash_foreach_entry(h, b, i) {
+        if ((vip_index == ~0)
+            || ((b->vip[i] == vip_index) && (as_index == ~0))
+            || ((b->vip[i] == vip_index) && (b->value[i] == as_index)))
+          {
+            /* Move this entry's reference from its AS to AS index 0
+             * (presumably the "empty" AS — confirm against lb.h), so the
+             * per-thread refcounts stay balanced after the entry is wiped. */
+            vlib_refcount_add(&lbm->as_refcount, thread_index, b->value[i], -1);
+            vlib_refcount_add(&lbm->as_refcount, thread_index, 0, 1);
+            b->vip[i] = ~0;
+            b->value[i] = ~0;
+          }
+      }
+      if (vip_index == ~0)
+        {
+          /* Full flush: release the whole per-cpu table as well. */
+          lb_hash_free(h);
+          lbm->per_cpu[thread_index].sticky_ht = 0;
+        }
+    }
+  }
+
+  return 0;
+}
+
+int lb_vip_del_ass_withlock(u32 vip_index, ip46_address_t *addresses, u32 n,
+ u8 flush)
{
lb_main_t *lbm = &lb_main;
u32 now = (u32) vlib_time_now(vlib_get_main());
vec_foreach(ip, indexes) {
lbm->ass[*ip].flags &= ~LB_AS_FLAGS_USED;
lbm->ass[*ip].last_used = now;
+
+ if(flush)
+ {
+ /* flush flow table for deleted ASs*/
+ lb_flush_vip_as(vip_index, *ip);
+ }
}
//Recompute flows
return 0;
}
-int lb_vip_del_ass(u32 vip_index, ip46_address_t *addresses, u32 n)
+int lb_vip_del_ass(u32 vip_index, ip46_address_t *addresses, u32 n, u8 flush)
{
lb_get_writer_lock();
- int ret = lb_vip_del_ass_withlock(vip_index, addresses, n);
+ int ret = lb_vip_del_ass_withlock(vip_index, addresses, n, flush);
lb_put_writer_lock();
return ret;
ip46_address_t *ass = 0;
lb_as_t *as;
u32 *as_index;
+
pool_foreach(as_index, vip->as_indexes, {
as = &lbm->ass[*as_index];
vec_add1(ass, as->address);
});
if (vec_len(ass))
- lb_vip_del_ass_withlock(vip_index, ass, vec_len(ass));
+ lb_vip_del_ass_withlock(vip_index, ass, vec_len(ass), 0);
vec_free(ass);
}