3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
}
+/**
+ * Predict whether vlib_validate_combined_counter (index) would need to
+ * reallocate (expand) any per-thread counter vector.
+ *
+ * Used so a caller can take the worker-thread barrier *before* the
+ * expansion occurs, instead of reallocating under the feet of readers.
+ *
+ * @param cm    combined counter main
+ * @param index counter index about to be validated
+ * @return 1 if validation will expand (or the counters have never been
+ *         allocated), 0 if index already fits everywhere.
+ */
+int
+ vlib_validate_combined_counter_will_expand
+ (vlib_combined_counter_main_t * cm, u32 index)
+{
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ int i;
+ void *oldheap = vlib_stats_push_heap (cm->counters);
+
+ /* Possibly once in recorded history */
+ if (PREDICT_FALSE (vec_len (cm->counters) == 0))
+ {
+ vlib_stats_pop_heap (cm, oldheap, index,
+ 3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+ return 1;
+ }
+
+ for (i = 0; i < tm->n_vlib_mains; i++)
+ {
+ /* Trivially OK, and proves that index >= vec_len(...) */
+ if (index < vec_len (cm->counters[i]))
+ continue;
+ /*
+ * Validation grows the vector to length (index + 1), so the
+ * prospective length increment is (index + 1) - vec_len, and the
+ * data-byte increment is that many *elements*, not sizeof(pointer).
+ */
+ if (_vec_resize_will_expand
+ (cm->counters[i],
+ index - vec_len (cm->counters[i]) + 1 /* length_increment */ ,
+ (index - vec_len (cm->counters[i]) + 1)
+ * sizeof (cm->counters[i][0]) /* data_bytes */ ,
+ 0 /* header_bytes */ ,
+ CLIB_CACHE_LINE_BYTES /* data_alignment */ ))
+ {
+ vlib_stats_pop_heap (cm, oldheap, index,
+ 3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+ return 1;
+ }
+ }
+ vlib_stats_pop_heap (cm, oldheap, index,
+ 3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+ return 0;
+}
+
void
vlib_free_combined_counter (vlib_combined_counter_main_t * cm)
{
void vlib_validate_combined_counter (vlib_combined_counter_main_t * cm,
u32 index);
+int vlib_validate_combined_counter_will_expand
+ (vlib_combined_counter_main_t * cm, u32 index);
+
void vlib_free_combined_counter (vlib_combined_counter_main_t * cm);
/** Obtain the number of simple or combined counters allocated.
load_balance_alloc_i (void)
{
load_balance_t *lb;
+ u8 need_barrier_sync = 0;
+ vlib_main_t *vm = vlib_get_main();
+ ASSERT (vm->thread_index == 0);
+
+ pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
+ CLIB_CACHE_LINE_BYTES);
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
clib_memset(lb, 0, sizeof(*lb));
lb->lb_map = INDEX_INVALID;
lb->lb_urpf = INDEX_INVALID;
+
+ if (need_barrier_sync == 0)
+ {
+ need_barrier_sync += vlib_validate_combined_counter_will_expand
+ (&(load_balance_main.lbm_to_counters),
+ load_balance_get_index(lb));
+ need_barrier_sync += vlib_validate_combined_counter_will_expand
+ (&(load_balance_main.lbm_via_counters),
+ load_balance_get_index(lb));
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
+ }
+
vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
load_balance_get_index(lb));
vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
load_balance_get_index(lb));
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_release (vm);
+
return (lb);
}
vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
}
- dpo0 = load_balance_get_bucket_i(lb0,
+ dpo0 = load_balance_get_bucket_i(lb0,
vnet_buffer(b0)->ip.flow_hash &
(lb0->lb_n_buckets_minus_1));