diff --git a/src/plugins/lb/refcount.h b/src/vnet/util/refcount.h
similarity index 67%
rename from src/plugins/lb/refcount.h
rename to src/vnet/util/refcount.h
index dcfcb3fee58..ea92148dafa 100644
--- a/src/plugins/lb/refcount.h
+++ b/src/vnet/util/refcount.h
@@ -31,10 +31,17 @@
 
 #include <vnet/vnet.h>
 
+/*
+ * Reference counting
+ * A specific reference counter is used. The design is quite
+ * similar to vlib counters but:
+ * - It is possible to decrease the value
+ * - Summing will not zero the per-thread counters
+ * - Only the thread can reallocate its own counters vector (to avoid concurrency issues)
+*/
 typedef struct {
   u32 *counters;
-  u32 length;
-  u32 *reader_lengths;
+  volatile u32 *counter_lock;
   CLIB_CACHE_LINE_ALIGN_MARK(o);
 } vlib_refcount_per_cpu_t;
 
@@ -42,14 +49,27 @@ typedef struct {
   vlib_refcount_per_cpu_t *per_cpu;
 } vlib_refcount_t;
 
+static_always_inline
+void vlib_refcount_lock (volatile u32 *counter_lock)
+{
+  while (__sync_lock_test_and_set (counter_lock, 1))
+    ;
+}
+
+static_always_inline
+void vlib_refcount_unlock (volatile u32 *counter_lock)
+{
+  *counter_lock = 0;
+}
+
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
 
 static_always_inline
 void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
 {
   vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
-  if (PREDICT_FALSE(counter_index >= per_cpu->length))
-    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, per_cpu->length * 2));
+  if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
+    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16,(vec_len(per_cpu->counters)) * 2));
   per_cpu->counters[counter_index] += v;
 }
 
@@ -60,8 +80,16 @@ static_always_inline
 void vlib_refcount_init(vlib_refcount_t *r)
 {
   vlib_thread_main_t *tm = vlib_get_thread_main ();
+  u32 thread_index;
   r->per_cpu = 0;
   vec_validate (r->per_cpu, tm->n_vlib_mains - 1);
+
+  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
+    {
+      r->per_cpu[thread_index].counter_lock =
+          clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES,CLIB_CACHE_LINE_BYTES);
+      r->per_cpu[thread_index].counter_lock[0] = 0;
+    }
 }
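
The comment block added by this patch describes the scheme: every thread increments or decrements only its own counters vector, the vector grows on demand from vlib_refcount_add, and a per-thread counter_lock guards that vector against being swapped out while another thread reads it. A minimal usage sketch of that pattern follows; it is not part of the patch, the my_refcount* names are hypothetical, and it assumes the header is included from its new location at <vnet/util/refcount.h>. The summing helper only mirrors what a reader-side routine would have to do; the actual one lives in the accompanying refcount.c, which this diff does not show.

/*
 * Usage sketch only -- not part of the patch. Names starting with my_
 * are hypothetical, and the include path assumes the file's new home.
 */
#include <vnet/util/refcount.h>

static vlib_refcount_t my_refcount;

static void
my_refcount_setup (void)
{
  /* Creates one per-thread entry, each with a cache-line aligned lock. */
  vlib_refcount_init (&my_refcount);
}

static void
my_refcount_touch (u32 thread_index, u32 entry_index, i32 delta)
{
  /* Writer fast path: a thread only writes its own per-cpu vector, so no
   * lock is taken here; the vector is grown on demand by the callee. */
  vlib_refcount_add (&my_refcount, thread_index, entry_index, delta);
}

static i64
my_refcount_sum (u32 entry_index)
{
  /* Reader: hold each thread's counter_lock so the owner cannot replace
   * its counters vector (on resize) while it is being read. Per-thread
   * values are net deltas and may individually be negative, hence the
   * signed widening before summing. */
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  i64 sum = 0;
  u32 thread_index;

  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
    {
      vlib_refcount_per_cpu_t *per_cpu = &my_refcount.per_cpu[thread_index];
      vlib_refcount_lock (per_cpu->counter_lock);
      if (entry_index < vec_len (per_cpu->counters))
        sum += (i32) per_cpu->counters[entry_index];
      vlib_refcount_unlock (per_cpu->counter_lock);
    }
  return sum;
}

The point of the per-thread split is that the per-packet vlib_refcount_add path never takes a lock; counter_lock is only touched on the rare reader side and, presumably, around the vector swap inside __vlib_refcount_resize in refcount.c.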