From bf4be5730557c4280f2f9f8b7ef1a194716c82bd Mon Sep 17 00:00:00 2001
From: Hongjun Ni
Date: Mon, 13 Nov 2017 20:34:06 +0800
Subject: [PATCH] Restructure some LB files into src/vnet for reuse

Change-Id: Ic8b193e93ce18ca82b294816aa7ee0ef31d64bc2
Signed-off-by: Pierre Pfister
Signed-off-by: Hongjun Ni
---
 src/plugins/lb.am                        |  2 --
 src/plugins/lb/lb.h                      |  2 +-
 src/vnet.am                              |  6 ++++--
 src/{plugins/lb => vnet/util}/refcount.c | 15 ++++++++-----
 src/{plugins/lb => vnet/util}/refcount.h | 36 ++++++++++++++++++++++++++++----
 5 files changed, 47 insertions(+), 14 deletions(-)
 rename src/{plugins/lb => vnet/util}/refcount.c (71%)
 rename src/{plugins/lb => vnet/util}/refcount.h (67%)

diff --git a/src/plugins/lb.am b/src/plugins/lb.am
index 352358fa88f..f0ff6267691 100644
--- a/src/plugins/lb.am
+++ b/src/plugins/lb.am
@@ -19,7 +19,6 @@ lb_plugin_la_SOURCES = \
   lb/node.c \
   lb/cli.c \
   lb/util.c \
-  lb/refcount.c \
   lb/api.c
 
 BUILT_SOURCES += \
@@ -31,7 +30,6 @@ API_FILES += lb/lb.api
 
 noinst_HEADERS += \
   lb/lb.h \
   lb/util.h \
-  lb/refcount.h \
   lb/lbhash.h \
   lb/lb.api.h
diff --git a/src/plugins/lb/lb.h b/src/plugins/lb/lb.h
index 882b9b30f7e..fa0b5d48b07 100644
--- a/src/plugins/lb/lb.h
+++ b/src/plugins/lb/lb.h
@@ -31,7 +31,7 @@
 #define LB_PLUGIN_LB_LB_H_
 
 #include <lb/util.h>
-#include <lb/refcount.h>
+#include <vnet/util/refcount.h>
 
 #include <vnet/vnet.h>
 #include <vnet/ip/ip.h>
diff --git a/src/vnet.am b/src/vnet.am
index a4817f2fc11..bd7efb24c4b 100644
--- a/src/vnet.am
+++ b/src/vnet.am
@@ -63,7 +63,8 @@ nobase_include_HEADERS += \
   vnet/vnet.h \
   vnet/vnet_all_api_h.h \
   vnet/vnet_msg_enum.h \
-  vnet/util/radix.h
+  vnet/util/radix.h \
+  vnet/util/refcount.h
 
 API_FILES += vnet/interface.api
 
@@ -1147,7 +1148,8 @@ nobase_include_HEADERS += \
 ########################################
 
 libvnet_la_SOURCES += \
-  vnet/util/radix.c \
+  vnet/util/radix.c \
+  vnet/util/refcount.c \
   vnet/util/trajectory.c
 
 ########################################
diff --git a/src/plugins/lb/refcount.c b/src/vnet/util/refcount.c
similarity index 71%
rename from src/plugins/lb/refcount.c
rename to src/vnet/util/refcount.c
index 6f01ab5aaf7..a7b525d67be 100644
--- a/src/plugins/lb/refcount.c
+++ b/src/vnet/util/refcount.c
@@ -13,17 +13,18 @@
 * limitations under the License.
 */
 
-#include <lb/refcount.h>
+#include <vnet/util/refcount.h>
 
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size)
 {
   u32 *new_counter = 0, *old_counter;
   vec_validate(new_counter, size);
-  memcpy(new_counter, per_cpu->counters, per_cpu->length);
+  vlib_refcount_lock(per_cpu->counter_lock);
+  memcpy(new_counter, per_cpu->counters, vec_len(per_cpu->counters)*4);
   old_counter = per_cpu->counters;
   per_cpu->counters = new_counter;
+  vlib_refcount_unlock(per_cpu->counter_lock);
   CLIB_MEMORY_BARRIER();
-  per_cpu->length = vec_len(new_counter);
   vec_free(old_counter);
 }
 
@@ -33,8 +34,12 @@ u64 vlib_refcount_get(vlib_refcount_t *r, u32 index)
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   u32 thread_index;
   for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++) {
-    if (r->per_cpu[thread_index].length > index)
-      count += r->per_cpu[thread_index].counters[index];
+    vlib_refcount_lock(r->per_cpu[thread_index].counter_lock);
+    if (index < vec_len(r->per_cpu[thread_index].counters))
+      {
+        count += r->per_cpu[thread_index].counters[index];
+      }
+    vlib_refcount_unlock(r->per_cpu[thread_index].counter_lock);
   }
   return count;
 }
diff --git a/src/plugins/lb/refcount.h b/src/vnet/util/refcount.h
similarity index 67%
rename from src/plugins/lb/refcount.h
rename to src/vnet/util/refcount.h
index dcfcb3fee58..ea92148dafa 100644
--- a/src/plugins/lb/refcount.h
+++ b/src/vnet/util/refcount.h
@@ -31,10 +31,17 @@
 
 #include <vnet/vnet.h>
 
+/*
+ * Reference counting
+ * A dedicated reference counter is used. The design is quite
+ * similar to vlib counters, but:
+ *  - The value can also be decreased
+ *  - Summing will not zero the per-thread counters
+ *  - Only the owning thread may reallocate its counters vector (to avoid concurrency issues)
+*/
 typedef struct {
   u32 *counters;
-  u32 length;
-  u32 *reader_lengths;
+  volatile u32 *counter_lock;
   CLIB_CACHE_LINE_ALIGN_MARK(o);
 } vlib_refcount_per_cpu_t;
 
@@ -42,14 +49,27 @@ typedef struct {
   vlib_refcount_per_cpu_t *per_cpu;
 } vlib_refcount_t;
 
+static_always_inline
+void vlib_refcount_lock (volatile u32 *counter_lock)
+{
+  while (__sync_lock_test_and_set (counter_lock, 1))
+    ;
+}
+
+static_always_inline
+void vlib_refcount_unlock (volatile u32 *counter_lock)
+{
+  *counter_lock = 0;
+}
+
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
 
 static_always_inline
 void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
 {
   vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
-  if (PREDICT_FALSE(counter_index >= per_cpu->length))
-    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, per_cpu->length * 2));
+  if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
+    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, (vec_len(per_cpu->counters)) * 2));
 
   per_cpu->counters[counter_index] += v;
 }
@@ -60,8 +80,16 @@
 static_always_inline
 void vlib_refcount_init(vlib_refcount_t *r)
 {
   vlib_thread_main_t *tm = vlib_get_thread_main ();
+  u32 thread_index;
   r->per_cpu = 0;
   vec_validate (r->per_cpu, tm->n_vlib_mains - 1);
+
+  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
+    {
+      r->per_cpu[thread_index].counter_lock =
+          clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+      r->per_cpu[thread_index].counter_lock[0] = 0;
+    }
 }
 
-- 
2.16.6
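
Reviewer note (not part of the patch): the sketch below shows how a component might consume the refcount helper once it lives under vnet/util; the API itself is unchanged by the move. The entry_* names and the use of vm->thread_index are illustrative assumptions; only <vnet/util/refcount.h>, vlib_refcount_init(), vlib_refcount_add() and vlib_refcount_get() come from the code above.

    /* Hypothetical consumer of the relocated refcount API (illustration only). */
    #include <vlib/vlib.h>
    #include <vnet/util/refcount.h>

    static vlib_refcount_t entry_refcount;

    /* Call once at startup: allocates the per-thread counter vectors
     * and the per-thread counter_lock introduced by this patch. */
    static void
    entry_refcount_setup (void)
    {
      vlib_refcount_init (&entry_refcount);
    }

    /* Fast path: each worker only writes its own per-thread counter,
     * so no atomics are needed here; the lock is only taken on resize. */
    static void
    entry_acquire (vlib_main_t * vm, u32 entry_index)
    {
      vlib_refcount_add (&entry_refcount, vm->thread_index, entry_index, 1);
    }

    static void
    entry_release (vlib_main_t * vm, u32 entry_index)
    {
      vlib_refcount_add (&entry_refcount, vm->thread_index, entry_index, -1);
    }

    /* Slow path: sums every thread's counter under its counter_lock;
     * the result is approximate while workers keep updating. */
    static u64
    entry_users (u32 entry_index)
    {
      return vlib_refcount_get (&entry_refcount, entry_index);
    }

The sketch relies on the same design choice visible in the diff: writers stay lock-free on their own per-thread vector, and the new spinlock only serializes the rare resize against the cross-thread summing in vlib_refcount_get().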