Restructure some files in LB to src/vnet for reuse (Gerrit change 93/9393/22)
authorHongjun Ni <hongjun.ni@intel.com>
Mon, 13 Nov 2017 12:34:06 +0000 (20:34 +0800)
committerNeale Ranns <nranns@cisco.com>
Wed, 6 Dec 2017 13:22:51 +0000 (13:22 +0000)
Change-Id: Ic8b193e93ce18ca82b294816aa7ee0ef31d64bc2
Signed-off-by: Pierre Pfister <ppfister@cisco.com>
Signed-off-by: Hongjun Ni <hongjun.ni@intel.com>
src/plugins/lb.am
src/plugins/lb/lb.h
src/vnet.am
src/vnet/util/refcount.c [moved from src/plugins/lb/refcount.c with 71% similarity]
src/vnet/util/refcount.h [moved from src/plugins/lb/refcount.h with 67% similarity]

index 352358f..f0ff626 100644 (file)
@@ -19,7 +19,6 @@ lb_plugin_la_SOURCES =                        \
        lb/node.c                       \
        lb/cli.c                        \
        lb/util.c                       \
-       lb/refcount.c                   \
        lb/api.c
 
 BUILT_SOURCES +=                       \
@@ -31,7 +30,6 @@ API_FILES += lb/lb.api
 noinst_HEADERS +=                      \
        lb/lb.h                         \
        lb/util.h                       \
-       lb/refcount.h                   \
        lb/lbhash.h                     \
        lb/lb.api.h
 
index 882b9b3..fa0b5d4 100644 (file)
@@ -31,7 +31,7 @@
 #define LB_PLUGIN_LB_LB_H_
 
 #include <lb/util.h>
-#include <lb/refcount.h>
+#include <vnet/util/refcount.h>
 
 #include <vnet/vnet.h>
 #include <vnet/ip/ip.h>
index a4817f2..bd7efb2 100644 (file)
@@ -63,7 +63,8 @@ nobase_include_HEADERS +=                     \
   vnet/vnet.h                                  \
   vnet/vnet_all_api_h.h                                \
   vnet/vnet_msg_enum.h                         \
-  vnet/util/radix.h
+  vnet/util/radix.h                 \
+  vnet/util/refcount.h
 
 API_FILES += vnet/interface.api
 
@@ -1147,7 +1148,8 @@ nobase_include_HEADERS +=                 \
 ########################################
 
 libvnet_la_SOURCES +=                    \
-  vnet/util/radix.c                     \
+  vnet/util/radix.c                         \
+  vnet/util/refcount.c                  \
   vnet/util/trajectory.c
 
 ########################################
similarity index 71%
rename from src/plugins/lb/refcount.c
rename to src/vnet/util/refcount.c
index 6f01ab5..a7b525d 100644 (file)
  * limitations under the License.
  */
 
-#include <lb/refcount.h>
+#include <vnet/util/refcount.h>
 
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size)
 {
   u32 *new_counter = 0, *old_counter;
   vec_validate(new_counter, size);
-  memcpy(new_counter, per_cpu->counters, per_cpu->length);
+  vlib_refcount_lock(per_cpu->counter_lock);
+  memcpy(new_counter, per_cpu->counters, vec_len(per_cpu->counters)*4);
   old_counter = per_cpu->counters;
   per_cpu->counters = new_counter;
+  vlib_refcount_unlock(per_cpu->counter_lock);
   CLIB_MEMORY_BARRIER();
-  per_cpu->length = vec_len(new_counter);
   vec_free(old_counter);
 }
 
@@ -33,8 +34,12 @@ u64 vlib_refcount_get(vlib_refcount_t *r, u32 index)
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   u32 thread_index;
   for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++) {
-    if (r->per_cpu[thread_index].length > index)
-      count += r->per_cpu[thread_index].counters[index];
+    vlib_refcount_lock(r->per_cpu[thread_index].counter_lock);
+    if (index < vec_len(r->per_cpu[thread_index].counters))
+      {
+        count += r->per_cpu[thread_index].counters[index];
+      }
+    vlib_refcount_unlock(r->per_cpu[thread_index].counter_lock);
   }
   return count;
 }
similarity index 67%
rename from src/plugins/lb/refcount.h
rename to src/vnet/util/refcount.h
index dcfcb3f..ea92148 100644 (file)
 
 #include <vnet/vnet.h>
 
+/*
+ * Reference counting
+ * A specific reference counter is used. The design is quite
+ * similar to vlib counters but:
+ *   - It is possible to decrease the value
+ *   - Summing will not zero the per-thread counters
+ *   - Only the owning thread can reallocate its own counters vector (to avoid concurrency issues)
+*/
 typedef struct {
   u32 *counters;
-  u32 length;
-  u32 *reader_lengths;
+  volatile u32 *counter_lock;
   CLIB_CACHE_LINE_ALIGN_MARK(o);
 } vlib_refcount_per_cpu_t;
 
@@ -42,14 +49,27 @@ typedef struct {
   vlib_refcount_per_cpu_t *per_cpu;
 } vlib_refcount_t;
 
+static_always_inline
+void vlib_refcount_lock (volatile u32 *counter_lock)
+{
+  while (__sync_lock_test_and_set (counter_lock, 1))
+    ;
+}
+
+static_always_inline
+void vlib_refcount_unlock (volatile u32 *counter_lock)
+{
+  *counter_lock = 0;
+}
+
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
 
 static_always_inline
 void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
 {
   vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
-  if (PREDICT_FALSE(counter_index >= per_cpu->length))
-    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, per_cpu->length * 2));
+  if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
+    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16,(vec_len(per_cpu->counters)) * 2));
 
   per_cpu->counters[counter_index] += v;
 }
@@ -60,8 +80,16 @@ static_always_inline
 void vlib_refcount_init(vlib_refcount_t *r)
 {
   vlib_thread_main_t *tm = vlib_get_thread_main ();
+  u32 thread_index;
   r->per_cpu = 0;
   vec_validate (r->per_cpu, tm->n_vlib_mains - 1);
+
+  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
+    {
+      r->per_cpu[thread_index].counter_lock =
+         clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES,CLIB_CACHE_LINE_BYTES);
+      r->per_cpu[thread_index].counter_lock[0] = 0;
+    }
 }