fib: add barrier sync, pool/vector expand cases 07/27407/2
author: Dave Barach <dave@barachs.net>
Wed, 3 Jun 2020 12:05:15 +0000 (08:05 -0400)
committer: Florin Coras <florin.coras@gmail.com>
Thu, 4 Jun 2020 14:42:26 +0000 (14:42 +0000)
load_balance_alloc_i(...) is not thread safe when the
load_balance_pool or combined counter vectors expand.

Type: fix

Signed-off-by: Dave Barach <dave@barachs.net>
Change-Id: I7f295ed77350d1df0434d5ff461eedafe79131de

src/vlib/counter.c
src/vlib/counter.h
src/vnet/dpo/load_balance.c

index edba375..adf667f 100644 (file)
@@ -119,6 +119,44 @@ vlib_validate_combined_counter (vlib_combined_counter_main_t * cm, u32 index)
                       3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
 }
 
+int
+  vlib_validate_combined_counter_will_expand
+  (vlib_combined_counter_main_t * cm, u32 index)
+{
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  int i;
+  void *oldheap = vlib_stats_push_heap (cm->counters);
+
+  /* Possibly once in recorded history */
+  if (PREDICT_FALSE (vec_len (cm->counters) == 0))
+    {
+      vlib_stats_pop_heap (cm, oldheap, index,
+                          3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+      return 1;
+    }
+
+  for (i = 0; i < tm->n_vlib_mains; i++)
+    {
+      /* Trivially OK, and proves that index >= vec_len(...) */
+      if (index < vec_len (cm->counters[i]))
+       continue;
+      if (_vec_resize_will_expand
+         (cm->counters[i],
+          index - vec_len (cm->counters[i]) /* length_increment */ ,
+          sizeof (cm->counters[i]) /* data_bytes */ ,
+          0 /* header_bytes */ ,
+          CLIB_CACHE_LINE_BYTES /* data_alignment */ ))
+       {
+         vlib_stats_pop_heap (cm, oldheap, index,
+                              3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+         return 1;
+       }
+    }
+  vlib_stats_pop_heap (cm, oldheap, index,
+                      3 /*STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED */ );
+  return 0;
+}
+
 void
 vlib_free_combined_counter (vlib_combined_counter_main_t * cm)
 {
index 7c90947..8a5aed4 100644 (file)
@@ -314,6 +314,9 @@ void vlib_free_simple_counter (vlib_simple_counter_main_t * cm);
 
 void vlib_validate_combined_counter (vlib_combined_counter_main_t * cm,
                                     u32 index);
+int vlib_validate_combined_counter_will_expand
+  (vlib_combined_counter_main_t * cm, u32 index);
+
 void vlib_free_combined_counter (vlib_combined_counter_main_t * cm);
 
 /** Obtain the number of simple or combined counters allocated.
index 7acccca..c029341 100644 (file)
@@ -93,12 +93,33 @@ static load_balance_t *
 load_balance_alloc_i (void)
 {
     load_balance_t *lb;
+    u8 need_barrier_sync = 0;
+    vlib_main_t *vm = vlib_get_main();
+    ASSERT (vm->thread_index == 0);
+
+    pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
+                                  CLIB_CACHE_LINE_BYTES);
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_sync (vm);
 
     pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
     clib_memset(lb, 0, sizeof(*lb));
 
     lb->lb_map = INDEX_INVALID;
     lb->lb_urpf = INDEX_INVALID;
+
+    if (need_barrier_sync == 0)
+    {
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_to_counters),
+             load_balance_get_index(lb));
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_via_counters),
+             load_balance_get_index(lb));
+        if (need_barrier_sync)
+            vlib_worker_thread_barrier_sync (vm);
+    }
+
     vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                    load_balance_get_index(lb));
     vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
@@ -108,6 +129,9 @@ load_balance_alloc_i (void)
     vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                                load_balance_get_index(lb));
 
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_release (vm);
+
     return (lb);
 }
 
@@ -1121,7 +1145,7 @@ load_balance_inline (vlib_main_t * vm,
              vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
          }
 
-         dpo0 = load_balance_get_bucket_i(lb0, 
+         dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));