dpo: load-balance — barrier-sync workers before pool/counter expansion; add dv_get_mtu callback (NOTE: original subject line "tls: don't add listen to lookup table" did not match this diff's content)
[vpp.git] / src / vnet / dpo / load_balance.c
index 7acccca..a212532 100644 (file)
@@ -13,7 +13,6 @@
  * limitations under the License.
  */
 
-#include <vnet/ip/lookup.h>
 #include <vnet/dpo/load_balance.h>
 #include <vnet/dpo/load_balance_map.h>
 #include <vnet/dpo/drop_dpo.h>
 #include <vnet/fib/fib_urpf_list.h>
 #include <vnet/bier/bier_fwd.h>
 #include <vnet/fib/mpls_fib.h>
+#include <vnet/ip/ip4_inlines.h>
+#include <vnet/ip/ip6_inlines.h>
+
+// clang-format off
 
 /*
  * distribution error tolerance for load-balancing
@@ -93,12 +96,33 @@ static load_balance_t *
 load_balance_alloc_i (void)
 {
     load_balance_t *lb;
+    u8 need_barrier_sync = 0;
+    vlib_main_t *vm = vlib_get_main();
+    ASSERT (vm->thread_index == 0);
+
+    pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
+                                  CLIB_CACHE_LINE_BYTES);
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_sync (vm);
 
     pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
     clib_memset(lb, 0, sizeof(*lb));
 
     lb->lb_map = INDEX_INVALID;
     lb->lb_urpf = INDEX_INVALID;
+
+    if (need_barrier_sync == 0)
+    {
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_to_counters),
+             load_balance_get_index(lb));
+        need_barrier_sync += vlib_validate_combined_counter_will_expand
+            (&(load_balance_main.lbm_via_counters),
+             load_balance_get_index(lb));
+        if (need_barrier_sync)
+            vlib_worker_thread_barrier_sync (vm);
+    }
+
     vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                    load_balance_get_index(lb));
     vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
@@ -108,6 +132,9 @@ load_balance_alloc_i (void)
     vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                                load_balance_get_index(lb));
 
+    if (need_barrier_sync)
+        vlib_worker_thread_barrier_release (vm);
+
     return (lb);
 }
 
@@ -893,11 +920,30 @@ load_balance_mem_show (void)
     load_balance_map_show_mem();
 }
 
+static u16
+load_balance_dpo_get_mtu (const dpo_id_t *dpo)
+{
+    const dpo_id_t *buckets;
+    load_balance_t *lb;
+    u16 i, mtu = 0xffff;
+
+    lb = load_balance_get(dpo->dpoi_index);
+    buckets = load_balance_get_buckets(lb);
+
+    for (i = 0; i < lb->lb_n_buckets; i++)
+    {
+        mtu = clib_min (mtu, dpo_get_mtu (&buckets[i]));
+    }
+
+    return (mtu);
+}
+
 const static dpo_vft_t lb_vft = {
     .dv_lock = load_balance_lock,
     .dv_unlock = load_balance_unlock,
     .dv_format = format_load_balance_dpo,
     .dv_mem_show = load_balance_mem_show,
+    .dv_get_mtu = load_balance_dpo_get_mtu,
 };
 
 /**
@@ -1004,12 +1050,12 @@ load_balance_show (vlib_main_t * vm,
     {
         load_balance_t *lb;
 
-        pool_foreach(lb, load_balance_pool,
-        ({
+        pool_foreach (lb, load_balance_pool)
+         {
             vlib_cli_output (vm, "%U", format_load_balance,
                              load_balance_get_index(lb),
                              LOAD_BALANCE_FORMAT_NONE);
-        }));
+        }
     }
 
     return 0;
@@ -1121,7 +1167,7 @@ load_balance_inline (vlib_main_t * vm,
              vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
          }
 
-         dpo0 = load_balance_get_bucket_i(lb0, 
+         dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));
 
@@ -1298,3 +1344,5 @@ VLIB_REGISTER_NODE (bier_load_balance_node) = {
   .format_trace = format_bier_load_balance_trace,
   .sibling_of = "mpls-load-balance",
 };
+
+// clang-format on