* limitations under the License.
*/
-#include <vnet/ip/lookup.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/bier/bier_fwd.h>
+#include <vnet/fib/mpls_fib.h>
+#include <vnet/ip/ip4_inlines.h>
+#include <vnet/ip/ip6_inlines.h>
/*
 * distribution error tolerance for load-balancing
 */
const f64 multipath_next_hop_error_tolerance = 0.1;

static load_balance_t *
load_balance_alloc_i (void)
{
load_balance_t *lb;
+ u8 need_barrier_sync = 0;
+ vlib_main_t *vm = vlib_get_main();
+ ASSERT (vm->thread_index == 0);
+
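+ /*
+  * LBs are allocated from the main thread only; see the ASSERT above.
+  * If the pool is about to expand it will be reallocated and could
+  * move from under worker threads that are reading it, so take the
+  * worker barrier before growing it.
+  */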
+ pool_get_aligned_will_expand (load_balance_pool, need_barrier_sync,
+ CLIB_CACHE_LINE_BYTES);
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
clib_memset(lb, 0, sizeof(*lb));
lb->lb_map = INDEX_INVALID;
lb->lb_urpf = INDEX_INVALID;
+
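+ /*
+  * The per-LB counter vectors may also need to grow. Re-check only
+  * if the pool expansion did not already take the barrier.
+  */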
+ if (need_barrier_sync == 0)
+ {
+ need_barrier_sync += vlib_validate_combined_counter_will_expand
+ (&(load_balance_main.lbm_to_counters),
+ load_balance_get_index(lb));
+ need_barrier_sync += vlib_validate_combined_counter_will_expand
+ (&(load_balance_main.lbm_via_counters),
+ load_balance_get_index(lb));
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
+ }
+
vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
load_balance_get_index(lb));
vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));
vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                           load_balance_get_index(lb));
vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                           load_balance_get_index(lb));
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_release (vm);
+
return (lb);
}
return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}
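+/**
+ * Return the default flow-hash configuration for a protocol.
+ * Protocols that are not flow-hashed (ethernet, BIER, NSH) return 0.
+ */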
+flow_hash_config_t
+load_balance_get_default_flow_hash (dpo_proto_t lb_proto)
+{
+ switch (lb_proto)
+ {
+ case DPO_PROTO_IP4:
+ case DPO_PROTO_IP6:
+ return (IP_FLOW_HASH_DEFAULT);
+
+ case DPO_PROTO_MPLS:
+ return (MPLS_FLOW_HASH_DEFAULT);
+
+ case DPO_PROTO_ETHERNET:
+ case DPO_PROTO_BIER:
+ case DPO_PROTO_NSH:
+ break;
+ }
+
+ return (0);
+}
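+
+/*
+ * Illustrative use only (hypothetical caller, not part of this change):
+ * fall back to the protocol default when no explicit flow-hash config
+ * has been supplied:
+ *   fhc = (cfg ? cfg : load_balance_get_default_flow_hash (DPO_PROTO_IP4));
+ */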
+
static load_balance_t *
load_balance_create_i (u32 num_buckets,
dpo_proto_t lb_proto,
}
else
{
- clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
+ clib_memcpy_fast (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
}
/*
* when the weight skew is high (norm is small) and n == nf.
* without this correction the path with a low weight would have
- * no represenation in the load-balanace - don't want that.
+ * no representation in the load-balance - don't want that.
- * If the weight skew is high so the load-balance has many buckets
- * to allow it. pays ya money takes ya choice.
+ * If the weight skew is high, the load-balance needs many buckets
+ * to allow it - pays ya money, takes ya choice.
*/
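+ /*
+  * e.g. (illustrative numbers): weights {1, 100} over 16 buckets give
+  * norm = 16/101 ~= 0.16, so the weight-1 path would round to 0
+  * buckets without the correction.
+  */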
{
load_balance_t *lb;
- pool_foreach(lb, load_balance_pool,
- ({
+ pool_foreach (lb, load_balance_pool)
+ {
vlib_cli_output (vm, "%U", format_load_balance,
load_balance_get_index(lb),
LOAD_BALANCE_FORMAT_NONE);
- }));
+ }
}
return 0;
vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
}
- dpo0 = load_balance_get_bucket_i(lb0,
+ dpo0 = load_balance_get_bucket_i(lb0,
vnet_buffer(b0)->ip.flow_hash &
(lb0->lb_n_buckets_minus_1));