/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/ip/lookup.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h>              /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_alloc.h>
#include <vnet/adj/adj_internal.h>
/*
 * distribution error tolerance for load-balancing
 */
const f64 multipath_next_hop_error_tolerance = 0.1;
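/*
 * Descriptive note: a tolerance of 0.1 allows up to 10% mean absolute
 * rounding error per bucket when path weights are quantised into a
 * power-of-two sized bucket array (see ip_multipath_normalize_next_hops()
 * below).
 */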
#ifdef LB_DEBUG
#define LB_DBG(_lb, _fmt, _args...)                                     \
{                                                                       \
    u8 *_tmp = NULL;                                                    \
    clib_warning("lb:[%s]:" _fmt,                                       \
                 load_balance_format(load_balance_get_index((_lb)),     \
                                     LOAD_BALANCE_FORMAT_NONE,          \
                                     0, _tmp),                          \
                 ##_args);                                              \
    vec_free(_tmp);                                                     \
}
#else
#define LB_DBG(_p, _fmt, _args...)
#endif
/**
 * Pool of all load-balance DPOs. It's not static so the data-plane can have fast access
 */
load_balance_t *load_balance_pool;
/**
 * The one instance of load-balance main
 */
load_balance_main_t load_balance_main;
f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}
static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}
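/**
 * @brief Return the bucket array of a load-balance: small instances use the
 * inline bucket storage within the object itself, larger ones use a
 * separately allocated vector.
 */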
static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}
static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;

    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    return (lb);
}
static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "locks:%d ", lb->lb_locks);
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }

    return (s);
}
u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}
static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}
static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}
index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}
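/*
 * Usage sketch (illustrative only): create a two-bucket IPv4 load-balance
 * and point both buckets at an existing next-hop DPO. 'nh_dpo' is assumed
 * to be a dpo_id_t stacked elsewhere, and a default flow-hash configuration
 * such as IP_FLOW_HASH_DEFAULT is used.
 *
 *    index_t lbi;
 *
 *    lbi = load_balance_create(2, DPO_PROTO_IP4, IP_FLOW_HASH_DEFAULT);
 *    load_balance_set_bucket(lbi, 0, &nh_dpo);
 *    load_balance_set_bucket(lbi, 1, &nh_dpo);
 */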
static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}
void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}
int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}
const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}
static int
next_hop_sort_by_weight (load_balance_path_t * n1,
                         load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}
/* The given next-hop vector is overwritten with a normalized copy: entries
   are sorted by weight and each weight is replaced by the number of
   adjacencies (buckets) assigned to that next hop.
   Returns the total number of adjacencies in the block. */
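/* Worked example (illustrative numbers): two next hops with weights {1, 3}
   and tolerance 0.1. The first attempt uses max_pow2(2) = 2 buckets, giving
   quantised weights {1, 1} with total rounding error 1.0 > 0.1 * 2, so the
   block is doubled to 4 buckets, where {1, 3} fits exactly (error 0.0) and
   the function returns 4. */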
static u32
ip_multipath_normalize_next_hops (load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 * sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        _vec_len (nhs) = 1;
        sum_weight = 1;
        goto done;
    }
    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            _vec_len (nhs) = 2;
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where traffic flows to within the specified error tolerance
       of the requested weights. */
    for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
    {
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the average error per adjacency within tolerance for this size
           adjacency block? */
        if (error <= multipath_next_hop_error_tolerance*n_adj)
        {
            /* Truncate any next hops with zero weight. */
            _vec_len (nhs) = i;
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;

    return (n_adj);
}
static load_balance_path_t *
load_balance_multipath_next_hop_fixup (load_balance_path_t *nhs,
                                       dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *nh;

        /*
         * we need something for the load-balance. so use the drop
         */
        vec_add2(nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
    }

    return (nhs);
}
/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}
static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}
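/**
 * @brief (Re)populate a load-balance object from a vector of weighted paths.
 * The path weights are normalised into a bucket count, a load-balance map is
 * taken if requested, and the bucket array is then rewritten in an order that
 * remains consistent for packets already in flight.
 */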
void
load_balance_multipath_update (const dpo_id_t *dpo,
                               load_balance_path_t * raw_next_hops,
                               load_balance_flags_t flags)
{
    u32 sum_of_weights, n_buckets, ii;
    load_balance_path_t * nh, * nhs;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    raw_next_hops =
        load_balance_multipath_next_hop_fixup(raw_next_hops,
                                              lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops(raw_next_hops,
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    ASSERT (n_buckets >= vec_len (raw_next_hops));

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets inflight see a consistent state, that
         * is the number of reported buckets the LB has (read from
         * lb_n_buckets_minus_1) is not more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array
         * first, then the reported number; vice-versa if the number of
         * buckets goes down.
         */
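        /*
         * Worked example (illustrative only): growing from 2 to 4 buckets,
         * all four bucket entries are written and a memory barrier issued
         * before lb_n_buckets_minus_1 changes from 1 to 3, so a worker that
         * still reads 1 only indexes the two buckets that were always valid.
         * Shrinking from 4 to 2, the count is dropped to 2 first so that no
         * worker can index buckets 2 or 3 once they are reset.
         */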
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * can use it.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-of-line. Alloc the out-of-line
                 * buckets first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                /*
                 * we are not crossing the threshold. we can write the new on the
                 * old, whether they be inline or not.
                 */
                load_balance_fill_buckets(lb, nhs,
                                          load_balance_get_buckets(lb),
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old
             * larger number of buckets, so will be translating to indices
             * out of range. So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();

            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-of-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (at this point the inline buckets are
                 *       used),
                 *   3 - free the out-of-line buckets.
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs,
                                          buckets,
                                          n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);

    load_balance_map_unlock(old_lbmi);
}
static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}
static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    pool_put(load_balance_pool, lb);
}
static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}
const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
};
/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * This means that these graph nodes are ones from which a load-balance is the
 * parent object in the DPO-graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup nodes.
 * Instead we are relying on the correct use of the .sibling_of field when
 * setting up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
    [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
};
void
load_balance_module_init (void)
{
    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    load_balance_map_module_init();
}
static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        vlib_cli_output (vm, "%U", format_load_balance, lbi,
                         LOAD_BALANCE_FORMAT_DETAIL);
    }
    else
    {
        load_balance_t *lb;

        pool_foreach(lb, load_balance_pool,
        ({
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }));
    }

    return 0;
}
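/*
 * Example CLI use (the output layout below is illustrative, produced by
 * load_balance_format() above):
 *
 *    vpp# show load-balance 1
 *    dpo-load-balance: [index:1 buckets:2 locks:1 to:[0:0]]
 *      [0] dpo-drop ip4
 *      [1] dpo-drop ip4
 */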
VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};