2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/dpo/load_balance.h>
17 #include <vnet/dpo/load_balance_map.h>
18 #include <vnet/dpo/drop_dpo.h>
19 #include <vppinfra/math.h> /* for fabs */
20 #include <vnet/adj/adj.h>
21 #include <vnet/adj/adj_internal.h>
22 #include <vnet/fib/fib_urpf_list.h>
23 #include <vnet/bier/bier_fwd.h>
24 #include <vnet/fib/mpls_fib.h>
25 #include <vnet/ip/ip4_inlines.h>
26 #include <vnet/ip/ip6_inlines.h>
31 * distribution error tolerance for load-balancing
33 const f64 multipath_next_hop_error_tolerance = 0.1;
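/*
 * A rough illustration (added commentary, not from the original source): with
 * a tolerance of 0.1, the normalization below accepts a bucket allocation once
 * the summed rounding error is at most 10% of the block size, i.e. an average
 * error of 0.1 buckets per bucket. See ip_multipath_normalize_next_hops() for
 * the actual check: error <= multipath_next_hop_error_tolerance * n_adj.
 */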
35 static const char *load_balance_attr_names[] = LOAD_BALANCE_ATTR_NAMES;
40 vlib_log_class_t load_balance_logger;
42 #define LB_DBG(_lb, _fmt, _args...) \
44 vlib_log_debug(load_balance_logger, \
46 format_load_balance, load_balance_get_index(_lb), \
47 LOAD_BALANCE_FORMAT_NONE, \
 * Pool of all load-balance DPOs. It's not static so the data-plane can have fast access
54 load_balance_t *load_balance_pool;
57 * The one instance of load-balance main
59 load_balance_main_t load_balance_main = {
62 .stat_segment_name = "/net/route/to",
66 .stat_segment_name = "/net/route/via",
71 load_balance_get_multipath_tolerance (void)
73 return (multipath_next_hop_error_tolerance);
77 load_balance_get_index (const load_balance_t *lb)
79 return (lb - load_balance_pool);
82 static inline dpo_id_t*
83 load_balance_get_buckets (load_balance_t *lb)
85 if (LB_HAS_INLINE_BUCKETS(lb))
87 return (lb->lb_buckets_inline);
91 return (lb->lb_buckets);
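/*
 * Note on the accessor above (added commentary): load-balance objects with at
 * most LB_NUM_INLINE_BUCKETS buckets keep their bucket array inline in the
 * load_balance_t itself, avoiding a pointer indirection in the forwarding
 * path; larger objects spill to the separately allocated lb_buckets vector.
 */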
95 static load_balance_t *
96 load_balance_alloc_i (void)
99 u8 need_barrier_sync = 0;
100 vlib_main_t *vm = vlib_get_main();
101 ASSERT (vm->thread_index == 0);
103 need_barrier_sync = pool_get_will_expand (load_balance_pool);
105 if (need_barrier_sync)
106 vlib_worker_thread_barrier_sync (vm);
108 pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
109 clib_memset(lb, 0, sizeof(*lb));
111 lb->lb_map = INDEX_INVALID;
112 lb->lb_urpf = INDEX_INVALID;
114 if (need_barrier_sync == 0)
116 need_barrier_sync += vlib_validate_combined_counter_will_expand
117 (&(load_balance_main.lbm_to_counters),
118 load_balance_get_index(lb));
119 need_barrier_sync += vlib_validate_combined_counter_will_expand
120 (&(load_balance_main.lbm_via_counters),
121 load_balance_get_index(lb));
122 if (need_barrier_sync)
123 vlib_worker_thread_barrier_sync (vm);
126 vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
127 load_balance_get_index(lb));
128 vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
129 load_balance_get_index(lb));
130 vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
131 load_balance_get_index(lb));
132 vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
133 load_balance_get_index(lb));
135 if (need_barrier_sync)
136 vlib_worker_thread_barrier_release (vm);
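    /*
     * Added note: the barrier sync above is taken only when the pool or one of
     * the combined-counter vectors would have to be reallocated to accommodate
     * the new index. Worker threads read these structures without locks, so
     * any reallocation must happen while they are held at the barrier.
     */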
142 load_balance_format (index_t lbi,
143 load_balance_format_flags_t flags,
147 vlib_counter_t to, via;
152 lb = load_balance_get(lbi);
153 vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
154 vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
155 buckets = load_balance_get_buckets(lb);
157 s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
158 s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
159 s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
160 s = format(s, "uRPF:%d ", lb->lb_urpf);
163 load_balance_attr_t attr;
165 s = format(s, "flags:[");
167 FOR_EACH_LOAD_BALANCE_ATTR(attr)
169 if (lb->lb_flags & (1 << attr))
171 s = format (s, "%s", load_balance_attr_names[attr]);
176 s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
177 if (0 != via.packets)
179 s = format(s, " via:[%Ld:%Ld]",
180 via.packets, via.bytes);
184 if (INDEX_INVALID != lb->lb_map)
186 s = format(s, "\n%U%U",
187 format_white_space, indent+4,
188 format_load_balance_map, lb->lb_map, indent+4);
190 for (i = 0; i < lb->lb_n_buckets; i++)
192 s = format(s, "\n%U[%d] %U",
193 format_white_space, indent+2,
196 &buckets[i], indent+6);
202 format_load_balance (u8 * s, va_list * args)
204 index_t lbi = va_arg(*args, index_t);
205 load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);
207 return (load_balance_format(lbi, flags, 0, s));
211 format_load_balance_dpo (u8 * s, va_list * args)
213 index_t lbi = va_arg(*args, index_t);
214 u32 indent = va_arg(*args, u32);
216 return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
220 load_balance_get_default_flow_hash (dpo_proto_t lb_proto)
226 return (IP_FLOW_HASH_DEFAULT);
229 return (MPLS_FLOW_HASH_DEFAULT);
231 case DPO_PROTO_ETHERNET:
240 static load_balance_t *
241 load_balance_create_i (u32 num_buckets,
242 dpo_proto_t lb_proto,
243 flow_hash_config_t fhc)
247 lb = load_balance_alloc_i();
248 lb->lb_hash_config = fhc;
249 lb->lb_n_buckets = num_buckets;
250 lb->lb_n_buckets_minus_1 = num_buckets-1;
251 lb->lb_proto = lb_proto;
253 if (!LB_HAS_INLINE_BUCKETS(lb))
255 vec_validate_aligned(lb->lb_buckets,
256 lb->lb_n_buckets - 1,
257 CLIB_CACHE_LINE_BYTES);
260 LB_DBG(lb, "create");
266 load_balance_create (u32 n_buckets,
267 dpo_proto_t lb_proto,
268 flow_hash_config_t fhc)
270 return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
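/*
 * Usage sketch (illustrative only; dpo0 and dpo1 are hypothetical dpo_id_t
 * values): create a two-bucket IPv4 load-balance with the default flow hash
 * and point each bucket at an existing DPO:
 *
 *   index_t lbi = load_balance_create(
 *       2, DPO_PROTO_IP4,
 *       load_balance_get_default_flow_hash(DPO_PROTO_IP4));
 *   load_balance_set_bucket(lbi, 0, &dpo0);
 *   load_balance_set_bucket(lbi, 1, &dpo1);
 */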
274 load_balance_set_bucket_i (load_balance_t *lb,
277 const dpo_id_t *next)
279 dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
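    /*
     * Added note: the dpo_stack() call above both copies 'next' into the
     * bucket slot and ensures the VLIB graph edges from the load-balance
     * nodes to the child DPO's node exist, so the bucket can be followed in
     * the data-plane.
     */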
283 load_balance_set_bucket (index_t lbi,
285 const dpo_id_t *next)
290 lb = load_balance_get(lbi);
291 buckets = load_balance_get_buckets(lb);
293 ASSERT(bucket < lb->lb_n_buckets);
295 load_balance_set_bucket_i(lb, bucket, buckets, next);
299 load_balance_is_drop (const dpo_id_t *dpo)
303 if (DPO_LOAD_BALANCE != dpo->dpoi_type)
306 lb = load_balance_get(dpo->dpoi_index);
308 if (1 == lb->lb_n_buckets)
310 return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
316 load_balance_n_buckets (index_t lbi)
320 lb = load_balance_get(lbi);
322 return (lb->lb_n_buckets);
326 load_balance_set_fib_entry_flags (index_t lbi,
327 fib_entry_flag_t flags)
331 lb = load_balance_get(lbi);
332 lb->lb_fib_entry_flags = flags;
337 load_balance_set_urpf (index_t lbi,
343 lb = load_balance_get(lbi);
     * packets in flight may see this change, but the write is atomic, so that is acceptable.
351 fib_urpf_list_unlock(old);
352 fib_urpf_list_lock(urpf);
356 load_balance_get_urpf (index_t lbi)
360 lb = load_balance_get(lbi);
362 return (lb->lb_urpf);
366 load_balance_get_bucket (index_t lbi,
371 lb = load_balance_get(lbi);
373 return (load_balance_get_bucket_i(lb, bucket));
377 next_hop_sort_by_weight (const load_balance_path_t * n1,
378 const load_balance_path_t * n2)
380 return ((int) n1->path_weight - (int) n2->path_weight);
/* The given next-hop vector is over-written with a normalized copy: entries are
   sorted by weight and each weight is replaced by the number of adjacencies
   (buckets) assigned to that next hop. Returns the number of adjacencies in the block. */
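/* Worked example (added, illustrative only): two next hops with weights 2 and
   1, so sum_weight = 3. The first candidate block size is max_pow2(2) = 2: the
   ideal shares are 1.33 and 0.67 buckets, a total rounding error of ~0.67,
   which exceeds 0.1 * 2, so the block is doubled. A block of 4 fails the same
   test (~0.67 > 0.4). At 8 buckets the paths are given 5 and 3 buckets and the
   error ~0.67 <= 0.8, so the function returns a block of 8. */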
387 ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
388 load_balance_path_t ** normalized_next_hops,
390 f64 multipath_next_hop_error_tolerance)
392 load_balance_path_t * nhs;
393 uword n_nhs, n_adj, n_adj_left, i, sum_weight;
396 n_nhs = vec_len (raw_next_hops);
401 /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
402 nhs = *normalized_next_hops;
403 vec_validate (nhs, 2*n_nhs - 1);
405 /* Fast path: 1 next hop in block. */
409 nhs[0] = raw_next_hops[0];
410 nhs[0].path_weight = 1;
411 vec_set_len (nhs, 1);
418 int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;
421 nhs[0] = raw_next_hops[cmp];
422 nhs[1] = raw_next_hops[cmp ^ 1];
424 /* Fast path: equal cost multipath with 2 next hops. */
425 if (nhs[0].path_weight == nhs[1].path_weight)
427 nhs[0].path_weight = nhs[1].path_weight = 1;
428 vec_set_len (nhs, 2);
435 clib_memcpy_fast (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
436 qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
439 /* Find total weight to normalize weights. */
441 for (i = 0; i < n_nhs; i++)
442 sum_weight += nhs[i].path_weight;
444 /* In the unlikely case that all weights are given as 0, set them all to 1. */
447 for (i = 0; i < n_nhs; i++)
448 nhs[i].path_weight = 1;
  /* Save copies of all next-hop weights so they are not overwritten by the loop below. */
453 for (i = 0; i < n_nhs; i++)
454 nhs[n_nhs + i].path_weight = nhs[i].path_weight;
  /* Try larger and larger power-of-2 sized adjacency blocks until we
     find one where traffic flows to within the configured error tolerance
     of the specified weights. */
458 for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
462 norm = n_adj / ((f64) sum_weight);
464 for (i = 0; i < n_nhs; i++)
466 f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
467 word n = flt_round_nearest (nf);
469 n = n > n_adj_left ? n_adj_left : n;
471 error += fabs (nf - n);
472 nhs[i].path_weight = n;
474 if (0 == nhs[i].path_weight)
               * This can happen when the weight skew is high (norm is small)
               * and n rounds to zero. Without this correction a path with a
               * low weight would have no representation in the load-balance,
               * which we don't want. If the weight skew is that high, the
               * load-balance needs more buckets to allow for it.
488 nhs[0].path_weight += n_adj_left;
      /* Is the average error per adjacency within the configured tolerance for this block size? */
491 if (error <= multipath_next_hop_error_tolerance*n_adj)
493 /* Truncate any next hops with zero weight. */
494 vec_set_len (nhs, i);
500 /* Save vector for next call. */
501 *normalized_next_hops = nhs;
502 *sum_weight_in = sum_weight;
506 static load_balance_path_t *
507 load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
508 dpo_proto_t drop_proto)
510 if (0 == vec_len(nhs))
512 load_balance_path_t *new_nhs = NULL, *nh;
         * We need at least one path for the load-balance, so use the drop DPO.
517 vec_add2(new_nhs, nh, 1);
520 dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
529 * Fill in adjacencies in block based on corresponding
530 * next hop adjacencies.
533 load_balance_fill_buckets_norm (load_balance_t *lb,
534 load_balance_path_t *nhs,
538 load_balance_path_t *nh;
544 * the next-hops have normalised weights. that means their sum is the number
545 * of buckets we need to fill.
547 vec_foreach (nh, nhs)
549 for (ii = 0; ii < nh->path_weight; ii++)
551 ASSERT(bucket < n_buckets);
552 load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
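/*
 * Added commentary on the 'sticky' variant below: buckets that would map to a
 * drop path are instead filled round-robin from the remaining forwarding
 * paths, while buckets of forwarding paths keep their usual positions. This
 * keeps the bucket layout stable for the surviving paths, so flows mapped to
 * them are not reshuffled when one path goes down.
 */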
557 load_balance_fill_buckets_sticky (load_balance_t *lb,
558 load_balance_path_t *nhs,
562 load_balance_path_t *nh, *fwding_paths;
563 u16 ii, bucket, fpath;
568 vec_foreach (nh, nhs)
570 if (!dpo_is_drop(&nh->path_dpo))
572 vec_add1(fwding_paths, *nh);
575 if (vec_len(fwding_paths) == 0)
576 fwding_paths = vec_dup(nhs);
579 * the next-hops have normalised weights. that means their sum is the number
580 * of buckets we need to fill.
582 vec_foreach (nh, nhs)
584 for (ii = 0; ii < nh->path_weight; ii++)
586 ASSERT(bucket < n_buckets);
587 if (!dpo_is_drop(&nh->path_dpo))
589 load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
              /* fill this bucket from the next usable forwarding path, round-robin */
594 load_balance_set_bucket_i(lb, bucket++, buckets, &fwding_paths[fpath].path_dpo);
595 ASSERT(vec_len(fwding_paths) > 0);
596 fpath = (fpath + 1) % vec_len(fwding_paths);
601 vec_free(fwding_paths);
605 load_balance_fill_buckets (load_balance_t *lb,
606 load_balance_path_t *nhs,
609 load_balance_flags_t flags)
611 if (flags & LOAD_BALANCE_FLAG_STICKY)
613 load_balance_fill_buckets_sticky(lb, nhs, buckets, n_buckets);
617 load_balance_fill_buckets_norm(lb, nhs, buckets, n_buckets);
622 load_balance_set_n_buckets (load_balance_t *lb,
625 lb->lb_n_buckets = n_buckets;
626 lb->lb_n_buckets_minus_1 = n_buckets-1;
630 load_balance_multipath_update (const dpo_id_t *dpo,
631 const load_balance_path_t * raw_nhs,
632 load_balance_flags_t flags)
634 load_balance_path_t *nh, *nhs, *fixed_nhs;
635 u32 sum_of_weights, n_buckets, ii;
636 index_t lbmi, old_lbmi;
642 ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
643 lb = load_balance_get(dpo->dpoi_index);
644 lb->lb_flags = flags;
645 fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
647 ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
652 multipath_next_hop_error_tolerance);
654 ASSERT (n_buckets >= vec_len (raw_nhs));
657 * Save the old load-balance map used, and get a new one if required.
659 old_lbmi = lb->lb_map;
660 if (flags & LOAD_BALANCE_FLAG_USES_MAP)
662 lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
666 lbmi = INDEX_INVALID;
669 if (0 == lb->lb_n_buckets)
672 * first time initialisation. no packets inflight, so we can write
675 load_balance_set_n_buckets(lb, n_buckets);
677 if (!LB_HAS_INLINE_BUCKETS(lb))
678 vec_validate_aligned(lb->lb_buckets,
679 lb->lb_n_buckets - 1,
680 CLIB_CACHE_LINE_BYTES);
682 load_balance_fill_buckets(lb, nhs,
683 load_balance_get_buckets(lb),
     * This is a modification of an existing load-balance.
     * We need to ensure that packets in flight see a consistent state; that
     * is, the number of buckets the LB reports (read from
     * lb_n_buckets_minus_1) is never more than it actually has. So if the
     * number of buckets is increasing we must update the bucket array first,
     * then the reported number, and vice-versa if the number of buckets goes down.
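    /*
     * In short, the ordering used below is (added summary):
     *   grow:   write the new buckets, memory barrier, then raise the count;
     *   shrink: lower the count, memory barrier, then rewrite the buckets.
     */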
697 if (n_buckets == lb->lb_n_buckets)
700 * no change in the number of buckets. we can simply fill what
701 * is new over what is old.
703 load_balance_fill_buckets(lb, nhs,
704 load_balance_get_buckets(lb),
708 else if (n_buckets > lb->lb_n_buckets)
         * we have more buckets. The old load-balance map (if there is one)
         * will remain valid, i.e. it maps to indices within range, so we can
         * continue to use it.
715 if (n_buckets > LB_NUM_INLINE_BUCKETS &&
716 lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
             * the new increased number of buckets is crossing the threshold
             * from inline storage to out-of-line. Allocate the out-of-line
             * buckets first, then fix up the number, then reset the inline buckets.
723 ASSERT(NULL == lb->lb_buckets);
724 vec_validate_aligned(lb->lb_buckets,
726 CLIB_CACHE_LINE_BYTES);
728 load_balance_fill_buckets(lb, nhs,
731 CLIB_MEMORY_BARRIER();
732 load_balance_set_n_buckets(lb, n_buckets);
734 CLIB_MEMORY_BARRIER();
736 for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
738 dpo_reset(&lb->lb_buckets_inline[ii]);
743 if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                 * we are not crossing the inline threshold, so the buckets
                 * stay inline. We can write the new buckets over the old ones.
749 load_balance_fill_buckets(lb, nhs,
750 load_balance_get_buckets(lb),
752 CLIB_MEMORY_BARRIER();
753 load_balance_set_n_buckets(lb, n_buckets);
758 * we are not crossing the threshold. We need a new bucket array to
759 * hold the increased number of choices.
761 dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;
764 old_buckets = load_balance_get_buckets(lb);
766 vec_validate_aligned(new_buckets,
768 CLIB_CACHE_LINE_BYTES);
770 load_balance_fill_buckets(lb, nhs, new_buckets,
772 CLIB_MEMORY_BARRIER();
773 lb->lb_buckets = new_buckets;
774 CLIB_MEMORY_BARRIER();
775 load_balance_set_n_buckets(lb, n_buckets);
777 vec_foreach(tmp_dpo, old_buckets)
781 vec_free(old_buckets);
786 * buckets fixed. ready for the MAP update.
793 * bucket size shrinkage.
794 * Any map we have will be based on the old
795 * larger number of buckets, so will be translating to indices
796 * out of range. So the new MAP must be installed first.
799 CLIB_MEMORY_BARRIER();
802 if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
803 lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
             * the new decreased number of buckets is crossing the threshold
             * from out-of-line storage to inline:
             * 1 - fill the inline buckets,
             * 2 - fixup the number (at this point the inline buckets are in use),
             * 3 - free the out-of-line buckets.
813 load_balance_fill_buckets(lb, nhs,
814 lb->lb_buckets_inline,
816 CLIB_MEMORY_BARRIER();
817 load_balance_set_n_buckets(lb, n_buckets);
818 CLIB_MEMORY_BARRIER();
820 vec_foreach(tmp_dpo, lb->lb_buckets)
824 vec_free(lb->lb_buckets);
829 * not crossing the threshold.
830 * 1 - update the number to the smaller size
831 * 2 - write the new buckets
832 * 3 - reset those no longer used.
837 old_n_buckets = lb->lb_n_buckets;
838 buckets = load_balance_get_buckets(lb);
840 load_balance_set_n_buckets(lb, n_buckets);
841 CLIB_MEMORY_BARRIER();
843 load_balance_fill_buckets(lb, nhs, buckets,
846 for (ii = n_buckets; ii < old_n_buckets; ii++)
848 dpo_reset(&buckets[ii]);
854 vec_foreach (nh, nhs)
856 dpo_reset(&nh->path_dpo);
861 load_balance_map_unlock(old_lbmi);
865 load_balance_lock (dpo_id_t *dpo)
869 lb = load_balance_get(dpo->dpoi_index);
875 load_balance_destroy (load_balance_t *lb)
880 buckets = load_balance_get_buckets(lb);
882 for (i = 0; i < lb->lb_n_buckets; i++)
884 dpo_reset(&buckets[i]);
887 LB_DBG(lb, "destroy");
888 if (!LB_HAS_INLINE_BUCKETS(lb))
890 vec_free(lb->lb_buckets);
893 fib_urpf_list_unlock(lb->lb_urpf);
894 load_balance_map_unlock(lb->lb_map);
896 pool_put(load_balance_pool, lb);
900 load_balance_unlock (dpo_id_t *dpo)
904 lb = load_balance_get(dpo->dpoi_index);
908 if (0 == lb->lb_locks)
910 load_balance_destroy(lb);
915 load_balance_mem_show (void)
917 fib_show_memory_usage("load-balance",
918 pool_elts(load_balance_pool),
919 pool_len(load_balance_pool),
920 sizeof(load_balance_t));
921 load_balance_map_show_mem();
925 load_balance_dpo_get_mtu (const dpo_id_t *dpo)
927 const dpo_id_t *buckets;
931 lb = load_balance_get(dpo->dpoi_index);
932 buckets = load_balance_get_buckets(lb);
934 for (i = 0; i < lb->lb_n_buckets; i++)
936 mtu = clib_min (mtu, dpo_get_mtu (&buckets[i]));
942 const static dpo_vft_t lb_vft = {
943 .dv_lock = load_balance_lock,
944 .dv_unlock = load_balance_unlock,
945 .dv_format = format_load_balance_dpo,
946 .dv_mem_show = load_balance_mem_show,
947 .dv_get_mtu = load_balance_dpo_get_mtu,
951 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 * This means that these graph nodes are ones for which a load-balance is the
 * parent object in the DPO graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup nodes;
 * instead we rely on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
961 const static char* const load_balance_ip4_nodes[] =
966 const static char* const load_balance_ip6_nodes[] =
971 const static char* const load_balance_mpls_nodes[] =
976 const static char* const load_balance_l2_nodes[] =
981 const static char* const load_balance_nsh_nodes[] =
986 const static char* const load_balance_bier_nodes[] =
991 const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
993 [DPO_PROTO_IP4] = load_balance_ip4_nodes,
994 [DPO_PROTO_IP6] = load_balance_ip6_nodes,
995 [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
996 [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
997 [DPO_PROTO_NSH] = load_balance_nsh_nodes,
998 [DPO_PROTO_BIER] = load_balance_bier_nodes,
1002 load_balance_module_init (void)
1006 dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);
     * Special LB with index zero. We need to define this since the v4 mtrie
     * assumes that an index of 0 implies the ply is empty; therefore all
     * 'real' adjacencies need a non-zero index.
     * This should never be used, but just in case, stack it on a drop.
1014 lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
1015 load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));
1017 load_balance_logger =
1018 vlib_log_register_class("dpo", "load-balance");
1020 load_balance_map_module_init();
1023 static clib_error_t *
1024 load_balance_show (vlib_main_t * vm,
1025 unformat_input_t * input,
1026 vlib_cli_command_t * cmd)
1028 index_t lbi = INDEX_INVALID;
1030 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1032 if (unformat (input, "%d", &lbi))
1038 if (INDEX_INVALID != lbi)
1040 if (pool_is_free_index(load_balance_pool, lbi))
1042 vlib_cli_output (vm, "no such load-balance:%d", lbi);
1046 vlib_cli_output (vm, "%U", format_load_balance, lbi,
1047 LOAD_BALANCE_FORMAT_DETAIL);
1054 pool_foreach (lb, load_balance_pool)
1056 vlib_cli_output (vm, "%U", format_load_balance,
1057 load_balance_get_index(lb),
1058 LOAD_BALANCE_FORMAT_NONE);
1065 VLIB_CLI_COMMAND (load_balance_show_command, static) = {
1066 .path = "show load-balance",
1067 .short_help = "show load-balance [<index>]",
1068 .function = load_balance_show,
1073 ip_flow_hash (void *data)
1075 ip4_header_t *iph = (ip4_header_t *) data;
1077 if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
1078 return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
1080 return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
1086 return (*((u64 *) m) & 0xffffffffffff);
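/*
 * Added note: the expression above reads 8 bytes starting at the MAC address
 * and masks the result down to 48 bits, so only the 6-byte address
 * contributes to the hash mix below.
 */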
1090 l2_flow_hash (vlib_buffer_t * b0)
1092 ethernet_header_t *eh;
1094 uword is_ip, eh_size;
1097 eh = vlib_buffer_get_current (b0);
1098 eh_type = clib_net_to_host_u16 (eh->type);
1099 eh_size = ethernet_buffer_header_size (b0);
1101 is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
1103 /* since we have 2 cache lines, use them */
1105 a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
1109 b = mac_to_u64 ((u8 *) eh->dst_address);
1110 c = mac_to_u64 ((u8 *) eh->src_address);
1111 hash_mix64 (a, b, c);
1116 typedef struct load_balance_trace_t_
1119 } load_balance_trace_t;
1122 load_balance_inline (vlib_main_t * vm,
1123 vlib_node_runtime_t * node,
1124 vlib_frame_t * frame,
1127 u32 n_left_from, next_index, *from, *to_next;
1129 from = vlib_frame_vector_args (frame);
1130 n_left_from = frame->n_vectors;
1132 next_index = node->cached_next_index;
1134 while (n_left_from > 0)
1138 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1140 while (n_left_from > 0 && n_left_to_next > 0)
1143 u32 bi0, lbi0, next0;
1144 const dpo_id_t *dpo0;
1145 const load_balance_t *lb0;
1152 n_left_to_next -= 1;
1154 b0 = vlib_get_buffer (vm, bi0);
1156 /* lookup dst + src mac */
1157 lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
1158 lb0 = load_balance_get(lbi0);
1162 vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
1167 const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
1168 vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
1171 dpo0 = load_balance_get_bucket_i(lb0,
1172 vnet_buffer(b0)->ip.flow_hash &
1173 (lb0->lb_n_buckets_minus_1));
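	  /*
	   * Added note: masking with lb_n_buckets_minus_1 selects a bucket
	   * without a modulo; this relies on the bucket count being a power
	   * of two, as produced by ip_multipath_normalize_next_hops().
	   */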
1175 next0 = dpo0->dpoi_next_node;
1176 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1178 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1180 load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
1182 tr->lb_index = lbi0;
1184 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1185 n_left_to_next, bi0, next0);
1188 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1191 return frame->n_vectors;
1195 l2_load_balance (vlib_main_t * vm,
1196 vlib_node_runtime_t * node,
1197 vlib_frame_t * frame)
1199 return (load_balance_inline(vm, node, frame, 1));
1203 format_l2_load_balance_trace (u8 * s, va_list * args)
1205 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1206 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1207 load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1209 s = format (s, "L2-load-balance: index %d", t->lb_index);
1216 VLIB_REGISTER_NODE (l2_load_balance_node) = {
1217 .function = l2_load_balance,
1218 .name = "l2-load-balance",
1219 .vector_size = sizeof (u32),
1221 .format_trace = format_l2_load_balance_trace,
1229 nsh_load_balance (vlib_main_t * vm,
1230 vlib_node_runtime_t * node,
1231 vlib_frame_t * frame)
1233 u32 n_left_from, next_index, *from, *to_next;
1235 from = vlib_frame_vector_args (frame);
1236 n_left_from = frame->n_vectors;
1238 next_index = node->cached_next_index;
1240 while (n_left_from > 0)
1244 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1246 while (n_left_from > 0 && n_left_to_next > 0)
1249 u32 bi0, lbi0, next0, *nsh0;
1250 const dpo_id_t *dpo0;
1251 const load_balance_t *lb0;
1258 n_left_to_next -= 1;
1260 b0 = vlib_get_buffer (vm, bi0);
1262 lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
1263 lb0 = load_balance_get(lbi0);
1265 /* SPI + SI are the second word of the NSH header */
1266 nsh0 = vlib_buffer_get_current (b0);
1267 vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;
1269 dpo0 = load_balance_get_bucket_i(lb0,
1270 vnet_buffer(b0)->ip.flow_hash &
1271 (lb0->lb_n_buckets_minus_1));
1273 next0 = dpo0->dpoi_next_node;
1274 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
1276 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1278 load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
1280 tr->lb_index = lbi0;
1282 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1283 n_left_to_next, bi0, next0);
1286 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1289 return frame->n_vectors;
1293 format_nsh_load_balance_trace (u8 * s, va_list * args)
1295 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1296 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1297 load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1299 s = format (s, "NSH-load-balance: index %d", t->lb_index);
1306 VLIB_REGISTER_NODE (nsh_load_balance_node) = {
1307 .function = nsh_load_balance,
1308 .name = "nsh-load-balance",
1309 .vector_size = sizeof (u32),
1311 .format_trace = format_nsh_load_balance_trace,
1319 format_bier_load_balance_trace (u8 * s, va_list * args)
1321 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1322 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1323 load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
1325 s = format (s, "BIER-load-balance: index %d", t->lb_index);
1330 bier_load_balance (vlib_main_t * vm,
1331 vlib_node_runtime_t * node,
1332 vlib_frame_t * frame)
1334 return (load_balance_inline(vm, node, frame, 0));
1340 VLIB_REGISTER_NODE (bier_load_balance_node) = {
1341 .function = bier_load_balance,
1342 .name = "bier-load-balance",
1343 .vector_size = sizeof (u32),
1345 .format_trace = format_bier_load_balance_trace,
1346 .sibling_of = "mpls-load-balance",