fib: fix load-balance and replicate dpos buckets overflow
src/vnet/dpo/load_balance.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h>              /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/bier/bier_fwd.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/ip/ip4_inlines.h>
#include <vnet/ip/ip6_inlines.h>

// clang-format off

/*
 * distribution error tolerance for load-balancing
 */
const f64 multipath_next_hop_error_tolerance = 0.1;

static const char *load_balance_attr_names[] = LOAD_BALANCE_ATTR_NAMES;

/**
 * the logger
 */
vlib_log_class_t load_balance_logger;

#define LB_DBG(_lb, _fmt, _args...)                                     \
{                                                                       \
    vlib_log_debug(load_balance_logger,                                 \
                   "lb:[%U]:" _fmt,                                     \
                   format_load_balance, load_balance_get_index(_lb),    \
                   LOAD_BALANCE_FORMAT_NONE,                            \
                   ##_args);                                            \
}

/**
 * Pool of all DPOs. It's not static so the DP can have fast access
 */
load_balance_t *load_balance_pool;

/**
 * The one instance of load-balance main
 */
load_balance_main_t load_balance_main = {
    .lbm_to_counters = {
        .name = "route-to",
        .stat_segment_name = "/net/route/to",
    },
    .lbm_via_counters = {
        .name = "route-via",
        .stat_segment_name = "/net/route/via",
    }
};

f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}

static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}

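/**
 * Bucket storage note: up to LB_NUM_INLINE_BUCKETS dpo_id_t entries live
 * inline in the load_balance_t itself (the common small-ECMP case);
 * larger bucket sets spill to the separately allocated lb_buckets vector.
 */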
static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}

static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;
    u8 need_barrier_sync = 0;
    vlib_main_t *vm = vlib_get_main();
    ASSERT (vm->thread_index == 0);

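    /*
     * Allocation happens only on the main thread. If the pool (or the
     * counter vectors below) must reallocate, take the worker barrier
     * so no worker dereferences a stale pointer mid-expansion.
     */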
    need_barrier_sync = pool_get_will_expand (load_balance_pool);

    if (need_barrier_sync)
        vlib_worker_thread_barrier_sync (vm);

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    clib_memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;

    if (need_barrier_sync == 0)
    {
        need_barrier_sync += vlib_validate_combined_counter_will_expand
            (&(load_balance_main.lbm_to_counters),
             load_balance_get_index(lb));
        need_barrier_sync += vlib_validate_combined_counter_will_expand
            (&(load_balance_main.lbm_via_counters),
             load_balance_get_index(lb));
        if (need_barrier_sync)
            vlib_worker_thread_barrier_sync (vm);
    }

    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    if (need_barrier_sync)
        vlib_worker_thread_barrier_release (vm);

    return (lb);
}

static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
    s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    if (lb->lb_flags)
    {
        load_balance_attr_t attr;

        s = format(s, "flags:[");

        FOR_EACH_LOAD_BALANCE_ATTR(attr)
        {
            if (lb->lb_flags & (1 << attr))
            {
                s = format (s, "%s", load_balance_attr_names[attr]);
            }
        }
        s = format(s, "] ");
    }
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}

static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}

flow_hash_config_t
load_balance_get_default_flow_hash (dpo_proto_t lb_proto)
{
    switch (lb_proto)
    {
    case DPO_PROTO_IP4:
    case DPO_PROTO_IP6:
        return (IP_FLOW_HASH_DEFAULT);

    case DPO_PROTO_MPLS:
        return (MPLS_FLOW_HASH_DEFAULT);

    case DPO_PROTO_ETHERNET:
    case DPO_PROTO_BIER:
    case DPO_PROTO_NSH:
        break;
    }

    return (0);
}

static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    ASSERT (num_buckets <= LB_MAX_BUCKETS);

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}

index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}
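
/*
 * Usage (sketch): create a 2-bucket IPv4 load-balance and point each
 * bucket at a child DPO. dpo0/dpo1 are assumed to be initialised
 * dpo_id_t's owned by the caller:
 *
 *   index_t lbi;
 *
 *   lbi = load_balance_create (2, DPO_PROTO_IP4, IP_FLOW_HASH_DEFAULT);
 *   load_balance_set_bucket (lbi, 0, &dpo0);
 *   load_balance_set_bucket (lbi, 1, &dpo1);
 */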

static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}

void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}

int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}

u16
load_balance_n_buckets (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_n_buckets);
}

void
load_balance_set_fib_entry_flags (index_t lbi,
                                  fib_entry_flag_t flags)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);
    lb->lb_fib_entry_flags = flags;
}

void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * packets in flight may see this change, but the write is atomic,
     * so they read either the old or the new value.
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}

index_t
load_balance_get_urpf (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_urpf);
}

const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}

static int
next_hop_sort_by_weight (const load_balance_path_t * n1,
                         const load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}

/* The given next-hop vector is over-written with a normalized copy: the
   entries are sorted by weight, and each weight is scaled to the number of
   adjacencies (buckets) assigned to that next hop.
   Returns the number of adjacencies in the block. */
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use the second copy to save the original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        vec_set_len (nhs, 1);
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            vec_set_len (nhs, 2);
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy_fast (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power-of-2 sized adjacency blocks until we
       find one where traffic flows to within the given tolerance of the
       specified weights. */
    for (n_adj = clib_min(max_pow2 (n_nhs), LB_MAX_BUCKETS); ; n_adj *= 2)
    {
        ASSERT (n_adj <= LB_MAX_BUCKETS);
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * this happens when the weight skew is high (norm is
                 * small) and a path's bucket count rounds down to zero.
                 * Without this correction the path with the low weight
                 * would have no representation in the load-balance -
                 * don't want that. If the weight skew is high, the
                 * load-balance needs many buckets to represent it.
                 * pays ya money takes ya choice.
                 */
                error = n_adj;
                break;
            }
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the average error per adjacency within tolerance for this
         * size of adjacency block, or have we reached the maximum number
         * of buckets we support? */
        if (error <= multipath_next_hop_error_tolerance*n_adj ||
            n_adj >= LB_MAX_BUCKETS)
        {
          if (i < n_nhs)
          {
            /* Truncate any next hops in excess */
            vlib_log_err(load_balance_logger,
                         "Too many paths for load-balance, truncating %d -> %d",
                         n_nhs, i);
            for (int j = i; j < n_nhs; j++)
              dpo_reset (&vec_elt(nhs, j).path_dpo);
          }
          vec_set_len (nhs, i);
          break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}
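
/*
 * Worked example (sketch): two paths with weights {3, 1} and the default
 * tolerance of 0.1. sum_weight = 4. With a block of n_adj = 2,
 * norm = 0.5 and the weight-1 path ends up with zero buckets, so the
 * error is forced to n_adj and the block is doubled. At n_adj = 4 the
 * weights normalize exactly to {3, 1}: three buckets for the heavy
 * path, one for the light.
 */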

static load_balance_path_t *
load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
                                       dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *new_nhs = NULL, *nh;

        /*
         * we need something for the load-balance. so use the drop
         */
        vec_add2(new_nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));

        return (new_nhs);
    }

    return (NULL);
}

/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
load_balance_fill_buckets_norm (load_balance_t *lb,
                                load_balance_path_t *nhs,
                                dpo_id_t *buckets,
                                u32 n_buckets)
{
    load_balance_path_t *nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}
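
/*
 * e.g. with the normalized weights {3, 1} from the example above, the
 * four buckets are written [p0, p0, p0, p1].
 */
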
static void
load_balance_fill_buckets_sticky (load_balance_t *lb,
                                  load_balance_path_t *nhs,
                                  dpo_id_t *buckets,
                                  u32 n_buckets)
{
    load_balance_path_t *nh, *fwding_paths;
    u16 ii, bucket, fpath;

    fpath = bucket = 0;
    fwding_paths = NULL;

    vec_foreach (nh, nhs)
    {
        if (!dpo_is_drop(&nh->path_dpo))
        {
            vec_add1(fwding_paths, *nh);
        }
    }
    if (vec_len(fwding_paths) == 0)
        fwding_paths = vec_dup(nhs);

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            if (!dpo_is_drop(&nh->path_dpo))
            {
                load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
            }
            else
            {
                /* fill this bucket from the next forwarding path */
                load_balance_set_bucket_i(lb, bucket++, buckets, &fwding_paths[fpath].path_dpo);
                ASSERT(vec_len(fwding_paths) > 0);
                fpath = (fpath + 1) % vec_len(fwding_paths);
            }
        }
    }

    vec_free(fwding_paths);
}
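
/*
 * sticky fill (sketch): a drop path keeps its span of buckets, but those
 * buckets are back-filled round-robin from the still-forwarding paths.
 * e.g. paths {p0, p1(drop), p2}, weight 1 each, give buckets
 * [p0, p0, p2]: flows hashed to the healthy paths keep their buckets and
 * only flows that hashed to the failed path move.
 */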

static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets,
                           load_balance_flags_t flags)
{
    if (flags & LOAD_BALANCE_FLAG_STICKY)
    {
        load_balance_fill_buckets_sticky(lb, nhs, buckets, n_buckets);
    }
    else
    {
        load_balance_fill_buckets_norm(lb, nhs, buckets, n_buckets);
    }
}

static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    ASSERT (n_buckets <= LB_MAX_BUCKETS);
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}

void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    lb->lb_flags = flags;
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets, flags);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets in flight see a consistent state;
         * that is, the number of buckets the LB reports (read from
         * lb_n_buckets_minus_1) is never more than it actually has. So if
         * the number of buckets is increasing, we must update the bucket
         * array first, then the reported number; vice-versa if the number
         * of buckets goes down.
         */
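        /*
         * e.g. grow 4 -> 8: write buckets [4..7], memory barrier, then
         * set lb_n_buckets_minus_1 = 7. Shrink 8 -> 4: set
         * lb_n_buckets_minus_1 = 3 first, barrier, then reset the
         * buckets no longer referenced.
         */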
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets, flags);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the
                 * threshold from inline storage to out-of-line. Alloc the
                 * out-of-line buckets first, then fixup the number, then
                 * reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets, flags);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still
                     * inline buckets. we can write the new values over the
                     * old.
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets, flags);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket array to
                     * hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets,
                                              n_buckets, flags);
                    CLIB_MEMORY_BARRIER();
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old
             * larger number of buckets, so will be translating to indices
             * out of range. So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();

            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the
                 * threshold from out-of-line storage to inline:
                 *   1 - fill the inline buckets,
                 *   2 - fixup the number (at this point the inline buckets
                 *       are used),
                 *   3 - free the out-of-line buckets.
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets, flags);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs, buckets,
                                          n_buckets, flags);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}

static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}

static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    fib_urpf_list_unlock(lb->lb_urpf);
    load_balance_map_unlock(lb->lb_map);

    pool_put(load_balance_pool, lb);
}

static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}

static void
load_balance_mem_show (void)
{
    fib_show_memory_usage("load-balance",
                          pool_elts(load_balance_pool),
                          pool_len(load_balance_pool),
                          sizeof(load_balance_t));
    load_balance_map_show_mem();
}

static u16
load_balance_dpo_get_mtu (const dpo_id_t *dpo)
{
    const dpo_id_t *buckets;
    load_balance_t *lb;
    u16 i, mtu = 0xffff;

    lb = load_balance_get(dpo->dpoi_index);
    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        mtu = clib_min (mtu, dpo_get_mtu (&buckets[i]));
    }

    return (mtu);
}

const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
    .dv_get_mtu = load_balance_dpo_get_mtu,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * this means that these graph nodes are the ones for which a load-balance is
 * the parent object in the DPO-graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup nodes;
 * instead we rely on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL,
};
const static char* const load_balance_bier_nodes[] =
{
    "bier-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
    [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
    [DPO_PROTO_NSH] = load_balance_nsh_nodes,
    [DPO_PROTO_BIER] = load_balance_bier_nodes,
};

void
load_balance_module_init (void)
{
    index_t lbi;

    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    /*
     * Special LB with index zero. we need to define this since the v4 mtrie
     * assumes an index of 0 implies the ply is empty. therefore all 'real'
     * adjs need a non-zero index.
     * This should never be used, but just in case, stack it on a drop.
     */
    lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));

    load_balance_logger =
        vlib_log_register_class("dpo", "load-balance");

    load_balance_map_module_init();
}

static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        if (pool_is_free_index(load_balance_pool, lbi))
        {
            vlib_cli_output (vm, "no such load-balance:%d", lbi);
        }
        else
        {
            vlib_cli_output (vm, "%U", format_load_balance, lbi,
                             LOAD_BALANCE_FORMAT_DETAIL);
        }
    }
    else
    {
        load_balance_t *lb;

        pool_foreach (lb, load_balance_pool)
        {
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }
    }

    return 0;
}

VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};

always_inline u32
ip_flow_hash (void *data)
{
  ip4_header_t *iph = (ip4_header_t *) data;

  if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
    return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
  else
    return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
}

always_inline u64
mac_to_u64 (u8 * m)
{
  return (*((u64 *) m) & 0xffffffffffff);
}

always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
  ethernet_header_t *eh;
  u64 a, b, c;
  uword is_ip, eh_size;
  u16 eh_type;

  eh = vlib_buffer_get_current (b0);
  eh_type = clib_net_to_host_u16 (eh->type);
  eh_size = ethernet_buffer_header_size (b0);

  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

  /* since we have 2 cache lines, use them */
  if (is_ip)
    a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
  else
    a = eh->type;

  b = mac_to_u64 ((u8 *) eh->dst_address);
  c = mac_to_u64 ((u8 *) eh->src_address);
  hash_mix64 (a, b, c);

  return (u32) c;
}
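
/*
 * sketch of the mix above: a = the IP flow hash (or the raw ethertype
 * for non-IP), b = dst MAC, c = src MAC; hash_mix64 combines them and
 * the low 32 bits of c select the bucket.
 */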

typedef struct load_balance_trace_t_
{
    index_t lb_index;
} load_balance_trace_t;

always_inline uword
load_balance_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
                     int is_l2)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* lookup dst + src mac */
          lbi0 =  vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          if (is_l2)
          {
              vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
          }
          else
          {
              /* it's BIER */
              const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
              vnet_buffer(b0)->ip.flow_hash = bier_compute_flow_hash(bh0);
          }

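          /*
           * bucket counts are powers of 2 (see
           * ip_multipath_normalize_next_hops), so masking with
           * n_buckets - 1 is a cheap modulo.
           */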
          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static uword
l2_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 1));
}

static u8 *
format_l2_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "L2-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
  .function = l2_load_balance,
  .name = "l2-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_l2_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};

static uword
nsh_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0, *nsh0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          lbi0 =  vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          /* SPI + SI are the second word of the NSH header */
          nsh0 = vlib_buffer_get_current (b0);
          vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static u8 *
format_nsh_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "NSH-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (nsh_load_balance_node) = {
  .function = nsh_load_balance,
  .name = "nsh-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_nsh_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};

static u8 *
format_bier_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "BIER-load-balance: index %d", t->lb_index);
  return s;
}

static uword
bier_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 0));
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (bier_load_balance_node) = {
  .function = bier_load_balance,
  .name = "bier-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_bier_load_balance_trace,
  .sibling_of = "mpls-load-balance",
};

// clang-format on