/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h>              /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>
#include <vnet/bier/bier_hdr_inlines.h>

/*
 * Distribution error tolerance for load-balancing: the average
 * per-bucket error between the requested and the achieved weight
 * distribution must not exceed this fraction.
 */
const f64 multipath_next_hop_error_tolerance = 0.1;
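
/*
 * Concretely (this restates the acceptance test in
 * ip_multipath_normalize_next_hops below): a block of n_adj buckets
 * giving path i an integer share n_i of its ideal (fractional) share
 * nf_i is accepted when
 *
 *   sum_i |nf_i - n_i| <= multipath_next_hop_error_tolerance * n_adj
 */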

#undef LB_DEBUG

#ifdef LB_DEBUG
#define LB_DBG(_lb, _fmt, _args...)                                     \
{                                                                       \
    u8 *_tmp = NULL;                                                    \
    clib_warning("lb:[%s]:" _fmt,                                       \
                 load_balance_format(load_balance_get_index((_lb)),     \
                                     0, _tmp),                          \
                 ##_args);                                              \
    vec_free(_tmp);                                                     \
}
#else
#define LB_DBG(_p, _fmt, _args...)
#endif


/**
 * Pool of all DPOs. It's not static so the DP can have fast access
 */
load_balance_t *load_balance_pool;

/**
 * The one instance of load-balance main
 */
load_balance_main_t load_balance_main;

f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}

static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}

static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}

static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;
    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    return (lb);
}

static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
    s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}

static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}


static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}

index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}

static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}

void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}
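
/*
 * Example usage (a minimal sketch; it mirrors the drop load-balance
 * built in load_balance_module_init below): create a one-bucket IPv4
 * load-balance and stack its only bucket on the drop DPO.
 *
 *    index_t lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
 *    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));
 */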

int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}

void
load_balance_set_fib_entry_flags (index_t lbi,
                                  fib_entry_flag_t flags)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);
    lb->lb_fib_entry_flags = flags;
}


void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * Packets in flight will see this change, but the write is atomic,
     * so they observe either the old or the new uRPF list, never a
     * torn value.
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}

index_t
load_balance_get_urpf (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_urpf);
}

const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}

static int
next_hop_sort_by_weight (const load_balance_path_t * n1,
                         const load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}

/* The given next-hop vector is overwritten with a normalized copy:
   entries are sorted by weight, and each weight is scaled to the number
   of adjacencies (buckets) assigned to that next hop.
   Returns the number of adjacencies in the block. */
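/*
 * Worked example (illustrative values, not from the source): two paths
 * with weights {1, 3} and tolerance 0.1. sum_weight = 4. The first
 * attempt uses n_adj = max_pow2(2) = 2, so norm = 0.5 and the rounded
 * shares are {1, 1} with error |0.5 - 1| + |1.5 - 1| = 1.0 > 0.1 * 2,
 * so the block size is doubled. With n_adj = 4, norm = 1.0 and the
 * shares are exactly {1, 3} with zero error: the result is a 4-bucket
 * block split 1:3.
 */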
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; the second copy saves the original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        _vec_len (nhs) = 1;
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            _vec_len (nhs) = 2;
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next-hop weights so they are not overwritten by the loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where the traffic distribution is within the given
       tolerance of the specified weights. */
    for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
    {
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * This happens when the weight skew is high (norm is
                 * small) and nf rounds down to zero. Without this
                 * correction the path with the low weight would have no
                 * representation in the load-balance - we don't want
                 * that. Forcing the error up makes the load-balance use
                 * more buckets, so the low-weight path gets at least
                 * one; a high weight skew simply costs more buckets.
                 */
                error = n_adj;
                break;
            }
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the average error per adjacency within tolerance for this block size? */
        if (error <= multipath_next_hop_error_tolerance*n_adj)
        {
            /* Truncate any next hops with zero weight. */
            _vec_len (nhs) = i;
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}

static load_balance_path_t *
load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
                                       dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *new_nhs = NULL, *nh;

        /*
         * we need something for the load-balance. so use the drop
         */
        vec_add2(new_nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));

        return (new_nhs);
    }

    return (NULL);
}

/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}

static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}

void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    ASSERT (n_buckets >= vec_len (raw_nhs));

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets inflight see a consistent state, that
         * is the number of reported buckets the LB has (read from
         * lb_n_buckets_minus_1) is not more than it actually has. So if the
         * number of buckets is increasing, we must update the bucket array first,
         * then the reported number. vice-versa if the number of buckets goes down.
         */
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from inline storage to out-of-line. Alloc the out-of-line
                 * buckets first, then fix up the number, then reset the
                 * inline ones.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and are still using
                     * inline buckets, so we can write the new over the old.
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket array to
                     * hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old
             * larger number of buckets, so will be translating to indices
             * out of range. So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();


            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-of-line storage to inline:
                 *   1 - fill the inline buckets,
                 *   2 - fix up the number (at this point the inline buckets
                 *       are used),
                 *   3 - free the out-of-line buckets.
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs,
                                          buckets,
                                          n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}
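
/*
 * Example usage (a minimal sketch, not taken from this file): give an
 * existing load-balance 'dpo' two paths with weights 1 and 3. For
 * brevity both paths stack on the IPv4 drop DPO; real callers pass
 * path DPOs resolved via FIB.
 *
 *    load_balance_path_t *paths = NULL, *path;
 *
 *    vec_add2(paths, path, 1);
 *    path->path_weight = 1;
 *    dpo_copy(&path->path_dpo, drop_dpo_get(DPO_PROTO_IP4));
 *
 *    vec_add2(paths, path, 1);
 *    path->path_weight = 3;
 *    dpo_copy(&path->path_dpo, drop_dpo_get(DPO_PROTO_IP4));
 *
 *    load_balance_multipath_update(dpo, paths, LOAD_BALANCE_FLAG_NONE);
 *
 *    vec_foreach(path, paths) dpo_reset(&path->path_dpo);
 *    vec_free(paths);
 */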

static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}

static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    fib_urpf_list_unlock(lb->lb_urpf);
    load_balance_map_unlock(lb->lb_map);

    pool_put(load_balance_pool, lb);
}

static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}

static void
load_balance_mem_show (void)
{
    fib_show_memory_usage("load-balance",
                          pool_elts(load_balance_pool),
                          pool_len(load_balance_pool),
                          sizeof(load_balance_t));
    load_balance_map_show_mem();
}

const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * this means that these graph nodes are ones from which a load-balance is the
 * parent object in the DPO-graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup. instead
 * we are relying on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL,
};
const static char* const load_balance_bier_nodes[] =
{
    "bier-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
    [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
    [DPO_PROTO_NSH] = load_balance_nsh_nodes,
    [DPO_PROTO_BIER] = load_balance_bier_nodes,
};

void
load_balance_module_init (void)
{
    index_t lbi;

    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    /*
     * Special LB with index zero. we need to define this since the v4 mtrie
     * assumes an index of 0 implies the ply is empty. therefore all 'real'
     * adjs need a non-zero index.
     * This should never be used, but just in case, stack it on a drop.
     */
    lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));

    load_balance_map_module_init();
}

static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        vlib_cli_output (vm, "%U", format_load_balance, lbi,
                         LOAD_BALANCE_FORMAT_DETAIL);
    }
    else
    {
        load_balance_t *lb;

        pool_foreach(lb, load_balance_pool,
        ({
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }));
    }

    return 0;
}

VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};
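
/*
 * CLI usage (illustrative): with no argument the command dumps every
 * load-balance in brief form; with an index it shows that one object
 * in detail, e.g.
 *
 *    vpp# show load-balance
 *    vpp# show load-balance 12
 */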

always_inline u32
ip_flow_hash (void *data)
{
  ip4_header_t *iph = (ip4_header_t *) data;

  if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
    return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
  else
    return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
}

always_inline u64
mac_to_u64 (u8 * m)
{
  return (*((u64 *) m) & 0xffffffffffff);
}

always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
  ethernet_header_t *eh;
  u64 a, b, c;
  uword is_ip, eh_size;
  u16 eh_type;

  eh = vlib_buffer_get_current (b0);
  eh_type = clib_net_to_host_u16 (eh->type);
  eh_size = ethernet_buffer_header_size (b0);

  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

  /* since we have 2 cache lines, use them */
  if (is_ip)
    a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
  else
    a = eh->type;

  b = mac_to_u64 ((u8 *) eh->dst_address);
  c = mac_to_u64 ((u8 *) eh->src_address);
  hash_mix64 (a, b, c);

  return (u32) c;
}

typedef struct load_balance_trace_t_
{
    index_t lb_index;
} load_balance_trace_t;

always_inline uword
load_balance_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * frame,
                     int is_l2)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* lookup dst + src mac */
          lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          if (is_l2)
          {
              vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
          }
          else
          {
              /* it's BIER */
              const bier_hdr_t *bh0 = vlib_buffer_get_current(b0);
              vnet_buffer(b0)->ip.flow_hash = bier_hdr_get_entropy(bh0);
          }

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static uword
l2_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 1));
}

static u8 *
format_l2_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "L2-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
  .function = l2_load_balance,
  .name = "l2-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_l2_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};

static uword
nsh_load_balance (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0, *nsh0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          /* SPI + SI are the second word of the NSH header */
          nsh0 = vlib_buffer_get_current (b0);
          vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static u8 *
format_nsh_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "NSH-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (nsh_load_balance_node) = {
  .function = nsh_load_balance,
  .name = "nsh-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_nsh_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};

static u8 *
format_bier_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "BIER-load-balance: index %d", t->lb_index);
  return s;
}

static uword
bier_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
    return (load_balance_inline(vm, node, frame, 0));
}

/**
 * @brief
 */
VLIB_REGISTER_NODE (bier_load_balance_node) = {
  .function = bier_load_balance,
  .name = "bier-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_bier_load_balance_trace,
  .sibling_of = "mpls-load-balance",
};