[vpp.git] / src/vnet/dpo/load_balance.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h>              /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>

/*
 * Distribution error tolerance for load-balancing: a normalization of
 * path weights into buckets is accepted once the average error per
 * bucket is within this fraction (0.1 == 10%).
 */
const f64 multipath_next_hop_error_tolerance = 0.1;

#undef LB_DEBUG

#ifdef LB_DEBUG
#define LB_DBG(_lb, _fmt, _args...)                                     \
{                                                                       \
    u8* _tmp = NULL;                                                    \
    clib_warning("lb:[%s]:" _fmt,                                       \
                 load_balance_format(load_balance_get_index((_lb)),     \
                                     0, 0, _tmp),                       \
                 ##_args);                                              \
    vec_free(_tmp);                                                     \
}
#else
#define LB_DBG(_p, _fmt, _args...)
#endif


/**
 * Pool of all load-balance DPOs. It is not static so the data-plane can
 * have fast access to it.
 */
load_balance_t *load_balance_pool;

/**
 * The one instance of load-balance main
 */
load_balance_main_t load_balance_main;

f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}

static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}

static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}
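
/*
 * Note (added commentary): small load-balances keep their buckets inline
 * in the load_balance_t itself (up to LB_NUM_INLINE_BUCKETS) so the
 * data-path touches no extra cache-line; larger ones spill to the
 * separately allocated lb_buckets vector.
 */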

static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;
    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    return (lb);
}

static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[proto:%U ", format_dpo_proto, lb->lb_proto);
    s = format(s, "index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}

u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}

static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}


static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}

index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}

static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}

void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}

int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}

void
load_balance_set_fib_entry_flags (index_t lbi,
                                  fib_entry_flag_t flags)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);
    lb->lb_fib_entry_flags = flags;
}


void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * packets in flight will see this change, but the write is atomic,
     * so a transient mix of old and new uRPF lists is harmless.
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}

index_t
load_balance_get_urpf (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_urpf);
}

const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}

static int
next_hop_sort_by_weight (const load_balance_path_t * n1,
                         const load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}

/* The given next-hop vector is over-written with a normalized copy:
   weights are sorted and re-scaled so that each weight corresponds to
   the number of adjacencies (buckets) assigned to that next hop.
   Returns the number of adjacencies in the block. */
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        _vec_len (nhs) = 1;
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            _vec_len (nhs) = 2;
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where traffic flows to within the specified tolerance of
       the requested weights. */
    for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
    {
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * This happens when the weight skew is high (norm is small)
                 * and n rounds to 0. Without this correction the path with
                 * a low weight would have no representation in the
                 * load-balance - we don't want that. So force the maximum
                 * error, which makes the loop retry with a larger block:
                 * if the weight skew is high, the load-balance needs many
                 * buckets to represent it. You pays your money, you takes
                 * your choice.
                 */
                error = n_adj;
                break;
            }
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the average error per adjacency within tolerance for this
           size of adjacency block? */
        if (error <= multipath_next_hop_error_tolerance*n_adj)
        {
            /* Truncate the vector to the normalized next hops (this also
               drops the saved weight copies). */
            _vec_len (nhs) = i;
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}
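
/*
 * Worked example (illustrative): raw weights {1, 3} with the default
 * tolerance of 0.1. sum_weight = 4 and the first trial block is
 * max_pow2(2) = 2 buckets: norm = 0.5, so the weight-3 path wants 1.5
 * buckets and the weight-1 path 0.5; rounding starves the low-weight
 * path to zero buckets, which forces a retry. The next trial block of
 * 4 buckets gives norm = 1.0 and an exact {3, 1} split with zero error,
 * so the function returns n_adj = 4.
 */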

static load_balance_path_t *
load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
                                       dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *new_nhs = NULL, *nh;

        /*
         * we need something for the load-balance, so use the drop
         */
        vec_add2(new_nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));

        return (new_nhs);
    }

    return (NULL);
}

/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}
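
/*
 * Example (illustrative): normalized paths {A: weight 3, B: weight 1}
 * over a block of 4 produce the bucket array [A, A, A, B]; in the
 * data-path the flow hash masked with lb_n_buckets_minus_1 then selects
 * one bucket, so flows hit A and B in a 3:1 ratio.
 */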

static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}

void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    ASSERT (n_buckets >= vec_len (raw_nhs));

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets in flight, so we can
         * write at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We need to ensure that packets in flight see a consistent state,
         * that is, the number of buckets the LB reports (read from
         * lb_n_buckets_minus_1) is never more than it actually has. So if
         * the number of buckets is increasing we must update the bucket
         * array first, then the reported number; vice-versa if the number
         * of buckets goes down.
         */
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from inline storage to out-of-line. Alloc the out-of-line
                 * buckets first, then fixup the number, then reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline
                     * buckets. we can write the new over the old.
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket
                     * array to hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old larger number of
             * buckets, so would be translating to indices out of range.
             * So the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();

            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-of-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (at this point the inline buckets are
                 *       used).
                 *   3 - free the out-of-line buckets
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs,
                                          buckets,
                                          n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}
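
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file and not compiled): build a 2-path load-balance with 3:1 weights.
 * Assumes the caller already owns two resolved DPOs, dpo_a and dpo_b.
 */
#if 0
static void
example_unequal_cost_lb (const dpo_id_t *dpo_a, const dpo_id_t *dpo_b)
{
    load_balance_path_t *paths = NULL, *path;
    dpo_id_t lb = DPO_INVALID;

    /* an empty LB; the update below sizes and fills it */
    dpo_set(&lb, DPO_LOAD_BALANCE, DPO_PROTO_IP4,
            load_balance_create(0, DPO_PROTO_IP4, IP_FLOW_HASH_DEFAULT));

    vec_add2(paths, path, 1);
    path->path_weight = 3;
    dpo_copy(&path->path_dpo, dpo_a);

    vec_add2(paths, path, 1);
    path->path_weight = 1;
    dpo_copy(&path->path_dpo, dpo_b);

    /* normalization yields 4 buckets: [a, a, a, b] */
    load_balance_multipath_update(&lb, paths, LOAD_BALANCE_FLAG_NONE);

    /* the raw path vector remains owned by the caller */
    vec_foreach(path, paths) dpo_reset(&path->path_dpo);
    vec_free(paths);
}
#endif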

static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}

static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    fib_urpf_list_unlock(lb->lb_urpf);
    load_balance_map_unlock(lb->lb_map);

    pool_put(load_balance_pool, lb);
}

static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}

static void
load_balance_mem_show (void)
{
    fib_show_memory_usage("load-balance",
                          pool_elts(load_balance_pool),
                          pool_len(load_balance_pool),
                          sizeof(load_balance_t));
    load_balance_map_show_mem();
}

const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * This means that these graph nodes are ones from which a load-balance is the
 * parent object in the DPO-graph.
 *
 * We do not list all the load-balance nodes, such as the *-lookup nodes.
 * Instead we rely on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const load_balance_nsh_nodes[] =
{
    "nsh-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
    [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
    [DPO_PROTO_NSH] = load_balance_nsh_nodes,
};

void
load_balance_module_init (void)
{
    index_t lbi;

    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    /*
     * Special LB with index zero. We need to define this since the v4 mtrie
     * assumes an index of 0 implies the ply is empty; therefore all 'real'
     * adjs need a non-zero index.
     * This should never be used, but just in case, stack it on a drop.
     */
    lbi = load_balance_create(1, DPO_PROTO_IP4, 0);
    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));

    load_balance_map_module_init();
}

static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        vlib_cli_output (vm, "%U", format_load_balance, lbi,
                         LOAD_BALANCE_FORMAT_DETAIL);
    }
    else
    {
        load_balance_t *lb;

        pool_foreach(lb, load_balance_pool,
        ({
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }));
    }

    return 0;
}

VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};


always_inline u32
ip_flow_hash (void *data)
{
  ip4_header_t *iph = (ip4_header_t *) data;

  if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
    return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
  else
    return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
}

always_inline u64
mac_to_u64 (u8 * m)
{
  return (*((u64 *) m) & 0xffffffffffff);
}

always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
  ethernet_header_t *eh;
  u64 a, b, c;
  uword is_ip, eh_size;
  u16 eh_type;

  eh = vlib_buffer_get_current (b0);
  eh_type = clib_net_to_host_u16 (eh->type);
  eh_size = ethernet_buffer_header_size (b0);

  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

  /* since we have 2 cache lines, use them */
  if (is_ip)
    a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
  else
    a = eh->type;

  b = mac_to_u64 ((u8 *) eh->dst_address);
  c = mac_to_u64 ((u8 *) eh->src_address);
  hash_mix64 (a, b, c);

  return (u32) c;
}
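
/*
 * Note (added commentary): for non-IP frames the hash mixes only the
 * ethertype with the two MAC addresses, so e.g. all ARP packets between
 * the same pair of hosts map to the same bucket.
 */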

typedef struct load_balance_trace_t_
{
    index_t lb_index;
} load_balance_trace_t;

static uword
l2_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* lookup dst + src mac */
          lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static u8 *
format_l2_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "L2-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief The graph node registration for the L2 load-balance node.
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
  .function = l2_load_balance,
  .name = "l2-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_l2_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};

static uword
nsh_load_balance (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0, *nsh0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          /* SPI + SI are the second word of the NSH header */
          nsh0 = vlib_buffer_get_current (b0);
          vnet_buffer(b0)->ip.flow_hash = nsh0[1] % lb0->lb_n_buckets;

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static u8 *
format_nsh_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "NSH-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief The graph node registration for the NSH load-balance node.
 */
VLIB_REGISTER_NODE (nsh_load_balance_node) = {
  .function = nsh_load_balance,
  .name = "nsh-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_nsh_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};