1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vnet/ip/lookup.h>
17 #include <vnet/dpo/load_balance.h>
18 #include <vnet/dpo/load_balance_map.h>
19 #include <vnet/dpo/drop_dpo.h>
20 #include <vppinfra/math.h>              /* for fabs */
21 #include <vnet/adj/adj.h>
22 #include <vnet/adj/adj_internal.h>
23 #include <vnet/fib/fib_urpf_list.h>
24
25 /*
26  * distribution error tolerance for load-balancing
27  */
28 const f64 multipath_next_hop_error_tolerance = 0.1;
29
30 #undef LB_DEBUG
31
32 #ifdef LB_DEBUG
33 #define LB_DBG(_lb, _fmt, _args...)                                     \
34 {                                                                       \
35     u8 *_tmp = NULL;                                                    \
36     clib_warning("lb:[%s]:" _fmt,                                       \
37                  load_balance_format(load_balance_get_index((_lb)),     \
38                                      0, 0, _tmp),                       \
39                  ##_args);                                              \
40     vec_free(_tmp);                                                     \
41 }
42 #else
43 #define LB_DBG(_p, _fmt, _args...)
44 #endif
45
46
47 /**
48  * Pool of all load-balance DPOs. It's not static so the DP can have fast access
49  */
50 load_balance_t *load_balance_pool;
51
52 /**
53  * The one instance of load-balance main
54  */
55 load_balance_main_t load_balance_main;
56
57 f64
58 load_balance_get_multipath_tolerance (void)
59 {
60     return (multipath_next_hop_error_tolerance);
61 }
62
63 static inline index_t
64 load_balance_get_index (const load_balance_t *lb)
65 {
66     return (lb - load_balance_pool);
67 }
68
69 static inline dpo_id_t*
70 load_balance_get_buckets (load_balance_t *lb)
71 {
72     if (LB_HAS_INLINE_BUCKETS(lb))
73     {
74         return (lb->lb_buckets_inline);
75     }
76     else
77     {
78         return (lb->lb_buckets);
79     }
80 }
81
82 static load_balance_t *
83 load_balance_alloc_i (void)
84 {
85     load_balance_t *lb;
86
87     pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
88     memset(lb, 0, sizeof(*lb));
89
90     lb->lb_map = INDEX_INVALID;
91     lb->lb_urpf = INDEX_INVALID;
92     vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
93                                    load_balance_get_index(lb));
94     vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
95                                    load_balance_get_index(lb));
96     vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
97                                load_balance_get_index(lb));
98     vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
99                                load_balance_get_index(lb));
100
101     return (lb);
102 }
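/*
 * Illustrative sketch, not part of the original file: the combined
 * counters validated and zeroed above are the per-load-balance counters
 * that forwarding nodes bump per packet. Assuming the usual
 * vlib_increment_combined_counter() signature of this era, an update
 * would look roughly like this (hypothetical helper, compiled out).
 */
#if 0
static void
load_balance_count_packet_example (index_t lbi, u32 cpu_index, u32 n_bytes)
{
    vlib_increment_combined_counter(&(load_balance_main.lbm_to_counters),
                                    cpu_index, lbi,
                                    1 /* packets */, n_bytes);
}
#endif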
103
104 static u8*
105 load_balance_format (index_t lbi,
106                      load_balance_format_flags_t flags,
107                      u32 indent,
108                      u8 *s)
109 {
110     vlib_counter_t to, via;
111     load_balance_t *lb;
112     dpo_id_t *buckets;
113     u32 i;
114
115     lb = load_balance_get(lbi);
116     vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
117     vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
118     buckets = load_balance_get_buckets(lb);
119
120     s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
121     s = format(s, "[index:%d buckets:%d ", lbi, lb->lb_n_buckets);
122     s = format(s, "uRPF:%d ", lb->lb_urpf);
123     s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
124     if (0 != via.packets)
125     {
126         s = format(s, " via:[%Ld:%Ld]",
127                    via.packets, via.bytes);
128     }
129     s = format(s, "]");
130
131     if (INDEX_INVALID != lb->lb_map)
132     {
133         s = format(s, "\n%U%U",
134                    format_white_space, indent+4,
135                    format_load_balance_map, lb->lb_map, indent+4);
136     }
137     for (i = 0; i < lb->lb_n_buckets; i++)
138     {
139         s = format(s, "\n%U[%d] %U",
140                    format_white_space, indent+2,
141                    i,
142                    format_dpo_id,
143                    &buckets[i], indent+6);
144     }
145     return (s);
146 }
147
148 u8*
149 format_load_balance (u8 * s, va_list * args)
150 {
151     index_t lbi = va_arg(args, index_t);
152     load_balance_format_flags_t flags = va_arg(args, load_balance_format_flags_t);
153
154     return (load_balance_format(lbi, flags, 0, s));
155 }
156 static u8*
157 format_load_balance_dpo (u8 * s, va_list * args)
158 {
159     index_t lbi = va_arg(args, index_t);
160     u32 indent = va_arg(args, u32);
161
162     return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
163 }
164
165
166 static load_balance_t *
167 load_balance_create_i (u32 num_buckets,
168                        dpo_proto_t lb_proto,
169                        flow_hash_config_t fhc)
170 {
171     load_balance_t *lb;
172
173     lb = load_balance_alloc_i();
174     lb->lb_hash_config = fhc;
175     lb->lb_n_buckets = num_buckets;
176     lb->lb_n_buckets_minus_1 = num_buckets-1;
177     lb->lb_proto = lb_proto;
178
179     if (!LB_HAS_INLINE_BUCKETS(lb))
180     {
181         vec_validate_aligned(lb->lb_buckets,
182                              lb->lb_n_buckets - 1,
183                              CLIB_CACHE_LINE_BYTES);
184     }
185
186     LB_DBG(lb, "create");
187
188     return (lb);
189 }
190
191 index_t
192 load_balance_create (u32 n_buckets,
193                      dpo_proto_t lb_proto,
194                      flow_hash_config_t fhc)
195 {
196     return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
197 }
198
199 static inline void
200 load_balance_set_bucket_i (load_balance_t *lb,
201                            u32 bucket,
202                            dpo_id_t *buckets,
203                            const dpo_id_t *next)
204 {
205     dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
206 }
207
208 void
209 load_balance_set_bucket (index_t lbi,
210                          u32 bucket,
211                          const dpo_id_t *next)
212 {
213     load_balance_t *lb;
214     dpo_id_t *buckets;
215
216     lb = load_balance_get(lbi);
217     buckets = load_balance_get_buckets(lb);
218
219     ASSERT(bucket < lb->lb_n_buckets);
220
221     load_balance_set_bucket_i(lb, bucket, buckets, next);
222 }
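/*
 * Illustrative usage sketch, not part of the original file: create a
 * two-bucket IPv4 load-balance and point both buckets at the drop DPO.
 * The calls follow the declarations pulled in by this file's includes;
 * the helper itself is hypothetical and compiled out.
 */
#if 0
static void
load_balance_usage_example (void)
{
    index_t lbi;

    lbi = load_balance_create(2, DPO_PROTO_IP4, IP_FLOW_HASH_DEFAULT);
    load_balance_set_bucket(lbi, 0, drop_dpo_get(DPO_PROTO_IP4));
    load_balance_set_bucket(lbi, 1, drop_dpo_get(DPO_PROTO_IP4));
}
#endif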
223
224 int
225 load_balance_is_drop (const dpo_id_t *dpo)
226 {
227     load_balance_t *lb;
228
229     if (DPO_LOAD_BALANCE != dpo->dpoi_type)
230         return (0);
231
232     lb = load_balance_get(dpo->dpoi_index);
233
234     if (1 == lb->lb_n_buckets)
235     {
236         return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
237     }
238     return (0);
239 }
240
241 void
242 load_balance_set_urpf (index_t lbi,
243                        index_t urpf)
244 {
245     load_balance_t *lb;
246     index_t old;
247
248     lb = load_balance_get(lbi);
249
250     /*
251      * packets in flight may see this change, but the write is atomic, so
         * they read either the old or the new uRPF index
252      */
253     old = lb->lb_urpf;
254     lb->lb_urpf = urpf;
255
256     fib_urpf_list_unlock(old);
257     fib_urpf_list_lock(urpf);
258 }
259
260 index_t
261 load_balance_get_urpf (index_t lbi)
262 {
263     load_balance_t *lb;
264
265     lb = load_balance_get(lbi);
266
267     return (lb->lb_urpf);
268 }
269
270 const dpo_id_t *
271 load_balance_get_bucket (index_t lbi,
272                          u32 bucket)
273 {
274     load_balance_t *lb;
275
276     lb = load_balance_get(lbi);
277
278     return (load_balance_get_bucket_i(lb, bucket));
279 }
280
281 static int
282 next_hop_sort_by_weight (load_balance_path_t * n1,
283                          load_balance_path_t * n2)
284 {
285     return ((int) n1->path_weight - (int) n2->path_weight);
286 }
287
288 /* Builds a normalized copy of the given next-hop vector in *normalized_next_hops:
289    entries are sorted by weight and each weight becomes the number of adjacencies
290    (buckets) assigned to that next hop. Returns the number of adjacencies in the block. */
291 u32
292 ip_multipath_normalize_next_hops (load_balance_path_t * raw_next_hops,
293                                   load_balance_path_t ** normalized_next_hops,
294                                   u32 *sum_weight_in,
295                                   f64 multipath_next_hop_error_tolerance)
296 {
297     load_balance_path_t * nhs;
298     uword n_nhs, n_adj, n_adj_left, i, sum_weight;
299     f64 norm, error;
300
301     n_nhs = vec_len (raw_next_hops);
302     ASSERT (n_nhs > 0);
303     if (n_nhs == 0)
304         return 0;
305
306     /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
307     nhs = *normalized_next_hops;
308     vec_validate (nhs, 2*n_nhs - 1);
309
310     /* Fast path: 1 next hop in block. */
311     n_adj = n_nhs;
312     if (n_nhs == 1)
313     {
314         nhs[0] = raw_next_hops[0];
315         nhs[0].path_weight = 1;
316         _vec_len (nhs) = 1;
317         sum_weight = 1;
318         goto done;
319     }
320
321     else if (n_nhs == 2)
322     {
323         int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;
324
325         /* Fast sort. */
326         nhs[0] = raw_next_hops[cmp];
327         nhs[1] = raw_next_hops[cmp ^ 1];
328
329         /* Fast path: equal cost multipath with 2 next hops. */
330         if (nhs[0].path_weight == nhs[1].path_weight)
331         {
332             nhs[0].path_weight = nhs[1].path_weight = 1;
333             _vec_len (nhs) = 2;
334             sum_weight = 2;
335             goto done;
336         }
337     }
338     else
339     {
340         clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
341         qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
342     }
343
344     /* Find total weight to normalize weights. */
345     sum_weight = 0;
346     for (i = 0; i < n_nhs; i++)
347         sum_weight += nhs[i].path_weight;
348
349     /* In the unlikely case that all weights are given as 0, set them all to 1. */
350     if (sum_weight == 0)
351     {
352         for (i = 0; i < n_nhs; i++)
353             nhs[i].path_weight = 1;
354         sum_weight = n_nhs;
355     }
356
357     /* Save copies of all next hop weights to avoid being overwritten in loop below. */
358     for (i = 0; i < n_nhs; i++)
359         nhs[n_nhs + i].path_weight = nhs[i].path_weight;
360
361     /* Try larger and larger power of 2 sized adjacency blocks until we
362        find one within the specified error tolerance of the requested weights. */
363     for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
364     {
365         error = 0;
366
367         norm = n_adj / ((f64) sum_weight);
368         n_adj_left = n_adj;
369         for (i = 0; i < n_nhs; i++)
370         {
371             f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
372             word n = flt_round_nearest (nf);
373
374             n = n > n_adj_left ? n_adj_left : n;
375             n_adj_left -= n;
376             error += fabs (nf - n);
377             nhs[i].path_weight = n;
378         }
379
380         nhs[0].path_weight += n_adj_left;
381
382         /* Average error per adjacency within tolerance for this size adjacency block? */
383         if (error <= multipath_next_hop_error_tolerance*n_adj)
384         {
385             /* Trim the vector back to the n_nhs entries (dropping the saved weights). */
386             _vec_len (nhs) = i;
387             break;
388         }
389     }
390
391 done:
392     /* Save vector for next call. */
393     *normalized_next_hops = nhs;
394     *sum_weight_in = sum_weight;
395     return n_adj;
396 }
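/*
 * Worked example, not part of the original file: two next hops with
 * weights {1, 3} and the default tolerance of 0.1.
 *
 *   sum_weight = 4
 *   n_adj = max_pow2(2) = 2, norm = 2/4 = 0.5:
 *       weight 1 -> 0.5, rounds to 1 (error 0.5)
 *       weight 3 -> 1.5, rounds to 2, clamped to the 1 slot left (error 0.5)
 *       total error 1.0 > 0.1 * 2, so try the next block size
 *   n_adj = 4, norm = 4/4 = 1.0:
 *       weight 1 -> 1 bucket, weight 3 -> 3 buckets, error 0 <= 0.1 * 4
 *
 * Result: a block of 4 adjacencies split 1:3, matching the requested
 * weights exactly.
 */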
397
398 static load_balance_path_t *
399 load_balance_multipath_next_hop_fixup (load_balance_path_t *nhs,
400                                        dpo_proto_t drop_proto)
401 {
402     if (0 == vec_len(nhs))
403     {
404         load_balance_path_t *nh;
405
406         /*
407          * we need something for the load-balance. so use the drop
408          */
409         vec_add2(nhs, nh, 1);
410
411         nh->path_weight = 1;
412         dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));
413     }
414
415     return (nhs);
416 }
417
418 /*
419  * Fill in adjacencies in block based on corresponding
420  * next hop adjacencies.
421  */
422 static void
423 load_balance_fill_buckets (load_balance_t *lb,
424                            load_balance_path_t *nhs,
425                            dpo_id_t *buckets,
426                            u32 n_buckets)
427 {
428     load_balance_path_t * nh;
429     u16 ii, bucket;
430
431     bucket = 0;
432
433     /*
434      * the next-hops have normalised weights. that means their sum is the number
435      * of buckets we need to fill.
436      */
437     vec_foreach (nh, nhs)
438     {
439         for (ii = 0; ii < nh->path_weight; ii++)
440         {
441             ASSERT(bucket < n_buckets);
442             load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
443         }
444     }
445 }
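/*
 * Illustrative continuation of the {1, 3} example above, not part of the
 * original file: after normalization the weights sum to the bucket count,
 * so the fill produces
 *
 *     buckets[0]    = next-hop A   (weight 1)
 *     buckets[1..3] = next-hop B   (weight 3)
 *
 * and the data path later selects a bucket with
 * flow_hash & lb_n_buckets_minus_1.
 */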
446
447 static inline void
448 load_balance_set_n_buckets (load_balance_t *lb,
449                             u32 n_buckets)
450 {
451     lb->lb_n_buckets = n_buckets;
452     lb->lb_n_buckets_minus_1 = n_buckets-1;
453 }
454
455 void
456 load_balance_multipath_update (const dpo_id_t *dpo,
457                                load_balance_path_t * raw_next_hops,
458                                load_balance_flags_t flags)
459 {
460     u32 sum_of_weights, n_buckets, ii;
461     load_balance_path_t * nh, * nhs;
462     index_t lbmi, old_lbmi;
463     load_balance_t *lb;
464     dpo_id_t *tmp_dpo;
465
466     nhs = NULL;
467
468     ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
469     lb = load_balance_get(dpo->dpoi_index);
470     raw_next_hops =
471         load_balance_multipath_next_hop_fixup(raw_next_hops,
472                                               lb->lb_proto);
473     n_buckets =
474         ip_multipath_normalize_next_hops(raw_next_hops,
475                                          &nhs,
476                                          &sum_of_weights,
477                                          multipath_next_hop_error_tolerance);
478
479     ASSERT (n_buckets >= vec_len (raw_next_hops));
480
481     /*
482      * Save the old load-balance map used, and get a new one if required.
483      */
484     old_lbmi = lb->lb_map;
485     if (flags & LOAD_BALANCE_FLAG_USES_MAP)
486     {
487         lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
488     }
489     else
490     {
491         lbmi = INDEX_INVALID;
492     }
493
494     if (0 == lb->lb_n_buckets)
495     {
496         /*
497          * first time initialisation. no packets inflight, so we can write
498          * at leisure.
499          */
500         load_balance_set_n_buckets(lb, n_buckets);
501
502         if (!LB_HAS_INLINE_BUCKETS(lb))
503             vec_validate_aligned(lb->lb_buckets,
504                                  lb->lb_n_buckets - 1,
505                                  CLIB_CACHE_LINE_BYTES);
506
507         load_balance_fill_buckets(lb, nhs,
508                                   load_balance_get_buckets(lb),
509                                   n_buckets);
510         lb->lb_map = lbmi;
511     }
512     else
513     {
514         /*
515          * This is a modification of an existing load-balance.
516          * We need to ensure that packets in flight see a consistent state: the
517          * number of buckets the LB reports (read from lb_n_buckets_minus_1) must
518          * never exceed the number it actually has. So if the number of buckets is
519          * increasing, we must update the bucket array first, then the reported
520          * number, and vice versa if the number of buckets goes down.
521          */
522         if (n_buckets == lb->lb_n_buckets)
523         {
524             /*
525              * no change in the number of buckets. we can simply fill what
526              * is new over what is old.
527              */
528             load_balance_fill_buckets(lb, nhs,
529                                       load_balance_get_buckets(lb),
530                                       n_buckets);
531             lb->lb_map = lbmi;
532         }
533         else if (n_buckets > lb->lb_n_buckets)
534         {
535             /*
536              * we have more buckets. the old load-balance map (if there is one)
537              * will remain valid, i.e. mapping to indices within range, so we
538              * update it last.
539              */
540             if (n_buckets > LB_NUM_INLINE_BUCKETS &&
541                 lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
542             {
543                 /*
544                  * the new increased number of buckets is crossing the threshold
545                  * from inline to out-of-line storage. Alloc the out-of-line buckets
546                  * first, then fix up the number, then reset the inline buckets.
547                  */
548                 ASSERT(NULL == lb->lb_buckets);
549                 vec_validate_aligned(lb->lb_buckets,
550                                      n_buckets - 1,
551                                      CLIB_CACHE_LINE_BYTES);
552
553                 load_balance_fill_buckets(lb, nhs,
554                                           lb->lb_buckets,
555                                           n_buckets);
556                 CLIB_MEMORY_BARRIER();
557                 load_balance_set_n_buckets(lb, n_buckets);
558
559                 CLIB_MEMORY_BARRIER();
560
561                 for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
562                 {
563                     dpo_reset(&lb->lb_buckets_inline[ii]);
564                 }
565             }
566             else
567             {
568                 /*
569                  * we are not crossing the threshold. we can write the new on the
570                  * old, whether they be inline or not.
571                  */
572                 load_balance_fill_buckets(lb, nhs,
573                                           load_balance_get_buckets(lb),
574                                           n_buckets);
575                 CLIB_MEMORY_BARRIER();
576                 load_balance_set_n_buckets(lb, n_buckets);
577             }
578
579             /*
580              * buckets fixed. ready for the MAP update.
581              */
582             lb->lb_map = lbmi;
583         }
584         else
585         {
586             /*
587              * bucket size shrinkage.
588              * Any map we have will be based on the old
589              * larger number of buckets, so will be translating to indices
590              * out of range. So the new MAP must be installed first.
591              */
592             lb->lb_map = lbmi;
593             CLIB_MEMORY_BARRIER();
594
595
596             if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
597                 lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
598             {
599                 /*
600                  * the new decreased number of buckets is crossing the threshold
601                  * from out-line storage to inline:
602                  *   1 - Fill the inline buckets,
603          *   2 - fixup the number (at this point the inline buckets are
604                  *       used).
605                  *   3 - free the outline buckets
606                  */
607                 load_balance_fill_buckets(lb, nhs,
608                                           lb->lb_buckets_inline,
609                                           n_buckets);
610                 CLIB_MEMORY_BARRIER();
611                 load_balance_set_n_buckets(lb, n_buckets);
612                 CLIB_MEMORY_BARRIER();
613
614                 vec_foreach(tmp_dpo, lb->lb_buckets)
615                 {
616                     dpo_reset(tmp_dpo);
617                 }
618                 vec_free(lb->lb_buckets);
619             }
620             else
621             {
622                 /*
623                  * not crossing the threshold.
624                  *  1 - update the number to the smaller size
625                  *  2 - write the new buckets
626                  *  3 - reset those no longer used.
627                  */
628                 dpo_id_t *buckets;
629                 u32 old_n_buckets;
630
631                 old_n_buckets = lb->lb_n_buckets;
632                 buckets = load_balance_get_buckets(lb);
633
634                 load_balance_set_n_buckets(lb, n_buckets);
635                 CLIB_MEMORY_BARRIER();
636
637                 load_balance_fill_buckets(lb, nhs,
638                                           buckets,
639                                           n_buckets);
640
641                 for (ii = n_buckets; ii < old_n_buckets; ii++)
642                 {
643                     dpo_reset(&buckets[ii]);
644                 }
645             }
646         }
647     }
648
649     vec_foreach (nh, nhs)
650     {
651         dpo_reset(&nh->path_dpo);
652     }
653     vec_free(nhs);
654
655     load_balance_map_unlock(old_lbmi);
656 }
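/*
 * Summary of the in-flight consistency rule used above, not part of the
 * original file. Readers mask the flow hash with lb_n_buckets_minus_1, so
 * the advertised bucket count must never exceed the number of valid
 * buckets, and any installed map must never yield an out-of-range index:
 *
 *   grow:    fill the (larger) bucket array
 *            CLIB_MEMORY_BARRIER();
 *            advertise the larger size, then install the new map
 *
 *   shrink:  install the new (smaller-range) map first
 *            CLIB_MEMORY_BARRIER();
 *            advertise the smaller size before resetting or freeing the
 *            buckets that have fallen out of range
 */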
657
658 static void
659 load_balance_lock (dpo_id_t *dpo)
660 {
661     load_balance_t *lb;
662
663     lb = load_balance_get(dpo->dpoi_index);
664
665     lb->lb_locks++;
666 }
667
668 static void
669 load_balance_destroy (load_balance_t *lb)
670 {
671     dpo_id_t *buckets;
672     int i;
673
674     buckets = load_balance_get_buckets(lb);
675
676     for (i = 0; i < lb->lb_n_buckets; i++)
677     {
678         dpo_reset(&buckets[i]);
679     }
680
681     LB_DBG(lb, "destroy");
682     if (!LB_HAS_INLINE_BUCKETS(lb))
683     {
684         vec_free(lb->lb_buckets);
685     }
686
687     fib_urpf_list_unlock(lb->lb_urpf);
688     load_balance_map_unlock(lb->lb_map);
689
690     pool_put(load_balance_pool, lb);
691 }
692
693 static void
694 load_balance_unlock (dpo_id_t *dpo)
695 {
696     load_balance_t *lb;
697
698     lb = load_balance_get(dpo->dpoi_index);
699
700     lb->lb_locks--;
701
702     if (0 == lb->lb_locks)
703     {
704         load_balance_destroy(lb);
705     }
706 }
707
708 static void
709 load_balance_mem_show (void)
710 {
711     fib_show_memory_usage("load-balance",
712                           pool_elts(load_balance_pool),
713                           pool_len(load_balance_pool),
714                           sizeof(load_balance_t));
715     load_balance_map_show_mem();
716 }
717
718 const static dpo_vft_t lb_vft = {
719     .dv_lock = load_balance_lock,
720     .dv_unlock = load_balance_unlock,
721     .dv_format = format_load_balance_dpo,
722     .dv_mem_show = load_balance_mem_show,
723 };
724
725 /**
726  * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
727  *        object.
728  *
729  * this means that these graph nodes are ones from which a load-balance is the
730  * parent object in the DPO-graph.
731  *
732  * We do not list all the load-balance nodes, such as the *-lookup. instead
733  * we are relying on the correct use of the .sibling_of field when setting
734  * up these sibling nodes.
735  */
736 const static char* const load_balance_ip4_nodes[] =
737 {
738     "ip4-load-balance",
739     NULL,
740 };
741 const static char* const load_balance_ip6_nodes[] =
742 {
743     "ip6-load-balance",
744     NULL,
745 };
746 const static char* const load_balance_mpls_nodes[] =
747 {
748     "mpls-load-balance",
749     NULL,
750 };
751 const static char* const load_balance_l2_nodes[] =
752 {
753     "l2-load-balance",
754     NULL,
755 };
756 const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
757 {
758     [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
759     [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
760     [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
761     [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
762 };
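/*
 * Illustrative sketch, not part of the original file: a hypothetical node
 * that forwards via a load-balance but is not listed above would declare
 * itself a sibling of the per-protocol node, inheriting its next-node
 * arcs rather than registering them again.
 */
#if 0
VLIB_REGISTER_NODE (example_ip4_fwd_node) = {
    .function = example_ip4_fwd,          /* hypothetical node function */
    .name = "example-ip4-fwd",
    .vector_size = sizeof (u32),
    .sibling_of = "ip4-load-balance",     /* inherit next nodes */
};
#endif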
763
764 void
765 load_balance_module_init (void)
766 {
767     dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);
768
769     load_balance_map_module_init();
770 }
771
772 static clib_error_t *
773 load_balance_show (vlib_main_t * vm,
774                    unformat_input_t * input,
775                    vlib_cli_command_t * cmd)
776 {
777     index_t lbi = INDEX_INVALID;
778
779     while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
780     {
781         if (unformat (input, "%d", &lbi))
782             ;
783         else
784             break;
785     }
786
787     if (INDEX_INVALID != lbi)
788     {
789         vlib_cli_output (vm, "%U", format_load_balance, lbi,
790                          LOAD_BALANCE_FORMAT_DETAIL);
791     }
792     else
793     {
794         load_balance_t *lb;
795
796         pool_foreach(lb, load_balance_pool,
797         ({
798             vlib_cli_output (vm, "%U", format_load_balance,
799                              load_balance_get_index(lb),
800                              LOAD_BALANCE_FORMAT_NONE);
801         }));
802     }
803
804     return 0;
805 }
806
807 VLIB_CLI_COMMAND (load_balance_show_command, static) = {
808     .path = "show load-balance",
809     .short_help = "show load-balance [<index>]",
810     .function = load_balance_show,
811 };
812
813
814 always_inline u32
815 ip_flow_hash (void *data)
816 {
817   ip4_header_t *iph = (ip4_header_t *) data;
818
819   if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
820     return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
821   else
822     return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
823 }
824
825 always_inline u64
826 mac_to_u64 (u8 * m)
827 {
828   return (*((u64 *) m) & 0xffffffffffff);
829 }
830
831 always_inline u32
832 l2_flow_hash (vlib_buffer_t * b0)
833 {
834   ethernet_header_t *eh;
835   u64 a, b, c;
836   uword is_ip, eh_size;
837   u16 eh_type;
838
839   eh = vlib_buffer_get_current (b0);
840   eh_type = clib_net_to_host_u16 (eh->type);
841   eh_size = ethernet_buffer_header_size (b0);
842
843   is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);
844
845   /* since we have 2 cache lines, use them */
846   if (is_ip)
847     a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
848   else
849     a = eh->type;
850
851   b = mac_to_u64 ((u8 *) eh->dst_address);
852   c = mac_to_u64 ((u8 *) eh->src_address);
853   hash_mix64 (a, b, c);
854
855   return (u32) c;
856 }
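/*
 * Note, not part of the original file: the dispatch loop below masks this
 * hash with lb_n_buckets_minus_1. Bucket counts produced by the
 * normalization above are powers of two, so the mask selects a bucket
 * without a modulo, e.g. with 4 buckets, hash & 0x3 yields 0..3.
 */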
857
858 typedef struct load_balance_trace_t_
859 {
860     index_t lb_index;
861 } load_balance_trace_t;
862
863 static uword
864 l2_load_balance (vlib_main_t * vm,
865                  vlib_node_runtime_t * node,
866                  vlib_frame_t * frame)
867 {
868   u32 n_left_from, next_index, *from, *to_next;
869
870   from = vlib_frame_vector_args (frame);
871   n_left_from = frame->n_vectors;
872
873   next_index = node->cached_next_index;
874
875   while (n_left_from > 0)
876     {
877       u32 n_left_to_next;
878
879       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
880
881       while (n_left_from > 0 && n_left_to_next > 0)
882         {
883           vlib_buffer_t *b0;
884           u32 bi0, lbi0, next0;
885           const dpo_id_t *dpo0;
886           const load_balance_t *lb0;
887
888           bi0 = from[0];
889           to_next[0] = bi0;
890           from += 1;
891           to_next += 1;
892           n_left_from -= 1;
893           n_left_to_next -= 1;
894
895           b0 = vlib_get_buffer (vm, bi0);
896
897           /* lookup dst + src mac */
898           lbi0 =  vnet_buffer (b0)->ip.adj_index[VLIB_TX];
899           lb0 = load_balance_get(lbi0);
900
901           vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);
902
903           dpo0 = load_balance_get_bucket_i(lb0, 
904                                            vnet_buffer(b0)->ip.flow_hash &
905                                            (lb0->lb_n_buckets_minus_1));
906
907           next0 = dpo0->dpoi_next_node;
908           vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
909
910           if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
911             {
912               load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
913                                                          sizeof (*tr));
914               tr->lb_index = lbi0;
915             }
916           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
917                                            n_left_to_next, bi0, next0);
918         }
919
920       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
921     }
922
923   return frame->n_vectors;
924 }
925
926 static u8 *
927 format_load_balance_trace (u8 * s, va_list * args)
928 {
929   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
930   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
931   load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);
932
933   s = format (s, "L2-load-balance: index %d", t->lb_index);
934   return s;
935 }
936
937 /**
938  * @brief
939  */
940 VLIB_REGISTER_NODE (l2_load_balance_node) = {
941   .function = l2_load_balance,
942   .name = "l2-load-balance",
943   .vector_size = sizeof (u32),
944
945   .format_trace = format_load_balance_trace,
946   .n_next_nodes = 1,
947   .next_nodes = {
948       [0] = "error-drop",
949   },
950 };