/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ip/lookup.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/drop_dpo.h>
#include <vppinfra/math.h>              /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/fib/fib_urpf_list.h>

/*
 * distribution error tolerance for load-balancing: a normalized bucket
 * allocation is accepted once the total absolute error is within this
 * fraction of the bucket count (see ip_multipath_normalize_next_hops()).
 */
const f64 multipath_next_hop_error_tolerance = 0.1;

#undef LB_DEBUG

#ifdef LB_DEBUG
#define LB_DBG(_lb, _fmt, _args...)                                     \
{                                                                       \
    u8 *_tmp = NULL;                                                    \
    clib_warning("lb:[%s]:" _fmt,                                       \
                 load_balance_format(load_balance_get_index((_lb)),     \
                                     0, 0, _tmp),                       \
                 ##_args);                                              \
    vec_free(_tmp);                                                     \
}
#else
#define LB_DBG(_p, _fmt, _args...)
#endif


/**
 * Pool of all load-balance DPOs. It's not static so the data-plane can
 * have fast access to it.
 */
load_balance_t *load_balance_pool;

/**
 * The one instance of the load-balance main
 */
load_balance_main_t load_balance_main;

f64
load_balance_get_multipath_tolerance (void)
{
    return (multipath_next_hop_error_tolerance);
}

static inline index_t
load_balance_get_index (const load_balance_t *lb)
{
    return (lb - load_balance_pool);
}

static inline dpo_id_t*
load_balance_get_buckets (load_balance_t *lb)
{
    if (LB_HAS_INLINE_BUCKETS(lb))
    {
        return (lb->lb_buckets_inline);
    }
    else
    {
        return (lb->lb_buckets);
    }
}

static load_balance_t *
load_balance_alloc_i (void)
{
    load_balance_t *lb;

    pool_get_aligned(load_balance_pool, lb, CLIB_CACHE_LINE_BYTES);
    memset(lb, 0, sizeof(*lb));

    lb->lb_map = INDEX_INVALID;
    lb->lb_urpf = INDEX_INVALID;
    vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
                                   load_balance_get_index(lb));
    vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
                                   load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_to_counters),
                               load_balance_get_index(lb));
    vlib_zero_combined_counter(&(load_balance_main.lbm_via_counters),
                               load_balance_get_index(lb));

    return (lb);
}

static u8*
load_balance_format (index_t lbi,
                     load_balance_format_flags_t flags,
                     u32 indent,
                     u8 *s)
{
    vlib_counter_t to, via;
    load_balance_t *lb;
    dpo_id_t *buckets;
    u32 i;

    lb = load_balance_get(lbi);
    vlib_get_combined_counter(&(load_balance_main.lbm_to_counters), lbi, &to);
    vlib_get_combined_counter(&(load_balance_main.lbm_via_counters), lbi, &via);
    buckets = load_balance_get_buckets(lb);

    s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
    s = format(s, "[index:%d buckets:%d ", lbi, lb->lb_n_buckets);
    s = format(s, "uRPF:%d ", lb->lb_urpf);
    s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
    if (0 != via.packets)
    {
        s = format(s, " via:[%Ld:%Ld]",
                   via.packets, via.bytes);
    }
    s = format(s, "]");

    if (INDEX_INVALID != lb->lb_map)
    {
        s = format(s, "\n%U%U",
                   format_white_space, indent+4,
                   format_load_balance_map, lb->lb_map, indent+4);
    }
    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        s = format(s, "\n%U[%d] %U",
                   format_white_space, indent+2,
                   i,
                   format_dpo_id,
                   &buckets[i], indent+6);
    }
    return (s);
}
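
/*
 * Illustrative output sketch only (the exact type and bucket rendering
 * comes from format_dpo_type/format_dpo_id; the counter values here are
 * made up):
 *
 *   dpo-load-balance: [index:1 buckets:2 uRPF:3 to:[10:940]]
 *     [0] <bucket 0's DPO>
 *     [1] <bucket 1's DPO>
 */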

u8*
format_load_balance (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    load_balance_format_flags_t flags = va_arg(*args, load_balance_format_flags_t);

    return (load_balance_format(lbi, flags, 0, s));
}

static u8*
format_load_balance_dpo (u8 * s, va_list * args)
{
    index_t lbi = va_arg(*args, index_t);
    u32 indent = va_arg(*args, u32);

    return (load_balance_format(lbi, LOAD_BALANCE_FORMAT_DETAIL, indent, s));
}


static load_balance_t *
load_balance_create_i (u32 num_buckets,
                       dpo_proto_t lb_proto,
                       flow_hash_config_t fhc)
{
    load_balance_t *lb;

    lb = load_balance_alloc_i();
    lb->lb_hash_config = fhc;
    lb->lb_n_buckets = num_buckets;
    lb->lb_n_buckets_minus_1 = num_buckets-1;
    lb->lb_proto = lb_proto;

    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_validate_aligned(lb->lb_buckets,
                             lb->lb_n_buckets - 1,
                             CLIB_CACHE_LINE_BYTES);
    }

    LB_DBG(lb, "create");

    return (lb);
}

index_t
load_balance_create (u32 n_buckets,
                     dpo_proto_t lb_proto,
                     flow_hash_config_t fhc)
{
    return (load_balance_get_index(load_balance_create_i(n_buckets, lb_proto, fhc)));
}
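
/*
 * Usage sketch (illustrative only; dpo_a and dpo_b are hypothetical,
 * already-initialised dpo_id_t's for the chosen paths):
 *
 *   index_t lbi;
 *
 *   lbi = load_balance_create(2, DPO_PROTO_IP4, IP_FLOW_HASH_DEFAULT);
 *   load_balance_set_bucket(lbi, 0, &dpo_a);
 *   load_balance_set_bucket(lbi, 1, &dpo_b);
 *
 * Note the bucket count is expected to be a power of 2 so the data-plane
 * can select a bucket with flow_hash & lb_n_buckets_minus_1.
 */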

static inline void
load_balance_set_bucket_i (load_balance_t *lb,
                           u32 bucket,
                           dpo_id_t *buckets,
                           const dpo_id_t *next)
{
    dpo_stack(DPO_LOAD_BALANCE, lb->lb_proto, &buckets[bucket], next);
}

void
load_balance_set_bucket (index_t lbi,
                         u32 bucket,
                         const dpo_id_t *next)
{
    load_balance_t *lb;
    dpo_id_t *buckets;

    lb = load_balance_get(lbi);
    buckets = load_balance_get_buckets(lb);

    ASSERT(bucket < lb->lb_n_buckets);

    load_balance_set_bucket_i(lb, bucket, buckets, next);
}

int
load_balance_is_drop (const dpo_id_t *dpo)
{
    load_balance_t *lb;

    if (DPO_LOAD_BALANCE != dpo->dpoi_type)
        return (0);

    lb = load_balance_get(dpo->dpoi_index);

    if (1 == lb->lb_n_buckets)
    {
        return (dpo_is_drop(load_balance_get_bucket_i(lb, 0)));
    }
    return (0);
}

void
load_balance_set_urpf (index_t lbi,
                       index_t urpf)
{
    load_balance_t *lb;
    index_t old;

    lb = load_balance_get(lbi);

    /*
     * packets in flight may see this change, but the write is atomic,
     * so they use either the old or the new uRPF list - never garbage.
     */
    old = lb->lb_urpf;
    lb->lb_urpf = urpf;

    fib_urpf_list_unlock(old);
    fib_urpf_list_lock(urpf);
}

index_t
load_balance_get_urpf (index_t lbi)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (lb->lb_urpf);
}

const dpo_id_t *
load_balance_get_bucket (index_t lbi,
                         u32 bucket)
{
    load_balance_t *lb;

    lb = load_balance_get(lbi);

    return (load_balance_get_bucket_i(lb, bucket));
}

static int
next_hop_sort_by_weight (const load_balance_path_t * n1,
                         const load_balance_path_t * n2)
{
    return ((int) n1->path_weight - (int) n2->path_weight);
}

/* The given next-hop vector is overwritten with a normalized copy: entries
   are sorted by weight and each weight is replaced by the number of
   adjacencies (buckets) allocated to that next hop.
   Returns the total number of adjacencies in the block. */
u32
ip_multipath_normalize_next_hops (const load_balance_path_t * raw_next_hops,
                                  load_balance_path_t ** normalized_next_hops,
                                  u32 *sum_weight_in,
                                  f64 multipath_next_hop_error_tolerance)
{
    load_balance_path_t * nhs;
    uword n_nhs, n_adj, n_adj_left, i, sum_weight;
    f64 norm, error;

    n_nhs = vec_len (raw_next_hops);
    ASSERT (n_nhs > 0);
    if (n_nhs == 0)
        return 0;

    /* Allocate enough space for 2 copies; we'll use second copy to save original weights. */
    nhs = *normalized_next_hops;
    vec_validate (nhs, 2*n_nhs - 1);

    /* Fast path: 1 next hop in block. */
    n_adj = n_nhs;
    if (n_nhs == 1)
    {
        nhs[0] = raw_next_hops[0];
        nhs[0].path_weight = 1;
        _vec_len (nhs) = 1;
        sum_weight = 1;
        goto done;
    }

    else if (n_nhs == 2)
    {
        int cmp = next_hop_sort_by_weight (&raw_next_hops[0], &raw_next_hops[1]) < 0;

        /* Fast sort. */
        nhs[0] = raw_next_hops[cmp];
        nhs[1] = raw_next_hops[cmp ^ 1];

        /* Fast path: equal cost multipath with 2 next hops. */
        if (nhs[0].path_weight == nhs[1].path_weight)
        {
            nhs[0].path_weight = nhs[1].path_weight = 1;
            _vec_len (nhs) = 2;
            sum_weight = 2;
            goto done;
        }
    }
    else
    {
        clib_memcpy (nhs, raw_next_hops, n_nhs * sizeof (raw_next_hops[0]));
        qsort (nhs, n_nhs, sizeof (nhs[0]), (void *) next_hop_sort_by_weight);
    }

    /* Find total weight to normalize weights. */
    sum_weight = 0;
    for (i = 0; i < n_nhs; i++)
        sum_weight += nhs[i].path_weight;

    /* In the unlikely case that all weights are given as 0, set them all to 1. */
    if (sum_weight == 0)
    {
        for (i = 0; i < n_nhs; i++)
            nhs[i].path_weight = 1;
        sum_weight = n_nhs;
    }

    /* Save copies of all next hop weights to avoid being overwritten in loop below. */
    for (i = 0; i < n_nhs; i++)
        nhs[n_nhs + i].path_weight = nhs[i].path_weight;

    /* Try larger and larger power of 2 sized adjacency blocks until we
       find one where traffic flows to within the given error tolerance of
       the specified weights. */
    for (n_adj = max_pow2 (n_nhs); ; n_adj *= 2)
    {
        error = 0;

        norm = n_adj / ((f64) sum_weight);
        n_adj_left = n_adj;
        for (i = 0; i < n_nhs; i++)
        {
            f64 nf = nhs[n_nhs + i].path_weight * norm; /* use saved weights */
            word n = flt_round_nearest (nf);

            n = n > n_adj_left ? n_adj_left : n;
            n_adj_left -= n;
            error += fabs (nf - n);
            nhs[i].path_weight = n;

            if (0 == nhs[i].path_weight)
            {
                /*
                 * This happens when the weight skew is high (norm is small)
                 * and nf rounds down to 0. Without this correction the path
                 * with the low weight would have no representation in the
                 * load-balance - we don't want that. So force the error above
                 * tolerance to retry with a larger block; if the weight skew
                 * is high, the load-balance needs many buckets to accommodate
                 * it. You pays your money, you takes your choice.
                 */
                error = n_adj;
                break;
            }
        }

        nhs[0].path_weight += n_adj_left;

        /* Is the total error within tolerance for this size of adjacency
           block? */
        if (error <= multipath_next_hop_error_tolerance*n_adj)
        {
            /* Truncate the vector to the next hops just written (this drops
               the saved weight copies). */
            _vec_len (nhs) = i;
            break;
        }
    }

done:
    /* Save vector for next call. */
    *normalized_next_hops = nhs;
    *sum_weight_in = sum_weight;
    return n_adj;
}
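
/*
 * Worked example of the normalization above (a sketch with made-up
 * weights): raw weights 3 and 1 give sum_weight = 4. The first block size
 * tried is max_pow2(2) = 2, so norm = 0.5 and nf = 1.5 and 0.5, which
 * round to 2 and 0; the zero-weight path forces error = n_adj and a
 * retry. At n_adj = 4, norm = 1.0 and the weights map exactly to 3 and 1
 * buckets, so error = 0 <= 0.1 * 4 and the result is a 4-bucket block
 * split 3:1 between the two paths.
 */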

static load_balance_path_t *
load_balance_multipath_next_hop_fixup (const load_balance_path_t *nhs,
                                       dpo_proto_t drop_proto)
{
    if (0 == vec_len(nhs))
    {
        load_balance_path_t *new_nhs = NULL, *nh;

        /*
         * we need something for the load-balance. so use the drop
         */
        vec_add2(new_nhs, nh, 1);

        nh->path_weight = 1;
        dpo_copy(&nh->path_dpo, drop_dpo_get(drop_proto));

        return (new_nhs);
    }

    return (NULL);
}

/*
 * Fill in adjacencies in block based on corresponding
 * next hop adjacencies.
 */
static void
load_balance_fill_buckets (load_balance_t *lb,
                           load_balance_path_t *nhs,
                           dpo_id_t *buckets,
                           u32 n_buckets)
{
    load_balance_path_t * nh;
    u16 ii, bucket;

    bucket = 0;

    /*
     * the next-hops have normalised weights. that means their sum is the number
     * of buckets we need to fill.
     */
    vec_foreach (nh, nhs)
    {
        for (ii = 0; ii < nh->path_weight; ii++)
        {
            ASSERT(bucket < n_buckets);
            load_balance_set_bucket_i(lb, bucket++, buckets, &nh->path_dpo);
        }
    }
}
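
/*
 * For example (illustrative): after normalization to 4 buckets with paths
 * A (weight 3) and B (weight 1), the bucket array is filled as
 * [A, A, A, B] - each path occupies a run of buckets equal to its
 * normalized weight.
 */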

static inline void
load_balance_set_n_buckets (load_balance_t *lb,
                            u32 n_buckets)
{
    lb->lb_n_buckets = n_buckets;
    lb->lb_n_buckets_minus_1 = n_buckets-1;
}
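
/*
 * lb_n_buckets_minus_1 is kept so the data-plane can pick a bucket with a
 * mask instead of a modulo; this relies on the bucket count being a power
 * of 2, as produced by ip_multipath_normalize_next_hops(). A sketch of
 * the selection done in the forwarding nodes further down this file:
 *
 *   bucket = flow_hash & lb->lb_n_buckets_minus_1;
 */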

void
load_balance_multipath_update (const dpo_id_t *dpo,
                               const load_balance_path_t * raw_nhs,
                               load_balance_flags_t flags)
{
    load_balance_path_t *nh, *nhs, *fixed_nhs;
    u32 sum_of_weights, n_buckets, ii;
    index_t lbmi, old_lbmi;
    load_balance_t *lb;
    dpo_id_t *tmp_dpo;

    nhs = NULL;

    ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
    lb = load_balance_get(dpo->dpoi_index);
    fixed_nhs = load_balance_multipath_next_hop_fixup(raw_nhs, lb->lb_proto);
    n_buckets =
        ip_multipath_normalize_next_hops((NULL == fixed_nhs ?
                                          raw_nhs :
                                          fixed_nhs),
                                         &nhs,
                                         &sum_of_weights,
                                         multipath_next_hop_error_tolerance);

    ASSERT (n_buckets >= vec_len (raw_nhs));

    /*
     * Save the old load-balance map used, and get a new one if required.
     */
    old_lbmi = lb->lb_map;
    if (flags & LOAD_BALANCE_FLAG_USES_MAP)
    {
        lbmi = load_balance_map_add_or_lock(n_buckets, sum_of_weights, nhs);
    }
    else
    {
        lbmi = INDEX_INVALID;
    }

    if (0 == lb->lb_n_buckets)
    {
        /*
         * first time initialisation. no packets inflight, so we can write
         * at leisure.
         */
        load_balance_set_n_buckets(lb, n_buckets);

        if (!LB_HAS_INLINE_BUCKETS(lb))
            vec_validate_aligned(lb->lb_buckets,
                                 lb->lb_n_buckets - 1,
                                 CLIB_CACHE_LINE_BYTES);

        load_balance_fill_buckets(lb, nhs,
                                  load_balance_get_buckets(lb),
                                  n_buckets);
        lb->lb_map = lbmi;
    }
    else
    {
        /*
         * This is a modification of an existing load-balance.
         * We must ensure that packets in flight see a consistent state: the
         * number of buckets the LB reports (read from lb_n_buckets_minus_1)
         * must never exceed the number it actually has. So if the number of
         * buckets is increasing, we update the bucket array first and the
         * reported number second; vice-versa when the number of buckets
         * shrinks.
         */
        if (n_buckets == lb->lb_n_buckets)
        {
            /*
             * no change in the number of buckets. we can simply fill what
             * is new over what is old.
             */
            load_balance_fill_buckets(lb, nhs,
                                      load_balance_get_buckets(lb),
                                      n_buckets);
            lb->lb_map = lbmi;
        }
        else if (n_buckets > lb->lb_n_buckets)
        {
            /*
             * we have more buckets. the old load-balance map (if there is one)
             * will remain valid, i.e. mapping to indices within range, so we
             * update it last.
             */
            if (n_buckets > LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets <= LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new increased number of buckets is crossing the threshold
                 * from the inline storage to out-line. Alloc the outline buckets
                 * first, then fixup the number. then reset the inlines.
                 */
                ASSERT(NULL == lb->lb_buckets);
                vec_validate_aligned(lb->lb_buckets,
                                     n_buckets - 1,
                                     CLIB_CACHE_LINE_BYTES);

                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);

                CLIB_MEMORY_BARRIER();

                for (ii = 0; ii < LB_NUM_INLINE_BUCKETS; ii++)
                {
                    dpo_reset(&lb->lb_buckets_inline[ii]);
                }
            }
            else
            {
                if (n_buckets <= LB_NUM_INLINE_BUCKETS)
                {
                    /*
                     * we are not crossing the threshold and it's still inline
                     * buckets. we can write the new set over the old.
                     */
                    load_balance_fill_buckets(lb, nhs,
                                              load_balance_get_buckets(lb),
                                              n_buckets);
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);
                }
                else
                {
                    /*
                     * we are not crossing the threshold. We need a new bucket
                     * array to hold the increased number of choices.
                     */
                    dpo_id_t *new_buckets, *old_buckets, *tmp_dpo;

                    new_buckets = NULL;
                    old_buckets = load_balance_get_buckets(lb);

                    vec_validate_aligned(new_buckets,
                                         n_buckets - 1,
                                         CLIB_CACHE_LINE_BYTES);

                    load_balance_fill_buckets(lb, nhs, new_buckets, n_buckets);
                    CLIB_MEMORY_BARRIER();
                    lb->lb_buckets = new_buckets;
                    CLIB_MEMORY_BARRIER();
                    load_balance_set_n_buckets(lb, n_buckets);

                    vec_foreach(tmp_dpo, old_buckets)
                    {
                        dpo_reset(tmp_dpo);
                    }
                    vec_free(old_buckets);
                }
            }

            /*
             * buckets fixed. ready for the MAP update.
             */
            lb->lb_map = lbmi;
        }
        else
        {
            /*
             * bucket size shrinkage.
             * Any map we have will be based on the old, larger number of
             * buckets, and so would translate to indices out of range. So
             * the new MAP must be installed first.
             */
            lb->lb_map = lbmi;
            CLIB_MEMORY_BARRIER();

            if (n_buckets <= LB_NUM_INLINE_BUCKETS &&
                lb->lb_n_buckets > LB_NUM_INLINE_BUCKETS)
            {
                /*
                 * the new decreased number of buckets is crossing the threshold
                 * from out-line storage to inline:
                 *   1 - Fill the inline buckets,
                 *   2 - fixup the number (at this point the inline buckets are
                 *       used).
                 *   3 - free the outline buckets
                 */
                load_balance_fill_buckets(lb, nhs,
                                          lb->lb_buckets_inline,
                                          n_buckets);
                CLIB_MEMORY_BARRIER();
                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                vec_foreach(tmp_dpo, lb->lb_buckets)
                {
                    dpo_reset(tmp_dpo);
                }
                vec_free(lb->lb_buckets);
            }
            else
            {
                /*
                 * not crossing the threshold.
                 *  1 - update the number to the smaller size
                 *  2 - write the new buckets
                 *  3 - reset those no longer used.
                 */
                dpo_id_t *buckets;
                u32 old_n_buckets;

                old_n_buckets = lb->lb_n_buckets;
                buckets = load_balance_get_buckets(lb);

                load_balance_set_n_buckets(lb, n_buckets);
                CLIB_MEMORY_BARRIER();

                load_balance_fill_buckets(lb, nhs,
                                          buckets,
                                          n_buckets);

                for (ii = n_buckets; ii < old_n_buckets; ii++)
                {
                    dpo_reset(&buckets[ii]);
                }
            }
        }
    }

    vec_foreach (nh, nhs)
    {
        dpo_reset(&nh->path_dpo);
    }
    vec_free(nhs);
    vec_free(fixed_nhs);

    load_balance_map_unlock(old_lbmi);
}
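
/*
 * Usage sketch (illustrative; dpo_a and dpo_b are hypothetical resolved
 * path DPOs and lb_dpo is a dpo_id_t already stacked on a load-balance).
 * Note the dpo_reset() loop at the end of the function above: the update
 * consumes the DPO references held in the caller's path vector, so the
 * caller only vec_free()s the vector afterwards:
 *
 *   load_balance_path_t *paths = NULL, *path;
 *
 *   vec_add2(paths, path, 1);
 *   path->path_weight = 1;
 *   dpo_copy(&path->path_dpo, &dpo_a);
 *
 *   vec_add2(paths, path, 1);
 *   path->path_weight = 3;
 *   dpo_copy(&path->path_dpo, &dpo_b);
 *
 *   load_balance_multipath_update(&lb_dpo, paths, LOAD_BALANCE_FLAG_NONE);
 *   vec_free(paths);
 */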

static void
load_balance_lock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks++;
}

static void
load_balance_destroy (load_balance_t *lb)
{
    dpo_id_t *buckets;
    int i;

    buckets = load_balance_get_buckets(lb);

    for (i = 0; i < lb->lb_n_buckets; i++)
    {
        dpo_reset(&buckets[i]);
    }

    LB_DBG(lb, "destroy");
    if (!LB_HAS_INLINE_BUCKETS(lb))
    {
        vec_free(lb->lb_buckets);
    }

    fib_urpf_list_unlock(lb->lb_urpf);
    load_balance_map_unlock(lb->lb_map);

    pool_put(load_balance_pool, lb);
}

static void
load_balance_unlock (dpo_id_t *dpo)
{
    load_balance_t *lb;

    lb = load_balance_get(dpo->dpoi_index);

    lb->lb_locks--;

    if (0 == lb->lb_locks)
    {
        load_balance_destroy(lb);
    }
}

static void
load_balance_mem_show (void)
{
    fib_show_memory_usage("load-balance",
                          pool_elts(load_balance_pool),
                          pool_len(load_balance_pool),
                          sizeof(load_balance_t));
    load_balance_map_show_mem();
}

const static dpo_vft_t lb_vft = {
    .dv_lock = load_balance_lock,
    .dv_unlock = load_balance_unlock,
    .dv_format = format_load_balance_dpo,
    .dv_mem_show = load_balance_mem_show,
};

/**
 * @brief The per-protocol VLIB graph nodes that are assigned to a load-balance
 *        object.
 *
 * That is, these are the graph nodes for which a load-balance is the
 * parent object in the DPO graph.
 *
 * We do not list all the load-balance nodes (such as the *-lookup nodes);
 * instead we rely on the correct use of the .sibling_of field when setting
 * up these sibling nodes.
 */
const static char* const load_balance_ip4_nodes[] =
{
    "ip4-load-balance",
    NULL,
};
const static char* const load_balance_ip6_nodes[] =
{
    "ip6-load-balance",
    NULL,
};
const static char* const load_balance_mpls_nodes[] =
{
    "mpls-load-balance",
    NULL,
};
const static char* const load_balance_l2_nodes[] =
{
    "l2-load-balance",
    NULL,
};
const static char* const * const load_balance_nodes[DPO_PROTO_NUM] =
{
    [DPO_PROTO_IP4]  = load_balance_ip4_nodes,
    [DPO_PROTO_IP6]  = load_balance_ip6_nodes,
    [DPO_PROTO_MPLS] = load_balance_mpls_nodes,
    [DPO_PROTO_ETHERNET] = load_balance_l2_nodes,
};

void
load_balance_module_init (void)
{
    dpo_register(DPO_LOAD_BALANCE, &lb_vft, load_balance_nodes);

    load_balance_map_module_init();
}

static clib_error_t *
load_balance_show (vlib_main_t * vm,
                   unformat_input_t * input,
                   vlib_cli_command_t * cmd)
{
    index_t lbi = INDEX_INVALID;

    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
        if (unformat (input, "%d", &lbi))
            ;
        else
            break;
    }

    if (INDEX_INVALID != lbi)
    {
        vlib_cli_output (vm, "%U", format_load_balance, lbi,
                         LOAD_BALANCE_FORMAT_DETAIL);
    }
    else
    {
        load_balance_t *lb;

        pool_foreach(lb, load_balance_pool,
        ({
            vlib_cli_output (vm, "%U", format_load_balance,
                             load_balance_get_index(lb),
                             LOAD_BALANCE_FORMAT_NONE);
        }));
    }

    return 0;
}

VLIB_CLI_COMMAND (load_balance_show_command, static) = {
    .path = "show load-balance",
    .short_help = "show load-balance [<index>]",
    .function = load_balance_show,
};
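
/*
 * CLI usage (per the .path/.short_help above): "show load-balance" lists
 * every load-balance object briefly, while "show load-balance 1" prints
 * the detailed view of the object with index 1 (the index value here is
 * illustrative).
 */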

always_inline u32
ip_flow_hash (void *data)
{
  ip4_header_t *iph = (ip4_header_t *) data;

  if ((iph->ip_version_and_header_length & 0xF0) == 0x40)
    return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT);
  else
    return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT);
}

always_inline u64
mac_to_u64 (u8 * m)
{
  return (*((u64 *) m) & 0xffffffffffff);
}

always_inline u32
l2_flow_hash (vlib_buffer_t * b0)
{
  ethernet_header_t *eh;
  u64 a, b, c;
  uword is_ip, eh_size;
  u16 eh_type;

  eh = vlib_buffer_get_current (b0);
  eh_type = clib_net_to_host_u16 (eh->type);
  eh_size = ethernet_buffer_header_size (b0);

  is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6);

  /* since we have 2 cache lines, use them */
  if (is_ip)
    a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size);
  else
    a = eh->type;

  b = mac_to_u64 ((u8 *) eh->dst_address);
  c = mac_to_u64 ((u8 *) eh->src_address);
  hash_mix64 (a, b, c);

  return (u32) c;
}

typedef struct load_balance_trace_t_
{
    index_t lb_index;
} load_balance_trace_t;

static uword
l2_load_balance (vlib_main_t * vm,
                 vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          u32 bi0, lbi0, next0;
          const dpo_id_t *dpo0;
          const load_balance_t *lb0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          /* lookup dst + src mac */
          lbi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
          lb0 = load_balance_get(lbi0);

          vnet_buffer(b0)->ip.flow_hash = l2_flow_hash(b0);

          dpo0 = load_balance_get_bucket_i(lb0,
                                           vnet_buffer(b0)->ip.flow_hash &
                                           (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              load_balance_trace_t *tr = vlib_add_trace (vm, node, b0,
                                                         sizeof (*tr));
              tr->lb_index = lbi0;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

static u8 *
format_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  load_balance_trace_t *t = va_arg (*args, load_balance_trace_t *);

  s = format (s, "L2-load-balance: index %d", t->lb_index);
  return s;
}

/**
 * @brief The load-balance node for L2 (bridged) traffic.
 */
VLIB_REGISTER_NODE (l2_load_balance_node) = {
  .function = l2_load_balance,
  .name = "l2-load-balance",
  .vector_size = sizeof (u32),

  .format_trace = format_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [0] = "error-drop",
  },
};