[vpp.git] / src / vnet / mpls / mpls_lookup.c
/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/mpls/mpls.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/replicate_dpo.h>

/**
 * Static MPLS VLIB forwarding node
 */
static vlib_node_registration_t mpls_lookup_node;

/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
static u32 mpls_lookup_to_replicate_edge;

typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;

static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %d "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return s;
}

/*
 * Compute the flow hash. It is used to select which bucket of a
 * load-balance, and hence which path, this flow will use.
 */
always_inline u32
mpls_compute_flow_hash (const mpls_unicast_header_t * hdr,
                        flow_hash_config_t flow_hash_config)
{
    // FIXME
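    // Placeholder: hashing on the label alone means every packet carrying the
    // same top label lands in the same bucket, so multi-path traffic under a
    // single label is not spread across paths.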
    return (vnet_mpls_uc_get_label(hdr->label_exp_s_ttl));
}

static inline uword
mpls_lookup (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

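      /*
       * Quad loop: handle four packets per iteration while prefetching
       * buffer headers and MPLS headers further along the frame.
       */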
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3, *p4, *p5;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p4->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), STORE);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

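          /*
           * Lookup the label in the MPLS FIB bound to the RX interface. The
           * result is a load-balance index or, for multicast entries, a
           * replicate index carrying the MPLS_IS_REPLICATE flag.
           */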
          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);

          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;

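          /*
           * Multicast: clear the MPLS_IS_REPLICATE flag to recover the
           * replicate DPO index and send the packet to mpls-replicate.
           * Unicast: pick a load-balance bucket, computing a flow hash only
           * when the entry has more than one path.
           */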
          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
              }
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));
              dpo0 = load_balance_get_bucket_i(lb0,
                                               (hash_c0 &
                                                (lb0->lb_n_buckets_minus_1)));
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }
          if (MPLS_IS_REPLICATE & lbi1)
          {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb1 = load_balance_get(lbi1);

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
              {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
              }
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));
              dpo1 = load_balance_get_bucket_i(lb1,
                                               (hash_c1 &
                                                (lb1->lb_n_buckets_minus_1)));
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
          }
          if (MPLS_IS_REPLICATE & lbi2)
          {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb2 = load_balance_get(lbi2);

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
              {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
              }
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));
              dpo2 = load_balance_get_bucket_i(lb2,
                                               (hash_c2 &
                                                (lb2->lb_n_buckets_minus_1)));
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
          }
          if (MPLS_IS_REPLICATE & lbi3)
          {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb3 = load_balance_get(lbi3);

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
              {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
              }
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));
              dpo3 = load_balance_get_bucket_i(lb3,
                                               (hash_c3 &
                                                (lb3->lb_n_buckets_minus_1)));
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
          }

          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           *  last byte is the TTL.
           *  bits 2 to 4 inclusive (mask 0xe) of the third byte are the EXP bits
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
          }

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
      {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;

          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
              }

              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              dpo0 = load_balance_get_bucket_i(lb0,
                                               (hash_c0 &
                                                (lb0->lb_n_buckets_minus_1)));

              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }

          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           *  last byte is the TTL.
           *  bits 2 to 4 inclusive (mask 0xe) of the third byte are the EXP bits
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, mpls_lookup_node.index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}

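/*
 * The error strings are generated from error.def via the mpls_error()
 * x-macro, so the node's error counters and their text stay in sync.
 */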
static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include "error.def"
#undef mpls_error
};

VLIB_REGISTER_NODE (mpls_lookup_node, static) = {
  .function = mpls_lookup,
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

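  /*
   * Sibling of ip4-lookup: this node shares ip4-lookup's next-node arcs, so
   * a DPO's cached next-node index is valid from either lookup node.
   */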
  .sibling_of = "ip4-lookup",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)

typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;

static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return s;
}

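/*
 * mpls-load-balance: the node behind the load-balance DPO on the MPLS graph.
 * The lookup stage has already stored the load-balance index in
 * ip.adj_index[VLIB_TX]; here we select a bucket and forward the packet to
 * that bucket's DPO.
 */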
always_inline uword
mpls_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);

          /*
           * This node is for via-FIBs, so we can re-use the flow hash from
           * the IP lookup node if it is present.
           * We don't want to use the same hash value at each level in the
           * recursion graph as that would lead to polarisation.
           */
          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          hc1 = vnet_buffer (p1)->ip.flow_hash = 0;

          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
              }
          }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      vnet_buffer(p1)->ip.flow_hash >> 1;
              }
              else
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls1, hc1);
              }
          }

          dpo0 = load_balance_get_bucket_i(lb0,
                                           hc0 & (lb0->lb_n_buckets_minus_1));
          dpo1 = load_balance_get_bucket_i(lb1,
                                           hc1 & (lb1->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, thread_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));

          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
          }
          if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->hash = hc1;
          }

          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_to_next -= 1;
          n_left_from -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);

          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
              }
          }

          dpo0 = load_balance_get_bucket_i(lb0,
                                           hc0 & (lb0->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));

          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  return frame->n_vectors;
}

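/*
 * A sibling of mpls-lookup: both nodes share the same set of next-node arcs,
 * so a DPO's next-node index is valid when dispatched from either node.
 */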
VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .function = mpls_load_balance,
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .sibling_of = "mpls-lookup",

  .format_trace = format_mpls_load_balance_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance)

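/*
 * Wire up the arc from mpls-lookup to mpls-replicate at init time; packets
 * that hit a multicast (replicate) entry are sent along this edge.
 */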
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  clib_error_t * error;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mpls_lookup_node.index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);