/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/replicate_dpo.h>

/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
#ifndef CLIB_MARCH_VARIANT
u32 mpls_lookup_to_replicate_edge;
#endif /* CLIB_MARCH_VARIANT */
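/*
 * The VLIB_NODE_FN bodies below are compiled once per CPU architecture
 * variant; data such as the edge index above must be defined exactly once,
 * hence the CLIB_MARCH_VARIANT guard.
 */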

typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;

static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return s;
}

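/*
 * MPLS lookup dispatch: while at least eight packets remain, four are
 * processed per iteration (so the following four can be prefetched),
 * falling back to one at a time. Per packet: find the LFIB for the RX
 * interface, look up the top-of-stack label, choose a load-balance bucket
 * (or branch to mpls-replicate), save TTL/EXP, then pop the label.
 */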
VLIB_NODE_FN (mpls_lookup_node) (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;

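          /*
           * Buffers 0-3 are processed in this iteration; prefetching the
           * headers and first data bytes of buffers 4-7 hides memory
           * latency for the next iteration (hence the n_left_from >= 8
           * guard above).
           */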
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);
            vlib_prefetch_buffer_header (p6, STORE);
            vlib_prefetch_buffer_header (p7, STORE);

            CLIB_PREFETCH (p4->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p6->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p7->data, sizeof (h0[0]), LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);

          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;

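          /*
           * The lookup result encodes multicast in its top bit: when
           * MPLS_IS_REPLICATE is set the remaining bits index a replicate
           * DPO and the packet goes to mpls-replicate; otherwise the result
           * is a load-balance index.
           */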
          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
              }
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }
          if (MPLS_IS_REPLICATE & lbi1)
          {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb1 = load_balance_get(lbi1);
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
              {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
                  dpo1 = load_balance_get_fwd_bucket
                      (lb1,
                       (hash_c1 & (lb1->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo1 = load_balance_get_bucket_i (lb1, 0);
              }
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
          }
          if (MPLS_IS_REPLICATE & lbi2)
          {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb2 = load_balance_get(lbi2);
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
              {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
                  dpo2 = load_balance_get_fwd_bucket
                      (lb2,
                       (hash_c2 & (lb2->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo2 = load_balance_get_bucket_i (lb2, 0);
              }
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
          }
          if (MPLS_IS_REPLICATE & lbi3)
          {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb3 = load_balance_get(lbi3);
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
              {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
                  dpo3 = load_balance_get_fwd_bucket
                      (lb3,
                       (hash_c3 & (lb3->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo3 = load_balance_get_bucket_i (lb3, 0);
              }
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
          }

          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order:
           *  the last byte is the TTL;
           *  bits 1-3 of the third byte are the EXP bits.
           */
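          /*
           * Label stack entry layout (RFC 3032), as a host-order u32:
           *
           *   | label (20 bits) | EXP (3 bits) | S (1 bit) | TTL (8 bits) |
           *
           * e.g. label 16, EXP 0, S 1, TTL 64 encodes as 0x00010140.
           */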
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
          }

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;

          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
              }
              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }

          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order:
           *  the last byte is the TTL;
           *  bits 1-3 of the third byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, mm->mpls_lookup_node_index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}

static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include "error.def"
#undef mpls_error
};

VLIB_REGISTER_NODE (mpls_lookup_node) = {
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

  .sibling_of = "mpls-load-balance",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};
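
/*
 * mpls-lookup is a sibling of mpls-load-balance, so both nodes share the
 * same set of next-node arcs; a dpoi_next_node edge stored in a DPO is
 * therefore valid from either node.
 */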

typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;

static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return s;
}

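/*
 * mpls-load-balance resolves a load-balance DPO selected by an earlier
 * node: the LB index arrives in ip.adj_index[VLIB_TX] rather than from an
 * LFIB lookup, and traffic is counted against lbm_via_counters (the
 * lookup node above uses lbm_to_counters).
 */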
VLIB_NODE_FN (mpls_load_balance_node) (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), LOAD);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);

          /*
           * This node is for via-FIBs, so we can re-use the flow hash value
           * from a previous node if present. We don't want to use the same
           * hash value at each level in the recursion graph, as that would
           * lead to polarisation: shifting the hash at each level keeps the
           * bucket choices independent.
           */
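          /*
           * Note that hc0/hc1 are reset to zero just below, so as written
           * the re-use branch (flow_hash already set, shifted right by one)
           * cannot fire in this node; the hash is recomputed from the MPLS
           * header whenever the load-balance has more than one bucket.
           */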
          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          hc1 = vnet_buffer (p1)->ip.flow_hash = 0;

          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
              }
              dpo0 = load_balance_get_fwd_bucket
                  (lb0, (hc0 & lb0->lb_n_buckets_minus_1));
          }
          else
          {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
          }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      vnet_buffer(p1)->ip.flow_hash >> 1;
              }
              else
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls1, hc1);
              }
              dpo1 = load_balance_get_fwd_bucket
                  (lb1, (hc1 & lb1->lb_n_buckets_minus_1));
          }
          else
          {
              dpo1 = load_balance_get_bucket_i (lb1, 0);
          }

          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, thread_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));

          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
          }
          if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->hash = hc1;
          }

          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_to_next -= 1;
          n_left_from -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);

          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
              }
              dpo0 = load_balance_get_fwd_bucket
                  (lb0, (hc0 & lb0->lb_n_buckets_minus_1));
          }
          else
          {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
          }

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));

          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
          }

          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .format_trace = format_mpls_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes =
  {
      [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
  },
};

#ifndef CLIB_MARCH_VARIANT
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  mpls_main_t *mm = &mpls_main;
  clib_error_t * error;
  vlib_node_t *node = vlib_get_node_by_name (vm, (u8*)"mpls-lookup");

  mm->mpls_lookup_node_index = node->index;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

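  /*
   * mpls-replicate is not a sibling of mpls-load-balance, so its arc from
   * mpls-lookup must be added explicitly; the edge index recorded here is
   * used as the next-node index when a lookup result has MPLS_IS_REPLICATE
   * set.
   */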
  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mm->mpls_lookup_node_index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);
#endif /* CLIB_MARCH_VARIANT */