/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/replicate_dpo.h>

/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
#ifndef CLIB_MARCH_VARIANT
u32 mpls_lookup_to_replicate_edge;
#endif /* CLIB_MARCH_VARIANT */

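/**
 * Per-packet trace data captured by the mpls-lookup node
 */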
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;

static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return s;
}

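/**
 * The mpls-lookup node: looks up the outermost label of each packet in the
 * MPLS FIB of the receiving interface, stashes the label's TTL and EXP bits
 * in the buffer metadata, pops the label, and forwards the packet either to
 * the bucket chosen from the resulting load-balance or, for multicast
 * entries, to the mpls-replicate node.
 */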
VLIB_NODE_FN (mpls_lookup_node) (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

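      /*
       * quad-packet loop: process four packets per iteration while
       * prefetching the headers of the following four.
       */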
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);
            vlib_prefetch_buffer_header (p6, STORE);
            vlib_prefetch_buffer_header (p7, STORE);

            CLIB_PREFETCH (p4->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p6->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p7->data, sizeof (h0[0]), LOAD);
          }

          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);

          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;

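          /*
           * a lookup result with the MPLS_IS_REPLICATE flag set is a
           * multicast entry: the remaining bits are the replicate DPO index
           * and the packet is sent to the mpls-replicate node. Otherwise the
           * result is a load-balance index.
           */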
          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
              }
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }
          if (MPLS_IS_REPLICATE & lbi1)
          {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb1 = load_balance_get(lbi1);
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
              {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
                  dpo1 = load_balance_get_fwd_bucket
                      (lb1,
                       (hash_c1 & (lb1->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo1 = load_balance_get_bucket_i (lb1, 0);
              }
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
          }
          if (MPLS_IS_REPLICATE & lbi2)
          {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb2 = load_balance_get(lbi2);
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
              {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
                  dpo2 = load_balance_get_fwd_bucket
                      (lb2,
                       (hash_c2 & (lb2->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo2 = load_balance_get_bucket_i (lb2, 0);
              }
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
          }
          if (MPLS_IS_REPLICATE & lbi3)
          {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb3 = load_balance_get(lbi3);
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
              {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
                  dpo3 = load_balance_get_fwd_bucket
                      (lb3,
                       (hash_c3 & (lb3->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo3 = load_balance_get_bucket_i (lb3, 0);
              }
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
          }

          /*
           * before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           *  last byte is the TTL.
           *  bits 2 to 4 inclusive are the EXP bits
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
          }

          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
          }

          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }

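      /*
       * single-packet loop: same logic as above for any remaining packets
       */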
      while (n_left_from > 0 && n_left_to_next > 0)
      {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;

          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
              }
              else
              {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
              }
              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }

          /*
           * before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           *  last byte is the TTL.
           *  bits 2 to 4 inclusive are the EXP bits
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, mm->mpls_lookup_node_index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}

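/* Error strings for the MPLS error counters, generated from error.def */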
static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include "error.def"
#undef mpls_error
};

VLIB_REGISTER_NODE (mpls_lookup_node) = {
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

  .sibling_of = "mpls-load-balance",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};

typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;

static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return s;
}

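/**
 * The mpls-load-balance node: resolves the load-balance DPO whose index the
 * parent (via-FIB) lookup stashed in the buffer's adj_index[VLIB_TX], picks
 * a bucket using the MPLS flow hash when there is more than one, and
 * forwards the packet to that bucket's next node.
 */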
VLIB_NODE_FN (mpls_load_balance_node) (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);

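      /*
       * dual-packet loop with prefetch of the following two buffers
       */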
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), LOAD);
          }

          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;

          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);

          /*
           * This node is for via-FIBs, so we can re-use the hash value from
           * the parent node if one is present.
           * We don't want to use the same hash value at each level in the
           * recursion graph, as that would lead to polarisation.
           */
          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          hc1 = vnet_buffer (p1)->ip.flow_hash = 0;

          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
              }
              dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
          }
          else
          {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
          }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash = vnet_buffer(p1)->ip.flow_hash >> 1;
              }
              else
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1);
              }
              dpo1 = load_balance_get_fwd_bucket(lb1, (hc1 & lb1->lb_n_buckets_minus_1));
          }
          else
          {
              dpo1 = load_balance_get_bucket_i (lb1, 0);
          }

          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, thread_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));

          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
          }

          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_to_next -= 1;
          n_left_from -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);

          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
              }
              dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
          }
          else
          {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
          }

          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));

          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .format_trace = format_mpls_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes =
  {
      [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
  },
};

#ifndef CLIB_MARCH_VARIANT
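/**
 * Cache the mpls-lookup node index and add the graph arc from mpls-lookup
 * to mpls-replicate, used for multicast label entries.
 */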
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  mpls_main_t *mm = &mpls_main;
  clib_error_t * error;
  vlib_node_t *node = vlib_get_node_by_name (vm, (u8*)"mpls-lookup" );

  mm->mpls_lookup_node_index = node->index;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mm->mpls_lookup_node_index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);
#endif /* CLIB_MARCH_VARIANT */