/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/replicate_dpo.h>

/**
 * Static MPLS VLIB forwarding node
 */
static vlib_node_registration_t mpls_lookup_node;
/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
u32 mpls_lookup_to_replicate_edge;
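/*
 * A lookup result with the MPLS_IS_REPLICATE flag set carries a replicate
 * DPO index in its remaining bits and is sent down this edge; otherwise
 * the result is a load-balance index. See the use in mpls_lookup() below.
 */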
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 hash;
  u32 label_net_byte_order;
} mpls_lookup_trace_t;
static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return s;
}
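/*
 * For reference, an MPLS label stack entry packs, in network byte order:
 *   | label (20 bits) | EXP (3 bits) | S (1 bit) | TTL (8 bits) |
 * e.g. label 16003, EXP 0, end-of-stack S 1, TTL 64 is the 32-bit word
 * 0x03e83140, which the trace above renders as "label 16003 eos 1".
 */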
static inline uword
mpls_lookup (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
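      /*
       * The quad-loop below consumes packets 0-3 of the frame while
       * prefetching the buffer headers and MPLS headers of packets 2-5;
       * the n_left_from >= 8 guard keeps those prefetch indices in range.
       */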
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3, *p4, *p5;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p4->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), STORE);
          }
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          to_next += 4;
          n_left_from -= 4;
          n_left_to_next -= 4;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);
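          /*
           * The forwarding lookup is keyed on the label value and the
           * end-of-stack bit of the header just read; the result encodes
           * either a load-balance or a replicate DPO index (see the note
           * at mpls_lookup_to_replicate_edge above).
           */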
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;
          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }
          if (MPLS_IS_REPLICATE & lbi1)
            {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb1 = load_balance_get(lbi1);
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
                {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
                  dpo1 = load_balance_get_fwd_bucket
                      (lb1,
                       (hash_c1 & (lb1->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo1 = load_balance_get_bucket_i (lb1, 0);
                }
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
            }
          if (MPLS_IS_REPLICATE & lbi2)
            {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb2 = load_balance_get(lbi2);
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
                {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
                  dpo2 = load_balance_get_fwd_bucket
                      (lb2,
                       (hash_c2 & (lb2->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo2 = load_balance_get_bucket_i (lb2, 0);
                }
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
            }
          if (MPLS_IS_REPLICATE & lbi3)
            {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb3 = load_balance_get(lbi3);
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
                {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
                  dpo3 = load_balance_get_fwd_bucket
                      (lb3,
                       (hash_c3 & (lb3->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo3 = load_balance_get_bucket_i (lb3, 0);
                }
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
            }
          /*
           * Before we pop the label, copy the values we need to maintain:
           * the label header is in network byte order; its last byte is
           * the TTL and bits 2 to 4 (inclusive) of its third byte are the
           * EXP bits.
           */
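          /*
           * e.g. for the stack entry 0x03e83140 above, byte 3 is the TTL
           * (0x40 = 64) and byte 2 is 0x31: its low bit is the S bit, and
           * (0x31 & 0xe) >> 1 recovers EXP = 0.
           */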
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }
          /*
           * Before we pop the label, copy the values we need to maintain:
           * the label header is in network byte order; its last byte is
           * the TTL and bits 2 to 4 (inclusive) of its third byte are the
           * EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, mpls_lookup_node.index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}
static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include <vnet/mpls/error.def>
#undef mpls_error
};
VLIB_REGISTER_NODE (mpls_lookup_node, static) = {
  .function = mpls_lookup,
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

  .sibling_of = "mpls-load-balance",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;
static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return s;
}
always_inline uword
mpls_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
          }
          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          to_next += 2;
          n_left_from -= 2;
          n_left_to_next -= 2;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);
          /*
           * This node sits on the via-FIB path, so we can re-use the flow
           * hash written by the previous level, if present. We do not want
           * to use the same hash value at each level in the recursion
           * graph, as that would lead to polarisation.
           */
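          /*
           * e.g. an inherited flow hash of 0x6 is shifted to 0x3 before
           * use at this level, and to 0x1 at the next level down, so
           * successive levels select different buckets.
           */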
          /*
           * only reset the local hash copies; clearing the buffer's
           * flow_hash here would make the re-use test below always false.
           */
          hc0 = hc1 = 0;

          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
                }
              dpo0 = load_balance_get_fwd_bucket
                  (lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      vnet_buffer(p1)->ip.flow_hash >> 1;
                }
              else
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls1, hc1);
                }
              dpo1 = load_balance_get_fwd_bucket
                  (lb1, (hc1 & lb1->lb_n_buckets_minus_1));
            }
          else
            {
              dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, thread_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));
          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          hc0 = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
                }
              dpo0 = load_balance_get_fwd_bucket
                  (lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  return frame->n_vectors;
}
VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .function = mpls_load_balance,
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .format_trace = format_mpls_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes =
  {
      [0] = "mpls-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance)
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  clib_error_t * error;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mpls_lookup_node.index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);