/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/replicate_dpo.h>
/**
 * Static MPLS VLIB forwarding node
 */
static vlib_node_registration_t mpls_lookup_node;

/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
u32 mpls_lookup_to_replicate_edge;
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;
static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return s;
}
static inline uword
mpls_lookup (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
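      /*
       * Quad-loop: process four packets per iteration while at least
       * eight remain, so the prefetches issued below stay one
       * iteration ahead of the packets being worked on.
       */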
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3, *p4, *p5;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p4->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), STORE);
          }
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          n_left_from -= 4;
          to_next += 4;
          n_left_to_next -= 4;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);
          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);
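          /*
           * Lookup each packet in the MPLS FIB bound to its RX
           * interface; the key is the label and EOS bit of the
           * top-of-stack header.
           */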
          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;
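          /*
           * If the MPLS_IS_REPLICATE flag is set in the lookup result,
           * the packet goes down the arc to the replicate (multicast)
           * node with the flag masked off. Otherwise the result is a
           * unicast load-balance index; a bucket is chosen by flow
           * hash when the load-balance has more than one.
           */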
          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }
          if (MPLS_IS_REPLICATE & lbi1)
            {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb1 = load_balance_get(lbi1);
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
                {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
                  dpo1 = load_balance_get_fwd_bucket
                      (lb1,
                       (hash_c1 & (lb1->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo1 = load_balance_get_bucket_i (lb1, 0);
                }
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
            }
          if (MPLS_IS_REPLICATE & lbi2)
            {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb2 = load_balance_get(lbi2);
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
                {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
                  dpo2 = load_balance_get_fwd_bucket
                      (lb2,
                       (hash_c2 & (lb2->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo2 = load_balance_get_bucket_i (lb2, 0);
                }
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
            }
          if (MPLS_IS_REPLICATE & lbi3)
            {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb3 = load_balance_get(lbi3);
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
                {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
                  dpo3 = load_balance_get_fwd_bucket
                      (lb3,
                       (hash_c3 & (lb3->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo3 = load_balance_get_bucket_i (lb3, 0);
                }
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
            }
          /*
           * before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           *  the last byte is the TTL.
           *  bits 2 to 4 inclusive of the 3rd byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;
          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
            }

          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
            }

          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }
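      /*
       * Single-loop: handle the packets left over from the quad-loop,
       * one at a time.
       */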
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }
          /*
           * before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order.
           *  the last byte is the TTL.
           *  bits 2 to 4 inclusive of the 3rd byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
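  /*
   * every packet that completed the lookup had its top label consumed,
   * so all of them count as decapsulated.
   */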
  vlib_node_increment_counter (vm, mpls_lookup_node.index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}
static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include "error.def"
#undef mpls_error
};
VLIB_REGISTER_NODE (mpls_lookup_node, static) = {
  .function = mpls_lookup,
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

  .sibling_of = "mpls-load-balance",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};

VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)
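/**
 * The mpls-load-balance node: performs the bucket selection for
 * via-FIB (recursive) load-balances, whose LB index arrives in the
 * buffer's TX adjacency slot rather than from an LFIB lookup.
 */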
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;
static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return s;
}
static inline uword
mpls_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);
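      /*
       * Dual-loop: process two packets per iteration while at least
       * four remain, prefetching the next pair.
       */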
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
          }
          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);
          /*
           * This node is in the path of via-FIBs, so we can re-use the
           * flow hash written by the previous node where present. We
           * don't want to use the same hash value at each level in the
           * recursion graph, as that would lead to polarisation.
           */
          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          hc1 = vnet_buffer (p1)->ip.flow_hash = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
                }
              dpo0 = load_balance_get_fwd_bucket
                  (lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      vnet_buffer(p1)->ip.flow_hash >> 1;
                }
              else
                {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls1, hc1);
                }
              dpo1 = load_balance_get_fwd_bucket
                  (lb1, (hc1 & lb1->lb_n_buckets_minus_1));
            }
          else
            {
              dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, thread_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));
          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
            }
          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }
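      /*
       * Single-loop: handle the packets left over from the dual-loop.
       */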
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);

          hc0 = vnet_buffer (p0)->ip.flow_hash = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
                }
              dpo0 = load_balance_get_fwd_bucket
                  (lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }
  return frame->n_vectors;
}
VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .function = mpls_load_balance,
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .format_trace = format_mpls_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes =
  {
      [0] = "mpls-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance)
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  clib_error_t * error;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mpls_lookup_node.index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);