/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/mpls/mpls.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/replicate_dpo.h>
/**
 * Static MPLS VLIB forwarding node
 */
static vlib_node_registration_t mpls_lookup_node;
/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
static u32 mpls_lookup_to_replicate_edge;
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;
static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label(
                  clib_net_to_host_u32(t->label_net_byte_order)),
              vnet_mpls_uc_get_s(
                  clib_net_to_host_u32(t->label_net_byte_order)));
  return (s);
}
/*
 * Compute the flow hash for an MPLS packet.
 * We'll use it to select which adjacency to use for this flow. And other things.
 */
always_inline u32
mpls_compute_flow_hash (const mpls_unicast_header_t * hdr,
                        flow_hash_config_t flow_hash_config)
{
  /*
   * Hash on the label value alone for now. Byte-swap first so the numerical
   * label value is used: an odd label then lands in an odd bucket, rather
   * than a byte-swapped label selecting only the even buckets.
   */
  return (vnet_mpls_uc_get_label(clib_net_to_host_u32(hdr->label_exp_s_ttl)));
}
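/*
 * Illustrative sketch only (hypothetical values, not part of the node):
 * how this hash drives the bucket selection performed further down.
 *
 *   wire bytes 0x00 0x06 0x41 0x40  ->  host order 0x00064140
 *   label = 100, EXP = 0, EOS = 1, TTL = 64
 *   flow hash = 100
 *   bucket    = 100 & lb->lb_n_buckets_minus_1   (e.g. 100 & 3 = 0)
 */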
static inline uword
mpls_lookup (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
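      /*
       * Quad-loop: handle four packets per iteration. We require at least
       * eight on input so the buffers for the following four can be
       * prefetched while these four are processed.
       */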
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3, *p4, *p5;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);
            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);
            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);

            CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p4->data, sizeof (h0[0]), STORE);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), STORE);
          }
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          to_next += 4;
          n_left_from -= 4;
          n_left_to_next -= 4;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);
          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;
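          /*
           * Each lookup result is either a load-balance object index or,
           * when the MPLS_IS_REPLICATE flag is set, an index into the
           * replicate DPO pool (multicast LSPs) with the flag masked off.
           */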
          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
              }
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));
              dpo0 = load_balance_get_bucket_i(lb0,
                                               (hash_c0 &
                                                (lb0->lb_n_buckets_minus_1)));
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }
          if (MPLS_IS_REPLICATE & lbi1)
          {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb1 = load_balance_get(lbi1);

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
              {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
              }
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));
              dpo1 = load_balance_get_bucket_i(lb1,
                                               (hash_c1 &
                                                (lb1->lb_n_buckets_minus_1)));
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
          }
          if (MPLS_IS_REPLICATE & lbi2)
          {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb2 = load_balance_get(lbi2);

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
              {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
              }
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));
              dpo2 = load_balance_get_bucket_i(lb2,
                                               (hash_c2 &
                                                (lb2->lb_n_buckets_minus_1)));
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
          }
          if (MPLS_IS_REPLICATE & lbi3)
          {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb3 = load_balance_get(lbi3);

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
              {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
              }
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));
              dpo3 = load_balance_get_bucket_i(lb3,
                                               (hash_c3 &
                                                (lb3->lb_n_buckets_minus_1)));
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
          }
          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order:
           *  the last byte is the TTL;
           *  bits 2 to 4 (inclusive) of the third byte are the EXP bits.
           */
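          /*
           * Worked example (hypothetical packet): for wire bytes
           * 0x00 0x06 0x41 0x40 (label 100, EXP 0, EOS 1, TTL 64):
           *   ttl = byte[3]              = 0x40             = 64
           *   exp = (byte[2] & 0xe) >> 1 = (0x41 & 0xe) >> 1 = 0
           */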
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;
          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));
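          /*
           * Each advance above is sizeof(*h0) == 4 bytes: the buffer now
           * starts at the next label stack entry, or at the payload if the
           * popped label was end-of-stack.
           */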
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }
          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
          }
          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
          }
          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
          }
          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);
          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          if (MPLS_IS_REPLICATE & lbi0)
          {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
          }
          else
          {
              lb0 = load_balance_get(lbi0);

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
              {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
              }

              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              dpo0 = load_balance_get_bucket_i(lb0,
                                               (hash_c0 &
                                                (lb0->lb_n_buckets_minus_1)));

              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
          }
          /*
           * Before we pop the label, copy the values we need to maintain.
           * The label header is in network byte order:
           *  the last byte is the TTL;
           *  bits 2 to 4 (inclusive) of the third byte are the EXP bits.
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;

          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
          }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, mpls_lookup_node.index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}
static char * mpls_error_strings[] = {
#define mpls_error(n,s) s,
#include "error.def"
#undef mpls_error
};
VLIB_REGISTER_NODE (mpls_lookup_node, static) = {
  .function = mpls_lookup,
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_strings = mpls_error_strings,

  .sibling_of = "ip4-lookup",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)
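/*
 * The mpls-load-balance node below makes the bucket choice for via (i.e.
 * recursive) load-balance objects; upstream nodes enqueue packets here with
 * vnet_buffer()->ip.adj_index[VLIB_TX] already set to the load-balance index.
 */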
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;
static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash %d",
              t->next_index, t->lb_index, t->hash);
  return (s);
}
always_inline uword
mpls_load_balance (vlib_main_t * vm,
                   vlib_node_runtime_t * node,
                   vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);
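      /*
       * Dual-loop: two packets per iteration; require at least four on
       * input so the next two buffers can be prefetched.
       */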
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
          }
          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          n_left_from -= 2;
          to_next += 2;
          n_left_to_next -= 2;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);
          /*
           * This node is for via-FIBs, so we can re-use the flow hash value
           * from the buffer if an earlier node has already computed one.
           * We don't want to use the same hash value at each level in the
           * recursion graph, as that would lead to polarisation.
           */
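          /*
           * Sketch (hypothetical values): a flow with hash 0b1101 crossing
           * two levels of 2-bucket load balance. Level 0 uses bit 0 and
           * picks bucket 1; the stored hash is then shifted right, so level
           * 1 sees 0b0110 and picks bucket 0. Without the shift every level
           * would test the same bit and traffic would polarise onto one path.
           */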
          hc0 = hc1 = 0;

          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
              }
          }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p1)->ip.flow_hash))
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      vnet_buffer(p1)->ip.flow_hash >> 1;
              }
              else
              {
                  hc1 = vnet_buffer(p1)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls1, hc1);
              }
          }
          dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
          dpo1 = load_balance_get_bucket_i(lb1, hc1 & (lb1->lb_n_buckets_minus_1));

          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
              (cm, thread_index, lbi1, 1,
               vlib_buffer_length_in_chain (vm, p1));
          if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
          {
              mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
                                                              p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
          }
          vlib_validate_buffer_enqueue_x2 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, pi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          hc0 = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
          {
              if (PREDICT_TRUE (vnet_buffer(p0)->ip.flow_hash))
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      vnet_buffer(p0)->ip.flow_hash >> 1;
              }
              else
              {
                  hc0 = vnet_buffer(p0)->ip.flow_hash =
                      mpls_compute_flow_hash(mpls0, hc0);
              }
          }

          dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

          vlib_increment_combined_counter
              (cm, thread_index, lbi0, 1,
               vlib_buffer_length_in_chain (vm, p0));
          vlib_validate_buffer_enqueue_x1 (vm, node, next,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }
  return frame->n_vectors;
}
VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .function = mpls_load_balance,
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .sibling_of = "mpls-lookup",

  .format_trace = format_mpls_load_balance_trace,
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance)
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  clib_error_t * error;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mpls_lookup_node.index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);