/*
 * mpls_lookup.c: MPLS lookup
 *
 * Copyright (c) 2012-2014 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/mpls_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/mpls/mpls.api_enum.h>
/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node
 */
#ifndef CLIB_MARCH_VARIANT
u32 mpls_lookup_to_replicate_edge;
#endif /* CLIB_MARCH_VARIANT */
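/*
 * Per-packet trace data recorded by the lookup node. The label is stored
 * in network byte order and only decoded when the trace is formatted.
 */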
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 lfib_index;
  u32 label_net_byte_order;
  u32 hash;
} mpls_lookup_trace_t;
static u8 *
format_mpls_lookup_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);

  s = format (s,
              "MPLS: next [%d], lookup fib index %d, LB index %d hash 0x%08x "
              "label %d eos %d",
              t->next_index, t->lfib_index, t->lb_index, t->hash,
              vnet_mpls_uc_get_label (clib_net_to_host_u32 (t->label_net_byte_order)),
              vnet_mpls_uc_get_s (clib_net_to_host_u32 (t->label_net_byte_order)));
  return s;
}
VLIB_NODE_FN (mpls_lookup_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, next_index, * from, * to_next;
  mpls_main_t * mm = &mpls_main;
  u32 thread_index = vlib_get_thread_index();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
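  /*
   * Standard VPP dispatch pattern: a quad-packet loop that prefetches the
   * buffers for the next iteration, followed by a single-packet loop that
   * handles the remainder of the frame.
   */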
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);
      while (n_left_from >= 8 && n_left_to_next >= 4)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;
          u32 lbi1, next1, lfib_index1, bi1, hash_c1;
          const mpls_unicast_header_t * h1;
          const load_balance_t *lb1;
          const dpo_id_t *dpo1;
          vlib_buffer_t * b1;
          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
          const mpls_unicast_header_t * h2;
          const load_balance_t *lb2;
          const dpo_id_t *dpo2;
          vlib_buffer_t * b2;
          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
          const mpls_unicast_header_t * h3;
          const load_balance_t *lb3;
          const dpo_id_t *dpo3;
          vlib_buffer_t * b3;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p4, *p5, *p6, *p7;

            p4 = vlib_get_buffer (vm, from[4]);
            p5 = vlib_get_buffer (vm, from[5]);
            p6 = vlib_get_buffer (vm, from[6]);
            p7 = vlib_get_buffer (vm, from[7]);

            vlib_prefetch_buffer_header (p4, STORE);
            vlib_prefetch_buffer_header (p5, STORE);
            vlib_prefetch_buffer_header (p6, STORE);
            vlib_prefetch_buffer_header (p7, STORE);

            CLIB_PREFETCH (p4->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p5->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p6->data, sizeof (h0[0]), LOAD);
            CLIB_PREFETCH (p7->data, sizeof (h0[0]), LOAD);
          }
          bi0 = to_next[0] = from[0];
          bi1 = to_next[1] = from[1];
          bi2 = to_next[2] = from[2];
          bi3 = to_next[3] = from[3];

          from += 4;
          to_next += 4;
          n_left_from -= 4;
          n_left_to_next -= 4;
          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);
          h0 = vlib_buffer_get_current (b0);
          h1 = vlib_buffer_get_current (b1);
          h2 = vlib_buffer_get_current (b2);
          h3 = vlib_buffer_get_current (b3);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);
          lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b1)->sw_if_index[VLIB_RX]);
          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
          lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);

          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;
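          /*
           * A multicast lookup result encodes the replicate DPO's index
           * with the MPLS_IS_REPLICATE flag bit set; mask the flag off to
           * recover the index before handing the packet to the replicate
           * node. Unicast results are load-balance indices.
           */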
          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;

              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }
          if (MPLS_IS_REPLICATE & lbi1)
            {
              next1 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
                  (lbi1 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb1 = load_balance_get(lbi1);
              ASSERT (lb1->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb1->lb_n_buckets));

              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
                {
                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
                  dpo1 = load_balance_get_fwd_bucket
                      (lb1,
                       (hash_c1 & (lb1->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo1 = load_balance_get_bucket_i (lb1, 0);
                }
              next1 = dpo1->dpoi_next_node;

              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi1, 1,
                   vlib_buffer_length_in_chain (vm, b1));
            }
          if (MPLS_IS_REPLICATE & lbi2)
            {
              next2 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
                  (lbi2 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb2 = load_balance_get(lbi2);
              ASSERT (lb2->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb2->lb_n_buckets));

              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
                {
                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
                  dpo2 = load_balance_get_fwd_bucket
                      (lb2,
                       (hash_c2 & (lb2->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo2 = load_balance_get_bucket_i (lb2, 0);
                }
              next2 = dpo2->dpoi_next_node;

              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi2, 1,
                   vlib_buffer_length_in_chain (vm, b2));
            }
          if (MPLS_IS_REPLICATE & lbi3)
            {
              next3 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
                  (lbi3 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb3 = load_balance_get(lbi3);
              ASSERT (lb3->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb3->lb_n_buckets));

              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
                {
                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
                  dpo3 = load_balance_get_fwd_bucket
                      (lb3,
                       (hash_c3 & (lb3->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo3 = load_balance_get_bucket_i (lb3, 0);
                }
              next3 = dpo3->dpoi_next_node;

              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi3, 1,
                   vlib_buffer_length_in_chain (vm, b3));
            }
          /*
           * before we pop the label copy the values we need to maintain.
           * The label header is in network byte order.
           *  last byte is the TTL.
           *  bits 2 to 4 inclusive are the EXP bits
           */
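          /*
           * For reference, the MPLS label stack entry layout (RFC 3032),
           * in network byte order:
           *
           *  0                   1                   2                   3
           *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
           * |                Label                  | EXP |S|      TTL      |
           */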
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
          vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
          vnet_buffer (b1)->mpls.first = 1;
          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
          vnet_buffer (b2)->mpls.first = 1;
          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
          vnet_buffer (b3)->mpls.first = 1;
          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          vlib_buffer_advance(b1, sizeof(*h1));
          vlib_buffer_advance(b2, sizeof(*h2));
          vlib_buffer_advance(b3, sizeof(*h3));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->lfib_index = lfib_index1;
              tr->hash = hash_c1;
              tr->label_net_byte_order = h1->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b2, sizeof (*tr));
              tr->next_index = next2;
              tr->lb_index = lbi2;
              tr->lfib_index = lfib_index2;
              tr->hash = hash_c2;
              tr->label_net_byte_order = h2->label_exp_s_ttl;
            }
          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b3, sizeof (*tr));
              tr->next_index = next3;
              tr->lb_index = lbi3;
              tr->lfib_index = lfib_index3;
              tr->hash = hash_c3;
              tr->label_net_byte_order = h3->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 lbi0, next0, lfib_index0, bi0, hash_c0;
          const mpls_unicast_header_t * h0;
          const load_balance_t *lb0;
          const dpo_id_t *dpo0;
          vlib_buffer_t * b0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;
          b0 = vlib_get_buffer (vm, bi0);
          h0 = vlib_buffer_get_current (b0);

          lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                vnet_buffer(b0)->sw_if_index[VLIB_RX]);

          lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          if (MPLS_IS_REPLICATE & lbi0)
            {
              next0 = mpls_lookup_to_replicate_edge;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
                  (lbi0 & ~MPLS_IS_REPLICATE);
            }
          else
            {
              lb0 = load_balance_get(lbi0);
              ASSERT (lb0->lb_n_buckets > 0);
              ASSERT (is_pow2 (lb0->lb_n_buckets));

              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
                {
                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
                  dpo0 = load_balance_get_fwd_bucket
                      (lb0,
                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
                }
              else
                {
                  dpo0 = load_balance_get_bucket_i (lb0, 0);
                }
              next0 = dpo0->dpoi_next_node;
              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

              vlib_increment_combined_counter
                  (cm, thread_index, lbi0, 1,
                   vlib_buffer_length_in_chain (vm, b0));
            }
          /*
           * before we pop the label copy the values we need to maintain.
           * The label header is in network byte order.
           *  last byte is the TTL.
           *  bits 2 to 4 inclusive are the EXP bits
           */
          vnet_buffer (b0)->mpls.ttl = ((char*)h0)[3];
          vnet_buffer (b0)->mpls.exp = (((char*)h0)[2] & 0xe) >> 1;
          vnet_buffer (b0)->mpls.first = 1;
          /*
           * pop the label that was just used in the lookup
           */
          vlib_buffer_advance(b0, sizeof(*h0));
          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
                                                        b0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->lfib_index = lfib_index0;
              tr->hash = hash_c0;
              tr->label_net_byte_order = h0->label_exp_s_ttl;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, mm->mpls_lookup_node_index,
                               MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (mpls_lookup_node) = {
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_counters = mpls_error_counters,

  .sibling_of = "mpls-load-balance",

  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};
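/*
 * Note: mpls-lookup declares no next nodes of its own; as a sibling of
 * mpls-load-balance it shares that node's next-node table, so the
 * dpoi_next_node indices used by both nodes agree.
 */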
typedef struct {
  u32 next_index;
  u32 lb_index;
  u32 hash;
} mpls_load_balance_trace_t;
static u8 *
format_mpls_load_balance_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mpls_load_balance_trace_t * t = va_arg (*args, mpls_load_balance_trace_t *);

  s = format (s, "MPLS: next [%d], LB index %d hash 0x%08x", t->next_index,
              t->lb_index, t->hash);
  return s;
}
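/*
 * mpls-load-balance resolves a load-balance object chosen by an earlier
 * lookup: the LB index arrives in vnet_buffer()->ip.adj_index[VLIB_TX]
 * and is replaced here with the selected bucket's DPO index.
 */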
VLIB_NODE_FN (mpls_load_balance_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * frame)
{
  vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
  u32 n_left_from, n_left_to_next, * from, * to_next;
  u32 thread_index = vlib_get_thread_index();
  u32 next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;
  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next,
                           to_next, n_left_to_next);
      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          const load_balance_t *lb0, *lb1;
          vlib_buffer_t * p0, *p1;
          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
          const mpls_unicast_header_t *mpls0, *mpls1;
          const dpo_id_t *dpo0, *dpo1;
          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, STORE);
            vlib_prefetch_buffer_header (p3, STORE);

            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), LOAD);
            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), LOAD);
          }
          pi0 = to_next[0] = from[0];
          pi1 = to_next[1] = from[1];

          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;
          p0 = vlib_get_buffer (vm, pi0);
          p1 = vlib_get_buffer (vm, pi1);

          mpls0 = vlib_buffer_get_current (p0);
          mpls1 = vlib_buffer_get_current (p1);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
          lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          lb1 = load_balance_get(lbi1);
          /*
           * This node is for via FIBs, so we can re-use the flow hash from
           * the 'to' node if it is present.
           * We don't want to use the same hash value at each level in the
           * recursion graph, as that would lead to polarisation.
           */
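          /*
           * Illustrative note: shifting the stored hash right by one at each
           * level means successive levels index their buckets with different
           * bits of the same hash, e.g. with 4 buckets at both levels, the
           * 'to' level uses bits 1:0 while this level uses bits 2:1.
           */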
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    vnet_buffer (p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    mpls_compute_flow_hash (mpls0, lb0->lb_hash_config);
                }
              dpo0 = load_balance_get_fwd_bucket (
                lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash))
                {
                  hc1 = vnet_buffer (p1)->ip.flow_hash =
                    vnet_buffer (p1)->ip.flow_hash >> 1;
                }
              else
                {
                  hc1 = vnet_buffer (p1)->ip.flow_hash =
                    mpls_compute_flow_hash (mpls1, lb1->lb_hash_config);
                }
              dpo1 = load_balance_get_fwd_bucket (
                lb1, (hc1 & lb1->lb_n_buckets_minus_1));
            }
          else
            {
              dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;

          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

          vlib_increment_combined_counter (
            cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter (
            cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
            }
          if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr =
                vlib_add_trace (vm, node, p1, sizeof (*tr));
              tr->next_index = next1;
              tr->lb_index = lbi1;
              tr->hash = hc1;
            }
          vlib_validate_buffer_enqueue_x2 (
            vm, node, next, to_next, n_left_to_next, pi0, pi1, next0, next1);
        }
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          const load_balance_t *lb0;
          vlib_buffer_t * p0;
          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;

          pi0 = from[0];
          to_next[0] = pi0;
          from += 1;
          to_next += 1;
          n_left_to_next -= 1;
          n_left_from -= 1;
          p0 = vlib_get_buffer (vm, pi0);

          mpls0 = vlib_buffer_get_current (p0);
          lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];

          lb0 = load_balance_get(lbi0);
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
                {
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    vnet_buffer (p0)->ip.flow_hash >> 1;
                }
              else
                {
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    mpls_compute_flow_hash (mpls0, lb0->lb_hash_config);
                }
              dpo0 = load_balance_get_fwd_bucket (
                lb0, (hc0 & lb0->lb_n_buckets_minus_1));
            }
          else
            {
              dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
          if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
            {
              mpls_load_balance_trace_t *tr =
                vlib_add_trace (vm, node, p0, sizeof (*tr));
              tr->next_index = next0;
              tr->lb_index = lbi0;
              tr->hash = hc0;
            }
          vlib_increment_combined_counter (
            cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
          vlib_validate_buffer_enqueue_x1 (vm, node, next, to_next,
                                           n_left_to_next, pi0, next0);
        }

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  return frame->n_vectors;
}
VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .format_trace = format_mpls_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes = {
      [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
  },
};
#ifndef CLIB_MARCH_VARIANT
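/*
 * Node init: cache the lookup node's index and add the arc to the MPLS
 * replicate node; the edge index is only known once the next-node arc
 * has been created at runtime, hence the global above.
 */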
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
  mpls_main_t *mm = &mpls_main;
  clib_error_t * error;
  vlib_node_t *node = vlib_get_node_by_name (vm, (u8*)"mpls-lookup" );

  mm->mpls_lookup_node_index = node->index;

  if ((error = vlib_call_init_function (vm, mpls_init)))
    return error;

  mpls_lookup_to_replicate_edge =
      vlib_node_add_named_next(vm,
                               mm->mpls_lookup_node_index,
                               "mpls-replicate");

  return (NULL);
}

VLIB_INIT_FUNCTION (mpls_lookup_init);
#endif /* CLIB_MARCH_VARIANT */