X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fmpls%2Fmpls_lookup.c;h=c0804405db3eb7a8a29dcd2d59188c093cdfc08c;hb=6e366be38ff4f71d4ad5a24929519dc3c874ed36;hp=2d34cbde341f381ba8e6990e3bb8a6ec0bed24be;hpb=7cd468a3d7dee7d6c92f69a0bb7061ae208ec727;p=vpp.git

diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c
index 2d34cbde341..c0804405db3 100644
--- a/src/vnet/mpls/mpls_lookup.c
+++ b/src/vnet/mpls/mpls_lookup.c
@@ -16,12 +16,17 @@
  */
 #include <vlib/vlib.h>
-#include <vnet/pg/pg.h>
-#include <vnet/mpls/mpls.h>
+#include <vnet/mpls/mpls_lookup.h>
 #include <vnet/fib/mpls_fib.h>
-#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/load_balance_map.h>
+#include <vnet/dpo/replicate_dpo.h>
 
-vlib_node_registration_t mpls_lookup_node;
+/**
+ * The arc/edge from the MPLS lookup node to the MPLS replicate node
+ */
+#ifndef CLIB_MARCH_VARIANT
+u32 mpls_lookup_to_replicate_edge;
+#endif /* CLIB_MARCH_VARIANT */
 
 typedef struct {
   u32 next_index;
@@ -38,36 +43,24 @@ format_mpls_lookup_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);
 
-    s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %d"
+    s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
                 "label %d eos %d",
                 t->next_index, t->lfib_index, t->lb_index, t->hash,
                 vnet_mpls_uc_get_label(
                     clib_net_to_host_u32(t->label_net_byte_order)),
-                vnet_mpls_uc_get_s(t->label_net_byte_order));
+                vnet_mpls_uc_get_s(
+                    clib_net_to_host_u32(t->label_net_byte_order)));
     return s;
 }
 
-/*
- * Compute flow hash.
- * We'll use it to select which adjacency to use for this flow. And other things.
- */
-always_inline u32
-mpls_compute_flow_hash (const mpls_unicast_header_t * hdr,
-                        flow_hash_config_t flow_hash_config)
-{
-    // FIXME
-    return (vnet_mpls_uc_get_label(hdr->label_exp_s_ttl));
-}
-
-static inline uword
-mpls_lookup (vlib_main_t * vm,
+VLIB_NODE_FN (mpls_lookup_node) (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
 {
   vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
   u32 n_left_from, next_index, * from, * to_next;
   mpls_main_t * mm = &mpls_main;
-  u32 cpu_index = os_get_cpu_number();
+  u32 thread_index = vlib_get_thread_index();
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -80,7 +73,7 @@ mpls_lookup (vlib_main_t * vm,
       vlib_get_next_frame (vm, node, next_index,
                            to_next, n_left_to_next);
 
-      while (n_left_from >= 4 && n_left_to_next >= 2)
+      while (n_left_from >= 8 && n_left_to_next >= 4)
         {
           u32 lbi0, next0, lfib_index0, bi0, hash_c0;
           const mpls_unicast_header_t * h0;
@@ -92,82 +85,203 @@ mpls_lookup (vlib_main_t * vm,
           const load_balance_t *lb1;
           const dpo_id_t *dpo1;
           vlib_buffer_t * b1;
+          u32 lbi2, next2, lfib_index2, bi2, hash_c2;
+          const mpls_unicast_header_t * h2;
+          const load_balance_t *lb2;
+          const dpo_id_t *dpo2;
+          vlib_buffer_t * b2;
+          u32 lbi3, next3, lfib_index3, bi3, hash_c3;
+          const mpls_unicast_header_t * h3;
+          const load_balance_t *lb3;
+          const dpo_id_t *dpo3;
+          vlib_buffer_t * b3;
 
           /* Prefetch next iteration. */
           {
-            vlib_buffer_t * p2, * p3;
-
-            p2 = vlib_get_buffer (vm, from[2]);
-            p3 = vlib_get_buffer (vm, from[3]);
-
-            vlib_prefetch_buffer_header (p2, STORE);
-            vlib_prefetch_buffer_header (p3, STORE);
-
-            CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
-            CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
+            vlib_buffer_t *p4, *p5, *p6, *p7;
+
+            p4 = vlib_get_buffer (vm, from[4]);
+            p5 = vlib_get_buffer (vm, from[5]);
+            p6 = vlib_get_buffer (vm, from[6]);
+            p7 = vlib_get_buffer (vm, from[7]);
+
+            vlib_prefetch_buffer_header (p4, STORE);
+            vlib_prefetch_buffer_header (p5, STORE);
+            vlib_prefetch_buffer_header (p6, STORE);
+            vlib_prefetch_buffer_header (p7, STORE);
+
+            CLIB_PREFETCH (p4->data, sizeof (h0[0]), LOAD);
+            CLIB_PREFETCH (p5->data, sizeof (h0[0]), LOAD);
+            CLIB_PREFETCH (p6->data, sizeof (h0[0]), LOAD);
+            CLIB_PREFETCH (p7->data, sizeof (h0[0]), LOAD);
           }
 
           bi0 = to_next[0] = from[0];
           bi1 = to_next[1] = from[1];
+          bi2 = to_next[2] = from[2];
+          bi3 = to_next[3] = from[3];
 
-          from += 2;
-          n_left_from -= 2;
-          to_next += 2;
-          n_left_to_next -= 2;
+          from += 4;
+          n_left_from -= 4;
+          to_next += 4;
+          n_left_to_next -= 4;
 
           b0 = vlib_get_buffer (vm, bi0);
           b1 = vlib_get_buffer (vm, bi1);
+          b2 = vlib_get_buffer (vm, bi2);
+          b3 = vlib_get_buffer (vm, bi3);
           h0 = vlib_buffer_get_current (b0);
           h1 = vlib_buffer_get_current (b1);
+          h2 = vlib_buffer_get_current (b2);
+          h3 = vlib_buffer_get_current (b3);
 
           lfib_index0 = vec_elt(mm->fib_index_by_sw_if_index,
                                 vnet_buffer(b0)->sw_if_index[VLIB_RX]);
           lfib_index1 = vec_elt(mm->fib_index_by_sw_if_index,
                                 vnet_buffer(b1)->sw_if_index[VLIB_RX]);
+          lfib_index2 = vec_elt(mm->fib_index_by_sw_if_index,
+                                vnet_buffer(b2)->sw_if_index[VLIB_RX]);
+          lfib_index3 = vec_elt(mm->fib_index_by_sw_if_index,
+                                vnet_buffer(b3)->sw_if_index[VLIB_RX]);
 
           lbi0 = mpls_fib_table_forwarding_lookup (lfib_index0, h0);
           lbi1 = mpls_fib_table_forwarding_lookup (lfib_index1, h1);
-          lb0 = load_balance_get(lbi0);
-          lb1 = load_balance_get(lbi1);
+          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
+          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);
 
           hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
           hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
+          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
+          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;
 
-          if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+          if (MPLS_IS_REPLICATE & lbi0)
             {
-              hash_c0 = vnet_buffer (b0)->ip.flow_hash =
-                  mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+              next0 = mpls_lookup_to_replicate_edge;
+              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+                  (lbi0 & ~MPLS_IS_REPLICATE);
             }
-          if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
+          else
             {
-              hash_c1 = vnet_buffer (b1)->ip.flow_hash =
-                  mpls_compute_flow_hash(h1, lb1->lb_hash_config);
+              lb0 = load_balance_get(lbi0);
+              ASSERT (lb0->lb_n_buckets > 0);
+              ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+                {
+                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+                  dpo0 = load_balance_get_fwd_bucket
+                      (lb0,
+                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
+                }
+              else
+                {
+                  dpo0 = load_balance_get_bucket_i (lb0, 0);
+                }
+              next0 = dpo0->dpoi_next_node;
+
+              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+              vlib_increment_combined_counter
+                  (cm, thread_index, lbi0, 1,
+                   vlib_buffer_length_in_chain (vm, b0));
+            }
+          if (MPLS_IS_REPLICATE & lbi1)
+            {
+              next1 = mpls_lookup_to_replicate_edge;
+              vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
+                  (lbi1 & ~MPLS_IS_REPLICATE);
             }
+          else
+            {
+              lb1 = load_balance_get(lbi1);
+              ASSERT (lb1->lb_n_buckets > 0);
+              ASSERT (is_pow2 (lb1->lb_n_buckets));
 
-          ASSERT (lb0->lb_n_buckets > 0);
-          ASSERT (is_pow2 (lb0->lb_n_buckets));
-          ASSERT (lb1->lb_n_buckets > 0);
-          ASSERT (is_pow2 (lb1->lb_n_buckets));
+              if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
+                {
+                  hash_c1 = vnet_buffer (b1)->ip.flow_hash =
+                      mpls_compute_flow_hash(h1, lb1->lb_hash_config);
+                  dpo1 = load_balance_get_fwd_bucket
+                      (lb1,
+                       (hash_c1 & (lb1->lb_n_buckets_minus_1)));
+                }
+              else
+                {
+                  dpo1 = load_balance_get_bucket_i (lb1, 0);
+                }
+              next1 = dpo1->dpoi_next_node;
 
-          dpo0 = load_balance_get_bucket_i(lb0,
-                                           (hash_c0 &
-                                            (lb0->lb_n_buckets_minus_1)));
-          dpo1 = load_balance_get_bucket_i(lb1,
-                                           (hash_c1 &
-                                            (lb1->lb_n_buckets_minus_1)));
+              vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
 
-          next0 = dpo0->dpoi_next_node;
-          next1 = dpo1->dpoi_next_node;
+              vlib_increment_combined_counter
+                  (cm, thread_index, lbi1, 1,
+                   vlib_buffer_length_in_chain (vm, b1));
+            }
+          if (MPLS_IS_REPLICATE & lbi2)
+            {
+              next2 = mpls_lookup_to_replicate_edge;
+              vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
+                  (lbi2 & ~MPLS_IS_REPLICATE);
+            }
+          else
+            {
+              lb2 = load_balance_get(lbi2);
+              ASSERT (lb2->lb_n_buckets > 0);
+              ASSERT (is_pow2 (lb2->lb_n_buckets));
 
-          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
-          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+              if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
+                {
+                  hash_c2 = vnet_buffer (b2)->ip.flow_hash =
+                      mpls_compute_flow_hash(h2, lb2->lb_hash_config);
+                  dpo2 = load_balance_get_fwd_bucket
+                      (lb2,
+                       (hash_c2 & (lb2->lb_n_buckets_minus_1)));
+                }
+              else
+                {
+                  dpo2 = load_balance_get_bucket_i (lb2, 0);
+                }
+              next2 = dpo2->dpoi_next_node;
 
-          vlib_increment_combined_counter
-              (cm, cpu_index, lbi0, 1,
-               vlib_buffer_length_in_chain (vm, b0));
-          vlib_increment_combined_counter
-              (cm, cpu_index, lbi1, 1,
-               vlib_buffer_length_in_chain (vm, b1));
+              vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
+
+              vlib_increment_combined_counter
+                  (cm, thread_index, lbi2, 1,
+                   vlib_buffer_length_in_chain (vm, b2));
+            }
+          if (MPLS_IS_REPLICATE & lbi3)
+            {
+              next3 = mpls_lookup_to_replicate_edge;
+              vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
+                  (lbi3 & ~MPLS_IS_REPLICATE);
+            }
+          else
+            {
+              lb3 = load_balance_get(lbi3);
+              ASSERT (lb3->lb_n_buckets > 0);
+              ASSERT (is_pow2 (lb3->lb_n_buckets));
+
+              if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
+                {
+                  hash_c3 = vnet_buffer (b3)->ip.flow_hash =
+                      mpls_compute_flow_hash(h3, lb3->lb_hash_config);
+                  dpo3 = load_balance_get_fwd_bucket
+                      (lb3,
+                       (hash_c3 & (lb3->lb_n_buckets_minus_1)));
+                }
+              else
+                {
+                  dpo3 = load_balance_get_bucket_i (lb3, 0);
+                }
+              next3 = dpo3->dpoi_next_node;
+
+              vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
+
+              vlib_increment_combined_counter
+                  (cm, thread_index, lbi3, 1,
+                   vlib_buffer_length_in_chain (vm, b3));
+            }
 
           /*
            * before we pop the label copy th values we need to maintain.
@@ -181,12 +295,20 @@ mpls_lookup (vlib_main_t * vm,
           vnet_buffer (b1)->mpls.ttl = ((char*)h1)[3];
           vnet_buffer (b1)->mpls.exp = (((char*)h1)[2] & 0xe) >> 1;
           vnet_buffer (b1)->mpls.first = 1;
+          vnet_buffer (b2)->mpls.ttl = ((char*)h2)[3];
+          vnet_buffer (b2)->mpls.exp = (((char*)h2)[2] & 0xe) >> 1;
+          vnet_buffer (b2)->mpls.first = 1;
+          vnet_buffer (b3)->mpls.ttl = ((char*)h3)[3];
+          vnet_buffer (b3)->mpls.exp = (((char*)h3)[2] & 0xe) >> 1;
+          vnet_buffer (b3)->mpls.first = 1;
 
           /*
            * pop the label that was just used in the lookup
            */
           vlib_buffer_advance(b0, sizeof(*h0));
           vlib_buffer_advance(b1, sizeof(*h1));
+          vlib_buffer_advance(b2, sizeof(*h2));
+          vlib_buffer_advance(b3, sizeof(*h3));
 
           if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
@@ -210,9 +332,32 @@ mpls_lookup (vlib_main_t * vm,
               tr->label_net_byte_order = h1->label_exp_s_ttl;
             }
 
-          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+          if (PREDICT_FALSE(b2->flags & VLIB_BUFFER_IS_TRACED))
+            {
+              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
+                                                        b2, sizeof (*tr));
+              tr->next_index = next2;
+              tr->lb_index = lbi2;
+              tr->lfib_index = lfib_index2;
+              tr->hash = hash_c2;
+              tr->label_net_byte_order = h2->label_exp_s_ttl;
+            }
+
+          if (PREDICT_FALSE(b3->flags & VLIB_BUFFER_IS_TRACED))
+            {
+              mpls_lookup_trace_t *tr = vlib_add_trace (vm, node,
+                                                        b3, sizeof (*tr));
+              tr->next_index = next3;
+              tr->lb_index = lbi3;
+              tr->lfib_index = lfib_index3;
+              tr->hash = hash_c3;
+              tr->label_net_byte_order = h3->label_exp_s_ttl;
+            }
+
+          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                            to_next, n_left_to_next,
-                                           bi0, bi1, next0, next1);
+                                           bi0, bi1, bi2, bi3,
+                                           next0, next1, next2, next3);
         }
 
       while (n_left_from > 0 && n_left_to_next > 0)
@@ -237,31 +382,42 @@ mpls_lookup (vlib_main_t * vm,
                               vnet_buffer(b0)->sw_if_index[VLIB_RX]);
 
           lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
-          lb0 = load_balance_get(lbi0);
-
           hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
-          if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+
+          if (MPLS_IS_REPLICATE & lbi0)
             {
-              hash_c0 = vnet_buffer (b0)->ip.flow_hash =
-                  mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+              next0 = mpls_lookup_to_replicate_edge;
+              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+                  (lbi0 & ~MPLS_IS_REPLICATE);
             }
+          else
+            {
+              lb0 = load_balance_get(lbi0);
+              ASSERT (lb0->lb_n_buckets > 0);
+              ASSERT (is_pow2 (lb0->lb_n_buckets));
 
-          ASSERT (lb0->lb_n_buckets > 0);
-          ASSERT (is_pow2 (lb0->lb_n_buckets));
-
-          dpo0 = load_balance_get_bucket_i(lb0,
-                                           (hash_c0 &
-                                            (lb0->lb_n_buckets_minus_1)));
-
-          next0 = dpo0->dpoi_next_node;
-          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+                {
+                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+                  dpo0 = load_balance_get_fwd_bucket
+                      (lb0,
+                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
+                }
+              else
+                {
+                  dpo0 = load_balance_get_bucket_i (lb0, 0);
+                }
+              next0 = dpo0->dpoi_next_node;
+              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
-          vlib_increment_combined_counter
-              (cm, cpu_index, lbi0, 1,
-               vlib_buffer_length_in_chain (vm, b0));
+              vlib_increment_combined_counter
+                  (cm, thread_index, lbi0, 1,
+                   vlib_buffer_length_in_chain (vm, b0));
+            }
 
           /*
-           * before we pop the label copy th values we need to maintain.
+           * before we pop the label copy, values we need to maintain.
            * The label header is in network byte order.
            *  last byte is the TTL.
           *  bits 2 to 4 inclusive are the EXP bits
@@ -293,7 +449,7 @@ mpls_lookup (vlib_main_t * vm,
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
 
-  vlib_node_increment_counter (vm, mpls_lookup_node.index,
+  vlib_node_increment_counter (vm, mm->mpls_lookup_node_index,
                                MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
   return from_frame->n_vectors;
 }
@@ -305,22 +461,19 @@ static char * mpls_error_strings[] = {
 };
 
 VLIB_REGISTER_NODE (mpls_lookup_node) = {
-  .function = mpls_lookup,
   .name = "mpls-lookup",
   /* Takes a vector of packets. */
   .vector_size = sizeof (u32),
   .n_errors = MPLS_N_ERROR,
   .error_strings = mpls_error_strings,
 
-  .sibling_of = "ip4-lookup",
+  .sibling_of = "mpls-load-balance",
 
   .format_buffer = format_mpls_header,
   .format_trace = format_mpls_lookup_trace,
   .unformat_buffer = unformat_mpls_header,
 };
 
-VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)
-
 typedef struct {
   u32 next_index;
   u32 lb_index;
@@ -339,14 +492,13 @@ format_mpls_load_balance_trace (u8 * s, va_list * args)
     return s;
 }
 
-always_inline uword
-mpls_load_balance (vlib_main_t * vm,
+VLIB_NODE_FN (mpls_load_balance_node) (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * frame)
 {
   vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
   u32 n_left_from, n_left_to_next, * from, * to_next;
-  u32 cpu_index = os_get_cpu_number();
+  u32 thread_index = vlib_get_thread_index();
   u32 next;
 
   from = vlib_frame_vector_args (frame);
@@ -361,10 +513,9 @@ mpls_load_balance (vlib_main_t * vm,
 
       while (n_left_from >= 4 && n_left_to_next >= 2)
         {
-          mpls_lookup_next_t next0, next1;
           const load_balance_t *lb0, *lb1;
           vlib_buffer_t * p0, *p1;
-          u32 pi0, lbi0, hc0, pi1, lbi1, hc1;
+          u32 pi0, lbi0, hc0, pi1, lbi1, hc1, next0, next1;
           const mpls_unicast_header_t *mpls0, *mpls1;
           const dpo_id_t *dpo0, *dpo1;
 
@@ -378,8 +529,8 @@ mpls_load_balance (vlib_main_t * vm,
             vlib_prefetch_buffer_header (p2, STORE);
             vlib_prefetch_buffer_header (p3, STORE);
 
-            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
-            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
+            CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), LOAD);
+            CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), LOAD);
           }
 
           pi0 = to_next[0] = from[0];
@@ -420,6 +571,11 @@ mpls_load_balance (vlib_main_t * vm,
                 {
                   hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
                 }
+              dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
+            }
+          else
+            {
+              dpo0 = load_balance_get_bucket_i (lb0, 0);
             }
           if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
             {
@@ -431,10 +587,12 @@ mpls_load_balance (vlib_main_t * vm,
                 {
                   hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1);
                 }
+              dpo1 = load_balance_get_fwd_bucket(lb1, (hc1 & lb1->lb_n_buckets_minus_1));
+            }
+          else
+            {
+              dpo1 = load_balance_get_bucket_i (lb1, 0);
             }
-
-          dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
-          dpo1 = load_balance_get_bucket_i(lb1, hc1 & (lb1->lb_n_buckets_minus_1));
 
           next0 = dpo0->dpoi_next_node;
           next1 = dpo1->dpoi_next_node;
@@ -443,10 +601,10 @@ mpls_load_balance (vlib_main_t * vm,
           vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
 
           vlib_increment_combined_counter
-              (cm, cpu_index, lbi0, 1,
+              (cm, thread_index, lbi0, 1,
                vlib_buffer_length_in_chain (vm, p0));
           vlib_increment_combined_counter
-              (cm, cpu_index, lbi1, 1,
+              (cm, thread_index, lbi1, 1,
                vlib_buffer_length_in_chain (vm, p1));
 
           if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
@@ -465,10 +623,9 @@ mpls_load_balance (vlib_main_t * vm,
 
       while (n_left_from > 0 && n_left_to_next > 0)
         {
-          mpls_lookup_next_t next0;
           const load_balance_t *lb0;
          vlib_buffer_t * p0;
-          u32 pi0, lbi0, hc0;
+          u32 pi0, lbi0, hc0, next0;
          const mpls_unicast_header_t *mpls0;
          const dpo_id_t *dpo0;
 
@@ -497,15 +654,18 @@ mpls_load_balance (vlib_main_t * vm,
                 {
                   hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
                 }
+              dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
+            }
+          else
+            {
+              dpo0 = load_balance_get_bucket_i (lb0, 0);
             }
-
-          dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
 
           next0 = dpo0->dpoi_next_node;
           vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
           vlib_increment_combined_counter
-              (cm, cpu_index, lbi0, 1,
+              (cm, thread_index, lbi0, 1,
                vlib_buffer_length_in_chain (vm, p0));
 
           vlib_validate_buffer_enqueue_x1 (vm, node, next,
@@ -520,12 +680,38 @@ mpls_load_balance (vlib_main_t * vm,
 }
 
 VLIB_REGISTER_NODE (mpls_load_balance_node) = {
-  .function = mpls_load_balance,
   .name = "mpls-load-balance",
   .vector_size = sizeof (u32),
-  .sibling_of = "mpls-lookup",
-
   .format_trace = format_mpls_load_balance_trace,
+  .n_next_nodes = 1,
+  .next_nodes =
+  {
+      [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
+  },
+
 };
 
-VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance)
+
+#ifndef CLIB_MARCH_VARIANT
+static clib_error_t *
+mpls_lookup_init (vlib_main_t * vm)
+{
+  mpls_main_t *mm = &mpls_main;
+  clib_error_t * error;
+  vlib_node_t *node = vlib_get_node_by_name (vm, (u8*)"mpls-lookup" );
+
+  mm->mpls_lookup_node_index = node->index;
+
+  if ((error = vlib_call_init_function (vm, mpls_init)))
+    return error;
+
+  mpls_lookup_to_replicate_edge =
+      vlib_node_add_named_next(vm,
+                               mm->mpls_lookup_node_index,
+                               "mpls-replicate");
+
+  return (NULL);
+}
+
+VLIB_INIT_FUNCTION (mpls_lookup_init);
+#endif /* CLIB_MARCH_VARIANT */