X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fmpls%2Fmpls_lookup.c;h=b2309fb62c0713ea43e4048bdee5723ca2399578;hb=960eeea7ea48d91c1f2ec3a85a7a84f56619be2a;hp=475bb20454b4b9178cb6223f95cbc5e6c4f88f0e;hpb=696e88da9799056036f329676213f3c0c0a1db9c;p=vpp.git

diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c
index 475bb20454b..b2309fb62c0 100644
--- a/src/vnet/mpls/mpls_lookup.c
+++ b/src/vnet/mpls/mpls_lookup.c
@@ -17,11 +17,17 @@
 #include <vlib/vlib.h>
 #include <vnet/pg/pg.h>
-#include <vnet/mpls/mpls.h>
+#include <vnet/mpls/mpls_lookup.h>
 #include <vnet/fib/mpls_fib.h>
-#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/load_balance_map.h>
+#include <vnet/dpo/replicate_dpo.h>
 
-vlib_node_registration_t mpls_lookup_node;
+/**
+ * The arc/edge from the MPLS lookup node to the MPLS replicate node
+ */
+#ifndef CLIB_MARCH_VARIANT
+u32 mpls_lookup_to_replicate_edge;
+#endif /* CLIB_MARCH_VARIANT */
 
 typedef struct {
   u32 next_index;
@@ -38,36 +44,24 @@ format_mpls_lookup_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);
 
-  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %d"
+  s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
               "label %d eos %d",
               t->next_index, t->lfib_index, t->lb_index, t->hash,
               vnet_mpls_uc_get_label(
                   clib_net_to_host_u32(t->label_net_byte_order)),
-              vnet_mpls_uc_get_s(t->label_net_byte_order));
+              vnet_mpls_uc_get_s(
+                  clib_net_to_host_u32(t->label_net_byte_order)));
   return s;
 }
 
-/*
- * Compute flow hash.
- * We'll use it to select which adjacency to use for this flow. And other things.
- */
-always_inline u32
-mpls_compute_flow_hash (const mpls_unicast_header_t * hdr,
-                        flow_hash_config_t flow_hash_config)
-{
-    // FIXME
-    return (vnet_mpls_uc_get_label(hdr->label_exp_s_ttl));
-}
-
-static inline uword
-mpls_lookup (vlib_main_t * vm,
+VLIB_NODE_FN (mpls_lookup_node) (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
 {
   vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters;
   u32 n_left_from, next_index, * from, * to_next;
   mpls_main_t * mm = &mpls_main;
-  u32 cpu_index = os_get_cpu_number();
+  u32 thread_index = vlib_get_thread_index();
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
@@ -105,22 +99,22 @@ mpls_lookup (vlib_main_t * vm,
 
           /* Prefetch next iteration. */
          {
-            vlib_buffer_t * p2, * p3, *p4, *p5;
+            vlib_buffer_t *p4, *p5, *p6, *p7;
 
-            p2 = vlib_get_buffer (vm, from[2]);
-            p3 = vlib_get_buffer (vm, from[3]);
             p4 = vlib_get_buffer (vm, from[4]);
             p5 = vlib_get_buffer (vm, from[5]);
+            p6 = vlib_get_buffer (vm, from[6]);
+            p7 = vlib_get_buffer (vm, from[7]);
 
-            vlib_prefetch_buffer_header (p2, STORE);
-            vlib_prefetch_buffer_header (p3, STORE);
             vlib_prefetch_buffer_header (p4, STORE);
             vlib_prefetch_buffer_header (p5, STORE);
+            vlib_prefetch_buffer_header (p6, STORE);
+            vlib_prefetch_buffer_header (p7, STORE);
 
-            CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
-            CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
-            CLIB_PREFETCH (p4->data, sizeof (h0[0]), STORE);
-            CLIB_PREFETCH (p5->data, sizeof (h0[0]), STORE);
+            CLIB_PREFETCH (p4->data, sizeof (h0[0]), LOAD);
+            CLIB_PREFETCH (p5->data, sizeof (h0[0]), LOAD);
+            CLIB_PREFETCH (p6->data, sizeof (h0[0]), LOAD);
+            CLIB_PREFETCH (p7->data, sizeof (h0[0]), LOAD);
           }
 
          bi0 = to_next[0] = from[0];
@@ -156,81 +150,139 @@ mpls_lookup (vlib_main_t * vm,
          lbi2 = mpls_fib_table_forwarding_lookup (lfib_index2, h2);
          lbi3 = mpls_fib_table_forwarding_lookup (lfib_index3, h3);
 
-         lb0 = load_balance_get(lbi0);
-         lb1 = load_balance_get(lbi1);
-         lb2 = load_balance_get(lbi2);
-         lb3 = load_balance_get(lbi3);
-
          hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer(b1)->ip.flow_hash = 0;
          hash_c2 = vnet_buffer(b2)->ip.flow_hash = 0;
          hash_c3 = vnet_buffer(b3)->ip.flow_hash = 0;
 
-         if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+         if (MPLS_IS_REPLICATE & lbi0)
+         {
+             next0 = mpls_lookup_to_replicate_edge;
+             vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+                 (lbi0 & ~MPLS_IS_REPLICATE);
+         }
+         else
          {
-             hash_c0 = vnet_buffer (b0)->ip.flow_hash =
-                 mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+             lb0 = load_balance_get(lbi0);
+             ASSERT (lb0->lb_n_buckets > 0);
+             ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+             if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+             {
+                 hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+                     mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+                 dpo0 = load_balance_get_fwd_bucket
+                     (lb0,
+                      (hash_c0 & (lb0->lb_n_buckets_minus_1)));
+             }
+             else
+             {
+                 dpo0 = load_balance_get_bucket_i (lb0, 0);
+             }
+             next0 = dpo0->dpoi_next_node;
+
+             vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+             vlib_increment_combined_counter
+                 (cm, thread_index, lbi0, 1,
+                  vlib_buffer_length_in_chain (vm, b0));
          }
-         if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
+         if (MPLS_IS_REPLICATE & lbi1)
          {
-             hash_c1 = vnet_buffer (b1)->ip.flow_hash =
-                 mpls_compute_flow_hash(h1, lb1->lb_hash_config);
+             next1 = mpls_lookup_to_replicate_edge;
+             vnet_buffer (b1)->ip.adj_index[VLIB_TX] =
+                 (lbi1 & ~MPLS_IS_REPLICATE);
          }
-         if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
+         else
          {
-             hash_c2 = vnet_buffer (b2)->ip.flow_hash =
-                 mpls_compute_flow_hash(h2, lb2->lb_hash_config);
+             lb1 = load_balance_get(lbi1);
+             ASSERT (lb1->lb_n_buckets > 0);
+             ASSERT (is_pow2 (lb1->lb_n_buckets));
+
+             if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
+             {
+                 hash_c1 = vnet_buffer (b1)->ip.flow_hash =
+                     mpls_compute_flow_hash(h1, lb1->lb_hash_config);
+                 dpo1 = load_balance_get_fwd_bucket
+                     (lb1,
+                      (hash_c1 & (lb1->lb_n_buckets_minus_1)));
+             }
+             else
+             {
+                 dpo1 = load_balance_get_bucket_i (lb1, 0);
+             }
+             next1 = dpo1->dpoi_next_node;
+
+             vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
+
+             vlib_increment_combined_counter
+                 (cm, thread_index, lbi1, 1,
+                  vlib_buffer_length_in_chain (vm, b1));
          }
-         if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
+         if (MPLS_IS_REPLICATE & lbi2)
          {
-             hash_c3 = vnet_buffer (b3)->ip.flow_hash =
-                 mpls_compute_flow_hash(h3, lb3->lb_hash_config);
+             next2 = mpls_lookup_to_replicate_edge;
+             vnet_buffer (b2)->ip.adj_index[VLIB_TX] =
+                 (lbi2 & ~MPLS_IS_REPLICATE);
          }
+         else
+         {
+             lb2 = load_balance_get(lbi2);
+             ASSERT (lb2->lb_n_buckets > 0);
+             ASSERT (is_pow2 (lb2->lb_n_buckets));
 
-         ASSERT (lb0->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb0->lb_n_buckets));
-         ASSERT (lb1->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb1->lb_n_buckets));
-         ASSERT (lb2->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb2->lb_n_buckets));
-         ASSERT (lb3->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb3->lb_n_buckets));
-
-         dpo0 = load_balance_get_bucket_i(lb0,
-                                          (hash_c0 &
-                                           (lb0->lb_n_buckets_minus_1)));
-         dpo1 = load_balance_get_bucket_i(lb1,
-                                          (hash_c1 &
-                                           (lb1->lb_n_buckets_minus_1)));
-         dpo2 = load_balance_get_bucket_i(lb2,
-                                          (hash_c2 &
-                                           (lb2->lb_n_buckets_minus_1)));
-         dpo3 = load_balance_get_bucket_i(lb3,
-                                          (hash_c3 &
-                                           (lb3->lb_n_buckets_minus_1)));
+             if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
+             {
+                 hash_c2 = vnet_buffer (b2)->ip.flow_hash =
+                     mpls_compute_flow_hash(h2, lb2->lb_hash_config);
+                 dpo2 = load_balance_get_fwd_bucket
+                     (lb2,
+                      (hash_c2 & (lb2->lb_n_buckets_minus_1)));
+             }
+             else
+             {
+                 dpo2 = load_balance_get_bucket_i (lb2, 0);
+             }
+             next2 = dpo2->dpoi_next_node;
 
-         next0 = dpo0->dpoi_next_node;
-         next1 = dpo1->dpoi_next_node;
-         next2 = dpo2->dpoi_next_node;
-         next3 = dpo3->dpoi_next_node;
+             vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
 
-         vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
-         vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
-         vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
-         vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
+             vlib_increment_combined_counter
+                 (cm, thread_index, lbi2, 1,
+                  vlib_buffer_length_in_chain (vm, b2));
+         }
+         if (MPLS_IS_REPLICATE & lbi3)
+         {
+             next3 = mpls_lookup_to_replicate_edge;
+             vnet_buffer (b3)->ip.adj_index[VLIB_TX] =
+                 (lbi3 & ~MPLS_IS_REPLICATE);
+         }
+         else
+         {
+             lb3 = load_balance_get(lbi3);
+             ASSERT (lb3->lb_n_buckets > 0);
+             ASSERT (is_pow2 (lb3->lb_n_buckets));
 
-         vlib_increment_combined_counter
-             (cm, cpu_index, lbi0, 1,
-              vlib_buffer_length_in_chain (vm, b0));
-         vlib_increment_combined_counter
-             (cm, cpu_index, lbi1, 1,
-              vlib_buffer_length_in_chain (vm, b1));
-         vlib_increment_combined_counter
-             (cm, cpu_index, lbi2, 1,
-              vlib_buffer_length_in_chain (vm, b2));
-         vlib_increment_combined_counter
-             (cm, cpu_index, lbi3, 1,
-              vlib_buffer_length_in_chain (vm, b3));
+             if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
+             {
+                 hash_c3 = vnet_buffer (b3)->ip.flow_hash =
+                     mpls_compute_flow_hash(h3, lb3->lb_hash_config);
+                 dpo3 = load_balance_get_fwd_bucket
+                     (lb3,
+                      (hash_c3 & (lb3->lb_n_buckets_minus_1)));
+             }
+             else
+             {
+                 dpo3 = load_balance_get_bucket_i (lb3, 0);
+             }
+             next3 = dpo3->dpoi_next_node;
+
+             vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
+
+             vlib_increment_combined_counter
+                 (cm, thread_index, lbi3, 1,
+                  vlib_buffer_length_in_chain (vm, b3));
+         }
 
          /*
          * before we pop the label copy th values we need to maintain.
@@ -331,31 +383,42 @@ mpls_lookup (vlib_main_t * vm,
                                                   vnet_buffer(b0)->sw_if_index[VLIB_RX]);
 
           lbi0 = mpls_fib_table_forwarding_lookup(lfib_index0, h0);
-          lb0 = load_balance_get(lbi0);
-
           hash_c0 = vnet_buffer(b0)->ip.flow_hash = 0;
-          if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+
+          if (MPLS_IS_REPLICATE & lbi0)
           {
-              hash_c0 = vnet_buffer (b0)->ip.flow_hash =
-                  mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+              next0 = mpls_lookup_to_replicate_edge;
+              vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+                  (lbi0 & ~MPLS_IS_REPLICATE);
           }
+          else
+          {
+              lb0 = load_balance_get(lbi0);
+              ASSERT (lb0->lb_n_buckets > 0);
+              ASSERT (is_pow2 (lb0->lb_n_buckets));
 
-          ASSERT (lb0->lb_n_buckets > 0);
-          ASSERT (is_pow2 (lb0->lb_n_buckets));
-
-          dpo0 = load_balance_get_bucket_i(lb0,
-                                           (hash_c0 &
-                                            (lb0->lb_n_buckets_minus_1)));
-
-          next0 = dpo0->dpoi_next_node;
-          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+              if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+              {
+                  hash_c0 = vnet_buffer (b0)->ip.flow_hash =
+                      mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+                  dpo0 = load_balance_get_fwd_bucket
+                      (lb0,
+                       (hash_c0 & (lb0->lb_n_buckets_minus_1)));
+              }
+              else
+              {
+                  dpo0 = load_balance_get_bucket_i (lb0, 0);
+              }
+              next0 = dpo0->dpoi_next_node;
+              vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
-          vlib_increment_combined_counter
-              (cm, cpu_index, lbi0, 1,
-               vlib_buffer_length_in_chain (vm, b0));
+              vlib_increment_combined_counter
+                  (cm, thread_index, lbi0, 1,
+                   vlib_buffer_length_in_chain (vm, b0));
+          }
 
           /*
-           * before we pop the label copy th values we need to maintain.
+           * before we pop the label copy, values we need to maintain.
            * The label header is in network byte order.
            * last byte is the TTL.
            * bits 2 to 4 inclusive are the EXP bits
@@ -387,7 +450,7 @@ mpls_lookup (vlib_main_t * vm,
           vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
 
-  vlib_node_increment_counter (vm, mpls_lookup_node.index,
+  vlib_node_increment_counter (vm, mm->mpls_lookup_node_index,
                                MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
   return from_frame->n_vectors;
 }
@@ -399,22 +462,19 @@ static char * mpls_error_strings[] = {
 };
 
 VLIB_REGISTER_NODE (mpls_lookup_node) = {
-  .function = mpls_lookup,
   .name = "mpls-lookup",
   /* Takes a vector of packets. */
   .vector_size = sizeof (u32),
   .n_errors = MPLS_N_ERROR,
   .error_strings = mpls_error_strings,
 
-  .sibling_of = "ip4-lookup",
+  .sibling_of = "mpls-load-balance",
 
   .format_buffer = format_mpls_header,
   .format_trace = format_mpls_lookup_trace,
   .unformat_buffer = unformat_mpls_header,
 };
 
-VLIB_NODE_FUNCTION_MULTIARCH (mpls_lookup_node, mpls_lookup)
-
 typedef struct {
   u32 next_index;
   u32 lb_index;
@@ -433,14 +493,13 @@ format_mpls_load_balance_trace (u8 * s, va_list * args)
   return s;
 }
 
-always_inline uword
-mpls_load_balance (vlib_main_t * vm,
+VLIB_NODE_FN (mpls_load_balance_node) (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * frame)
 {
   vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters;
   u32 n_left_from, n_left_to_next, * from, * to_next;
-  u32 cpu_index = os_get_cpu_number();
+  u32 thread_index = vlib_get_thread_index();
   u32 next;
 
   from = vlib_frame_vector_args (frame);
@@ -471,8 +530,8 @@ mpls_load_balance (vlib_main_t * vm,
           vlib_prefetch_buffer_header (p2, STORE);
           vlib_prefetch_buffer_header (p3, STORE);
 
-          CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
-          CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
+          CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), LOAD);
+          CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), LOAD);
         }
 
       pi0 = to_next[0] = from[0];
@@ -513,6 +572,11 @@ mpls_load_balance (vlib_main_t * vm,
             {
              hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
            }
+          dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
+        }
+      else
+        {
+          dpo0 = load_balance_get_bucket_i (lb0, 0);
         }
       if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
         {
@@ -524,10 +588,12 @@ mpls_load_balance (vlib_main_t * vm,
             {
              hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1);
            }
+          dpo1 = load_balance_get_fwd_bucket(lb1, (hc1 & lb1->lb_n_buckets_minus_1));
+        }
+      else
+        {
+          dpo1 = load_balance_get_bucket_i (lb1, 0);
         }
-
-      dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
-      dpo1 = load_balance_get_bucket_i(lb1, hc1 & (lb1->lb_n_buckets_minus_1));
 
       next0 = dpo0->dpoi_next_node;
       next1 = dpo1->dpoi_next_node;
@@ -536,10 +602,10 @@
       vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
       vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
 
       vlib_increment_combined_counter
-        (cm, cpu_index, lbi0, 1,
+        (cm, thread_index, lbi0, 1,
          vlib_buffer_length_in_chain (vm, p0));
       vlib_increment_combined_counter
-        (cm, cpu_index, lbi1, 1,
+        (cm, thread_index, lbi1, 1,
          vlib_buffer_length_in_chain (vm, p1));
 
       if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
@@ -589,15 +655,18 @@ mpls_load_balance (vlib_main_t * vm,
            {
              hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
            }
+          dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
+        }
+      else
+        {
+          dpo0 = load_balance_get_bucket_i (lb0, 0);
         }
-
-      dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
 
       next0 = dpo0->dpoi_next_node;
       vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
       vlib_increment_combined_counter
-        (cm, cpu_index, lbi0, 1,
+        (cm, thread_index, lbi0, 1,
          vlib_buffer_length_in_chain (vm, p0));
 
       vlib_validate_buffer_enqueue_x1 (vm, node, next,
@@ -612,12 +681,38 @@ mpls_load_balance (vlib_main_t * vm,
 }
 
 VLIB_REGISTER_NODE (mpls_load_balance_node) = {
-  .function = mpls_load_balance,
   .name = "mpls-load-balance",
   .vector_size = sizeof (u32),
-  .sibling_of = "mpls-lookup",
-  .format_trace = format_mpls_load_balance_trace,
+  .n_next_nodes = 1,
+  .next_nodes =
+  {
+      [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
+  },
+ };
-VLIB_NODE_FUNCTION_MULTIARCH (mpls_load_balance_node, mpls_load_balance) + +#ifndef CLIB_MARCH_VARIANT +static clib_error_t * +mpls_lookup_init (vlib_main_t * vm) +{ + mpls_main_t *mm = &mpls_main; + clib_error_t * error; + vlib_node_t *node = vlib_get_node_by_name (vm, (u8*)"mpls-lookup" ); + + mm->mpls_lookup_node_index = node->index; + + if ((error = vlib_call_init_function (vm, mpls_init))) + return error; + + mpls_lookup_to_replicate_edge = + vlib_node_add_named_next(vm, + mm->mpls_lookup_node_index, + "mpls-replicate"); + + return (NULL); +} + +VLIB_INIT_FUNCTION (mpls_lookup_init); +#endif /* CLIB_MARCH_VARIANT */
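
What the reworked lookup path does, in miniature: mpls_fib_table_forwarding_lookup() now returns a single u32 that is either a load-balance index (unicast) or, with the MPLS_IS_REPLICATE bit set, a replicate-DPO index (multicast). The node tests the bit, masks it off, and steers the packet down the pre-resolved mpls-replicate arc instead of walking a load balance. A minimal standalone sketch of that tagged-index dispatch follows; the flag value, names, and printf stand-ins for the two next-node paths are illustrative only, not VPP API:

#include <stdint.h>
#include <stdio.h>

/* Assumed flag bit: MPLS_IS_REPLICATE plays this role in the diff;
 * the value here is purely for illustration. */
#define IS_REPLICATE (1u << 31)

/* Dispatch on a tagged index: flag set means "replicate index",
 * flag clear means "load-balance index" -- the same shape as the
 * if (MPLS_IS_REPLICATE & lbi0) branches in the node. */
static void
dispatch (uint32_t lookup_result)
{
  if (lookup_result & IS_REPLICATE)
    printf ("to replicate node, index %u\n",
            lookup_result & ~IS_REPLICATE);
  else
    printf ("to load-balance path, index %u\n", lookup_result);
}

int
main (void)
{
  dispatch (42);                 /* unicast result */
  dispatch (7u | IS_REPLICATE);  /* multicast result */
  return 0;
}

Packing the discriminator into the index itself is what lets the quad loop stay branch-light: one AND and one test per packet, no extra lookup table.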
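The paired asserts, ASSERT (lb_n_buckets > 0) and ASSERT (is_pow2 (lb_n_buckets)), are what make the bucket selection hash_c0 & (lb0->lb_n_buckets_minus_1) valid: for a power-of-two bucket count n, h & (n - 1) equals h % n while avoiding the divide. A self-contained illustration; pick_bucket is a made-up name, not a VPP API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int
is_pow2 (uint32_t x)
{
  return x && ((x & (x - 1)) == 0);
}

/* Pick a bucket from a flow hash when the bucket count is a power
 * of two: the mask is then equivalent to, and cheaper than, '%'. */
static uint32_t
pick_bucket (uint32_t flow_hash, uint32_t n_buckets)
{
  assert (is_pow2 (n_buckets));          /* mirrors the node's ASSERTs */
  return flow_hash & (n_buckets - 1);    /* == flow_hash % n_buckets */
}

int
main (void)
{
  printf ("%u\n", pick_bucket (0xdeadbeef, 16)); /* prints 15 */
  return 0;
}

Note also the single-bucket fast path in the diff: when lb_n_buckets is 1 the flow hash is never computed at all, and load_balance_get_bucket_i (lb, 0) is taken directly.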
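The prefetch changes in the quad loop are the other recurring pattern: while packets 0-3 are processed, the buffer headers of packets 4-7 are pulled into cache, and the packet-data hint becomes LOAD rather than STORE because the lookup only reads the MPLS header at this point. A rough standalone sketch of the same four-ahead idea, using GCC/Clang's __builtin_prefetch in place of vlib_prefetch_buffer_header and CLIB_PREFETCH (the function and data are invented for the example):

#include <stddef.h>
#include <stdio.h>

/* Sum an array four elements at a time, prefetching the next four
 * with read intent (second argument 0 = read), as the node does for
 * the next iteration's buffers. */
static long
sum_quad (const long *data, size_t n)
{
  long sum = 0;
  size_t i = 0;

  for (; i + 7 < n; i += 4)
    {
      __builtin_prefetch (&data[i + 4], 0);
      __builtin_prefetch (&data[i + 5], 0);
      __builtin_prefetch (&data[i + 6], 0);
      __builtin_prefetch (&data[i + 7], 0);

      sum += data[i] + data[i + 1] + data[i + 2] + data[i + 3];
    }
  for (; i < n; i++)            /* remainder, like the single-packet loop */
    sum += data[i];
  return sum;
}

int
main (void)
{
  long v[100];
  for (size_t i = 0; i < 100; i++)
    v[i] = (long) i;
  printf ("%ld\n", sum_quad (v, 100));  /* 4950 */
  return 0;
}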