*/
#include <vlib/vlib.h>
-#include <vnet/pg/pg.h>
-#include <vnet/mpls/mpls.h>
+#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/mpls_fib.h>
-#include <vnet/dpo/load_balance.h>
+#include <vnet/dpo/load_balance_map.h>
#include <vnet/dpo/replicate_dpo.h>
-
-/**
- * Static MPLS VLIB forwarding node
- */
-static vlib_node_registration_t mpls_lookup_node;
+#include <vnet/mpls/mpls.api_enum.h>
/**
 * The arc/edge from the MPLS lookup node to the MPLS replicate node.
 * Cached once at init time (see mpls_lookup_init) so the datapath can
 * enqueue to "mpls-replicate" without a per-packet next-index lookup.
 */
#ifndef CLIB_MARCH_VARIANT
/* Defined only in the default march variant; other variants share it. */
u32 mpls_lookup_to_replicate_edge;
#endif /* CLIB_MARCH_VARIANT */
typedef struct {
u32 next_index;
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
mpls_lookup_trace_t * t = va_arg (*args, mpls_lookup_trace_t *);
- s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %d"
+ s = format (s, "MPLS: next [%d], lookup fib index %d, LB index %d hash %x "
"label %d eos %d",
t->next_index, t->lfib_index, t->lb_index, t->hash,
vnet_mpls_uc_get_label(
clib_net_to_host_u32(t->label_net_byte_order)),
- vnet_mpls_uc_get_s(t->label_net_byte_order));
+ vnet_mpls_uc_get_s(
+ clib_net_to_host_u32(t->label_net_byte_order)));
return s;
}
-/*
- * Compute flow hash.
- * We'll use it to select which adjacency to use for this flow. And other things.
- */
-always_inline u32
-mpls_compute_flow_hash (const mpls_unicast_header_t * hdr,
- flow_hash_config_t flow_hash_config)
-{
- // FIXME
- return (vnet_mpls_uc_get_label(hdr->label_exp_s_ttl));
-}
-
-static inline uword
-mpls_lookup (vlib_main_t * vm,
+VLIB_NODE_FN (mpls_lookup_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
/* Prefetch next iteration. */
{
- vlib_buffer_t * p2, * p3, *p4, *p5;
+ vlib_buffer_t *p4, *p5, *p6, *p7;
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
p4 = vlib_get_buffer (vm, from[4]);
p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
- vlib_prefetch_buffer_header (p2, STORE);
- vlib_prefetch_buffer_header (p3, STORE);
vlib_prefetch_buffer_header (p4, STORE);
vlib_prefetch_buffer_header (p5, STORE);
+ vlib_prefetch_buffer_header (p6, STORE);
+ vlib_prefetch_buffer_header (p7, STORE);
- CLIB_PREFETCH (p2->data, sizeof (h0[0]), STORE);
- CLIB_PREFETCH (p3->data, sizeof (h0[0]), STORE);
- CLIB_PREFETCH (p4->data, sizeof (h0[0]), STORE);
- CLIB_PREFETCH (p5->data, sizeof (h0[0]), STORE);
+ CLIB_PREFETCH (p4->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p5->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p6->data, sizeof (h0[0]), LOAD);
+ CLIB_PREFETCH (p7->data, sizeof (h0[0]), LOAD);
}
bi0 = to_next[0] = from[0];
else
{
lb0 = load_balance_get(lbi0);
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
{
hash_c0 = vnet_buffer (b0)->ip.flow_hash =
mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+ dpo0 = load_balance_get_fwd_bucket
+ (lb0,
+ (hash_c0 & (lb0->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
}
- ASSERT (lb0->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb0->lb_n_buckets));
- dpo0 = load_balance_get_bucket_i(lb0,
- (hash_c0 &
- (lb0->lb_n_buckets_minus_1)));
next0 = dpo0->dpoi_next_node;
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
else
{
lb1 = load_balance_get(lbi1);
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
if (PREDICT_FALSE(lb1->lb_n_buckets > 1))
{
hash_c1 = vnet_buffer (b1)->ip.flow_hash =
mpls_compute_flow_hash(h1, lb1->lb_hash_config);
+ dpo1 = load_balance_get_fwd_bucket
+ (lb1,
+ (hash_c1 & (lb1->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo1 = load_balance_get_bucket_i (lb1, 0);
}
- ASSERT (lb1->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb1->lb_n_buckets));
- dpo1 = load_balance_get_bucket_i(lb1,
- (hash_c1 &
- (lb1->lb_n_buckets_minus_1)));
next1 = dpo1->dpoi_next_node;
vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
else
{
lb2 = load_balance_get(lbi2);
+ ASSERT (lb2->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb2->lb_n_buckets));
if (PREDICT_FALSE(lb2->lb_n_buckets > 1))
{
hash_c2 = vnet_buffer (b2)->ip.flow_hash =
mpls_compute_flow_hash(h2, lb2->lb_hash_config);
+ dpo2 = load_balance_get_fwd_bucket
+ (lb2,
+ (hash_c2 & (lb2->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo2 = load_balance_get_bucket_i (lb2, 0);
}
- ASSERT (lb2->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb2->lb_n_buckets));
- dpo2 = load_balance_get_bucket_i(lb2,
- (hash_c2 &
- (lb2->lb_n_buckets_minus_1)));
next2 = dpo2->dpoi_next_node;
vnet_buffer (b2)->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
else
{
lb3 = load_balance_get(lbi3);
+ ASSERT (lb3->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb3->lb_n_buckets));
if (PREDICT_FALSE(lb3->lb_n_buckets > 1))
{
hash_c3 = vnet_buffer (b3)->ip.flow_hash =
mpls_compute_flow_hash(h3, lb3->lb_hash_config);
+ dpo3 = load_balance_get_fwd_bucket
+ (lb3,
+ (hash_c3 & (lb3->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo3 = load_balance_get_bucket_i (lb3, 0);
}
- ASSERT (lb3->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb3->lb_n_buckets));
- dpo3 = load_balance_get_bucket_i(lb3,
- (hash_c3 &
- (lb3->lb_n_buckets_minus_1)));
next3 = dpo3->dpoi_next_node;
vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;
else
{
lb0 = load_balance_get(lbi0);
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
{
hash_c0 = vnet_buffer (b0)->ip.flow_hash =
mpls_compute_flow_hash(h0, lb0->lb_hash_config);
+ dpo0 = load_balance_get_fwd_bucket
+ (lb0,
+ (hash_c0 & (lb0->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
}
-
- ASSERT (lb0->lb_n_buckets > 0);
- ASSERT (is_pow2 (lb0->lb_n_buckets));
-
- dpo0 = load_balance_get_bucket_i(lb0,
- (hash_c0 &
- (lb0->lb_n_buckets_minus_1)));
-
next0 = dpo0->dpoi_next_node;
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, mpls_lookup_node.index,
+ vlib_node_increment_counter (vm, mm->mpls_lookup_node_index,
MPLS_ERROR_PKTS_DECAP, from_frame->n_vectors);
return from_frame->n_vectors;
}
/**
 * Registration for the MPLS label-lookup graph node.
 * Sibling of "mpls-load-balance" so both share the same set of next nodes;
 * errors are reported via the generated mpls_error_counters table.
 */
VLIB_REGISTER_NODE (mpls_lookup_node) = {
  .name = "mpls-lookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = MPLS_N_ERROR,
  .error_counters = mpls_error_counters,
  .sibling_of = "mpls-load-balance",
  .format_buffer = format_mpls_header,
  .format_trace = format_mpls_lookup_trace,
  .unformat_buffer = unformat_mpls_header,
};
typedef struct {
u32 next_index;
u32 lb_index;
return s;
}
-always_inline uword
-mpls_load_balance (vlib_main_t * vm,
+VLIB_NODE_FN (mpls_load_balance_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
vlib_prefetch_buffer_header (p2, STORE);
vlib_prefetch_buffer_header (p3, STORE);
- CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), STORE);
- CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), STORE);
+ CLIB_PREFETCH (p2->data, sizeof (mpls0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (mpls0[0]), LOAD);
}
pi0 = to_next[0] = from[0];
{
hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
}
+ dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
+ }
+ else
+ {
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
}
if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
{
{
hc1 = vnet_buffer(p1)->ip.flow_hash = mpls_compute_flow_hash(mpls1, hc1);
}
+ dpo1 = load_balance_get_fwd_bucket(lb1, (hc1 & lb1->lb_n_buckets_minus_1));
+ }
+ else
+ {
+ dpo1 = load_balance_get_bucket_i (lb1, 0);
}
-
- dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
- dpo1 = load_balance_get_bucket_i(lb1, hc1 & (lb1->lb_n_buckets_minus_1));
next0 = dpo0->dpoi_next_node;
next1 = dpo1->dpoi_next_node;
tr->lb_index = lbi0;
tr->hash = hc0;
}
+ if (PREDICT_FALSE(p1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
+ p1, sizeof (*tr));
+ tr->next_index = next1;
+ tr->lb_index = lbi1;
+ tr->hash = hc1;
+ }
vlib_validate_buffer_enqueue_x2 (vm, node, next,
to_next, n_left_to_next,
{
hc0 = vnet_buffer(p0)->ip.flow_hash = mpls_compute_flow_hash(mpls0, hc0);
}
+ dpo0 = load_balance_get_fwd_bucket(lb0, (hc0 & lb0->lb_n_buckets_minus_1));
+ }
+ else
+ {
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
}
-
- dpo0 = load_balance_get_bucket_i(lb0, hc0 & (lb0->lb_n_buckets_minus_1));
next0 = dpo0->dpoi_next_node;
vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_load_balance_trace_t *tr = vlib_add_trace (vm, node,
+ p0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->lb_index = lbi0;
+ tr->hash = hc0;
+ }
+
vlib_increment_combined_counter
(cm, thread_index, lbi0, 1,
vlib_buffer_length_in_chain (vm, p0));
}
/**
 * Registration for the MPLS load-balance (ECMP bucket selection) node.
 * Declares its own next-node table (drop only); "mpls-lookup" is registered
 * as a sibling of this node and thus inherits these arcs.
 */
VLIB_REGISTER_NODE (mpls_load_balance_node) = {
  .name = "mpls-load-balance",
  .vector_size = sizeof (u32),
  .format_trace = format_mpls_load_balance_trace,
  .n_next_nodes = 1,
  .next_nodes =
  {
      [MPLS_LOOKUP_NEXT_DROP] = "mpls-drop",
  },
};
+#ifndef CLIB_MARCH_VARIANT
static clib_error_t *
mpls_lookup_init (vlib_main_t * vm)
{
+ mpls_main_t *mm = &mpls_main;
clib_error_t * error;
+ vlib_node_t *node = vlib_get_node_by_name (vm, (u8*)"mpls-lookup" );
+
+ mm->mpls_lookup_node_index = node->index;
if ((error = vlib_call_init_function (vm, mpls_init)))
return error;
mpls_lookup_to_replicate_edge =
vlib_node_add_named_next(vm,
- mpls_lookup_node.index,
+ mm->mpls_lookup_node_index,
"mpls-replicate");
return (NULL);
}
VLIB_INIT_FUNCTION (mpls_lookup_init);
+#endif /* CLIB_MARCH_VARIANT */