#include <vnet/ip/ip.h>
#include <vnet/dpo/lookup_dpo.h>
-#include <vnet/dpo/load_balance.h>
-#include <vnet/mpls/mpls.h>
+#include <vnet/dpo/load_balance_map.h>
+#include <vnet/mpls/mpls_lookup.h>
#include <vnet/fib/fib_table.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
static const char *const lookup_input_names[] = LOOKUP_INPUTS;
static const char *const lookup_cast_names[] = LOOKUP_CASTS;
+/**
+ * If a packet encounters a lookup DPO more than this many times,
+ * then we assume there is a loop in the forwarding graph and drop the packet.
+ */
+#define MAX_LUKPS_PER_PACKET 4
+
/**
* @brief Enumeration of the lookup subtypes
*/
{
if (LOOKUP_UNICAST == cast)
{
- fib_table_lock(fib_index, dpo_proto_to_fib(proto));
+ fib_table_lock(fib_index,
+ dpo_proto_to_fib(proto),
+ FIB_SOURCE_RR);
}
else
{
- mfib_table_lock(fib_index, dpo_proto_to_fib(proto));
+ mfib_table_lock(fib_index,
+ dpo_proto_to_fib(proto),
+ MFIB_SOURCE_RR);
}
}
lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
{
fib_index =
fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
- table_id);
+ table_id,
+ FIB_SOURCE_RR);
}
else
{
fib_index =
mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
- table_id);
+ table_id,
+ MFIB_SOURCE_RR);
}
}
if (LOOKUP_UNICAST == lkd->lkd_cast)
{
fib_table_unlock(lkd->lkd_fib_index,
- dpo_proto_to_fib(lkd->lkd_proto));
+ dpo_proto_to_fib(lkd->lkd_proto),
+ FIB_SOURCE_RR);
}
else
{
mfib_table_unlock(lkd->lkd_fib_index,
- dpo_proto_to_fib(lkd->lkd_proto));
+ dpo_proto_to_fib(lkd->lkd_proto),
+ MFIB_SOURCE_RR);
}
}
pool_put(lookup_dpo_pool, lkd);
(cm, thread_index, lbi1, 1,
vlib_buffer_length_in_chain (vm, b1));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+ if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b1)->loop_counter = 0;
+ b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+ vnet_buffer2(b1)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+ if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next1 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
(cm, thread_index, lbi0, 1,
vlib_buffer_length_in_chain (vm, b0));
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
lookup_trace_t *tr = vlib_add_trace (vm, node,
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
- uword indent = format_get_indent (s);
+ u32 indent = format_get_indent (s);
s = format (s, "%U fib-index:%d addr:%U load-balance:%d",
format_white_space, indent,
t->fib_index,
return s;
}
-always_inline uword
+static uword
lookup_ip4_dst (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
};
VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_node, lookup_ip4_dst)
-always_inline uword
+static uword
lookup_ip4_dst_itf (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
};
VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_itf_node, lookup_ip4_dst_itf)
-always_inline uword
+static uword
lookup_ip4_src (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0;
hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+ if (!(b1->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b1)->loop_counter = 0;
+ b1->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+ vnet_buffer2(b1)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+ if (PREDICT_FALSE(vnet_buffer2(b1)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next1 = IP_LOOKUP_NEXT_DROP;
+
if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
{
flow_hash_config0 = lb0->lb_hash_config;
next0 = dpo0->dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = IP_LOOKUP_NEXT_DROP;
+
vlib_increment_combined_counter
(cm, thread_index, lbi0, 1,
vlib_buffer_length_in_chain (vm, b0));
return from_frame->n_vectors;
}
-always_inline uword
+static uword
lookup_ip6_dst (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
};
VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_node, lookup_ip6_dst)
-always_inline uword
+static uword
lookup_ip6_dst_itf (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
};
VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_itf_node, lookup_ip6_dst_itf)
-always_inline uword
+static uword
lookup_ip6_src (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
while (n_left_from > 0 && n_left_to_next > 0)
{
- u32 bi0, lkdi0, lbi0, fib_index0, next0;
+ u32 bi0, lkdi0, lbi0, fib_index0, next0, hash0;
const mpls_unicast_header_t * hdr0;
const load_balance_t *lb0;
const lookup_dpo_t * lkd0;
next0 = dpo0->dpoi_next_node;
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
- vlib_increment_combined_counter
- (cm, thread_index, lbi0, 1,
- vlib_buffer_length_in_chain (vm, b0));
+
+ if (MPLS_IS_REPLICATE & lbi0)
+ {
+ next0 = mpls_lookup_to_replicate_edge;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+ (lbi0 & ~MPLS_IS_REPLICATE);
+ }
+ else
+ {
+ lb0 = load_balance_get(lbi0);
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+ if (PREDICT_FALSE(lb0->lb_n_buckets > 1))
+ {
+ hash0 = vnet_buffer (b0)->ip.flow_hash =
+ mpls_compute_flow_hash(hdr0, lb0->lb_hash_config);
+ dpo0 = load_balance_get_fwd_bucket
+ (lb0,
+ (hash0 & (lb0->lb_n_buckets_minus_1)));
+ }
+ else
+ {
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+ }
+ next0 = dpo0->dpoi_next_node;
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+
+ vlib_increment_combined_counter
+ (cm, thread_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ }
+
+ vnet_buffer (b0)->mpls.ttl = ((char*)hdr0)[3];
+ vnet_buffer (b0)->mpls.exp = (((char*)hdr0)[2] & 0xe) >> 1;
+ vnet_buffer (b0)->mpls.first = 1;
+ vlib_buffer_advance(b0, sizeof(*hdr0));
+
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = MPLS_LOOKUP_NEXT_DROP;
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
lookup_trace_t * t = va_arg (*args, lookup_trace_t *);
- uword indent = format_get_indent (s);
+ u32 indent = format_get_indent (s);
mpls_unicast_header_t hdr;
hdr.label_exp_s_ttl = clib_net_to_host_u32(t->hdr.label_exp_s_ttl);
return s;
}
-always_inline uword
+static uword
lookup_mpls_dst (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
};
VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_node, lookup_mpls_dst)
-always_inline uword
+static uword
lookup_mpls_dst_itf (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
typedef enum lookup_ip_dst_mcast_next_t_ {
+ LOOKUP_IP_DST_MCAST_NEXT_DROP,
LOOKUP_IP_DST_MCAST_NEXT_RPF,
LOOKUP_IP_DST_MCAST_N_NEXT,
} mfib_forward_lookup_next_t;
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
- vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ if (!(b0->flags & VNET_BUFFER_F_LOOP_COUNTER_VALID)) {
+ vnet_buffer2(b0)->loop_counter = 0;
+ b0->flags |= VNET_BUFFER_F_LOOP_COUNTER_VALID;
+ }
+
+ vnet_buffer2(b0)->loop_counter++;
+
+ if (PREDICT_FALSE(vnet_buffer2(b0)->loop_counter > MAX_LUKPS_PER_PACKET))
+ next0 = LOOKUP_IP_DST_MCAST_NEXT_DROP;
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
return from_frame->n_vectors;
}
-always_inline uword
+static uword
lookup_ip4_dst_mcast (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
.format_trace = format_lookup_trace,
.n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
.next_nodes = {
+ [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip4-drop",
[LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
},
};
VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_mcast_node,
lookup_ip4_dst_mcast)
+/**
+ * @brief IPv6 destination-address multicast lookup node dispatch.
+ *
+ * Thin wrapper: delegates to lookup_dpo_ip_dst_mcast_inline with the
+ * last argument 0 (presumably selects the IPv6 variant of the shared
+ * inline — TODO confirm against the inline's parameter meaning, which
+ * is defined outside this hunk).
+ */
+static uword
+lookup_ip6_dst_mcast (vlib_main_t * vm,
+                      vlib_node_runtime_t * node,
+                      vlib_frame_t * from_frame)
+{
+    return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 0));
+}
+
+/* Graph-node registration: mirrors the ip4 variant above, with
+ * ip6-specific drop and RPF next-nodes. */
+VLIB_REGISTER_NODE (lookup_ip6_dst_mcast_node) = {
+    .function = lookup_ip6_dst_mcast,
+    .name = "lookup-ip6-dst-mcast",
+    .vector_size = sizeof (u32),
+
+    .format_trace = format_lookup_trace,
+    .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
+    .next_nodes = {
+        [LOOKUP_IP_DST_MCAST_NEXT_DROP] = "ip6-drop",
+        [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip6-mfib-forward-rpf",
+    },
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip6_dst_mcast_node,
+                              lookup_ip6_dst_mcast)
+
+
static void
lookup_dpo_mem_show (void)
{
[DPO_PROTO_MPLS] = lookup_dst_from_interface_mpls_nodes,
};
+/**
+ * @brief CLI handler for "show lookup-dpo [<index>]".
+ *
+ * With a numeric index argument, formats that single lookup DPO;
+ * with no argument, walks the whole lookup_dpo_pool and formats
+ * every entry prefixed with its pool index.
+ *
+ * @return NULL (no CLI error is ever reported; unparsable input
+ *         simply terminates argument scanning).
+ */
+static clib_error_t *
+lookup_dpo_show (vlib_main_t * vm,
+                unformat_input_t * input,
+                vlib_cli_command_t * cmd)
+{
+    index_t lkdi = INDEX_INVALID;
+
+    /* Consume an optional "%d" index; stop at the first token that
+     * does not parse as a number. */
+    while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+        if (unformat (input, "%d", &lkdi))
+            ;
+        else
+            break;
+    }
+
+    if (INDEX_INVALID != lkdi)
+    {
+        /* Single-entry display; format_lookup_dpo takes the pool index. */
+        vlib_cli_output (vm, "%U", format_lookup_dpo, lkdi);
+    }
+    else
+    {
+        lookup_dpo_t *lkd;
+
+        /* No index given: dump every lookup DPO in the pool. */
+        pool_foreach(lkd, lookup_dpo_pool,
+        ({
+            vlib_cli_output (vm, "[@%d] %U",
+                             lookup_dpo_get_index(lkd),
+                             format_lookup_dpo,
+                             lookup_dpo_get_index(lkd));
+        }));
+    }
+
+    return 0;
+}
+
+/**
+ * CLI registration for the lookup-DPO dump command.
+ * Named lookup_show_command: the original symbol name
+ * (replicate_show_command) was a copy/paste from replicate_dpo.c and
+ * does not match this file's subject; the symbol is static, so the
+ * rename is file-local and safe.
+ */
+VLIB_CLI_COMMAND (lookup_show_command, static) = {
+    .path = "show lookup-dpo",
+    .short_help = "show lookup-dpo [<index>]",
+    .function = lookup_dpo_show,
+};
void
lookup_dpo_module_init (void)