#include <vnet/ip/lookup.h>
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/receive_dpo.h>
#include <vnet/adj/adj.h>
+#include <vnet/mpls/mpls_types.h>
-#undef REP_DEBUG
+/**
+ * the logger
+ */
+vlib_log_class_t replicate_logger;
-#ifdef REP_DEBUG
#define REP_DBG(_rep, _fmt, _args...) \
{ \
- u8* _tmp =NULL; \
- clib_warning("rep:[%s]:" _fmt, \
- replicate_format(replicate_get_index((_rep)), \
- 0, _tmp), \
- ##_args); \
- vec_free(_tmp); \
+ vlib_log_debug(replicate_logger, \
+ "rep:[%U]:" _fmt, \
+ format_replicate, \
+ replicate_get_index(_rep), \
+ REPLICATE_FORMAT_NONE, \
+ ##_args); \
}
-#else
-#define REP_DBG(_p, _fmt, _args...)
-#endif
#define foreach_replicate_dpo_error \
_(BUFFER_ALLOCATION_FAILURE, "Buffer Allocation Failure")
/**
* The one instance of replicate main
*/
-replicate_main_t replicate_main;
+replicate_main_t replicate_main = {
+ .repm_counters = {
+ .name = "mroutes",
+ .stat_segment_name = "/net/mroute",
+ },
+};
static inline index_t
replicate_get_index (const replicate_t *rep)
replicate_t *rep;
pool_get_aligned(replicate_pool, rep, CLIB_CACHE_LINE_BYTES);
- memset(rep, 0, sizeof(*rep));
+ clib_memset(rep, 0, sizeof(*rep));
vlib_validate_combined_counter(&(replicate_main.repm_counters),
replicate_get_index(rep));
return (rep);
}
+/**
+ * Format helper for replicate_flags_t.
+ *
+ * va_arg: flags (promoted to int) — the replicate object's rep_flags.
+ * Prints "none" for REPLICATE_FLAGS_NONE, "has-local " when the
+ * HAS_LOCAL bit is set; any other (currently undefined) bit prints
+ * nothing. NOTE(review): trailing space after "has-local" is
+ * intentional to match surrounding format style — confirm upstream.
+ */
+static u8*
+format_replicate_flags (u8 *s, va_list *args)
+{
+    int flags = va_arg (*args, int);
+
+    if (flags == REPLICATE_FLAGS_NONE)
+    {
+        s = format (s, "none");
+    }
+    else if (flags & REPLICATE_FLAGS_HAS_LOCAL)
+    {
+        s = format (s, "has-local ");
+    }
+
+    return (s);
+}
+
static u8*
replicate_format (index_t repi,
replicate_format_flags_t flags,
dpo_id_t *buckets;
u32 i;
+ repi &= ~MPLS_IS_REPLICATE;
rep = replicate_get(repi);
vlib_get_combined_counter(&(replicate_main.repm_counters), repi, &to);
buckets = replicate_get_buckets(rep);
s = format(s, "%U: ", format_dpo_type, DPO_REPLICATE);
s = format(s, "[index:%d buckets:%d ", repi, rep->rep_n_buckets);
+ s = format(s, "flags:[%U] ", format_replicate_flags, rep->rep_flags);
s = format(s, "to:[%Ld:%Ld]]", to.packets, to.bytes);
for (i = 0; i < rep->rep_n_buckets; i++)
dpo_id_t *buckets,
const dpo_id_t *next)
{
+ if (dpo_is_receive(&buckets[bucket]))
+ {
+ rep->rep_flags &= ~REPLICATE_FLAGS_HAS_LOCAL;
+ }
+ if (dpo_is_receive(next))
+ {
+ rep->rep_flags |= REPLICATE_FLAGS_HAS_LOCAL;
+ }
dpo_stack(DPO_REPLICATE, rep->rep_proto, &buckets[bucket], next);
}
replicate_t *rep;
dpo_id_t *buckets;
+ repi &= ~MPLS_IS_REPLICATE;
rep = replicate_get(repi);
buckets = replicate_get_buckets(rep);
replicate_is_drop (const dpo_id_t *dpo)
{
replicate_t *rep;
+ index_t repi;
if (DPO_REPLICATE != dpo->dpoi_type)
return (0);
- rep = replicate_get(dpo->dpoi_index);
+ repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
+ rep = replicate_get(repi);
if (1 == rep->rep_n_buckets)
{
{
replicate_t *rep;
+ repi &= ~MPLS_IS_REPLICATE;
rep = replicate_get(repi);
return (replicate_get_bucket_i(rep, bucket));
u32 n_buckets)
{
load_balance_path_t * nh;
- u16 ii, bucket;
+ u16 bucket;
bucket = 0;
*/
vec_foreach (nh, nhs)
{
- for (ii = 0; ii < nh->path_weight; ii++)
- {
- ASSERT(bucket < n_buckets);
- replicate_set_bucket_i(rep, bucket++, buckets, &nh->path_dpo);
- }
+ ASSERT(bucket < n_buckets);
+ replicate_set_bucket_i(rep, bucket++, buckets, &nh->path_dpo);
}
}
dpo_id_t *tmp_dpo;
u32 ii, n_buckets;
replicate_t *rep;
+ index_t repi;
ASSERT(DPO_REPLICATE == dpo->dpoi_type);
- rep = replicate_get(dpo->dpoi_index);
+ repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
+ rep = replicate_get(repi);
nhs = replicate_multipath_next_hop_fixup(next_hops,
rep->rep_proto);
n_buckets = vec_len(nhs);
rep->rep_locks++;
}
+/**
+ * Duplicate a replicate DPO, optionally excluding local (receive) paths.
+ *
+ * @param flags  the flags the caller wants the copy to have. If the
+ *               original already matches, or the caller accepts local
+ *               paths (REPLICATE_FLAGS_HAS_LOCAL set), the original is
+ *               shared rather than copied.
+ * @param repi   index of the source replicate object (caller must have
+ *               stripped MPLS_IS_REPLICATE — presumably; verify callers).
+ *
+ * @return index of a replicate usable by the caller: either @c repi
+ *         itself (shared, no extra lock taken here) or a newly created
+ *         copy containing only the non-receive buckets.
+ */
+index_t
+replicate_dup (replicate_flags_t flags,
+               index_t repi)
+{
+    replicate_t *rep, *copy;
+
+    rep = replicate_get(repi);
+
+    if (rep->rep_flags == flags ||
+        flags & REPLICATE_FLAGS_HAS_LOCAL)
+    {
+        /*
+         * we can include all the buckets from the original in the copy
+         */
+        return (repi);
+    }
+    else
+    {
+        /*
+         * caller doesn't want the local paths that the original has
+         */
+        if (rep->rep_n_buckets == 1)
+        {
+            /*
+             * original has only one bucket that is the local, so create
+             * a new one with only the drop
+             */
+            copy = replicate_create_i (1, rep->rep_proto);
+
+            replicate_set_bucket_i(copy, 0,
+                                   replicate_get_buckets(copy),
+                                   drop_dpo_get(rep->rep_proto));
+        }
+        else
+        {
+            dpo_id_t *old_buckets, *copy_buckets;
+            u16 bucket, pos;
+
+            copy = replicate_create_i(rep->rep_n_buckets - 1,
+                                      rep->rep_proto);
+
+            /* re-fetch: replicate_create_i may expand the pool and
+             * invalidate the previously held pointer */
+            rep = replicate_get(repi);
+            old_buckets = replicate_get_buckets(rep);
+            copy_buckets = replicate_get_buckets(copy);
+            pos = 0;
+
+            /* copy every bucket except the receive (local) ones */
+            for (bucket = 0; bucket < rep->rep_n_buckets; bucket++)
+            {
+                if (!dpo_is_receive(&old_buckets[bucket]))
+                {
+                    replicate_set_bucket_i(copy, pos, copy_buckets,
+                                           (&old_buckets[bucket]));
+                    pos++;
+                }
+            }
+        }
+    }
+
+    return (replicate_get_index(copy));
+}
+
static void
replicate_destroy (replicate_t *rep)
{
replicate_module_init (void)
{
dpo_register(DPO_REPLICATE, &rep_vft, replicate_nodes);
+ replicate_logger = vlib_log_register_class("dpo", "replicate");
}
static clib_error_t *
vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
replicate_main_t * rm = &replicate_main;
u32 n_left_from, * from, * to_next, next_index;
- u32 cpu_index = os_get_cpu_number();
+ u32 thread_index = vlib_get_thread_index();
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
rep0 = replicate_get(repi0);
vlib_increment_combined_counter(
- cm, cpu_index, repi0, 1,
+ cm, thread_index, repi0, 1,
vlib_buffer_length_in_chain(vm, b0));
- vec_validate (rm->clones[cpu_index], rep0->rep_n_buckets - 1);
+ vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1);
- num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[cpu_index], rep0->rep_n_buckets, 128);
+ num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index],
+ rep0->rep_n_buckets,
+ VLIB_BUFFER_CLONE_HEAD_SIZE);
if (num_cloned != rep0->rep_n_buckets)
{
for (bucket = 0; bucket < num_cloned; bucket++)
{
- ci0 = rm->clones[cpu_index][bucket];
+ ci0 = rm->clones[thread_index][bucket];
c0 = vlib_get_buffer(vm, ci0);
to_next[0] = ci0;
next0 = dpo0->dpoi_next_node;
vnet_buffer (c0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
- if (PREDICT_FALSE(c0->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
- replicate_trace_t *t = vlib_add_trace (vm, node, c0, sizeof (*t));
+ replicate_trace_t *t;
+
+ if (c0 != b0)
+ {
+ vlib_buffer_copy_trace_flag (vm, b0, ci0);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (c0);
+ }
+ t = vlib_add_trace (vm, node, c0, sizeof (*t));
t->rep_index = repi0;
t->dpo = *dpo0;
}
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
}
}
- vec_reset_length (rm->clones[cpu_index]);
+ vec_reset_length (rm->clones[thread_index]);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
s = format (s, "replicate: %d via %U",
t->rep_index,
- format_dpo_id, &t->dpo);
+ format_dpo_id, &t->dpo, 0);
return s;
}
}
/**
- * @brief
+ * @brief IP4 replication node
*/
VLIB_REGISTER_NODE (ip4_replicate_node) = {
.function = ip4_replicate,
.format_trace = format_replicate_trace,
.n_next_nodes = 1,
.next_nodes = {
- [0] = "error-drop",
+ [0] = "ip4-drop",
},
};
}
/**
- * @brief
+ * @brief IPv6 replication node
*/
VLIB_REGISTER_NODE (ip6_replicate_node) = {
.function = ip6_replicate,
.format_trace = format_replicate_trace,
.n_next_nodes = 1,
.next_nodes = {
- [0] = "error-drop",
+ [0] = "ip6-drop",
+ },
+};
+
+/**
+ * MPLS replication node function: thin wrapper delegating to the
+ * protocol-independent replicate_inline() worker (same path as the
+ * ip4/ip6 replicate nodes).
+ */
+static uword
+mpls_replicate (vlib_main_t * vm,
+                vlib_node_runtime_t * node,
+                vlib_frame_t * frame)
+{
+    return (replicate_inline (vm, node, frame));
+}
+
+/**
+ * @brief MPLS replication node registration.
+ * Single next node: "mpls-drop" (index 0), used when buffer cloning
+ * fails or a bucket resolves to a drop.
+ */
+VLIB_REGISTER_NODE (mpls_replicate_node) = {
+  .function = mpls_replicate,
+  .name = "mpls-replicate",
+  .vector_size = sizeof (u32),
+
+  .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
+  .error_strings = replicate_dpo_error_strings,
+
+  .format_trace = format_replicate_trace,
+  .n_next_nodes = 1,
+  .next_nodes = {
+      [0] = "mpls-drop",
    },
};