#include <vnet/fib/fib_walk.h>
#include <vnet/fib/fib_urpf_list.h>
+/**
+ * The magic number of child entries that make a path-list popular.
+ * There's a trade-off here between convergence and forwarding speed.
+ * Popular path-lists generate load-balance maps for the entries that
+ * use them. If the map is present there is a switch path cost to indirect
+ * through the map - this indirection provides the fast convergence - so
+ * without the map convergence is slower.
+ */
+#define FIB_PATH_LIST_POPULAR 64
+
/**
* FIB path-list
* A representation of the list/set of paths through which a prefix is reachable
+/**
+ * Downcast a generic fib_node_t to its containing fib_path_list_t.
+ * The type ASSERT is now unconditional (the CLIB_DEBUG guard was removed),
+ * so a wrong-type cast is caught in all build configurations.
+ */
static fib_path_list_t*
fib_path_list_from_fib_node (fib_node_t *node)
{
-#if CLIB_DEBUG > 0
ASSERT(FIB_NODE_TYPE_PATH_LIST == node->fn_type);
-#endif
return ((fib_path_list_t*)node);
}
return (path_list - fib_path_list_pool);
}
-static u8 *
+u8 *
format_fib_path_list (u8 * s, va_list * args)
{
+ fib_node_index_t *path_index, path_list_index;
fib_path_list_attribute_t attr;
- fib_node_index_t *path_index;
fib_path_list_t *path_list;
+ u32 indent;
- path_list = va_arg (*args, fib_path_list_t *);
-
- s = format (s, " index:%u", fib_path_list_get_index(path_list));
+ path_list_index = va_arg (*args, fib_node_index_t);
+ indent = va_arg (*args, u32);
+ path_list = fib_path_list_get(path_list_index);
+
+ s = format (s, "%Upath-list:[%d]",
+ format_white_space, indent,
+ fib_path_list_get_index(path_list));
s = format (s, " locks:%u", path_list->fpl_node.fn_locks);
if (FIB_PATH_LIST_FLAG_NONE != path_list->fpl_flags)
vec_foreach (path_index, path_list->fpl_paths)
{
- s = fib_path_format(*path_index, s);
+ s = format(s, "%U", format_fib_path, *path_index, indent+2);
s = format(s, "\n");
}
fib_path_list_format (fib_node_index_t path_list_index,
u8 * s)
{
- fib_path_list_t *path_list;
-
- path_list = fib_path_list_get(path_list_index);
-
- return (format(s, "%U", format_fib_path_list, path_list));
+ return (format(s, "%U", format_fib_path_list, path_list_index, 4));
}
static uword
load_balance_path_t *nhs;
fib_node_index_t *path_index;
- nhs = NULL;
-
- if (!dpo_id_is_valid(dpo))
- {
- /*
- * first time create
- */
- dpo_set(dpo,
- DPO_LOAD_BALANCE,
- fib_forw_chain_type_to_dpo_proto(fct),
- load_balance_create(0,
- fib_forw_chain_type_to_dpo_proto(fct),
- 0 /* FIXME FLOW HASH */));
- }
+ nhs = NULL;
/*
* We gather the DPOs from resolved paths.
* Path-list load-balances, which if used, would be shared and hence
* never need a load-balance map.
*/
+ dpo_set(dpo,
+ DPO_LOAD_BALANCE,
+ fib_forw_chain_type_to_dpo_proto(fct),
+ load_balance_create(vec_len(nhs),
+ fib_forw_chain_type_to_dpo_proto(fct),
+ 0 /* FIXME FLOW HASH */));
load_balance_multipath_update(dpo, nhs, LOAD_BALANCE_FLAG_NONE);
FIB_PATH_LIST_DBG(path_list, "mk lb: %d", dpo->dpoi_index);
/*
* propagate the backwalk further
*/
- if (32 >= fib_node_list_get_size(path_list->fpl_node.fn_children))
- {
- /*
- * only a few children. continue the walk synchronously
- */
- fib_walk_sync(FIB_NODE_TYPE_PATH_LIST, path_list_index, ctx);
- }
- else
+ if (path_list->fpl_flags & FIB_PATH_LIST_FLAG_POPULAR)
{
/*
* many children. schedule an async walk
FIB_WALK_PRIORITY_LOW,
ctx);
}
+ else
+ {
+ /*
+ * only a few children. continue the walk synchronously
+ */
+ fib_walk_sync(FIB_NODE_TYPE_PATH_LIST, path_list_index, ctx);
+ }
}
/*
{
fib_path_list_t *path_list;
+ if (FIB_NODE_INDEX_INVALID == path_list_index)
+ {
+ return (0);
+ }
+
path_list = fib_path_list_get(path_list_index);
return (vec_len(path_list->fpl_paths));
return (sw_if_index);
}
-fib_protocol_t
+dpo_proto_t
fib_path_list_get_proto (fib_node_index_t path_list_index)
{
fib_path_list_t *path_list;
return (path_list->fpl_flags & FIB_PATH_LIST_FLAG_LOOPED);
}
+/**
+ * Predicate: has this path-list been marked popular, i.e. has its child
+ * count crossed the FIB_PATH_LIST_POPULAR threshold? Returns non-zero if
+ * the POPULAR flag is set. NOTE(review): the flag appears to be sticky -
+ * it is set once in the child-add path and never cleared.
+ */
+int
+fib_path_list_is_popular (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ return (path_list->fpl_flags & FIB_PATH_LIST_FLAG_POPULAR);
+}
+
static fib_path_list_flags_t
fib_path_list_flags_fixup (fib_path_list_flags_t flags)
{
fib_path_create(path_list_index,
&rpaths[i]));
}
+ /*
+ * we sort the paths since the key for the path-list is
+ * the description of the paths it contains. The paths need to
+ * be sorted else this description will differ.
+ */
+ if (vec_len(path_list->fpl_paths) > 1)
+ {
+ vec_sort_with_function(path_list->fpl_paths,
+ fib_path_cmp_for_sort);
+ }
}
/*
}
fib_node_index_t
-fib_path_list_create_special (fib_protocol_t nh_proto,
+fib_path_list_create_special (dpo_proto_t nh_proto,
fib_path_list_flags_t flags,
const dpo_id_t *dpo)
{
*/
if (0 == fib_path_cmp(new_path_index, *orig_path_index))
{
+ fib_path_destroy(new_path_index);
return (*orig_path_index);
}
}
* If we find one then we can return the existing one and destroy the
* new one just created.
*/
- exist_path_list_index = fib_path_list_db_find(path_list);
- if (FIB_NODE_INDEX_INVALID != exist_path_list_index)
+ if (path_list->fpl_flags & FIB_PATH_LIST_FLAG_SHARED)
{
- fib_path_list_destroy(path_list);
+ exist_path_list_index = fib_path_list_db_find(path_list);
+ if (FIB_NODE_INDEX_INVALID != exist_path_list_index)
+ {
+ fib_path_list_destroy(path_list);
- path_list_index = exist_path_list_index;
+ path_list_index = exist_path_list_index;
+ }
+ else
+ {
+ /*
+ * if there was not a matching path-list, then this
+ * new one will need inserting into the DB and resolving.
+ */
+ fib_path_list_db_insert(path_list_index);
+
+ path_list = fib_path_list_resolve(path_list);
+ }
}
else
{
/*
- * if there was not a matching path-list, then this
- * new one will need inserting into the DB and resolving.
+ * no shared path list requested. resolve and use the one
+ * just created.
*/
- fib_path_list_db_insert(path_list_index);
-
path_list = fib_path_list_resolve(path_list);
}
void
fib_path_list_contribute_forwarding (fib_node_index_t path_list_index,
fib_forward_chain_type_t fct,
+ fib_path_list_fwd_flags_t flags,
dpo_id_t *dpo)
{
fib_path_list_t *path_list;
path_list = fib_path_list_get(path_list_index);
fib_path_list_mk_lb(path_list, fct, dpo);
+
+ ASSERT(DPO_LOAD_BALANCE == dpo->dpoi_type);
+
+ /*
+ * If there's only one bucket in the load-balance then we can
+ * squash it out.
+ */
+ if ((1 == load_balance_n_buckets(dpo->dpoi_index)) &&
+ (FIB_PATH_LIST_FWD_FLAG_COLLAPSE & flags))
+ {
+ dpo_copy(dpo, load_balance_get_bucket(dpo->dpoi_index, 0));
+ }
}
/*
fib_node_type_t child_type,
fib_node_index_t child_index)
{
- return (fib_node_child_add(FIB_NODE_TYPE_PATH_LIST,
- path_list_index,
- child_type,
- child_index));
+ u32 sibling;
+
+ sibling = fib_node_child_add(FIB_NODE_TYPE_PATH_LIST,
+ path_list_index,
+ child_type,
+ child_index);
+
+ if (FIB_PATH_LIST_POPULAR == fib_node_get_n_children(FIB_NODE_TYPE_PATH_LIST,
+ path_list_index))
+ {
+ /*
+ * Set the popular flag on the path-list once we pass the magic
+ * threshold. then walk children to update.
+ * We don't undo this action. The rationale being that the number
+ * of entries using this prefix is large enough such that it is a
+ * non-trivial amount of effort to converge them. If we get into the
+ * situation where we are adding and removing entries such that we
+ * flip-flop over the threshold, then this non-trivial work is added
+ * to each of those routes adds/deletes - not a situation we want.
+ */
+ fib_node_back_walk_ctx_t ctx = {
+ .fnbw_reason = FIB_NODE_BW_REASON_FLAG_EVALUATE,
+ };
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+ path_list->fpl_flags |= FIB_PATH_LIST_FLAG_POPULAR;
+
+ fib_walk_sync(FIB_NODE_TYPE_PATH_LIST, path_list_index, &ctx);
+ }
+
+ return (sibling);
}
void
vec_foreach(path_index, path_list->fpl_paths)
{
- if (!func(path_list_index, *path_index, ctx))
+ if (FIB_PATH_LIST_WALK_STOP == func(path_list_index,
+ *path_index,
+ ctx))
break;
}
}
* show all
*/
vlib_cli_output (vm, "FIB Path Lists");
- pool_foreach(path_list, fib_path_list_pool,
+ pool_foreach_index (pli, fib_path_list_pool,
({
- vlib_cli_output (vm, "%U", format_fib_path_list, path_list);
+ vlib_cli_output (vm, "%U", format_fib_path_list, pli, 0);
}));
}
return (NULL);