return;
/*
- * find and inform the covering entry that a new more specific
- * has been inserted beneath it
+ * find the covering entry
*/
fib_entry_cover_index = fib_table_get_less_specific_i(fib_table, prefix);
/*
*/
if (fib_entry_cover_index != fib_entry_index)
{
- fib_entry_cover_change_notify(fib_entry_cover_index,
- fib_entry_index);
+ /*
+ * push any inherting sources from the cover onto the covered
+ */
+ fib_entry_inherit(fib_entry_cover_index,
+ fib_entry_index);
+
+ /*
+ * inform the covering entry that a new more specific
+ * has been inserted beneath it.
+ * If the prefix that has been inserted is a host route
+ * then it is not possible that it will be the cover for any
+ * other entry, so we can elide the walk. This is particularly
+ * beneficial since there are often many host entries sharing the
+ * same cover (i.e. ADJ or RR sourced entries).
+ */
+ if (!fib_entry_is_host(fib_entry_index))
+ {
+ fib_entry_cover_change_notify(fib_entry_cover_index,
+ fib_entry_index);
+ }
}
}
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t *next_hop_labels,
+ fib_mpls_label_t *next_hop_labels,
fib_route_path_flags_t path_flags)
{
fib_route_path_t path = {
u32 next_hop_sw_if_index,
u32 next_hop_fib_index,
u32 next_hop_weight,
- mpls_label_t *next_hop_labels,
+ fib_mpls_label_t *next_hop_labels,
fib_route_path_flags_t path_flags)
{
fib_node_index_t fib_entry_index;
fib_table_entry_delete_index (fib_node_index_t fib_entry_index,
fib_source_t source)
{
- fib_prefix_t prefix;
+ const fib_prefix_t *prefix;
- fib_entry_get_prefix(fib_entry_index, &prefix);
+ prefix = fib_entry_get_prefix(fib_entry_index);
fib_table_entry_delete_i(fib_entry_get_fib_index(fib_entry_index),
- fib_entry_index, &prefix, source);
+ fib_entry_index, prefix, source);
+}
+
+/*
+ * Get the stats segment index for the FIB entry that is an exact
+ * match for the given prefix in the given FIB.
+ * NOTE(review): when there is no exact match, the looked-up entry
+ * index is the invalid index — behaviour then depends on how
+ * fib_entry_get_stats_index handles an invalid index; confirm.
+ */
+u32
+fib_table_entry_get_stats_index (u32 fib_index,
+ const fib_prefix_t *prefix)
+{
+ return (fib_entry_get_stats_index(
+ fib_table_lookup_exact_match(fib_index, prefix)));
}
fib_node_index_t
flow_hash_config_t hash_config;
} fib_table_set_flow_hash_config_ctx_t;
-static int
+static fib_table_walk_rc_t
fib_table_set_flow_hash_config_cb (fib_node_index_t fib_entry_index,
void *arg)
{
fib_entry_set_flow_hash_config(fib_entry_index, ctx->hash_config);
- return (1);
+ return (FIB_TABLE_WALK_CONTINUE);
}
void
return ((NULL != fib_table ? fib_table->ft_table_id : ~0));
}
+/*
+ * Get the user-assigned table-id of the FIB identified by
+ * (fib_index, proto). Returns ~0 if no such table exists.
+ */
+u32
+fib_table_get_table_id (u32 fib_index,
+ fib_protocol_t proto)
+{
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, proto);
+
+ /* ~0 is the sentinel for "no table at this index" */
+ return ((NULL != fib_table ? fib_table->ft_table_id : ~0));
+}
+
u32
fib_table_find (fib_protocol_t proto,
u32 table_id)
fib_node_index_t fi;
va_list ap;
- va_start(ap, fmt);
switch (proto)
{
fi = ip4_fib_table_create_and_lock(src);
break;
case FIB_PROTOCOL_IP6:
- fi = ip6_fib_table_create_and_lock(src);
+ fi = ip6_fib_table_create_and_lock(src, FIB_TABLE_FLAG_NONE, NULL);
break;
case FIB_PROTOCOL_MPLS:
fi = mpls_fib_table_create_and_lock(src);
fib_table = fib_table_get(fi, proto);
+ va_start(ap, fmt);
+
fib_table->ft_desc = va_format(fib_table->ft_desc, fmt, &ap);
va_end(ap);
}
}
+/*
+ * Walk all entries in the sub-tree of the given FIB that are covered
+ * by the root prefix, invoking fn(entry, ctx) for each.
+ * Dispatches to the per-protocol table walk; MPLS tables are flat
+ * (no prefix tree), so the walk is a no-op for FIB_PROTOCOL_MPLS.
+ */
+void
+fib_table_sub_tree_walk (u32 fib_index,
+ fib_protocol_t proto,
+ const fib_prefix_t *root,
+ fib_table_walk_fn_t fn,
+ void *ctx)
+{
+ switch (proto)
+ {
+ case FIB_PROTOCOL_IP4:
+ ip4_fib_table_sub_tree_walk(ip4_fib_get(fib_index), root, fn, ctx);
+ break;
+ case FIB_PROTOCOL_IP6:
+ ip6_fib_table_sub_tree_walk(fib_index, root, fn, ctx);
+ break;
+ case FIB_PROTOCOL_MPLS:
+ /* intentionally no walk: MPLS FIBs have no covering sub-trees */
+ break;
+ }
+}
+
void
fib_table_unlock (u32 fib_index,
fib_protocol_t proto,
fib_table_t *fib_table;
fib_table = fib_table_get(fib_index, proto);
+
+ ASSERT(fib_table->ft_locks[source] < (0xffff - 1));
+
fib_table->ft_locks[source]++;
fib_table->ft_locks[FIB_TABLE_TOTAL_LOCKS]++;
}
fib_source_t ftf_source;
} fib_table_flush_ctx_t;
-static int
+static fib_table_walk_rc_t
fib_table_flush_cb (fib_node_index_t fib_entry_index,
void *arg)
{
{
vec_add1(ctx->ftf_entries, fib_entry_index);
}
- return (1);
+ return (FIB_TABLE_WALK_CONTINUE);
}