#include <vnet/fib/fib_node_list.h>
/* Adjacency packet/byte counters indexed by adjacency index. */
-vlib_combined_counter_main_t adjacency_counters;
+vlib_combined_counter_main_t adjacency_counters = {
+ .name = "adjacency",
+ .stat_segment_name = "/net/adjacency",
+};
/*
* the single adj pool
*/
int adj_per_adj_counters;
+const ip46_address_t ADJ_BCAST_ADDR = {
+ .ip6 = {
+ .as_u64[0] = 0xffffffffffffffff,
+ .as_u64[1] = 0xffffffffffffffff,
+ },
+};
+
+/**
+ * Adj flag names
+ */
+static const char *adj_attr_names[] = ADJ_ATTR_NAMES;
+
/**
 * Scribble a recognisable pattern over a freed adjacency (debug builds
 * only) so that use-after-free shows up as 0xfe garbage rather than
 * silently-stale data.
 *
 * @param adj the adjacency being returned to the pool
 */
always_inline void
adj_poison (ip_adjacency_t * adj)
{
    if (CLIB_DEBUG > 0)
    {
	clib_memset (adj, 0xfe, sizeof (adj[0]));
    }
}
/* Validate adjacency counters. */
vlib_validate_combined_counter(&adjacency_counters,
adj_get_index(adj));
-
+ vlib_zero_combined_counter(&adjacency_counters,
+ adj_get_index(adj));
fib_node_init(&adj->ia_node,
FIB_NODE_TYPE_ADJ);
adj->ia_delegates = NULL;
/* lest it become a midchain in the future */
- memset(&adj->sub_type.midchain.next_dpo, 0,
+ clib_memset(&adj->sub_type.midchain.next_dpo, 0,
sizeof(adj->sub_type.midchain.next_dpo));
return (adj);
return (0);
}
+u8*
+format_adj_flags (u8 * s, va_list * args)
+{
+ adj_flags_t af;
+ adj_attr_t at;
+
+ af = va_arg (*args, int);
+
+ if (ADJ_FLAG_NONE == af)
+ {
+ return (format(s, "None"));
+ }
+ FOR_EACH_ADJ_ATTR(at)
+ {
+ if (af & (1 << at))
+ {
+ s = format(s, "%s ", adj_attr_names[at]);
+ }
+ }
+ return (s);
+}
+
/**
* @brief Pretty print helper function for formatting specific adjacencies.
* @param s - input string to format
adj_index = va_arg (*args, u32);
fiaf = va_arg (*args, format_ip_adjacency_flags_t);
+
+ if (!adj_is_valid(adj_index))
+ return format(s, "<invalid adjacency>");
+
adj = adj_get(adj_index);
-
+
switch (adj->lookup_next_index)
{
case IP_LOOKUP_NEXT_REWRITE:
+ case IP_LOOKUP_NEXT_BCAST:
s = format (s, "%U", format_adj_nbr, adj_index, 0);
break;
case IP_LOOKUP_NEXT_ARP:
case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
s = format (s, "%U", format_adj_mcast_midchain, adj_index, 0);
break;
- default:
- break;
+ case IP_LOOKUP_NEXT_DROP:
+ case IP_LOOKUP_NEXT_PUNT:
+ case IP_LOOKUP_NEXT_LOCAL:
+ case IP_LOOKUP_NEXT_ICMP_ERROR:
+ case IP_LOOKUP_N_NEXT:
+ break;
}
if (fiaf & FORMAT_IP_ADJACENCY_DETAIL)
{
- adj_delegate_type_t adt;
- adj_delegate_t *aed;
vlib_counter_t counts;
vlib_get_combined_counter(&adjacency_counters, adj_index, &counts);
- s = format (s, "\n counts:[%Ld:%Ld]", counts.packets, counts.bytes);
- s = format (s, "\n locks:%d", adj->ia_node.fn_locks);
- s = format(s, "\n delegates:\n ");
- FOR_EACH_ADJ_DELEGATE(adj, adt, aed,
+ s = format (s, "\n flags:%U", format_adj_flags, adj->ia_flags);
+ s = format (s, "\n counts:[%Ld:%Ld]", counts.packets, counts.bytes);
+ s = format (s, "\n locks:%d", adj->ia_node.fn_locks);
+ s = format(s, "\n delegates:");
+ s = adj_delegate_format(s, adj);
+
+ s = format(s, "\n children:");
+ if (fib_node_list_get_size(adj->ia_node.fn_children))
{
- s = format(s, " %U\n", format_adj_delegate, aed);
- });
-
- s = format(s, "\n children:\n ");
- s = fib_node_children_format(adj->ia_node.fn_children, s);
+ s = format(s, "\n ");
+ s = fib_node_children_format(adj->ia_node.fn_children, s);
+ }
}
return s;
}
+int
+adj_recursive_loop_detect (adj_index_t ai,
+ fib_node_index_t **entry_indicies)
+{
+ ip_adjacency_t * adj;
+
+ adj = adj_get(ai);
+
+ switch (adj->lookup_next_index)
+ {
+ case IP_LOOKUP_NEXT_REWRITE:
+ case IP_LOOKUP_NEXT_ARP:
+ case IP_LOOKUP_NEXT_GLEAN:
+ case IP_LOOKUP_NEXT_MCAST:
+ case IP_LOOKUP_NEXT_BCAST:
+ case IP_LOOKUP_NEXT_DROP:
+ case IP_LOOKUP_NEXT_PUNT:
+ case IP_LOOKUP_NEXT_LOCAL:
+ case IP_LOOKUP_NEXT_ICMP_ERROR:
+ case IP_LOOKUP_N_NEXT:
+ /*
+ * these adjacency types are terminal graph nodes, so there's no
+ * possibility of a loop down here.
+ */
+ break;
+ case IP_LOOKUP_NEXT_MIDCHAIN:
+ case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
+ return (adj_ndr_midchain_recursive_loop_detect(ai, entry_indicies));
+ }
+
+ return (0);
+}
+
/*
* adj_last_lock_gone
*
ASSERT(0 == fib_node_list_get_size(adj->ia_node.fn_children));
ADJ_DBG(adj, "last-lock-gone");
- adj_delegate_vft_lock_gone(adj);
+ adj_delegate_adj_deleted(adj);
vlib_worker_thread_barrier_sync (vm);
switch (adj->lookup_next_index)
{
case IP_LOOKUP_NEXT_MIDCHAIN:
- dpo_reset(&adj->sub_type.midchain.next_dpo);
+ adj_midchain_teardown(adj);
/* FALL THROUGH */
case IP_LOOKUP_NEXT_ARP:
case IP_LOOKUP_NEXT_REWRITE:
+ case IP_LOOKUP_NEXT_BCAST:
/*
* complete and incomplete nbr adjs
*/
adj_glean_remove(adj->ia_nh_proto,
adj->rewrite_header.sw_if_index);
break;
- case IP_LOOKUP_NEXT_MCAST:
case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
+ adj_midchain_teardown(adj);
+ /* FALL THROUGH */
+ case IP_LOOKUP_NEXT_MCAST:
adj_mcast_remove(adj->ia_nh_proto,
adj->rewrite_header.sw_if_index);
break;
}
/*
 * Context for the walk to update the cached feature flags.
*/
typedef struct adj_feature_update_t_
{
return (ADJ_WALK_RC_CONTINUE);
}
-void
+static void
adj_feature_update (u32 sw_if_index,
u8 arc_index,
- u8 is_enable)
+ u8 is_enable,
+ void *data)
{
/*
* Walk all the adjacencies on the interface to update the cached
adj = adj_get(ai);
- vnet_rewrite_update_mtu (vnet_get_main(),
+ vnet_rewrite_update_mtu (vnet_get_main(), adj->ia_link,
&adj->rewrite_header);
return (ADJ_WALK_RC_CONTINUE);
}
-static void
-adj_sw_mtu_update (vnet_main_t * vnm,
- u32 sw_if_index,
- void *ctx)
+static clib_error_t *
+adj_mtu_update (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
{
- /*
- * Walk all the adjacencies on the interface to update the cached MTU
- */
- adj_walk (sw_if_index, adj_mtu_update_walk_cb, NULL);
-}
+ adj_walk (sw_if_index, adj_mtu_update_walk_cb, NULL);
-void
-adj_mtu_update (u32 hw_if_index)
-{
- /*
- * Walk all the SW interfaces on the HW interface to update the cached MTU
- */
- vnet_hw_interface_walk_sw(vnet_get_main(),
- hw_if_index,
- adj_sw_mtu_update,
- NULL);
+ return (NULL);
}
+VNET_SW_INTERFACE_MTU_CHANGE_FUNCTION(adj_mtu_update);
+
/**
* @brief Walk the Adjacencies on a given interface
*/
adj = adj_get(ai);
- return (adj->ia_link);
+ return (adj->ia_link);
}
/**
/**
 * Is the adjacency 'up', i.e. usable for forwarding? Delegated entirely
 * to the BFD module: up unless a BFD session tracking this adjacency
 * says otherwise.
 *
 * @param ai the adjacency to query
 * @return non-zero if up
 */
int
adj_is_up (adj_index_t ai)
{
    return (adj_bfd_is_up(ai));
}
/**
adj_back_walk_notify (fib_node_t *node,
		      fib_node_back_walk_ctx_t *ctx)
{
    ip_adjacency_t *adj;

    adj = ADJ_FROM_NODE(node);

    switch (adj->lookup_next_index)
    {
    case IP_LOOKUP_NEXT_MIDCHAIN:
	/* a midchain stacks on a parent; restack it on the updated state */
	adj_midchain_delegate_restack(adj_get_index(adj));
	break;
    case IP_LOOKUP_NEXT_ARP:
    case IP_LOOKUP_NEXT_REWRITE:
    case IP_LOOKUP_NEXT_BCAST:
    case IP_LOOKUP_NEXT_GLEAN:
    case IP_LOOKUP_NEXT_MCAST:
    case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
    case IP_LOOKUP_NEXT_DROP:
    case IP_LOOKUP_NEXT_PUNT:
    case IP_LOOKUP_NEXT_LOCAL:
    case IP_LOOKUP_NEXT_ICMP_ERROR:
    case IP_LOOKUP_N_NEXT:
	/*
	 * terminal nodes in the FIB graph: a back-walk should never
	 * reach them.
	 * Que pasa. yo soj en el final!
	 */
	ASSERT(0);
	break;
    }
    return (FIB_NODE_BACK_WALK_CONTINUE);
}
adj_midchain_module_init();
adj_mcast_module_init();
+ vnet_feature_register(adj_feature_update, NULL);
+
return (NULL);
}
if (summary)
{
- vlib_cli_output (vm, "Number of adjacenies: %d", pool_elts(adj_pool));
+ vlib_cli_output (vm, "Number of adjacencies: %d", pool_elts(adj_pool));
vlib_cli_output (vm, "Per-adjacency counters: %s",
(adj_are_counters_enabled() ?
"enabled":
}
/*?
 * Enable/disable per-adjacency counters. This is optional because it comes
 * with a non-negligible performance cost.
?*/
VLIB_CLI_COMMAND (adj_cli_counters_set_command, static) = {
.path = "adjacency counters",