ip_adjacency_t @c adj->lookup_next_index
(where @c adj is the lookup result adjacency).
*/
/* Hunk: convert ip4-lookup from a static dispatch function (wired up via
   .function and VLIB_NODE_FUNCTION_MULTIARCH) to the VLIB_NODE_FN macro,
   which generates the per-CPU-architecture variants itself, so the explicit
   .function member and MULTIARCH macro are deleted. */
-static uword
-ip4_lookup (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_lookup_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
return ip4_lookup_inline (vm, node, frame,
/* lookup_for_responses_to_locally_received_packets */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_lookup_node) =
{
- .function = ip4_lookup,
.name = "ip4-lookup",
.vector_size = sizeof (u32),
.format_trace = format_ip4_lookup_trace,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_lookup_node, ip4_lookup);
-
/* Hunk: same VLIB_NODE_FN conversion for ip4-load-balance; also rejoins a
   .format_trace initializer that was split across two lines. */
-static uword
-ip4_load_balance (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_load_balance_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
u32 n_left_from, n_left_to_next, *from, *to_next;
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_load_balance_node) =
{
- .function = ip4_load_balance,
.name = "ip4-load-balance",
.vector_size = sizeof (u32),
.sibling_of = "ip4-lookup",
- .format_trace =
- format_ip4_lookup_trace,
+ .format_trace = format_ip4_lookup_trace,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_load_balance_node, ip4_load_balance);
-
/* Hunk: compile this non-datapath helper only once (in the default build),
   not in every CLIB_MARCH_VARIANT translation unit, avoiding duplicate
   definitions now that the file is built per-architecture. */
+#ifndef CLIB_MARCH_VARIANT
/* get first interface address */
ip4_address_t *
ip4_interface_first_address (ip4_main_t * im, u32 sw_if_index,
}));
/* *INDENT-ON* */
}
+#endif
/* Built-in ip4 unicast rx feature path definition */
/* *INDENT-OFF* */
/* Global IP4 main. */
ip4_main_t ip4_main;
/* Hunk: init function is only referenced via VLIB_INIT_FUNCTION in this
   file, so give it internal linkage. */
-clib_error_t *
+static clib_error_t *
ip4_lookup_init (vlib_main_t * vm)
{
ip4_main_t *im = &ip4_main;
}
ip4_forward_next_trace_t;
/* Hunk: the externally-visible trace formatters are wrapped in
   #ifndef CLIB_MARCH_VARIANT so only the default build emits them;
   format_ip4_lookup_trace stays unguarded — presumably because it is
   static and referenced by the node registrations in every variant
   (TODO confirm against the full file). */
+#ifndef CLIB_MARCH_VARIANT
u8 *
format_ip4_forward_next_trace (u8 * s, va_list * args)
{
format_ip4_header, t->packet_data, sizeof (t->packet_data));
return s;
}
+#endif
static u8 *
format_ip4_lookup_trace (u8 * s, va_list * args)
return s;
}
+#ifndef CLIB_MARCH_VARIANT
/* Common trace function for all ip4-forward next nodes. */
void
ip4_forward_next_trace (vlib_main_t * vm,
return p0->flags;
}
+#endif
/* *INDENT-OFF* */
VNET_FEATURE_ARC_INIT (ip4_local) =
return frame->n_vectors;
}
/* Hunk: VLIB_NODE_FN conversion for ip4-local (head of the ip4-local
   feature arc, per the '1' flag passed to ip4_local_inline). */
-static uword
-ip4_local (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_local_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
return ip4_local_inline (vm, node, frame, 1 /* head of feature arc */ );
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_local_node) =
{
- .function = ip4_local,
.name = "ip4-local",
.vector_size = sizeof (u32),
.format_trace = format_ip4_forward_next_trace,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_node, ip4_local);
/* Hunk: VLIB_NODE_FN conversion for ip4-local-end-of-arc. Note the
   registration drops its ',static' qualifier: VLIB_NODE_FN requires the
   node object to have external linkage so the per-arch variants can find
   it — NOTE(review): confirm no other file relied on it being static. */
-static uword
-ip4_local_end_of_arc (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_local_end_of_arc_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
return ip4_local_inline (vm, node, frame, 0 /* head of feature arc */ );
}
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_local_end_of_arc_node,static) = {
- .function = ip4_local_end_of_arc,
+VLIB_REGISTER_NODE (ip4_local_end_of_arc_node) = {
.name = "ip4-local-end-of-arc",
.vector_size = sizeof (u32),
.sibling_of = "ip4-local",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_local_end_of_arc_node, ip4_local_end_of_arc)
-
VNET_FEATURE_INIT (ip4_local_end_of_arc, static) = {
.arc_name = "ip4-local",
.node_name = "ip4-local-end-of-arc",
};
/* *INDENT-ON* */
/* Hunk: public API helper, compiled once in the default variant only. */
+#ifndef CLIB_MARCH_VARIANT
void
ip4_register_protocol (u32 protocol, u32 node_index)
{
lm->local_next_by_ip_protocol[protocol] =
vlib_node_add_next (vm, ip4_local_node.index, node_index);
}
+#endif
static clib_error_t *
show_ip_local_command_fn (vlib_main_t * vm,
ip_lookup_main_t *lm = &im->lookup_main;
/* Hunk (inside the ARP/glean drop path): the throttle state used to be
   function-scope 'static' — shared, unsynchronized, across all worker
   threads (a data race). It moves into per-thread arrays in ip4_main_t,
   indexed by vm->thread_index. Every ~1ms the thread advances its seed
   (random_u32 updates the seed in place; the return value is deliberately
   discarded) and clears its seen-bitmap. */
u32 *from, *to_next_drop;
uword n_left_from, n_left_to_next_drop, next_index;
- static f64 time_last_seed_change = -1e100;
- static u32 hash_seeds[3];
- static uword hash_bitmap[256 / BITS (uword)];
+ u32 thread_index = vm->thread_index;
+ u32 seed;
f64 time_now;
if (node->flags & VLIB_NODE_FLAG_TRACE)
ip4_forward_next_trace (vm, node, frame, VLIB_TX);
time_now = vlib_time_now (vm);
- if (time_now - time_last_seed_change > 1e-3)
+ if (time_now - im->arp_throttle_last_seed_change_time[thread_index] > 1e-3)
{
- uword i;
- u32 *r = clib_random_buffer_get_data (&vm->random_buffer,
- sizeof (hash_seeds));
- for (i = 0; i < ARRAY_LEN (hash_seeds); i++)
- hash_seeds[i] = r[i];
+ (void) random_u32 (&im->arp_throttle_seeds[thread_index]);
+ /* ARP_THROTTLE_BITS / BITS (u8) == bitmap size in bytes */
+ memset (im->arp_throttle_bitmaps[thread_index], 0,
+ ARP_THROTTLE_BITS / BITS (u8));
- /* Mark all hash keys as been no-seen before. */
- for (i = 0; i < ARRAY_LEN (hash_bitmap); i++)
- hash_bitmap[i] = 0;
-
- time_last_seed_change = time_now;
+ im->arp_throttle_last_seed_change_time[thread_index] = time_now;
}
+ seed = im->arp_throttle_seeds[thread_index];
/* Hunk: per-packet throttle check. The old three-word Bob-Jenkins style
   hash (hash_v3_mix32/finalize32 over seeds, address and sw_if_index) is
   replaced by a single XOR of the address with the per-thread seed,
   masked down to an ARP_THROTTLE_BITS-entry bitmap. NOTE(review): the new
   key no longer mixes in sw_if_index0 (the old code did 'b0 ^=
   sw_if_index0'), so the same next-hop/destination address on different
   interfaces now shares one throttle slot — confirm this is intended.
   A set bit means "already requested in this seed interval" -> drop. */
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
while (n_left_from > 0 && n_left_to_next_drop > 0)
{
- u32 pi0, adj_index0, a0, b0, c0, m0, sw_if_index0, drop0;
+ u32 pi0, adj_index0, r0, w0, sw_if_index0, drop0;
+ uword m0;
ip_adjacency_t *adj0;
vlib_buffer_t *p0;
ip4_header_t *ip0;
- uword bm0;
pi0 = from[0];
adj0 = adj_get (adj_index0);
ip0 = vlib_buffer_get_current (p0);
- a0 = hash_seeds[0];
- b0 = hash_seeds[1];
- c0 = hash_seeds[2];
-
sw_if_index0 = adj0->rewrite_header.sw_if_index;
vnet_buffer (p0)->sw_if_index[VLIB_TX] = sw_if_index0;
- if (is_glean)
+ if (PREDICT_TRUE (is_glean))
{
/*
* this is the Glean case, so we are ARPing for the
* packet's destination
*/
- a0 ^= ip0->dst_address.data_u32;
+ r0 = ip0->dst_address.data_u32;
}
else
{
- a0 ^= adj0->sub_type.nbr.next_hop.ip4.data_u32;
+ r0 = adj0->sub_type.nbr.next_hop.ip4.data_u32;
}
- b0 ^= sw_if_index0;
-
- hash_v3_mix32 (a0, b0, c0);
- hash_v3_finalize32 (a0, b0, c0);
- c0 &= BITS (hash_bitmap) - 1;
- m0 = (uword) 1 << (c0 % BITS (uword));
- c0 = c0 / BITS (uword);
+ r0 ^= seed;
+ /* Select bit number */
+ r0 &= ARP_THROTTLE_BITS - 1;
+ /* w0 = word index into the bitmap, m0 = bit mask within that word */
+ w0 = r0 / BITS (uword);
+ m0 = (uword) 1 << (r0 % BITS (uword));
- bm0 = hash_bitmap[c0];
- drop0 = (bm0 & m0) != 0;
-
- /* Mark it as seen. */
- hash_bitmap[c0] = bm0 | m0;
+ drop0 = (im->arp_throttle_bitmaps[thread_index][w0] & m0) != 0;
+ im->arp_throttle_bitmaps[thread_index][w0] |= m0;
from += 1;
n_left_from -= 1;
return frame->n_vectors;
}
/* Hunk: VLIB_NODE_FN conversion for the thin arp/glean wrappers; the
   final flag to ip4_arp_inline selects glean mode (0 = arp, 1 = glean). */
-static uword
-ip4_arp (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_arp_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
return (ip4_arp_inline (vm, node, frame, 0));
}
-static uword
-ip4_glean (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_glean_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
return (ip4_arp_inline (vm, node, frame, 1));
}
/* Hunk: drop .function members from the arp/glean registrations (supplied
   by VLIB_NODE_FN now), make arp_notrace_init internal, and compile
   ip4_probe_neighbor only in the default (non-march-variant) build. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_arp_node) =
{
- .function = ip4_arp,
.name = "ip4-arp",
.vector_size = sizeof (u32),
.format_trace = format_ip4_forward_next_trace,
VLIB_REGISTER_NODE (ip4_glean_node) =
{
- .function = ip4_glean,
.name = "ip4-glean",
.vector_size = sizeof (u32),
.format_trace = format_ip4_forward_next_trace,
_(REPLICATE_DROP) \
_(REPLICATE_FAIL)
-clib_error_t *
+static clib_error_t *
arp_notrace_init (vlib_main_t * vm)
{
vlib_node_runtime_t *rt = vlib_node_get_runtime (vm, ip4_arp_node.index);
VLIB_INIT_FUNCTION (arp_notrace_init);
+#ifndef CLIB_MARCH_VARIANT
/* Send an ARP request to see if given destination is reachable on given interface. */
clib_error_t *
ip4_probe_neighbor (vlib_main_t * vm, ip4_address_t * dst, u32 sw_if_index,
adj_unlock (ai);
return /* no error */ 0;
}
+#endif
typedef enum
{
- <code> adj->rewrite_header.next_index </code>
or @c ip4-drop
*/
/* Hunk: VLIB_NODE_FN conversion for the rewrite family. Each wrapper
   forwards to ip4_rewrite_inline with three flags; from the call pattern
   the first is adjacency-counters-enabled, the second appears to select
   midchain handling and the third mcast handling (TODO confirm against
   ip4_rewrite_inline's parameter list). */
-static uword
-ip4_rewrite (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+
+VLIB_NODE_FN (ip4_rewrite_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 0, 0);
return ip4_rewrite_inline (vm, node, frame, 0, 0, 0);
}
-static uword
-ip4_rewrite_bcast (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_rewrite_bcast_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 0, 0);
return ip4_rewrite_inline (vm, node, frame, 0, 0, 0);
}
-static uword
-ip4_midchain (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_midchain_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 1, 0);
return ip4_rewrite_inline (vm, node, frame, 0, 1, 0);
}
-static uword
-ip4_rewrite_mcast (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_rewrite_mcast_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 0, 1);
return ip4_rewrite_inline (vm, node, frame, 0, 0, 1);
}
-static uword
-ip4_mcast_midchain (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_mcast_midchain_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
if (adj_are_counters_enabled ())
return ip4_rewrite_inline (vm, node, frame, 1, 1, 1);
/* Hunk: strip .function members from the rewrite-family registrations.
   Note the deleted '.function = ip4_rewrite' on ip4_rewrite_bcast_node:
   the old code pointed the bcast node at the plain rewrite function while
   its MULTIARCH macro named ip4_rewrite_bcast — an inconsistency that the
   VLIB_NODE_FN conversion removes. ip4_mcast_midchain_node also loses its
   ',static' registration so the generated variants can reference it. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_rewrite_node) = {
- .function = ip4_rewrite,
.name = "ip4-rewrite",
.vector_size = sizeof (u32),
};
VLIB_REGISTER_NODE (ip4_rewrite_bcast_node) = {
- .function = ip4_rewrite,
.name = "ip4-rewrite-bcast",
.vector_size = sizeof (u32),
.format_trace = format_ip4_rewrite_trace,
.sibling_of = "ip4-rewrite",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_bcast_node, ip4_rewrite_bcast)
VLIB_REGISTER_NODE (ip4_rewrite_mcast_node) = {
- .function = ip4_rewrite_mcast,
.name = "ip4-rewrite-mcast",
.vector_size = sizeof (u32),
.format_trace = format_ip4_rewrite_trace,
.sibling_of = "ip4-rewrite",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_rewrite_mcast_node, ip4_rewrite_mcast)
-VLIB_REGISTER_NODE (ip4_mcast_midchain_node, static) = {
- .function = ip4_mcast_midchain,
+VLIB_REGISTER_NODE (ip4_mcast_midchain_node) = {
.name = "ip4-mcast-midchain",
.vector_size = sizeof (u32),
.format_trace = format_ip4_rewrite_trace,
.sibling_of = "ip4-rewrite",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_mcast_midchain_node, ip4_mcast_midchain)
VLIB_REGISTER_NODE (ip4_midchain_node) = {
- .function = ip4_midchain,
.name = "ip4-midchain",
.vector_size = sizeof (u32),
.format_trace = format_ip4_forward_next_trace,
.sibling_of = "ip4-rewrite",
};
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_midchain_node, ip4_midchain);
/* *INDENT-ON */
/* Hunk: ip4_lookup_validate becomes static (only used by this file's CLI
   code), and the public flow-hash / classify setters are compiled once in
   the default variant only; their CLI wrappers remain static and are built
   in every variant. */
-int
+static int
ip4_lookup_validate (ip4_address_t * a, u32 fib_index0)
{
ip4_fib_mtrie_t *mtrie0;
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
int
vnet_set_ip4_flow_hash (u32 table_id, u32 flow_hash_config)
{
return 0;
}
+#endif
static clib_error_t *
set_ip_flow_hash_command_fn (vlib_main_t * vm,
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
int
vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
u32 table_index)
return 0;
}
+#endif
static clib_error_t *
set_ip_classify_command_fn (vlib_main_t * vm,