#include <vnet/ethernet/ethernet.h>
#include <vnet/ppp/ppp.h>
#include <vnet/hdlc/hdlc.h>
+#include <vnet/util/throttle.h>
typedef struct
{
return s;
}
+/* Choose the next node for one ip4 packet: multicast destination
+   addresses are steered to the mfib lookup, everything else to the
+   unicast fib lookup.  When arc_enabled is non-zero the matching
+   feature arc is started, which may override the chosen next index. */
+static_always_inline u32
+ip4_input_set_next (u32 sw_if_index, vlib_buffer_t * b, int arc_enabled)
+{
+  ip4_main_t *im = &ip4_main;
+  ip_lookup_main_t *lm = &im->lookup_main;
+  u32 next;
+  u8 arc;
+
+  ip4_header_t *ip = vlib_buffer_get_current (b);
+
+  if (PREDICT_FALSE (ip4_address_is_multicast (&ip->dst_address)))
+    {
+      next = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
+      arc = lm->mcast_feature_arc_index;
+    }
+  else
+    {
+      next = IP4_INPUT_NEXT_LOOKUP;
+      arc = lm->ucast_feature_arc_index;
+    }
+
+  /* Feature arc may rewrite 'next' to the arc's first feature node. */
+  if (arc_enabled)
+    vnet_feature_arc_start (arc, sw_if_index, &next, b);
+
+  return next;
+}
+
+/* Per-interface bookkeeping for the input loop.  Packets arriving on the
+   same sw_if_index are batched in *cnt; when the interface changes, the
+   accumulated count is flushed into the simple counter with a single
+   vlib_increment_simple_counter call (cheaper than one call per packet).
+   On an interface change *arc_enabled is also recomputed so the caller
+   knows whether a unicast or multicast feature arc must be started. */
+static_always_inline void
+ip4_input_check_sw_if_index (vlib_main_t * vm,
+			     vlib_simple_counter_main_t * cm, u32 sw_if_index,
+			     u32 * last_sw_if_index, u32 * cnt,
+			     int *arc_enabled)
+{
+  ip4_main_t *im = &ip4_main;
+  ip_lookup_main_t *lm = &im->lookup_main;
+  u32 thread_index;
+  /* Fast path: same interface as the previous packet, just count it. */
+  if (*last_sw_if_index == sw_if_index)
+    {
+      (*cnt)++;
+      return;
+    }
+
+  /* Interface changed: flush the batch accumulated for the old one. */
+  thread_index = vm->thread_index;
+  if (*cnt)
+    vlib_increment_simple_counter (cm, thread_index, *last_sw_if_index, *cnt);
+  *cnt = 1;
+  *last_sw_if_index = sw_if_index;
+
+  if (vnet_have_features (lm->ucast_feature_arc_index, sw_if_index) ||
+      vnet_have_features (lm->mcast_feature_arc_index, sw_if_index))
+    *arc_enabled = 1;
+  else
+    *arc_enabled = 0;
+}
+
/* Validate IP v4 packets and pass them either to forwarding code
or drop/punt exception packets. */
always_inline uword
vlib_node_runtime_t * node,
vlib_frame_t * frame, int verify_checksum)
{
- ip4_main_t *im = &ip4_main;
vnet_main_t *vnm = vnet_get_main ();
- ip_lookup_main_t *lm = &im->lookup_main;
- u32 n_left_from, *from, *to_next;
- ip4_input_next_t next_index;
+ u32 n_left_from, *from;
+ u32 thread_index = vm->thread_index;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip4_input_node.index);
vlib_simple_counter_main_t *cm;
- u32 thread_index = vlib_get_thread_index ();
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ ip4_header_t *ip[4];
+ u16 nexts[VLIB_FRAME_SIZE], *next;
+ u32 sw_if_index[4];
+ u32 last_sw_if_index = ~0;
+ u32 cnt = 0;
+ int arc_enabled = 0;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
if (node->flags & VLIB_NODE_FLAG_TRACE)
vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors,
cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
VNET_INTERFACE_COUNTER_IP4);
- while (n_left_from > 0)
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ b = bufs;
+ next = nexts;
+#if (CLIB_N_PREFETCHES >= 8)
+ while (n_left_from >= 4)
{
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ u32 x = 0;
- while (n_left_from >= 4 && n_left_to_next >= 2)
+ /* Prefetch next iteration. */
+ if (n_left_from >= 12)
{
- vlib_buffer_t *p0, *p1;
- ip4_header_t *ip0, *ip1;
- u32 sw_if_index0, pi0, next0;
- u32 sw_if_index1, pi1, next1;
- u8 arc0, arc1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p2, *p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
- CLIB_PREFETCH (p3->data, sizeof (ip1[0]), LOAD);
- }
-
- to_next[0] = pi0 = from[0];
- to_next[1] = pi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
+ vlib_prefetch_buffer_header (b[8], LOAD);
+ vlib_prefetch_buffer_header (b[9], LOAD);
+ vlib_prefetch_buffer_header (b[10], LOAD);
+ vlib_prefetch_buffer_header (b[11], LOAD);
+
+ vlib_prefetch_buffer_data (b[4], LOAD);
+ vlib_prefetch_buffer_data (b[5], LOAD);
+ vlib_prefetch_buffer_data (b[6], LOAD);
+ vlib_prefetch_buffer_data (b[7], LOAD);
+ }
- p0 = vlib_get_buffer (vm, pi0);
- p1 = vlib_get_buffer (vm, pi1);
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = ~0;
+ vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = ~0;
+ vnet_buffer (b[2])->ip.adj_index[VLIB_RX] = ~0;
+ vnet_buffer (b[3])->ip.adj_index[VLIB_RX] = ~0;
- ip0 = vlib_buffer_get_current (p0);
- ip1 = vlib_buffer_get_current (p1);
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+ sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+ sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
- sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (p1)->sw_if_index[VLIB_RX];
+ x |= sw_if_index[0] ^ last_sw_if_index;
+ x |= sw_if_index[1] ^ last_sw_if_index;
+ x |= sw_if_index[2] ^ last_sw_if_index;
+ x |= sw_if_index[3] ^ last_sw_if_index;
- if (PREDICT_FALSE (ip4_address_is_multicast (&ip0->dst_address)))
+ if (PREDICT_TRUE (x == 0))
+ {
+ /* we deal with 4 more packets sharing the same sw_if_index
+ with the previous one, so we can optimize */
+ cnt += 4;
+ if (arc_enabled)
{
- arc0 = lm->mcast_feature_arc_index;
- next0 = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
+ next[0] = ip4_input_set_next (sw_if_index[0], b[0], 1);
+ next[1] = ip4_input_set_next (sw_if_index[1], b[1], 1);
+ next[2] = ip4_input_set_next (sw_if_index[2], b[2], 1);
+ next[3] = ip4_input_set_next (sw_if_index[3], b[3], 1);
}
else
{
- arc0 = lm->ucast_feature_arc_index;
- next0 = IP4_INPUT_NEXT_LOOKUP;
+ next[0] = ip4_input_set_next (sw_if_index[0], b[0], 0);
+ next[1] = ip4_input_set_next (sw_if_index[1], b[1], 0);
+ next[2] = ip4_input_set_next (sw_if_index[2], b[2], 0);
+ next[3] = ip4_input_set_next (sw_if_index[3], b[3], 0);
}
+ }
+ else
+ {
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[0],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[1],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[2],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[3],
+ &last_sw_if_index, &cnt, &arc_enabled);
+
+ next[0] = ip4_input_set_next (sw_if_index[0], b[0], 1);
+ next[1] = ip4_input_set_next (sw_if_index[1], b[1], 1);
+ next[2] = ip4_input_set_next (sw_if_index[2], b[2], 1);
+ next[3] = ip4_input_set_next (sw_if_index[3], b[3], 1);
+ }
- if (PREDICT_FALSE (ip4_address_is_multicast (&ip1->dst_address)))
- {
- arc1 = lm->mcast_feature_arc_index;
- next1 = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
- }
- else
- {
- arc1 = lm->ucast_feature_arc_index;
- next1 = IP4_INPUT_NEXT_LOOKUP;
- }
+ ip[0] = vlib_buffer_get_current (b[0]);
+ ip[1] = vlib_buffer_get_current (b[1]);
+ ip[2] = vlib_buffer_get_current (b[2]);
+ ip[3] = vlib_buffer_get_current (b[3]);
- vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0;
- vnet_buffer (p1)->ip.adj_index[VLIB_RX] = ~0;
+ ip4_input_check_x4 (vm, error_node, b, ip, next, verify_checksum);
- vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0);
- vnet_feature_arc_start (arc1, sw_if_index1, &next1, p1);
+ /* next */
+ b += 4;
+ next += 4;
+ n_left_from -= 4;
+ }
+#elif (CLIB_N_PREFETCHES >= 4)
+ while (n_left_from >= 2)
+ {
+ u32 x = 0;
+ u32 next0, next1;
- vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1);
- vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1);
- ip4_input_check_x2 (vm, error_node,
- p0, p1, ip0, ip1,
- &next0, &next1, verify_checksum);
+ /* Prefetch next iteration. */
+ if (n_left_from >= 6)
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- pi0, pi1, next0, next1);
+ vlib_prefetch_buffer_data (b[2], LOAD);
+ vlib_prefetch_buffer_data (b[3], LOAD);
}
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- vlib_buffer_t *p0;
- ip4_header_t *ip0;
- u32 sw_if_index0, pi0, next0;
- u8 arc0;
- pi0 = from[0];
- to_next[0] = pi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = ~0;
+ vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = ~0;
- p0 = vlib_get_buffer (vm, pi0);
- ip0 = vlib_buffer_get_current (p0);
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
- sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ x |= sw_if_index[0] ^ last_sw_if_index;
+ x |= sw_if_index[1] ^ last_sw_if_index;
- if (PREDICT_FALSE (ip4_address_is_multicast (&ip0->dst_address)))
+ if (PREDICT_TRUE (x == 0))
+ {
+ /* we deal with 2 more packets sharing the same sw_if_index
+ with the previous one, so we can optimize */
+ cnt += 2;
+ if (arc_enabled)
{
- arc0 = lm->mcast_feature_arc_index;
- next0 = IP4_INPUT_NEXT_LOOKUP_MULTICAST;
+ next0 = ip4_input_set_next (sw_if_index[0], b[0], 1);
+ next1 = ip4_input_set_next (sw_if_index[1], b[1], 1);
}
else
{
- arc0 = lm->ucast_feature_arc_index;
- next0 = IP4_INPUT_NEXT_LOOKUP;
+ next0 = ip4_input_set_next (sw_if_index[0], b[0], 0);
+ next1 = ip4_input_set_next (sw_if_index[1], b[1], 0);
}
+ }
+ else
+ {
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[0],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[1],
+ &last_sw_if_index, &cnt, &arc_enabled);
- vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0;
- vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0);
+ next0 = ip4_input_set_next (sw_if_index[0], b[0], 1);
+ next1 = ip4_input_set_next (sw_if_index[1], b[1], 1);
+ }
- vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1);
- ip4_input_check_x1 (vm, error_node, p0, ip0, &next0,
- verify_checksum);
+ ip[0] = vlib_buffer_get_current (b[0]);
+ ip[1] = vlib_buffer_get_current (b[1]);
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- pi0, next0);
- }
+ ip4_input_check_x2 (vm, error_node, b[0], b[1], ip[0], ip[1],
+ &next0, &next1, verify_checksum);
+ next[0] = (u16) next0;
+ next[1] = (u16) next1;
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ /* next */
+ b += 2;
+ next += 2;
+ n_left_from -= 2;
}
+#endif
+ while (n_left_from)
+ {
+ u32 next0;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = ~0;
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[0], &last_sw_if_index,
+ &cnt, &arc_enabled);
+ next0 = ip4_input_set_next (sw_if_index[0], b[0], arc_enabled);
+ ip[0] = vlib_buffer_get_current (b[0]);
+ ip4_input_check_x1 (vm, error_node, b[0], ip[0], &next0,
+ verify_checksum);
+ next[0] = next0;
+
+ /* next */
+ b += 1;
+ next += 1;
+ n_left_from -= 1;
+ }
+
+ vlib_increment_simple_counter (cm, thread_index, last_sw_if_index, cnt);
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
<code> vnet_get_config_data (... &next0 ...); </code>
or @c error-drop
*/
-static uword
-ip4_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
+/* "ip4-input" node function: full ip4 validation including header
+   checksum verification. */
+VLIB_NODE_FN (ip4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
{
  return ip4_input_inline (vm, node, frame, /* verify_checksum */ 1);
}
-static uword
-ip4_input_no_checksum (vlib_main_t * vm,
-		       vlib_node_runtime_t * node, vlib_frame_t * frame)
+/* "ip4-input-no-checksum" node function: same validation path but the
+   header checksum is not verified (e.g. when hardware already did). */
+VLIB_NODE_FN (ip4_input_no_checksum_node) (vlib_main_t * vm,
+					   vlib_node_runtime_t * node,
+					   vlib_frame_t * frame)
{
  return ip4_input_inline (vm, node, frame, /* verify_checksum */ 0);
}
+#ifndef CLIB_MARCH_VARIANT
char *ip4_error_strings[] = {
#define _(sym,string) string,
foreach_ip4_error
#undef _
};
+#endif
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_input_node) = {
- .function = ip4_input,
.name = "ip4-input",
.vector_size = sizeof (u32),
+ .protocol_hint = VLIB_NODE_PROTO_HINT_IP4,
.n_errors = IP4_N_ERROR,
.error_strings = ip4_error_strings,
.next_nodes = {
[IP4_INPUT_NEXT_DROP] = "error-drop",
[IP4_INPUT_NEXT_PUNT] = "error-punt",
+ [IP4_INPUT_NEXT_OPTIONS] = "ip4-options",
[IP4_INPUT_NEXT_LOOKUP] = "ip4-lookup",
[IP4_INPUT_NEXT_LOOKUP_MULTICAST] = "ip4-mfib-forward-lookup",
[IP4_INPUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
- [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-reassembly",
+ [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-full-reassembly",
},
.format_buffer = format_ip4_header,
.format_trace = format_ip4_input_trace,
};
-/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_input_node, ip4_input);
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ip4_input_no_checksum_node,static) = {
- .function = ip4_input_no_checksum,
+VLIB_REGISTER_NODE (ip4_input_no_checksum_node) = {
.name = "ip4-input-no-checksum",
.vector_size = sizeof (u32),
- .n_next_nodes = IP4_INPUT_N_NEXT,
- .next_nodes = {
- [IP4_INPUT_NEXT_DROP] = "error-drop",
- [IP4_INPUT_NEXT_PUNT] = "error-punt",
- [IP4_INPUT_NEXT_LOOKUP] = "ip4-lookup",
- [IP4_INPUT_NEXT_LOOKUP_MULTICAST] = "ip4-mfib-forward-lookup",
- [IP4_INPUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
- [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-reassembly",
- },
-
+ .sibling_of = "ip4-input",
.format_buffer = format_ip4_header,
.format_trace = format_ip4_input_trace,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_input_no_checksum_node,
- ip4_input_no_checksum);
-
static clib_error_t *
ip4_init (vlib_main_t * vm)
{
hdlc_register_input_protocol (vm, HDLC_PROTOCOL_ip4, ip4_input_node.index);
{
+ extern vlib_node_registration_t ip4_input_no_checksum_node;
pg_node_t *pn;
pn = pg_get_node (ip4_input_node.index);
pn->unformat_edit = unformat_pg_ip4_header;
if ((error = vlib_call_init_function (vm, ip4_cli_init)))
return error;
- if ((error = vlib_call_init_function (vm, ip4_source_check_init)))
- return error;
-
if ((error = vlib_call_init_function
(vm, ip4_source_and_port_range_check_init)))
return error;
VLIB_INIT_FUNCTION (ip4_init);
+/* Main-loop-enter hook: initialize the per-thread ARP throttle once the
+   final number of vlib mains (worker threads) is known.
+   NOTE(review): 1e-3 appears to be the throttle time window — confirm
+   units against throttle_init before relying on it. */
+static clib_error_t *
+ip4_main_loop_enter (vlib_main_t * vm)
+{
+  ip4_main_t *im = &ip4_main;
+  vlib_thread_main_t *tm = &vlib_thread_main;
+  u32 n_vlib_mains = tm->n_vlib_mains;
+
+  throttle_init (&im->arp_throttle, n_vlib_mains, 1e-3);
+
+  return (NULL);
+}
+
+VLIB_MAIN_LOOP_ENTER_FUNCTION (ip4_main_loop_enter);
+
/*
* fd.io coding-style-patch-verification: ON
*