#include <vnet/ethernet/ethernet.h>
#include <vnet/ppp/ppp.h>
#include <vnet/hdlc/hdlc.h>
+#include <vnet/util/throttle.h>
/* Per-packet trace record for the ip4-input node: a raw snapshot of the
   first 64 bytes of the packet, decoded later by the trace formatter. */
typedef struct
{
u8 packet_data[64];
} ip4_input_trace_t;
/* NOTE(review): the diff drops the CLIB_MARCH_VARIANT guard around this
   formatter — presumably so it is emitted for every march variant rather
   than only the default one; verify against the matching -#endif below. */
-#ifndef CLIB_MARCH_VARIANT
/* Trace formatter for ip4-input records.  Body is elided in this collapsed
   view; only the trailing return of the format string is visible here. */
static u8 *
format_ip4_input_trace (u8 * s, va_list * va)
{
return s;
}
-#endif
/* Selects the next-node index for buffer b arriving on sw_if_index;
   arc_enabled gates the feature-arc path.  Body elided in this view. */
static_always_inline u32
ip4_input_set_next (u32 sw_if_index, vlib_buffer_t * b, int arc_enabled)
}
/* Flushes the per-interface simple counter when the RX interface changes
   between packets.  The diff threads vlib_main_t through the call so the
   thread index can be read directly from vm->thread_index instead of the
   (slower) vlib_get_thread_index() TLS lookup.  Body partially elided. */
static_always_inline void
-ip4_input_check_sw_if_index (vlib_simple_counter_main_t * cm, u32 sw_if_index,
+ip4_input_check_sw_if_index (vlib_main_t * vm,
+ vlib_simple_counter_main_t * cm, u32 sw_if_index,
u32 * last_sw_if_index, u32 * cnt,
int *arc_enabled)
{
return;
}
/* vm->thread_index is a plain struct load; equivalent to the removed
   vlib_get_thread_index() on the calling thread. */
- thread_index = vlib_get_thread_index ();
+ thread_index = vm->thread_index;
if (*cnt)
vlib_increment_simple_counter (cm, thread_index, *last_sw_if_index, *cnt);
*cnt = 1;
{
/* Dispatch body of the ip4-input node (heavily collapsed diff view).
   Counter updates are per-thread, so the thread index comes straight
   from vm rather than the TLS helper. */
vnet_main_t *vnm = vnet_get_main ();
u32 n_left_from, *from;
- u32 thread_index = vlib_get_thread_index ();
+ u32 thread_index = vm->thread_index;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip4_input_node.index);
vlib_simple_counter_main_t *cm;
vlib_get_buffers (vm, from, bufs, n_left_from);
b = bufs;
next = nexts;
/* Quad-packet loop, now compiled only when the target can sustain deep
   prefetching (CLIB_N_PREFETCHES >= 8).  The raw CLIB_PREFETCH on
   b[n]->data is replaced with the dedicated vlib_prefetch_buffer_data
   helper, which is the idiomatic form for buffer payload prefetch. */
+#if (CLIB_N_PREFETCHES >= 8)
while (n_left_from >= 4)
{
u32 x = 0;
vlib_prefetch_buffer_header (b[10], LOAD);
vlib_prefetch_buffer_header (b[11], LOAD);
- CLIB_PREFETCH (b[4]->data, sizeof (ip4_header_t), LOAD);
- CLIB_PREFETCH (b[5]->data, sizeof (ip4_header_t), LOAD);
- CLIB_PREFETCH (b[6]->data, sizeof (ip4_header_t), LOAD);
- CLIB_PREFETCH (b[7]->data, sizeof (ip4_header_t), LOAD);
+ vlib_prefetch_buffer_data (b[4], LOAD);
+ vlib_prefetch_buffer_data (b[5], LOAD);
+ vlib_prefetch_buffer_data (b[6], LOAD);
+ vlib_prefetch_buffer_data (b[7], LOAD);
}
vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = ~0;
}
else
{
/* Slow path: interfaces differ within the quad; callers now pass vm
   through to the counter-flush helper (see signature change above). */
- ip4_input_check_sw_if_index (cm, sw_if_index[0], &last_sw_if_index,
- &cnt, &arc_enabled);
- ip4_input_check_sw_if_index (cm, sw_if_index[1], &last_sw_if_index,
- &cnt, &arc_enabled);
- ip4_input_check_sw_if_index (cm, sw_if_index[2], &last_sw_if_index,
- &cnt, &arc_enabled);
- ip4_input_check_sw_if_index (cm, sw_if_index[3], &last_sw_if_index,
- &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[0],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[1],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[2],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[3],
+ &last_sw_if_index, &cnt, &arc_enabled);
next[0] = ip4_input_set_next (sw_if_index[0], b[0], 1);
next[1] = ip4_input_set_next (sw_if_index[1], b[1], 1);
next += 4;
n_left_from -= 4;
}
/* New dual-packet path for targets with fewer prefetch slots
   (4 <= CLIB_N_PREFETCHES < 8): same structure as the quad loop but
   processes 2 packets per iteration and prefetches one iteration ahead. */
+#elif (CLIB_N_PREFETCHES >= 4)
+ while (n_left_from >= 2)
+ {
+ u32 x = 0;
+ u32 next0, next1;
+
+ /* Prefetch next iteration. */
+ if (n_left_from >= 6)
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+
+ vlib_prefetch_buffer_data (b[2], LOAD);
+ vlib_prefetch_buffer_data (b[3], LOAD);
+ }
+
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = ~0;
+ vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = ~0;
+
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+
+ /* x accumulates XOR deltas vs. the previous interface: zero iff both
+    packets share last_sw_if_index, letting us batch the counter. */
+ x |= sw_if_index[0] ^ last_sw_if_index;
+ x |= sw_if_index[1] ^ last_sw_if_index;
+
+ if (PREDICT_TRUE (x == 0))
+ {
+ /* we deal with 2 more packets sharing the same sw_if_index
+ with the previous one, so we can optimize */
+ cnt += 2;
+ if (arc_enabled)
+ {
+ next0 = ip4_input_set_next (sw_if_index[0], b[0], 1);
+ next1 = ip4_input_set_next (sw_if_index[1], b[1], 1);
+ }
+ else
+ {
+ next0 = ip4_input_set_next (sw_if_index[0], b[0], 0);
+ next1 = ip4_input_set_next (sw_if_index[1], b[1], 0);
+ }
+ }
+ else
+ {
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[0],
+ &last_sw_if_index, &cnt, &arc_enabled);
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[1],
+ &last_sw_if_index, &cnt, &arc_enabled);
+
+ next0 = ip4_input_set_next (sw_if_index[0], b[0], 1);
+ next1 = ip4_input_set_next (sw_if_index[1], b[1], 1);
+ }
+
+ ip[0] = vlib_buffer_get_current (b[0]);
+ ip[1] = vlib_buffer_get_current (b[1]);
+
+ ip4_input_check_x2 (vm, error_node, b[0], b[1], ip[0], ip[1],
+ &next0, &next1, verify_checksum);
+ next[0] = (u16) next0;
+ next[1] = (u16) next1;
+
+ /* next */
+ b += 2;
+ next += 2;
+ n_left_from -= 2;
+ }
+#endif
+
/* Scalar tail: one packet at a time for whatever remains. */
while (n_left_from)
{
u32 next0;
vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = ~0;
sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
- ip4_input_check_sw_if_index (cm, sw_if_index[0], &last_sw_if_index,
+ ip4_input_check_sw_if_index (vm, cm, sw_if_index[0], &last_sw_if_index,
&cnt, &arc_enabled);
next0 = ip4_input_set_next (sw_if_index[0], b[0], arc_enabled);
ip[0] = vlib_buffer_get_current (b[0]);
/* Error-string table generated from the foreach_ip4_error x-macro. */
foreach_ip4_error
#undef _
};
+#endif
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_input_node) = {
.name = "ip4-input",
.vector_size = sizeof (u32),
/* New: advertise the protocol this node handles so vlib can
   optimize dispatch/handoff for IPv4 traffic. */
+ .protocol_hint = VLIB_NODE_PROTO_HINT_IP4,
.n_errors = IP4_N_ERROR,
.error_strings = ip4_error_strings,
[IP4_INPUT_NEXT_LOOKUP] = "ip4-lookup",
[IP4_INPUT_NEXT_LOOKUP_MULTICAST] = "ip4-mfib-forward-lookup",
[IP4_INPUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
/* Reassembly node renamed upstream: full (non-virtual) reassembly. */
- [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-reassembly",
+ [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-full-reassembly",
},
.format_buffer = format_ip4_header,
.name = "ip4-input-no-checksum",
.vector_size = sizeof (u32),
/* The no-checksum variant now inherits ip4-input's next-node table via
   sibling_of instead of duplicating it — keeps the two nodes in sync
   (the duplicated table below still referenced stale "ip4-reassembly"). */
- .n_next_nodes = IP4_INPUT_N_NEXT,
- .next_nodes = {
- [IP4_INPUT_NEXT_DROP] = "error-drop",
- [IP4_INPUT_NEXT_PUNT] = "error-punt",
- [IP4_INPUT_NEXT_OPTIONS] = "ip4-options",
- [IP4_INPUT_NEXT_LOOKUP] = "ip4-lookup",
- [IP4_INPUT_NEXT_LOOKUP_MULTICAST] = "ip4-mfib-forward-lookup",
- [IP4_INPUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
- [IP4_INPUT_NEXT_REASSEMBLY] = "ip4-reassembly",
- },
-
+ .sibling_of = "ip4-input",
.format_buffer = format_ip4_header,
.format_trace = format_ip4_input_trace,
};
/* Tail of ip4_init (collapsed view): registers IPv4 with the HDLC input
   demux and hooks the packet-generator header editor for this node. */
hdlc_register_input_protocol (vm, HDLC_PROTOCOL_ip4, ip4_input_node.index);
{
/* NOTE(review): extern declaration added inside the block scope —
   presumably the registration moved out of this file's view. */
+ extern vlib_node_registration_t ip4_input_no_checksum_node;
pg_node_t *pn;
pn = pg_get_node (ip4_input_node.index);
pn->unformat_edit = unformat_pg_ip4_header;
}
VLIB_INIT_FUNCTION (ip4_init);
-#endif
+
+/* Main-loop-enter hook: sizes the per-thread ARP throttle once the final
+   number of vlib mains (workers) is known; this cannot be done at init
+   time because worker count is settled only before the main loop starts.
+   NOTE(review): 1e-3 is throttle_init()'s time parameter — presumably the
+   suppression interval in seconds; confirm against vnet/util/throttle.h. */
+static clib_error_t *
+ip4_main_loop_enter (vlib_main_t * vm)
+{
+ ip4_main_t *im = &ip4_main;
+ vlib_thread_main_t *tm = &vlib_thread_main;
+ u32 n_vlib_mains = tm->n_vlib_mains;
+
+ throttle_init (&im->arp_throttle, n_vlib_mains, 1e-3);
+
+ return (NULL);
+}
+
+VLIB_MAIN_LOOP_ENTER_FUNCTION (ip4_main_loop_enter);
/*
* fd.io coding-style-patch-verification: ON