+ /* NOTE(review): tail of ip4_local_inline — the signature and the
+  * declarations of from, bufs, nexts, b, next, ip, pt, error,
+  * last_check, error_node and head_of_feature_arc are above this
+  * chunk and not visible here. */
+ /* Grab the vector of buffer indices carried by this frame. */
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ /* Resolve buffer indices to buffer pointers up front. */
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ b = bufs;
+ next = nexts;
+
+ /* Dual-packet loop: handles b[0]/b[1] each iteration while
+  * prefetching two iterations ahead (b[4]/b[5]); the >= 6 bound
+  * keeps those prefetch targets inside the frame. */
+ while (n_left_from >= 6)
+ {
+ u8 not_batch = 0;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+
+ CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ /* Default disposition until a check says otherwise. */
+ error[0] = error[1] = IP4_ERROR_UNKNOWN_PROTOCOL;
+
+ ip[0] = vlib_buffer_get_current (b[0]);
+ ip[1] = vlib_buffer_get_current (b[1]);
+
+ /* Remember where the L3 header sits for downstream nodes. */
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
+ vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data;
+
+ /* Classify each packet and pick its next node; a nonzero pt[]
+  * apparently means the packet may skip the L4 checksum / source
+  * checks below — TODO confirm against ip4_local_classify. */
+ pt[0] = ip4_local_classify (b[0], ip[0], &next[0]);
+ pt[1] = ip4_local_classify (b[1], ip[1], &next[1]);
+
+ /* Nonzero when the two packets classified differently. */
+ not_batch = pt[0] ^ pt[1];
+
+ /* Skip the checks entirely when this node is not at the head of
+  * the feature arc, or when both packets classified as skippable
+  * (pt[0] set and classifications equal implies pt[1] set too). */
+ if (head_of_feature_arc == 0 || (pt[0] && not_batch == 0))
+ goto skip_checks;
+
+ /* Same classification: run the paired (x2) checks; otherwise
+  * fall back to per-packet checks for whichever needs them. */
+ if (PREDICT_TRUE (not_batch == 0))
+ {
+ ip4_local_check_l4_csum_x2 (vm, b, ip, error);
+ ip4_local_check_src_x2 (b, ip, &last_check, error);
+ }
+ else
+ {
+ if (!pt[0])
+ {
+ ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
+ ip4_local_check_src (b[0], ip[0], &last_check, &error[0]);
+ }
+ if (!pt[1])
+ {
+ ip4_local_check_l4_csum (vm, b[1], ip[1], &error[1]);
+ ip4_local_check_src (b[1], ip[1], &last_check, &error[1]);
+ }
+ }
+
+ skip_checks:
+
+ /* Fold the per-packet error into the final next-node choice. */
+ ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
+ head_of_feature_arc);
+ ip4_local_set_next_and_error (error_node, b[1], &next[1], error[1],
+ head_of_feature_arc);
+
+ b += 2;
+ next += 2;
+ n_left_from -= 2;
+ }
+
+ /* Scalar loop: same steps as above, one packet at a time, for the
+  * remaining packets (including those left by the >= 6 bound). */
+ while (n_left_from > 0)
+ {
+ error[0] = IP4_ERROR_UNKNOWN_PROTOCOL;
+
+ ip[0] = vlib_buffer_get_current (b[0]);
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
+ pt[0] = ip4_local_classify (b[0], ip[0], &next[0]);
+
+ if (head_of_feature_arc == 0 || pt[0])
+ goto skip_check;
+
+ ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
+ ip4_local_check_src (b[0], ip[0], &last_check, &error[0]);
+
+ skip_check:
+
+ ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
+ head_of_feature_arc);
+
+ b += 1;
+ next += 1;
+ n_left_from -= 1;
+ }
+
+ /* Hand every buffer to its chosen next node in one call. */
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+ return frame->n_vectors;
+}
+
+/*
+ * ip4-local node entry point (generated by VLIB_NODE_FN).
+ *
+ * Thin dispatch wrapper: forwards the frame to ip4_local_inline with
+ * head_of_feature_arc = 1, i.e. this instantiation runs at the head of
+ * the feature arc and therefore performs the L4 checksum and source
+ * address checks gated on that flag in the inline (see above).
+ */
+VLIB_NODE_FN (ip4_local_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return ip4_local_inline (vm, node, frame, 1 /* head of feature arc */ );
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ip4_local_node) =
+{
+ .name = "ip4-local",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ip4_forward_next_trace,