static_always_inline void
calc_checksums (vlib_main_t * vm, vlib_buffer_t * b)
{
- ip4_header_t *ip4;
- ip6_header_t *ip6;
tcp_header_t *th;
udp_header_t *uh;
int is_ip4 = (b->flags & VNET_BUFFER_F_IS_IP4) != 0;
int is_ip6 = (b->flags & VNET_BUFFER_F_IS_IP6) != 0;
ASSERT (!(is_ip4 && is_ip6));
- ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
- ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
th = (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
uh = (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
if (is_ip4)
{
+ ip4_header_t *ip4;
+
+ ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
ip4->checksum = ip4_header_checksum (ip4);
if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
{
th->checksum = 0;
th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
}
- if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+ else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
uh->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
}
- if (is_ip6)
+ else if (is_ip6)
{
int bogus;
+ ip6_header_t *ip6;
+
+ ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
{
th->checksum = 0;
th->checksum =
ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
}
- if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+ else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
{
uh->checksum = 0;
uh->checksum =
ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
}
vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
tso_init_buf_from_template_base (b0, sb0, default_bflags,
- l4_hdr_sz + first_data_size);
+ l234_sz + first_data_size);
u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
if (total_src_left)
csbi0 = next_bi;
csb0 = vlib_get_buffer (vm, csbi0);
src_left = csb0->current_length;
- src_ptr = csb0->data;
+ src_ptr = vlib_buffer_get_current (csb0);
}
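The src_ptr change above is the substantive fix in this hunk: packet data in a vlib buffer starts current_data bytes into the backing storage, so reading from csb0->data directly would pick up rewind space or headers that have already been consumed. A minimal sketch of the idea, using an illustrative stand-in struct rather than the real vlib_buffer_t:

#include <stdint.h>
#include <assert.h>

typedef struct
{
  int16_t current_data;      /* offset of first byte of packet data */
  uint16_t current_length;   /* bytes of packet data in this buffer */
  uint8_t data[2048];        /* backing storage */
} toy_buffer_t;              /* illustrative, not the real vlib_buffer_t */

static inline void *
toy_buffer_get_current (toy_buffer_t * b)
{
  /* same shape as vlib_buffer_get_current(): data + current_data */
  return b->data + b->current_data;
}

int
main (void)
{
  toy_buffer_t b = { .current_data = 14, .current_length = 100 };
  /* once the ethernet header has been consumed, payload starts at +14;
     reading from b.data alone would miss that offset */
  assert ((uint8_t *) toy_buffer_get_current (&b) == b.data + 14);
  return 0;
}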
else
{
u8 arc = im->output_feature_arc_index;
vnet_interface_per_thread_data_t *ptd =
vec_elt_at_index (im->per_thread_data, thread_index);
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
n_buffers = frame->n_vectors;
vnet_interface_output_trace (vm, node, frame, n_buffers);
from = vlib_frame_vector_args (frame);
+ vlib_get_buffers (vm, from, b, n_buffers);
if (rt->is_deleted)
return vlib_error_drop_buffers (vm, node, from,
while (from + 8 <= from_end && n_left_to_tx >= 4)
{
u32 bi0, bi1, bi2, bi3;
- vlib_buffer_t *b0, *b1, *b2, *b3;
u32 tx_swif0, tx_swif1, tx_swif2, tx_swif3;
u32 or_flags;
/* Prefetch next iteration. */
- vlib_prefetch_buffer_with_index (vm, from[4], LOAD);
- vlib_prefetch_buffer_with_index (vm, from[5], LOAD);
- vlib_prefetch_buffer_with_index (vm, from[6], LOAD);
- vlib_prefetch_buffer_with_index (vm, from[7], LOAD);
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
bi0 = from[0];
bi1 = from[1];
bi2 = from[2];
bi3 = from[3];
to_tx[0] = bi0;
to_tx[1] = bi1;
to_tx[2] = bi2;
to_tx[3] = bi3;
- if (!do_segmentation)
- {
- from += 4;
- to_tx += 4;
- n_left_to_tx -= 4;
- }
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
- b2 = vlib_get_buffer (vm, bi2);
- b3 = vlib_get_buffer (vm, bi3);
+ or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
if (do_segmentation)
{
- or_flags = b0->flags | b1->flags | b2->flags | b3->flags;
-
/* go to single loop if we need TSO segmentation */
if (PREDICT_FALSE (or_flags & VNET_BUFFER_F_GSO))
break;
- from += 4;
- to_tx += 4;
- n_left_to_tx -= 4;
}
+ from += 4;
+ to_tx += 4;
+ n_left_to_tx -= 4;
/* Be grumpy about zero length buffers for benefit of
driver tx function. */
- ASSERT (b0->current_length > 0);
- ASSERT (b1->current_length > 0);
- ASSERT (b2->current_length > 0);
- ASSERT (b3->current_length > 0);
-
- n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
- n_bytes_b1 = vlib_buffer_length_in_chain (vm, b1);
- n_bytes_b2 = vlib_buffer_length_in_chain (vm, b2);
- n_bytes_b3 = vlib_buffer_length_in_chain (vm, b3);
- tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
- tx_swif1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
- tx_swif2 = vnet_buffer (b2)->sw_if_index[VLIB_TX];
- tx_swif3 = vnet_buffer (b3)->sw_if_index[VLIB_TX];
+ ASSERT (b[0]->current_length > 0);
+ ASSERT (b[1]->current_length > 0);
+ ASSERT (b[2]->current_length > 0);
+ ASSERT (b[3]->current_length > 0);
+
+ n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
+ n_bytes_b1 = vlib_buffer_length_in_chain (vm, b[1]);
+ n_bytes_b2 = vlib_buffer_length_in_chain (vm, b[2]);
+ n_bytes_b3 = vlib_buffer_length_in_chain (vm, b[3]);
+ tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
+ tx_swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
+ tx_swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
+ tx_swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];
n_bytes += n_bytes_b0 + n_bytes_b1;
n_bytes += n_bytes_b2 + n_bytes_b3;
if (PREDICT_FALSE (current_config_index != ~0))
{
- vnet_buffer (b0)->feature_arc_index = arc;
- vnet_buffer (b1)->feature_arc_index = arc;
- vnet_buffer (b2)->feature_arc_index = arc;
- vnet_buffer (b3)->feature_arc_index = arc;
- b0->current_config_index = current_config_index;
- b1->current_config_index = current_config_index;
- b2->current_config_index = current_config_index;
- b3->current_config_index = current_config_index;
+ vnet_buffer (b[0])->feature_arc_index = arc;
+ vnet_buffer (b[1])->feature_arc_index = arc;
+ vnet_buffer (b[2])->feature_arc_index = arc;
+ vnet_buffer (b[3])->feature_arc_index = arc;
+ b[0]->current_config_index = current_config_index;
+ b[1]->current_config_index = current_config_index;
+ b[2]->current_config_index = current_config_index;
+ b[3]->current_config_index = current_config_index;
}
/* update vlan subif tx counts, if required */
n_bytes_b3);
}
- if (!do_segmentation)
- or_flags = b0->flags | b1->flags | b2->flags | b3->flags;
-
if (do_tx_offloads)
{
if (or_flags &
(VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
VNET_BUFFER_F_OFFLOAD_IP_CKSUM))
{
- calc_checksums (vm, b0);
- calc_checksums (vm, b1);
- calc_checksums (vm, b2);
- calc_checksums (vm, b3);
+ calc_checksums (vm, b[0]);
+ calc_checksums (vm, b[1]);
+ calc_checksums (vm, b[2]);
+ calc_checksums (vm, b[3]);
}
}
+ b += 4;
+
}
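The loop just closed follows the standard vlib quad/single pattern: process four buffers per iteration while prefetching the headers of the next four, then fall through to the scalar loop below for the tail. A self-contained sketch of that shape, with a hypothetical process_one() standing in for the per-buffer work (vlib_prefetch_buffer_header() boils down to the same __builtin_prefetch idea):

#include <stddef.h>

static void
process_one (int *e)            /* hypothetical per-element work */
{
  *e += 1;
}

static void
process_vector (int **elts, size_t n)
{
  int **e = elts;

  /* need 4 to process plus 4 to prefetch, hence the >= 8 guard */
  while (n >= 8)
    {
      __builtin_prefetch (e[4]);
      __builtin_prefetch (e[5]);
      __builtin_prefetch (e[6]);
      __builtin_prefetch (e[7]);
      process_one (e[0]);
      process_one (e[1]);
      process_one (e[2]);
      process_one (e[3]);
      e += 4;
      n -= 4;
    }
  /* scalar tail, mirroring the single-buffer loop that follows */
  while (n > 0)
    {
      process_one (e[0]);
      e += 1;
      n -= 1;
    }
}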
while (from + 1 <= from_end && n_left_to_tx >= 1)
{
u32 bi0;
- vlib_buffer_t *b0;
u32 tx_swif0;
bi0 = from[0];
to_tx[0] = bi0;
from += 1;
to_tx += 1;
n_left_to_tx -= 1;
- b0 = vlib_get_buffer (vm, bi0);
-
/* Be grumpy about zero length buffers for benefit of
driver tx function. */
- ASSERT (b0->current_length > 0);
+ ASSERT (b[0]->current_length > 0);
- n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);
- tx_swif0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
+ tx_swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
n_bytes += n_bytes_b0;
n_packets += 1;
if (PREDICT_FALSE (current_config_index != ~0))
{
- vnet_buffer (b0)->feature_arc_index = arc;
- b0->current_config_index = current_config_index;
+ vnet_buffer (b[0])->feature_arc_index = arc;
+ b[0]->current_config_index = current_config_index;
}
if (do_segmentation)
{
- if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
+ if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
{
/*
* Undo the enqueue of the b0 - it is not going anywhere,
* and will be freed either after it's segmented or
* when dropped, if there are no buffers to segment into.
*/
u32 n_tx_bytes = 0;
n_tx_bytes =
- tso_segment_buffer (vm, ptd, do_tx_offloads, bi0, b0,
+ tso_segment_buffer (vm, ptd, do_tx_offloads, bi0, b[0],
n_bytes_b0);
if (PREDICT_FALSE (n_tx_bytes == 0))
{
drop_one_buffer_and_count (vm, vnm, node, from - 1,
VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
+ b += 1;
continue;
}
vlib_get_new_next_frame (vm, node, next_index,
to_tx, n_left_to_tx);
}
- else
+ while (n_tx_bufs > 0)
{
- while (n_tx_bufs > 0)
- {
- to_tx[0] = from_tx_seg[0];
- to_tx += 1;
- from_tx_seg += 1;
- n_left_to_tx -= 1;
- n_tx_bufs -= 1;
- n_packets += 1;
- }
+ to_tx[0] = from_tx_seg[0];
+ to_tx += 1;
+ from_tx_seg += 1;
+ n_left_to_tx -= 1;
+ n_tx_bufs -= 1;
+ n_packets += 1;
}
}
n_bytes += n_tx_bytes;
_vec_len (ptd->split_buffers) = 0;
/* Free the now segmented buffer */
vlib_buffer_free_one (vm, bi0);
+ b += 1;
continue;
}
}
}
if (do_tx_offloads)
- calc_checksums (vm, b0);
+ calc_checksums (vm, b[0]);
+
+ b += 1;
}
vlib_put_next_frame (vm, node, next_index, n_left_to_tx);
vnet_hw_interface_t * hi,
int do_tx_offloads)
{
- vnet_interface_pcap_tx_trace (vm, node, frame,
- 0 /* sw_if_index_from_buffer */ );
-
/*
* The 3-headed "if" is here because we want to err on the side
* of not impacting the non-GSO performance - so for the more
* common case of no GSO interfaces we want to prevent the
* segmentation codepath from being there altogether.
*/
return frame->n_vectors;
}
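The "3-headed if" the comment refers to relies on compile-time specialization: the output path is one always-inline worker that takes the feature flags as constant parameters, and each thin wrapper pins them so the compiler deletes the dead branches per instantiation. A sketch of the trick with illustrative names (the real code uses static_always_inline and the node dispatch machinery):

static inline int
output_inline (int n, int do_tx_offloads, int do_segmentation)
{
  int work = 0;
  for (int i = 0; i < n; i++)
    {
      if (do_segmentation)      /* constant-folded away per wrapper */
        work += 2;
      if (do_tx_offloads)
        work += 1;
    }
  return work;
}

/* each wrapper pins the flags, so three lean bodies are emitted and the
   non-GSO paths never pay for the GSO branch */
int output_fast (int n)  { return output_inline (n, 0, 0); }
int output_cksum (int n) { return output_inline (n, 1, 0); }
int output_gso (int n)   { return output_inline (n, 1, 1); }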
-always_inline u32
-counter_index (vlib_main_t * vm, vlib_error_t e)
+typedef struct vnet_error_trace_t_
{
- vlib_node_t *n;
- u32 ci, ni;
-
- ni = vlib_error_get_node (e);
- n = vlib_get_node (vm, ni);
-
- ci = vlib_error_get_code (e);
- ASSERT (ci < n->n_errors);
-
- ci += n->error_heap_index;
+ u32 sw_if_index;
+} vnet_error_trace_t;
- return ci;
-}
static u8 *
format_vnet_error_trace (u8 * s, va_list * va)
{
- vlib_main_t *vm = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
- vlib_error_t *e = va_arg (*va, vlib_error_t *);
- vlib_node_t *error_node;
- vlib_error_main_t *em = &vm->error_main;
- u32 i;
+ vnet_error_trace_t *t = va_arg (*va, vnet_error_trace_t *);
- error_node = vlib_get_node (vm, vlib_error_get_node (e[0]));
- i = counter_index (vm, e[0]);
- s = format (s, "%v: %s", error_node->name, em->error_strings_heap[i]);
+ s = format (s, "rx:%U", format_vnet_sw_if_index_name,
+ vnet_get_main (), t->sw_if_index);
return s;
}
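The trace rework above replaces the raw vlib_error_t record with a one-field struct carrying the RX interface, which the format callback renders on demand. A toy version of that record-then-format pairing, with hypothetical names in place of vlib_add_trace() and the CLIB format machinery:

#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t sw_if_index;         /* mirrors vnet_error_trace_t */
} toy_trace_t;

static int
format_toy_trace (char *s, int n, toy_trace_t * t)
{
  /* the real formatter resolves the index to a name via
     format_vnet_sw_if_index_name; here we just print the number */
  return snprintf (s, n, "rx:sw_if_index %u", (unsigned) t->sw_if_index);
}

int
main (void)
{
  toy_trace_t t = { .sw_if_index = 3 };
  char buf[64];

  format_toy_trace (buf, sizeof (buf), &t);
  puts (buf);                   /* prints "rx:sw_if_index 3" */
  return 0;
}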
static void
-trace_errors_with_buffers (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+interface_trace_buffers (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
u32 n_left, *buffers;
buffers = vlib_frame_vector_args (frame);
n_left = frame->n_vectors;
while (n_left >= 4)
{
u32 bi0, bi1;
vlib_buffer_t *b0, *b1;
- vlib_error_t *t0, *t1;
+ vnet_error_trace_t *t0, *t1;
/* Prefetch next iteration. */
vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
bi0 = buffers[0];
bi1 = buffers[1];
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
- t0[0] = b0->error;
+ t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
}
if (b1->flags & VLIB_BUFFER_IS_TRACED)
{
t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
- t1[0] = b1->error;
+ t1->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
}
buffers += 2;
n_left -= 2;
}
while (n_left >= 1)
{
u32 bi0;
vlib_buffer_t *b0;
- vlib_error_t *t0;
+ vnet_error_trace_t *t0;
bi0 = buffers[0];
b0 = vlib_get_buffer (vm, bi0);
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
- t0[0] = b0->error;
+ t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
}
buffers += 1;
n_left -= 1;
}
}
-static u8 *
-validate_error (vlib_main_t * vm, vlib_error_t * e, u32 index)
-{
- uword node_index = vlib_error_get_node (e[0]);
- uword code = vlib_error_get_code (e[0]);
- vlib_node_t *n;
-
- if (node_index >= vec_len (vm->node_main.nodes))
- return format (0, "[%d], node index out of range 0x%x, error 0x%x",
- index, node_index, e[0]);
-
- n = vlib_get_node (vm, node_index);
- if (code >= n->n_errors)
- return format (0, "[%d], code %d out of range for node %v",
- index, code, n->name);
-
- return 0;
-}
-
-static u8 *
-validate_error_frame (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * f)
-{
- u32 *buffers = vlib_frame_vector_args (f);
- vlib_buffer_t *b;
- u8 *msg = 0;
- uword i;
-
- for (i = 0; i < f->n_vectors; i++)
- {
- b = vlib_get_buffer (vm, buffers[i]);
- msg = validate_error (vm, &b->error, i);
- if (msg)
- return msg;
- }
-
- return msg;
-}
-
typedef enum
{
VNET_ERROR_DISPOSITION_DROP,
VNET_ERROR_N_DISPOSITION,
} vnet_error_disposition_t;
-always_inline void
-do_packet (vlib_main_t * vm, vlib_error_t a)
-{
- vlib_error_main_t *em = &vm->error_main;
- u32 i = counter_index (vm, a);
- em->counters[i] += 1;
- vlib_error_elog_count (vm, i, 1);
-}
-
static_always_inline uword
-process_drop_punt (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, vnet_error_disposition_t disposition)
+interface_drop_punt (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ vnet_error_disposition_t disposition)
{
- vnet_main_t *vnm = vnet_get_main ();
- vlib_error_main_t *em = &vm->error_main;
- u32 *buffers, *first_buffer;
- vlib_error_t current_error;
- u32 current_counter_index, n_errors_left;
- u32 current_sw_if_index, n_errors_current_sw_if_index;
- u64 current_counter;
+ u32 *from, n_left, thread_index, *sw_if_index;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ u32 sw_if_indices[VLIB_FRAME_SIZE];
vlib_simple_counter_main_t *cm;
- u32 thread_index = vm->thread_index;
-
- static vlib_error_t memory[VNET_ERROR_N_DISPOSITION];
- static char memory_init[VNET_ERROR_N_DISPOSITION];
-
- buffers = vlib_frame_vector_args (frame);
- first_buffer = buffers;
-
- {
- vlib_buffer_t *b = vlib_get_buffer (vm, first_buffer[0]);
+ u16 nexts[VLIB_FRAME_SIZE];
+ vnet_main_t *vnm;
- if (!memory_init[disposition])
- {
- memory_init[disposition] = 1;
- memory[disposition] = b->error;
- }
-
- current_sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
- n_errors_current_sw_if_index = 0;
- }
+ vnm = vnet_get_main ();
+ thread_index = vm->thread_index;
+ from = vlib_frame_vector_args (frame);
+ n_left = frame->n_vectors;
+ b = bufs;
+ sw_if_index = sw_if_indices;
- current_error = memory[disposition];
- current_counter_index = counter_index (vm, memory[disposition]);
- current_counter = em->counters[current_counter_index];
+ vlib_get_buffers (vm, from, bufs, n_left);
if (node->flags & VLIB_NODE_FLAG_TRACE)
- trace_errors_with_buffers (vm, node, frame);
+ interface_trace_buffers (vm, node, frame);
+
+ /* All going to drop regardless, this is just a counting exercise */
+ clib_memset (nexts, 0, sizeof (nexts));
- n_errors_left = frame->n_vectors;
cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
(disposition == VNET_ERROR_DISPOSITION_PUNT
? VNET_INTERFACE_COUNTER_PUNT
: VNET_INTERFACE_COUNTER_DROP));
- while (n_errors_left >= 2)
+ /* collect the array of interfaces first ... */
+ while (n_left >= 4)
{
- vlib_buffer_t *b0, *b1;
- vnet_sw_interface_t *sw_if0, *sw_if1;
- vlib_error_t e0, e1;
- u32 bi0, bi1;
- u32 sw_if_index0, sw_if_index1;
-
- bi0 = buffers[0];
- bi1 = buffers[1];
-
- buffers += 2;
- n_errors_left -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- e0 = b0->error;
- e1 = b1->error;
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
-
- /* Speculate that sw_if_index == sw_if_index[01]. */
- n_errors_current_sw_if_index += 2;
-
- /* Speculatively assume all 2 (node, code) pairs are equal
- to current (node, code). */
- current_counter += 2;
-
- if (PREDICT_FALSE (e0 != current_error
- || e1 != current_error
- || sw_if_index0 != current_sw_if_index
- || sw_if_index1 != current_sw_if_index))
+ if (n_left >= 12)
{
- current_counter -= 2;
- n_errors_current_sw_if_index -= 2;
-
- vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1);
- vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1);
-
- /* Increment super-interface drop/punt counters for
- sub-interfaces. */
- sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
- vlib_increment_simple_counter
- (cm, thread_index, sw_if0->sup_sw_if_index,
- sw_if0->sup_sw_if_index != sw_if_index0);
-
- sw_if1 = vnet_get_sw_interface (vnm, sw_if_index1);
- vlib_increment_simple_counter
- (cm, thread_index, sw_if1->sup_sw_if_index,
- sw_if1->sup_sw_if_index != sw_if_index1);
-
- em->counters[current_counter_index] = current_counter;
- do_packet (vm, e0);
- do_packet (vm, e1);
-
- /* For 2 repeated errors, change current error. */
- if (e0 == e1 && e1 != current_error)
- {
- current_error = e0;
- current_counter_index = counter_index (vm, e0);
- }
- current_counter = em->counters[current_counter_index];
+ /* Prefetch 8 ahead - there's not much going on in each iteration */
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
}
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+ sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+ sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
+
+ sw_if_index += 4;
+ n_left -= 4;
+ b += 4;
}
-
- while (n_errors_left >= 1)
+ while (n_left)
{
- vlib_buffer_t *b0;
- vnet_sw_interface_t *sw_if0;
- vlib_error_t e0;
- u32 bi0, sw_if_index0;
-
- bi0 = buffers[0];
-
- buffers += 1;
- n_errors_left -= 1;
- current_counter += 1;
-
- b0 = vlib_get_buffer (vm, bi0);
- e0 = b0->error;
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-
- /* Increment drop/punt counters. */
- vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1);
-
- /* Increment super-interface drop/punt counters for sub-interfaces. */
- sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0);
- vlib_increment_simple_counter (cm, thread_index,
- sw_if0->sup_sw_if_index,
- sw_if0->sup_sw_if_index != sw_if_index0);
-
- if (PREDICT_FALSE (e0 != current_error))
- {
- current_counter -= 1;
-
- vlib_error_elog_count (vm, current_counter_index,
- (current_counter
- - em->counters[current_counter_index]));
-
- em->counters[current_counter_index] = current_counter;
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
- do_packet (vm, e0);
- current_error = e0;
- current_counter_index = counter_index (vm, e0);
- current_counter = em->counters[current_counter_index];
- }
+ sw_if_index += 1;
+ n_left -= 1;
+ b += 1;
}
- if (n_errors_current_sw_if_index > 0)
- {
- vnet_sw_interface_t *si;
-
- vlib_increment_simple_counter (cm, thread_index, current_sw_if_index,
- n_errors_current_sw_if_index);
+ /* ... then count against them in blocks */
+ n_left = frame->n_vectors;
- si = vnet_get_sw_interface (vnm, current_sw_if_index);
- if (si->sup_sw_if_index != current_sw_if_index)
- vlib_increment_simple_counter (cm, thread_index, si->sup_sw_if_index,
- n_errors_current_sw_if_index);
- }
+ while (n_left)
+ {
+ vnet_sw_interface_t *sw_if0;
+ u16 off, count;
- vlib_error_elog_count (vm, current_counter_index,
- (current_counter
- - em->counters[current_counter_index]));
+ off = frame->n_vectors - n_left;
- /* Return cached counter. */
- em->counters[current_counter_index] = current_counter;
+ sw_if_index = sw_if_indices + off;
- /* Save memory for next iteration. */
- memory[disposition] = current_error;
+ count = clib_count_equal_u32 (sw_if_index, n_left);
+ n_left -= count;
- if (disposition == VNET_ERROR_DISPOSITION_DROP || !vm->os_punt_frame)
- {
- vlib_buffer_free (vm, first_buffer, frame->n_vectors);
+ vlib_increment_simple_counter (cm, thread_index, sw_if_index[0], count);
- /* If there is no punt function, free the frame as well. */
- if (disposition == VNET_ERROR_DISPOSITION_PUNT && !vm->os_punt_frame)
- vlib_frame_free (vm, node, frame);
+ /* Increment super-interface drop/punt counters for
+ sub-interfaces. */
+ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index[0]);
+ if (sw_if0->sup_sw_if_index != sw_if_index[0])
+ vlib_increment_simple_counter
+ (cm, thread_index, sw_if0->sup_sw_if_index, count);
}
- else
- vm->os_punt_frame (vm, node, frame);
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
}
#endif /* CLIB_MARCH_VARIANT */
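interface_drop_punt above now counts in two passes: gather every packet's RX interface into a flat array, then walk the array run by run so a burst from one interface costs a single counter bump instead of one per packet. A sketch of that second pass, assuming a scalar count_equal_u32() in place of the real (vectorized) clib_count_equal_u32():

#include <stdint.h>
#include <stdio.h>

static uint32_t
count_equal_u32 (uint32_t * data, uint32_t max)
{
  /* length of the leading run of elements equal to data[0], at most max */
  uint32_t count = 1;
  while (count < max && data[count] == data[0])
    count++;
  return count;
}

int
main (void)
{
  uint32_t sw_if_indices[] = { 1, 1, 1, 2, 2, 7 };
  uint32_t n_vectors = 6, n_left = n_vectors;

  while (n_left)
    {
      uint32_t off = n_vectors - n_left;
      uint32_t count = count_equal_u32 (sw_if_indices + off, n_left);
      /* one vlib_increment_simple_counter()-style update per run */
      printf ("if %u: +%u packets\n", (unsigned) sw_if_indices[off],
              (unsigned) count);
      n_left -= count;
    }
  return 0;
}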
-VLIB_NODE_FN (drop_buffers) (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (interface_drop) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
vnet_interface_main_t *im = &vnet_get_main ()->interface_main;
if (PREDICT_FALSE (im->drop_pcap_enable))
pcap_drop_trace (vm, im, frame);
- return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
+ return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_DROP);
}
-VLIB_NODE_FN (punt_buffers) (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (interface_punt) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
- return process_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
+ return interface_drop_punt (vm, node, frame, VNET_ERROR_DISPOSITION_PUNT);
}
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (drop_buffers) = {
+VLIB_REGISTER_NODE (interface_drop) = {
.name = "error-drop",
- .flags = VLIB_NODE_FLAG_IS_DROP,
.vector_size = sizeof (u32),
.format_trace = format_vnet_error_trace,
- .validate_frame = validate_error_frame,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "drop",
+ },
};
/* *INDENT-ON* */
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (punt_buffers) = {
- .flags = (VLIB_NODE_FLAG_FRAME_NO_FREE_AFTER_DISPATCH
- | VLIB_NODE_FLAG_IS_PUNT),
+VLIB_REGISTER_NODE (interface_punt) = {
.name = "error-punt",
.vector_size = sizeof (u32),
.format_trace = format_vnet_error_trace,
- .validate_frame = validate_error_frame,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "punt",
+ },
};
/* *INDENT-ON* */
-/* Convenience node to drop a vector of buffers with a "misc error". */
-static uword
-misc_drop_buffers (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
-{
- return vlib_error_drop_buffers (vm, node, vlib_frame_vector_args (frame),
- /* buffer stride */ 1,
- frame->n_vectors,
- /* next */ 0,
- node->node_index,
- /* error */ 0);
-}
-
-static char *misc_drop_buffers_error_strings[] = {
- [0] = "misc. errors",
-};
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (misc_drop_buffers_node,static) = {
- .function = misc_drop_buffers,
- .name = "misc-drop-buffers",
- .vector_size = sizeof (u32),
- .n_errors = 1,
- .n_next_nodes = 1,
- .next_nodes = {
- "error-drop",
- },
- .error_strings = misc_drop_buffers_error_strings,
-};
-/* *INDENT-ON* */
-
static uword
interface_tx_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * from_frame)