desc_data[i] = r->shm + offset;
if (len > max_len)
max_len = len;
- xor_status |= desc_status[i].as_u8;
n_rx_bytes += len;
}
+ xor_status |= desc_status[i].as_u8;
}
ptd->max_desc_len = max_len;
u32 n_buffers = 0;
u32 n_left = ptd->n_packets;
u32 packet_len;
- int i = 0;
+ int i = -1;
+ int bad_packets = 0;
/* construct copy and packet vector out of ring slots */
while (n_left)
dst_off = start_offset;
next_slot:
+ i++; /* next descriptor */
n_bytes_left = desc_len[i];
packet_len += n_bytes_left;
mb0 = desc_data[i];
if (PREDICT_FALSE (desc_status[i].err))
- vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC, 1);
+ {
+ vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC,
+ 1);
+ bad_packets++;
+ ASSERT (n_buffers > 0);
+ n_buffers--;
+ goto next_packet;
+ }
else
do
{
}
while (PREDICT_FALSE (n_bytes_left));
- /* next descriptor */
- i++;
-
if (desc_status[i].next)
{
src_off = 0;
po->packet_len = packet_len;
po++;
+ next_packet:
/* next packet */
n_left--;
}
+ ASSERT (ptd->n_packets >= bad_packets);
+ ptd->n_packets -= bad_packets;
return n_buffers;
}
static_always_inline void
}
}
/*
 * Release the descriptor slots consumed so far back to the shared ring.
 *
 * For an S2M (slave-to-master) ring this side is the consumer: publish
 * cur_slot as the new ring tail with a release store (so the producer
 * observes all prior reads of the freed slots as complete), and record
 * the consume position in mq->last_head.
 *
 * For an M2S ring only the local mq->last_tail cursor is advanced here;
 * NOTE(review): no atomic tail store happens in this branch -- presumably
 * the shared ring field is updated on the refill path instead; confirm
 * against the surrounding driver code.
 */
+static_always_inline void
+memif_advance_ring (memif_ring_type_t type, memif_queue_t *mq,
+		    memif_ring_t *ring, u16 cur_slot)
+{
+  if (type == MEMIF_RING_S2M)
+    {
+      __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
+      mq->last_head = cur_slot;
+    }
+  else
+    {
+      mq->last_tail = cur_slot;
+    }
+}
+
static_always_inline uword
memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
memif_if_t *mif, memif_ring_type_t type, u16 qid,
else
n_buffers = memif_process_desc (vm, node, ptd, mif);
+ if (PREDICT_FALSE (n_buffers == 0))
+ {
+ /* All descriptors are bad. Release slots in the ring and bail */
+ memif_advance_ring (type, mq, ring, cur_slot);
+ goto refill;
+ }
+
/* allocate free buffers */
vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
vlib_get_buffers (vm, ptd->buffers, buffer_ptrs, n_buffers);
- for (i = 0; i < n_pkts - 8; i++)
+ for (i = 0; i + 8 < n_pkts; i++)
{
clib_prefetch_load (desc_data[i + 8]);
clib_prefetch_store (buffer_ptrs[i + 8]->data);
u32 n_pkts = vec_len (ptd->copy_ops);
co = ptd->copy_ops;
- for (i = 0; i < n_pkts - 8; i++)
+ for (i = 0; i + 8 < n_pkts; i++)
{
clib_prefetch_load (co[i + 8].data);
b = vlib_get_buffer (vm, ptd->buffers[co[i].buffer_vec_index]);
}
/* release slots from the ring */
- if (type == MEMIF_RING_S2M)
- {
- __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
- mq->last_head = cur_slot;
- }
- else
- {
- mq->last_tail = cur_slot;
- }
+ memif_advance_ring (type, mq, ring, cur_slot);
/* prepare buffer template and next indices */
vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] = mif->sw_if_index;