#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
+#include <vnet/interface/tx_queue_funcs.h>
#include <avf/avf.h>
{
clib_error_t *err;
avf_txq_t *txq;
+ u16 n;
u8 bpi = vlib_buffer_pool_get_default_for_numa (vm,
ad->numa_node);
{
qid = qid % ad->num_queue_pairs;
txq = vec_elt_at_index (ad->txqs, qid);
- if (txq->lock == 0)
- clib_spinlock_init (&txq->lock);
+ clib_spinlock_init (&txq->lock);
ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
return 0;
}
txq->size = txq_size;
txq->next = 0;
- /* Prepare a placeholder buffer to maintain a 1-1
- relationship between bufs and descs when a context
- descriptor is added in descs */
- if (!vlib_buffer_alloc_from_pool
- (vm, &txq->ctx_desc_placeholder_bi, 1, bpi))
+ /* Prepare placeholder buffers to maintain a 1-1 relationship between
+ * bufs and descs when a context descriptor is added in descs. In the worst
+ * case every second descriptor is a context descriptor, and because
+ * b->ref_count is a u8 we need one placeholder for every block of 510
+ * descriptors */
+
+ n = (txq->size / 510) + 1;
+ vec_validate_aligned (txq->ph_bufs, n, CLIB_CACHE_LINE_BYTES);
+
+ if (!vlib_buffer_alloc_from_pool (vm, txq->ph_bufs, n, bpi))
return clib_error_return (0, "buffer allocation error");
txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
txq->n_enqueued);
}
/* Free the placeholder buffers */
- vlib_buffer_free_one(vm, txq->ctx_desc_placeholder_bi);
+ vlib_buffer_free (vm, txq->ph_bufs, vec_len (txq->ph_bufs));
+ vec_free (txq->ph_bufs);
vec_free (txq->bufs);
clib_ring_free (txq->rs_slots);
vec_free (txq->tmp_bufs);
}
ad->rxqs[i].queue_index = qi;
}
+
+ for (i = 0; i < ad->n_tx_queues; i++)
+ {
+ u32 qi = vnet_hw_if_register_tx_queue (vnm, ad->hw_if_index, i);
+ vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
+ ad->txqs[i].queue_index = qi;
+ }
+
vnet_hw_if_update_runtime_data (vnm, ad->hw_if_index);
if (pool_elts (am->devices) == 1)