rxq = vec_elt_at_index (ad->rxqs, qid);
rxq->size = rxq_size;
rxq->next = 0;
- rxq->descs = vlib_physmem_alloc_aligned (vm, rxq->size *
- sizeof (avf_rx_desc_t),
- 2 * CLIB_CACHE_LINE_BYTES);
+ /* allocate the RX descriptor ring from physmem on the adapter's NUMA
+    node so the NIC's DMA descriptors stay local to the device */
+ rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
+ sizeof (avf_rx_desc_t),
+ 2 * CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+
if (rxq->descs == 0)
return vlib_physmem_last_error (vm);
txq = vec_elt_at_index (ad->txqs, qid);
txq->size = txq_size;
txq->next = 0;
- txq->descs = vlib_physmem_alloc_aligned (vm, txq->size *
- sizeof (avf_tx_desc_t),
- 2 * CLIB_CACHE_LINE_BYTES);
+ /* same NUMA-local allocation policy as the RX ring above */
+ txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
+ sizeof (avf_tx_desc_t),
+ 2 * CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
if (txq->descs == 0)
return vlib_physmem_last_error (vm);
return;
}
ad->pci_dev_handle = h;
+ /* record the device's NUMA node up front so all subsequent queue and
+    mailbox DMA memory can be allocated locally to the adapter */
+ ad->numa_node = vlib_pci_get_numa_node (vm, h);
vlib_pci_set_private_data (vm, h, ad->dev_instance);
if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
goto error;
- if (!(ad->atq = vlib_physmem_alloc (vm, sizeof (avf_aq_desc_t) *
- AVF_MBOX_LEN)))
+ /* admin transmit queue ring: NUMA-local, cache-line-aligned physmem */
+ ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->atq == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
}
if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
goto error;
- if (!(ad->arq = vlib_physmem_alloc (vm, sizeof (avf_aq_desc_t) *
- AVF_MBOX_LEN)))
+ /* admin receive queue ring: NUMA-local, cache-line-aligned physmem */
+ ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->arq == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
}
if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
goto error;
- if (!(ad->atq_bufs = vlib_physmem_alloc (vm, AVF_MBOX_BUF_SZ *
- AVF_MBOX_LEN)))
+ /* buffers backing the admin transmit queue: NUMA-local physmem */
+ ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->atq_bufs == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
}
if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
goto error;
- if (!(ad->arq_bufs = vlib_physmem_alloc (vm, AVF_MBOX_BUF_SZ *
- AVF_MBOX_LEN)))
+ /* buffers backing the admin receive queue: NUMA-local physmem */
+ ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->arq_bufs == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
}
linux_pci_device_type_t type;
vlib_pci_dev_handle_t handle;
vlib_pci_addr_t addr;
+ u32 numa_node; /* NUMA node of the device, copied from device info at registration */
/* Resource file descriptors. */
linux_pci_region_t *regions;
return &d->addr;
}
+/* Return the NUMA node recorded for the PCI device identified by handle
+   h.  vm is accepted for API symmetry with the other vlib_pci_* calls
+   but is not used here. */
+u32
+vlib_pci_get_numa_node (vlib_main_t * vm, vlib_pci_dev_handle_t h)
+{
+ linux_pci_device_t *d = linux_pci_get_device (h);
+ return d->numa_node;
+}
+
/* Call to allocate/initialize the pci subsystem.
This is not an init function so that users can explicitly enable
pci only when it's needed. */
/* Call to allocate/initialize the pci subsystem.
This is not an init function so that users can explicitly enable
pci only when it's needed. */
p->handle = p - lpm->linux_pci_devices;
p->addr.as_u32 = di->addr.as_u32;
p->intx_irq.fd = -1;
+ p->numa_node = di->numa_node;
/*
* pci io bar read/write fd
*/
return clib_pmalloc_alloc_aligned (pm, n_bytes, alignment);
}
+/* Allocate n_bytes of physical memory aligned to `alignment`, preferring
+   pages located on `numa_node`.  Thin wrapper over
+   clib_pmalloc_alloc_aligned_on_numa; returns 0 on failure, in which
+   case callers retrieve the error via vlib_physmem_last_error. */
+always_inline void *
+vlib_physmem_alloc_aligned_on_numa (vlib_main_t * vm, uword n_bytes,
+ uword alignment, u32 numa_node)
+{
+ clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
+ return clib_pmalloc_alloc_aligned_on_numa (pm, n_bytes, alignment,
+ numa_node);
+}
+
/* By default allocate I/O memory with cache line alignment. */
always_inline void *
vlib_physmem_alloc (vlib_main_t * vm, uword n_bytes)