*/
#include <vlib/vlib.h>
+#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#define AVF_MBOX_BUF_SZ 512
#define AVF_RXQ_SZ 512
#define AVF_TXQ_SZ 512
-#define AVF_ITR_INT 8160
+#define AVF_ITR_INT 32
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_AVF 0x1889
{0},
};
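+/* string tables generated from the virtchnl macros, used by the debug
+   logging below to print event names and link speeds */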
+const static char *virtchnl_event_names[] = {
+#define _(v, n) [v] = #n,
+ foreach_virtchnl_event_code
+#undef _
+};
+
+const static char *virtchnl_link_speed_str[] = {
+#define _(v, n, s) [v] = s,
+ foreach_virtchnl_link_speed
+#undef _
+};
+
static inline void
avf_irq_0_disable (avf_device_t * ad)
{
avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
void *data, int len)
{
- avf_main_t *am = &avf_main;
clib_error_t *err = 0;
avf_aq_desc_t *d, dc;
- int n_retry = 5;
+ f64 t0, suspend_time = AVF_AQ_ENQ_SUSPEND_TIME;
d = &ad->atq[ad->atq_next_slot];
clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
CLIB_MEMORY_BARRIER ();
- vlib_log_debug (am->log_class, "%U", format_hexdump, data, len);
ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
avf_reg_flush (ad);
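+ /* poll for the DD/CMP completion flags with exponential backoff: start at
+    AVF_AQ_ENQ_SUSPEND_TIME and double the interval until
+    AVF_AQ_ENQ_MAX_WAIT_TIME elapses */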
+ t0 = vlib_time_now (vm);
retry:
- vlib_process_suspend (vm, 10e-6);
+ vlib_process_suspend (vm, suspend_time);
if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
{
- if (--n_retry == 0)
+ f64 t = vlib_time_now (vm) - t0;
+ if (t > AVF_AQ_ENQ_MAX_WAIT_TIME)
{
+ avf_log_err (ad, "aq_desc_enq failed (timeout %.3fs)", t);
err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
d->opcode);
goto done;
}
+ suspend_time *= 2;
goto retry;
}
rxq = vec_elt_at_index (ad->rxqs, qid);
rxq->size = rxq_size;
rxq->next = 0;
- rxq->descs = vlib_physmem_alloc_aligned (vm, rxq->size *
- sizeof (avf_rx_desc_t),
- 2 * CLIB_CACHE_LINE_BYTES);
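+ /* allocate the descriptor ring on the device's NUMA node and refill it
+    from that node's default buffer pool */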
+ rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
+ sizeof (avf_rx_desc_t),
+ 2 * CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+
+ rxq->buffer_pool_index =
+ vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
+
if (rxq->descs == 0)
return vlib_physmem_last_error (vm);
vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
- n_alloc = vlib_buffer_alloc (vm, rxq->bufs, rxq->size - 8);
+ n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
+ rxq->buffer_pool_index);
if (n_alloc == 0)
return clib_error_return (0, "buffer allocation error");
txq = vec_elt_at_index (ad->txqs, qid);
txq->size = txq_size;
txq->next = 0;
- txq->descs = vlib_physmem_alloc_aligned (vm, txq->size *
- sizeof (avf_tx_desc_t),
- 2 * CLIB_CACHE_LINE_BYTES);
+ txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
+ sizeof (avf_tx_desc_t),
+ 2 * CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
if (txq->descs == 0)
return vlib_physmem_last_error (vm);
vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
+ /* initialize ring of pending RS slots */
+ clib_ring_new_aligned (txq->rs_slots, 32, CLIB_CACHE_LINE_BYTES);
+
ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
return 0;
}
clib_error_t *err;
avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
u32 head;
- int n_retry = 5;
-
+ f64 t0, suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
- /* supppres interrupt in the next adminq receive slot
+ /* suppress interrupt in the next adminq receive slot
as we are going to wait for response
we only need interrupts when event is received */
d = &ad->arq[ad->arq_next_slot];
if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
return err;
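+ /* poll the admin receive queue head with exponential backoff until the PF
+    response (or an interleaved event) arrives */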
+ t0 = vlib_time_now (vm);
retry:
head = avf_get_u32 (ad->bar0, AVF_ARQH);
if (ad->arq_next_slot == head)
{
- if (--n_retry == 0)
- return clib_error_return (0, "timeout");
- vlib_process_suspend (vm, 10e-3);
+ f64 t = vlib_time_now (vm) - t0;
+ if (t > AVF_SEND_TO_PF_MAX_WAIT_TIME)
+ {
+ avf_log_err (ad, "send_to_pf failed (timeout %.3fs)", t);
+ return clib_error_return (0, "timeout");
+ }
+ vlib_process_suspend (vm, suspend_time);
+ suspend_time *= 2;
goto retry;
}
clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
avf_arq_slot_init (ad, ad->arq_next_slot);
ad->arq_next_slot++;
- n_retry = 5;
+ /* reset timer */
+ t0 = vlib_time_now (vm);
+ suspend_time = AVF_SEND_TO_PF_SUSPEND_TIME;
goto retry;
}
.minor = VIRTCHNL_VERSION_MINOR,
};
+ avf_log_debug (ad, "version: major %u minor %u", myver.major, myver.minor);
+
err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
sizeof (virtchnl_version_info_t), ver,
sizeof (virtchnl_version_info_t));
avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
virtchnl_vf_resource_t * res)
{
+ clib_error_t *err = 0;
u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
VIRTCHNL_VF_OFFLOAD_RX_POLLING);
- return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
- sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
+ avf_log_debug (ad, "get_vf_reqources: bitmap 0x%x", bitmap);
+ err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
+ sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
+
+ if (err == 0)
+ {
+ int i;
+ avf_log_debug (ad, "get_vf_reqources: num_vsis %u num_queue_pairs %u "
+ "max_vectors %u max_mtu %u vf_offload_flags 0x%04x "
+ "rss_key_size %u rss_lut_size %u",
+ res->num_vsis, res->num_queue_pairs, res->max_vectors,
+ res->max_mtu, res->vf_offload_flags, res->rss_key_size,
+ res->rss_lut_size);
+ for (i = 0; i < res->num_vsis; i++)
+ avf_log_debug (ad, "get_vf_reqources_vsi[%u]: vsi_id %u "
+ "num_queue_pairs %u vsi_type %u qset_handle %u "
+ "default_mac_addr %U", i,
+ res->vsi_res[i].vsi_id,
+ res->vsi_res[i].num_queue_pairs,
+ res->vsi_res[i].vsi_type,
+ res->vsi_res[i].qset_handle,
+ format_ethernet_address,
+ res->vsi_res[i].default_mac_addr);
+ }
+
+ return err;
}
clib_error_t *
for (i = 0; i < ad->rss_lut_size; i++)
rl->lut[i] = i % ad->n_rx_queues;
+ avf_log_debug (ad, "config_rss_lut: vsi_id %u rss_lut_size %u lut 0x%U",
+ rl->vsi_id, rl->lut_entries, format_hex_bytes_no_wrap,
+ rl->lut, rl->lut_entries);
+
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
0);
}
for (i = 0; i < ad->rss_key_size; i++)
rk->key[i] = (u8) random_u32 (&seed);
+ avf_log_debug (ad, "config_rss_key: vsi_id %u rss_key_size %u key 0x%U",
+ rk->vsi_id, rk->key_len, format_hex_bytes_no_wrap, rk->key,
+ rk->key_len);
+
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
0);
}
clib_error_t *
avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
{
+ avf_log_debug (ad, "disable_vlan_stripping");
+
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
0);
}
virtchnl_promisc_info_t pi = { 0 };
pi.vsi_id = ad->vsi_id;
- pi.flags = 1;
+ pi.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
+
+ avf_log_debug (ad, "config_promisc_mode: unicast %s multicast %s",
+ pi.flags & FLAG_VF_UNICAST_PROMISC ? "on" : "off",
+ pi.flags & FLAG_VF_MULTICAST_PROMISC ? "on" : "off");
+
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
sizeof (virtchnl_promisc_info_t), 0, 0);
}
ci->vsi_id = ad->vsi_id;
ci->num_queue_pairs = n_qp;
+ avf_log_debug (ad, "config_vsi_queues: vsi_id %u num_queue_pairs %u",
+ ad->vsi_id, ci->num_queue_pairs);
+
for (i = 0; i < n_qp; i++)
{
virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
rxq->vsi_id = ad->vsi_id;
rxq->queue_id = i;
- rxq->max_pkt_size = 1518;
+ rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
if (i < vec_len (ad->rxqs))
{
avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
rxq->ring_len = q->size;
- rxq->databuffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
+ rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
}
+ avf_log_debug (ad, "config_vsi_queues_rx[%u]: max_pkt_size %u "
+ "ring_len %u databuffer_size %u dma_ring_addr 0x%llx",
+ i, rxq->max_pkt_size, rxq->ring_len,
+ rxq->databuffer_size, rxq->dma_ring_addr);
- avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
txq->vsi_id = ad->vsi_id;
+ txq->queue_id = i;
if (i < vec_len (ad->txqs))
{
- txq->queue_id = i;
+ avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
txq->ring_len = q->size;
txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
}
+ avf_log_debug (ad, "config_vsi_queues_tx[%u]: ring_len %u "
+ "dma_ring_addr 0x%llx", i, txq->ring_len,
+ txq->dma_ring_addr);
}
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
imi->vecmap[0].vector_id = 1;
imi->vecmap[0].vsi_id = ad->vsi_id;
- imi->vecmap[0].rxq_map = 1;
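+ /* one bit per queue: steer all rx and tx queues to MSI-X vector 1 */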
+ imi->vecmap[0].rxq_map = (1 << ad->n_rx_queues) - 1;
+ imi->vecmap[0].txq_map = (1 << ad->n_tx_queues) - 1;
+
+ avf_log_debug (ad, "config_irq_map: vsi_id %u vector_id %u rxq_map %u",
+ ad->vsi_id, imi->vecmap[0].vector_id,
+ imi->vecmap[0].rxq_map);
+
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
0);
}
al = (virtchnl_ether_addr_list_t *) msg;
al->vsi_id = ad->vsi_id;
al->num_elements = count;
+
+ avf_log_debug (ad, "add_eth_addr: vsi_id %u num_elements %u",
+ ad->vsi_id, al->num_elements);
+
for (i = 0; i < count; i++)
- clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
+ {
+ clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
+ avf_log_debug (ad, "add_eth_addr[%u]: %U", i,
+ format_ethernet_address, &al->list[i].addr);
+ }
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
0);
}
avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
{
virtchnl_queue_select_t qs = { 0 };
- int i;
+ int i = 0;
qs.vsi_id = ad->vsi_id;
qs.rx_queues = rx;
qs.tx_queues = tx;
- for (i = 0; i < ad->n_rx_queues; i++)
+
+ avf_log_debug (ad, "enable_queues: vsi_id %u rx_queues %u tx_queues %u",
+ ad->vsi_id, qs.rx_queues, qs.tx_queues);
+
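+ /* bump the rx tail register for every queue set in the rx bitmap */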
+ while (rx)
{
- avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
- avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
+ if (rx & (1 << i))
+ {
+ avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
+ avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
+ rx &= ~(1 << i);
+ }
+ i++;
}
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
sizeof (virtchnl_queue_select_t), 0, 0);
{
virtchnl_queue_select_t qs = { 0 };
qs.vsi_id = ad->vsi_id;
+
+ avf_log_debug (ad, "get_stats: vsi_id %u", ad->vsi_id);
+
return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
&qs, sizeof (virtchnl_queue_select_t),
es, sizeof (virtchnl_eth_stats_t));
avf_aq_desc_t d = { 0 };
clib_error_t *error;
u32 rstat;
- int n_retry = 20;
+ f64 t0, t = 0, suspend_time = AVF_RESET_SUSPEND_TIME;
+
+ avf_log_debug (ad, "reset");
d.opcode = 0x801;
d.v_opcode = VIRTCHNL_OP_RESET_VF;
if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
return error;
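+ /* poll AVFGEN_RSTAT with exponential backoff until the VF reports the
+    reset as finished, giving up after AVF_RESET_MAX_WAIT_TIME */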
+ t0 = vlib_time_now (vm);
retry:
- vlib_process_suspend (vm, 10e-3);
+ vlib_process_suspend (vm, suspend_time);
+
rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
if (rstat == 2 || rstat == 3)
- return 0;
+ {
+ avf_log_debug (ad, "reset completed in %.3fs", t);
+ return 0;
+ }
- if (--n_retry == 0)
- return clib_error_return (0, "reset failed (timeout)");
+ t = vlib_time_now (vm) - t0;
+ if (t > AVF_RESET_MAX_WAIT_TIME)
+ {
+ avf_log_err (ad, "reset failed (timeout %.3fs)", t);
+ return clib_error_return (0, "reset failed (timeout)");
+ }
+ suspend_time *= 2;
goto retry;
}
virtchnl_vf_res_request_t res_req = { 0 };
clib_error_t *error;
u32 rstat;
- int n_retry = 20;
+ f64 t0, t, suspend_time = AVF_RESET_SUSPEND_TIME;
res_req.num_queue_pairs = num_queue_pairs;
+ avf_log_debug (ad, "request_queues: num_queue_pairs %u", num_queue_pairs);
+
error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
sizeof (virtchnl_vf_res_request_t), &res_req,
sizeof (virtchnl_vf_res_request_t));
/*
- * if PF respondes, the request failed
+ * if PF responds, the request failed
* else PF initializes restart and avf_send_to_pf returns an error
*/
if (!error)
res_req.num_queue_pairs);
}
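+ /* a granted request makes the PF reset the VF; poll AVFGEN_RSTAT with
+    exponential backoff until the VF is active again */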
+ t0 = vlib_time_now (vm);
retry:
- vlib_process_suspend (vm, 10e-3);
+ vlib_process_suspend (vm, suspend_time);
+ t = vlib_time_now (vm) - t0;
+
rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
goto done;
- if (--n_retry == 0)
- return clib_error_return (0, "reset failed (timeout)");
+ if (t > AVF_RESET_MAX_WAIT_TIME)
+ {
+ avf_log_err (ad, "request queues failed (timeout %.3f seconds)", t);
+ return clib_error_return (0, "request queues failed (timeout)");
+ }
+ suspend_time *= 2;
goto retry;
done:
avf_adminq_init (vm, ad);
- /* request more queues only if we need them */
- if ((error = avf_request_queues (vm, ad, tm->n_vlib_mains)))
+ if ((error = avf_request_queues (vm, ad, clib_max (tm->n_vlib_mains,
+ args->rxq_num))))
{
/* we failed to get more queues, but still we want to proceed */
clib_error_free (error);
"(remote %d.%d)", ver.major, ver.minor);
/*
- * OP_GET_VF_RESOUCES
+ * OP_GET_VF_RESOURCES
*/
if ((error = avf_op_get_vf_resources (vm, ad, &res)))
return error;
else if (args->rxq_num > ad->num_queue_pairs)
{
args->rxq_num = ad->num_queue_pairs;
- vlib_log_warn (am->log_class, "Requested more rx queues than"
- "queue pairs available. Using %u rx queues.",
- args->rxq_num);
+ avf_log_warn (ad, "Requested more rx queues than queue pairs available."
+ "Using %u rx queues.", args->rxq_num);
}
for (i = 0; i < args->rxq_num; i++)
if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
return error;
- if ((error = avf_op_enable_queues (vm, ad, ad->n_rx_queues, 0)))
- return error;
-
- if ((error = avf_op_enable_queues (vm, ad, 0, ad->n_tx_queues)))
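+ /* enable all rx and tx queues in a single request, passed as bitmaps */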
+ if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
+ pow2_mask (ad->n_tx_queues))))
return error;
ad->flags |= AVF_DEVICE_F_INITIALIZED;
if ((r & 0xf0000000) != (1ULL << 31))
{
ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
+ avf_log_err (ad, "error: %U", format_clib_error, ad->error);
goto error;
}
if ((r & 0xf0000000) != (1ULL << 31))
{
ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
+ avf_log_err (ad, "error: %U", format_clib_error, ad->error);
goto error;
}
/* *INDENT-OFF* */
vec_foreach (e, ad->events)
{
+ avf_log_debug (ad, "event: %s (%u) sev %d",
+ virtchnl_event_names[e->event], e->event, e->severity);
if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
{
int link_up = e->event_data.link_event.link_status;
u32 flags = 0;
u32 kbps = 0;
+ avf_log_debug (ad, "event_link_change: status %d speed '%s' (%d)",
+ link_up,
+ speed < ARRAY_LEN (virtchnl_link_speed_str) ?
+ virtchnl_link_speed_str[speed] : "unknown", speed);
+
if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
{
ad->flags |= AVF_DEVICE_F_LINK_UP;
kbps = 25000000;
else if (speed == VIRTCHNL_LINK_SPEED_10GB)
kbps = 10000000;
+ else if (speed == VIRTCHNL_LINK_SPEED_5GB)
+ kbps = 5000000;
+ else if (speed == VIRTCHNL_LINK_SPEED_2_5GB)
+ kbps = 2500000;
else if (speed == VIRTCHNL_LINK_SPEED_1GB)
kbps = 1000000;
else if (speed == VIRTCHNL_LINK_SPEED_100MB)
txq->n_enqueued);
}
vec_free (txq->bufs);
+ clib_ring_free (txq->rs_slots);
}
/* *INDENT-ON* */
vec_free (ad->txqs);
return;
}
ad->pci_dev_handle = h;
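+ /* remember the PCI address and NUMA node so subsequent ring and buffer
+    allocations can be made node-local */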
+ ad->pci_addr = args->addr;
+ ad->numa_node = vlib_pci_get_numa_node (vm, h);
vlib_pci_set_private_data (vm, h, ad->dev_instance);
if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
goto error;
- if (!(ad->atq = vlib_physmem_alloc (vm, sizeof (avf_aq_desc_t) *
- AVF_MBOX_LEN)))
+ ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->atq == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
goto error;
- if (!(ad->arq = vlib_physmem_alloc (vm, sizeof (avf_aq_desc_t) *
- AVF_MBOX_LEN)))
+ ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->arq == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
goto error;
- if (!(ad->atq_bufs = vlib_physmem_alloc (vm, AVF_MBOX_BUF_SZ *
- AVF_MBOX_LEN)))
+ ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->atq_bufs == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
goto error;
- if (!(ad->arq_bufs = vlib_physmem_alloc (vm, AVF_MBOX_BUF_SZ *
- AVF_MBOX_LEN)))
+ ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
+ AVF_MBOX_LEN,
+ CLIB_CACHE_LINE_BYTES,
+ ad->numa_node);
+ if (ad->arq_bufs == 0)
{
error = vlib_physmem_last_error (vm);
goto error;
args->rv = VNET_API_ERROR_INVALID_INTERFACE;
args->error = clib_error_return (error, "pci-addr %U",
format_vlib_pci_addr, &args->addr);
- vlib_log_err (am->log_class, "%U", format_clib_error, args->error);
+ avf_log_err (ad, "error: %U", format_clib_error, args->error);
}
static clib_error_t *
#undef _
};
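+/* record the device stats at clear time as the new baseline */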
+static void
+avf_clear_hw_interface_counters (u32 instance)
+{
+ avf_main_t *am = &avf_main;
+ avf_device_t *ad = vec_elt_at_index (am->devices, instance);
+ clib_memcpy_fast (&ad->last_cleared_eth_stats,
+ &ad->eth_stats, sizeof (ad->eth_stats));
+}
+
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (avf_device_class,) =
{
.name = "Adaptive Virtual Function (AVF) interface",
+ .clear_counters = avf_clear_hw_interface_counters,
.format_device = format_avf_device,
.format_device_name = format_avf_device_name,
.admin_up_down_function = avf_interface_admin_up_down,
avf_init (vlib_main_t * vm)
{
avf_main_t *am = &avf_main;
- clib_error_t *error;
vlib_thread_main_t *tm = vlib_get_thread_main ();
- int i;
-
- if ((error = vlib_call_init_function (vm, pci_bus_init)))
- return error;
vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
CLIB_CACHE_LINE_BYTES);
- /* initialize ptype based loopup table */
- vec_validate_aligned (am->ptypes, 255, CLIB_CACHE_LINE_BYTES);
-
- /* *INDENT-OFF* */
- vec_foreach_index (i, am->ptypes)
- {
- avf_ptype_t *p = vec_elt_at_index (am->ptypes, i);
- if ((i >= 22) && (i <= 87))
- {
- p->next_node = VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
- p->flags = VNET_BUFFER_F_IS_IP4;
- }
- else if ((i >= 88) && (i <= 153))
- {
- p->next_node = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
- p->flags = VNET_BUFFER_F_IS_IP6;
- }
- else
- p->next_node = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
- p->buffer_advance = device_input_next_node_advance[p->next_node];
- p->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
- }
- /* *INDENT-ON* */
-
- am->log_class = vlib_log_register_class ("avf_plugin", 0);
+ am->log_class = vlib_log_register_class ("avf", 0);
vlib_log_debug (am->log_class, "initialized");
return 0;
}
-VLIB_INIT_FUNCTION (avf_init);
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (avf_init) =
+{
+ .runs_after = VLIB_INITS ("pci_bus_init"),
+};
+/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON