#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
#include <avf/avf.h>
{
clib_error_t *err;
avf_txq_t *txq;
+ u8 bpi = vlib_buffer_pool_get_default_for_numa (vm,
+ ad->numa_node);
if (qid >= ad->num_queue_pairs)
{
txq = vec_elt_at_index (ad->txqs, qid);
txq->size = txq_size;
txq->next = 0;
+
+ /* Prepare a placeholder buffer to maintain a 1-1
+ relationship between bufs and descs when a context
+ descriptor is added in descs */
+ if (!vlib_buffer_alloc_from_pool
+ (vm, &txq->ctx_desc_placeholder_bi, 1, bpi))
+ return clib_error_return (0, "buffer allocation error");
+
txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
sizeof (avf_tx_desc_t),
2 * CLIB_CACHE_LINE_BYTES,
format_ethernet_address, &al->list[i].addr);
}
return avf_send_to_pf (vm, ad, is_add ? VIRTCHNL_OP_ADD_ETH_ADDR :
- VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0, 0);
+ VIRTCHNL_OP_DEL_ETH_ADDR, msg, msg_len, 0, 0);
}
clib_error_t *
vlib_log_err (avf_log.class, "%U", format_clib_error, ad->error);
}
-static clib_error_t *
-avf_process_request (vlib_main_t * vm, avf_process_req_t * req)
-{
- uword *event_data = 0;
- req->calling_process_index = vlib_get_current_process_node_index (vm);
- vlib_process_signal_event_pointer (vm, avf_process_node.index,
- AVF_PROCESS_EVENT_REQ, req);
-
- vlib_process_wait_for_event_or_clock (vm, 5.0);
-
- if (vlib_process_get_events (vm, &event_data) != 0)
- clib_panic ("avf process node failed to reply in 5 seconds");
- vec_free (event_data);
-
- return req->error;
-}
-
static void
avf_process_handle_request (vlib_main_t * vm, avf_process_req_t * req)
{
else
clib_panic ("BUG: unknown avf proceess request type");
- vlib_process_signal_event (vm, req->calling_process_index, 0, 0);
+ if (req->calling_process_index != avf_process_node.index)
+ vlib_process_signal_event (vm, req->calling_process_index, 0, 0);
+}
+
+/* Dispatch an AVF control-plane request.
+ *
+ * If the caller is any process other than the avf process node, the
+ * request is handed off to the avf process via an event and the caller
+ * blocks for up to 5 seconds waiting for the reply event.  If the caller
+ * IS the avf process node itself, the request is handled inline instead —
+ * signalling oneself and then waiting would never complete.
+ *
+ * @param vm  vlib main
+ * @param req request descriptor; req->error is filled in by the handler
+ * @return req->error as set by avf_process_handle_request (0 on success)
+ */
+static clib_error_t *
+avf_process_request (vlib_main_t * vm, avf_process_req_t * req)
+{
+ uword *event_data = 0;
+ /* record the caller so the avf process knows which process to signal
+ back when the request is done */
+ req->calling_process_index = vlib_get_current_process_node_index (vm);
+
+ if (req->calling_process_index != avf_process_node.index)
+ {
+ /* cross-process path: post the request to the avf process ... */
+ vlib_process_signal_event_pointer (vm, avf_process_node.index,
+ AVF_PROCESS_EVENT_REQ, req);
+
+ /* ... and wait for its reply, bounded by a 5 second timeout */
+ vlib_process_wait_for_event_or_clock (vm, 5.0);
+
+ /* a non-zero event type here means the clock fired before the
+ reply arrived — treat a stuck avf process as fatal */
+ if (vlib_process_get_events (vm, &event_data) != 0)
+ clib_panic ("avf process node failed to reply in 5 seconds");
+ vec_free (event_data);
+ }
+ else
+ /* already running inside the avf process: handle synchronously */
+ avf_process_handle_request (vm, req);
+
+ return req->error;
+}
static u32
* during suspend */
vec_reset_length (dev_pointers);
/* *INDENT-OFF* */
- pool_foreach_index (i, am->devices,
+ pool_foreach_index (i, am->devices)
{
vec_add1 (dev_pointers, avf_get_device (i));
- });
+ }
vec_foreach_index (i, dev_pointers)
{
vnet_main_t *vnm = vnet_get_main ();
uword pd = vlib_pci_get_private_data (vm, h);
avf_device_t *ad = avf_get_device (pd);
+ avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, line - 1);
if (ad->flags & AVF_DEVICE_F_ELOG)
{
line--;
- if (ad->flags & AVF_DEVICE_F_RX_INT && ad->rxqs[line].int_mode)
- vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, line);
+ if (ad->flags & AVF_DEVICE_F_RX_INT && rxq->int_mode)
+ vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
avf_irq_n_set_state (ad, line, AVF_IRQ_STATE_ENABLED);
}
if (with_barrier)
vlib_worker_thread_barrier_sync (vm);
vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
- vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
ethernet_delete_interface (vnm, ad->hw_if_index);
if (with_barrier)
vlib_worker_thread_barrier_release (vm);
vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
txq->n_enqueued);
}
+ /* Free the placeholder buffer */
+ vlib_buffer_free_one(vm, txq->ctx_desc_placeholder_bi);
vec_free (txq->bufs);
clib_ring_free (txq->rs_slots);
}
return;
/* *INDENT-OFF* */
- pool_foreach (adp, am->devices, ({
+ pool_foreach (adp, am->devices) {
if ((*adp)->pci_addr.as_u32 == args->addr.as_u32)
{
args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
&args->addr, "pci address in use");
return;
}
- }));
+ }
/* *INDENT-ON* */
pool_get (am->devices, adp);
/* Indicate ability to support L3 DMAC filtering and
* initialize interface to L3 non-promisc mode */
vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ad->hw_if_index);
- hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
+ hi->flags |=
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
ethernet_set_flags (vnm, ad->hw_if_index,
ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
- vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
- avf_input_node.index);
+ vnet_hw_if_set_input_node (vnm, ad->hw_if_index, avf_input_node.index);
for (i = 0; i < ad->n_rx_queues; i++)
- vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);
+ {
+ u32 qi, fi;
+ qi = vnet_hw_if_register_rx_queue (vnm, ad->hw_if_index, i,
+ VNET_HW_IF_RXQ_THREAD_ANY);
+
+ if (ad->flags & AVF_DEVICE_F_RX_INT)
+ {
+ fi = vlib_pci_get_msix_file_index (vm, ad->pci_dev_handle, i + 1);
+ vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
+ }
+ ad->rxqs[i].queue_index = qi;
+ }
+ vnet_hw_if_update_runtime_data (vnm, ad->hw_if_index);
if (pool_elts (am->devices) == 1)
vlib_process_signal_event (vm, avf_process_node.index,