}
static u64
-virtio_pci_legacy_get_features (vlib_main_t * vm, virtio_if_t * vif)
+virtio_pci_legacy_get_host_features (vlib_main_t * vm, virtio_if_t * vif)
{
u32 features;
vlib_pci_read_io_u32 (vm, vif->pci_dev_handle, VIRTIO_PCI_HOST_FEATURES,
}
static u32
-virtio_pci_legacy_set_features (vlib_main_t * vm, virtio_if_t * vif,
- u64 features)
+virtio_pci_legacy_get_guest_features (vlib_main_t * vm, virtio_if_t * vif)
+{
+  /* Read back the guest (driver) feature bits from the legacy
+   * VIRTIO_PCI_GUEST_FEATURES I/O register.  The value is cached in
+   * vif->features as a side effect and also returned to the caller. */
+  u32 feature = 0;
+  vlib_pci_read_io_u32 (vm, vif->pci_dev_handle, VIRTIO_PCI_GUEST_FEATURES,
+			&feature);
+  vif->features = feature;
+  return feature;
+}
+
+static u32
+virtio_pci_legacy_set_guest_features (vlib_main_t * vm, virtio_if_t * vif,
+ u64 features)
{
if ((features >> 32) != 0)
{
virtio_log_debug (vif, "max queue pair is %x", max_queue_pairs);
if (max_queue_pairs < 1 || max_queue_pairs > 0x8000)
- return clib_error_return (error, "max queue pair is %x", max_queue_pairs);
+ return clib_error_return (error, "max queue pair is %x,"
+ " should be in range [1, 0x8000]",
+ max_queue_pairs);
vif->max_queue_pairs = max_queue_pairs;
return error;
return status;
}
+static int
+virtio_pci_disable_offload (vlib_main_t * vm, virtio_if_t * vif)
+{
+  /* Turn off all guest offloads: send a
+   * VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET control message carrying an
+   * all-zero offload bitmap, then re-read the host and guest feature
+   * registers so the cached copies in vif stay in sync.  Returns the
+   * device's virtio_net_ctrl_ack status (VIRTIO_NET_ERR on failure). */
+  struct virtio_ctrl_msg offload_hdr;
+  virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+
+  offload_hdr.ctrl.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
+  offload_hdr.ctrl.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
+  offload_hdr.status = VIRTIO_NET_ERR;
+  u64 offloads = 0ULL;
+  clib_memcpy (offload_hdr.data, &offloads, sizeof (offloads));
+
+  status =
+    virtio_pci_send_ctrl_msg (vm, vif, &offload_hdr, sizeof (offloads));
+  virtio_log_debug (vif, "disable offloads");
+  /* refresh cached feature sets after the device processed the command */
+  vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
+  virtio_pci_legacy_get_guest_features (vm, vif);
+  return status;
+}
+
<imports>
</imports>
+static int
+virtio_pci_enable_checksum_offload (vlib_main_t * vm, virtio_if_t * vif)
+{
+  /* Enable guest checksum offload only: send a
+   * VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET control message whose bitmap
+   * contains just VIRTIO_NET_F_GUEST_CSUM (implicitly clearing any TSO
+   * bits), then refresh the cached host/guest feature sets.  Returns
+   * the device's virtio_net_ctrl_ack status (VIRTIO_NET_ERR on
+   * failure). */
+  struct virtio_ctrl_msg csum_offload_hdr;
+  virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+
+  csum_offload_hdr.ctrl.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
+  csum_offload_hdr.ctrl.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
+  csum_offload_hdr.status = VIRTIO_NET_ERR;
+  u64 offloads = 0ULL;
+  offloads |= VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM);
+  clib_memcpy (csum_offload_hdr.data, &offloads, sizeof (offloads));
+
+  status =
+    virtio_pci_send_ctrl_msg (vm, vif, &csum_offload_hdr, sizeof (offloads));
+  virtio_log_debug (vif, "enable checksum offload");
+  /* refresh cached feature sets after the device processed the command */
+  vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
+  virtio_pci_legacy_get_guest_features (vm, vif);
+  return status;
+}
+
static int
virtio_pci_enable_gso (vlib_main_t * vm, virtio_if_t * vif)
{
gso_hdr.status = VIRTIO_NET_ERR;
u64 offloads = VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)
| VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO4)
- | VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6)
- | VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_UFO);
+ | VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6);
clib_memcpy (gso_hdr.data, &offloads, sizeof (offloads));
status = virtio_pci_send_ctrl_msg (vm, vif, &gso_hdr, sizeof (offloads));
virtio_log_debug (vif, "enable gso");
+ vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
+ virtio_pci_legacy_get_guest_features (vm, vif);
return status;
}
+static int
+virtio_pci_offloads (vlib_main_t * vm, virtio_if_t * vif, int gso_enabled,
+		     int csum_offload_enabled)
+{
+  /* Select the offload mode for the interface and keep the vnet hw
+   * interface flags consistent with it.  This is a no-op unless both
+   * VIRTIO_NET_F_CTRL_VQ and VIRTIO_NET_F_CTRL_GUEST_OFFLOADS were
+   * negotiated.  Priority: GSO (if requested and the host supports
+   * TSO4/TSO6), else checksum offload (if requested and
+   * VIRTIO_NET_F_CSUM is negotiated), else all offloads are disabled.
+   * Always returns 0; failures of the underlying control messages are
+   * only logged as warnings. */
+  vnet_main_t *vnm = vnet_get_main ();
+  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
+
+  if ((vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ)) &&
+      (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)))
+    {
+      if (gso_enabled
+	  && (vif->features & (VIRTIO_FEATURE (VIRTIO_NET_F_HOST_TSO4) |
+			       VIRTIO_FEATURE (VIRTIO_NET_F_HOST_TSO6))))
+	{
+	  if (virtio_pci_enable_gso (vm, vif))
+	    {
+	      virtio_log_warning (vif, "gso is not enabled");
+	    }
+	  else
+	    {
+	      /* gso and csum offload are mutually exclusive modes */
+	      vif->gso_enabled = 1;
+	      vif->csum_offload_enabled = 0;
+	      hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+		VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
+	    }
+	}
+      else if (csum_offload_enabled
+	       && (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM)))
+	{
+	  if (virtio_pci_enable_checksum_offload (vm, vif))
+	    {
+	      virtio_log_warning (vif, "checksum offload is not enabled");
+	    }
+	  else
+	    {
+	      /* csum offload only: advertise csum but not gso */
+	      vif->csum_offload_enabled = 1;
+	      vif->gso_enabled = 0;
+	      hw->flags &= ~VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
+	      hw->flags |=
+		VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
+	    }
+	}
+      else
+	{
+	  /* neither mode requested (or not supported): disable all */
+	  if (virtio_pci_disable_offload (vm, vif))
+	    {
+	      virtio_log_warning (vif, "offloads are not disabled");
+	    }
+	  else
+	    {
+	      vif->csum_offload_enabled = 0;
+	      vif->gso_enabled = 0;
+	      hw->flags &= ~(VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+			     VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
+	    }
+	}
+    }
+
+  return 0;
+}
+
static int
virtio_pci_enable_multiqueue (vlib_main_t * vm, virtio_if_t * vif,
u16 num_queues)
virtio_pci_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 queue_num)
{
clib_error_t *error = 0;
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
u16 queue_size = 0;
virtio_vring_t *vring;
struct vring vr;
if (queue_num % 2)
{
- if (TX_QUEUE_ACCESS (queue_num) > vtm->n_vlib_mains)
- return error;
vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num),
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num));
{
virtio_log_debug (vif, "tx-queue: number %u, size %u", queue_num,
queue_size);
+ clib_memset_u32 (vring->buffers, ~0, queue_size);
}
else
{
vif->features &= ~VIRTIO_FEATURE (VIRTIO_NET_F_MTU);
}
- vif->features = virtio_pci_legacy_set_features (vm, vif, vif->features);
+ vif->features =
+ virtio_pci_legacy_set_guest_features (vm, vif, vif->features);
}
void
virtio_pci_read_device_feature (vlib_main_t * vm, virtio_if_t * vif)
{
- vif->remote_features = virtio_pci_legacy_get_features (vm, vif);
+ vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
}
int
virtio_pci_create_if_args_t * args)
{
clib_error_t *error = 0;
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
u8 status = 0;
if ((error = virtio_pci_read_caps (vm, vif)))
- clib_error_return (error, "Device is not supported");
+ {
+ args->rv = VNET_API_ERROR_UNSUPPORTED;
+ virtio_log_error (vif, "Device is not supported");
+ clib_error_return (error, "Device is not supported");
+ }
if (virtio_pci_reset_device (vm, vif) < 0)
{
+ args->rv = VNET_API_ERROR_INIT_FAILED;
virtio_log_error (vif, "Failed to reset the device");
clib_error_return (error, "Failed to reset the device");
}
status = virtio_pci_legacy_get_status (vm, vif);
if (!(status & VIRTIO_CONFIG_STATUS_FEATURES_OK))
{
+ args->rv = VNET_API_ERROR_UNSUPPORTED;
virtio_log_error (vif,
"error encountered: Device doesn't support requested features");
clib_error_return (error, "Device doesn't support requested features");
* Initialize the virtqueues
*/
if ((error = virtio_pci_get_max_virtqueue_pairs (vm, vif)))
- goto err;
+ {
+ args->rv = VNET_API_ERROR_EXCEEDED_NUMBER_OF_RANGES_CAPACITY;
+ goto err;
+ }
for (int i = 0; i < vif->max_queue_pairs; i++)
{
if ((error = virtio_pci_vring_init (vm, vif, RX_QUEUE (i))))
{
- virtio_log_warning (vif, "%s (%u) %s", "error in rxq-queue",
- RX_QUEUE (i), "initialization");
+ args->rv = VNET_API_ERROR_INIT_FAILED;
+ virtio_log_error (vif, "%s (%u) %s", "error in rxq-queue",
+ RX_QUEUE (i), "initialization");
+ clib_error_return (error, "%s (%u) %s", "error in rxq-queue",
+ RX_QUEUE (i), "initialization");
}
else
{
vif->num_rxqs++;
}
+ if (i >= vtm->n_vlib_mains)
+ {
+ /*
+	   * There is a 1:1 mapping between tx queues and vpp worker
+	   * threads: tx queue 0 is bound to thread index 0, tx queue 1
+	   * to thread index 1, and so on.
+	   * Multiple worker threads may poll the same tx queue when
+	   * there are more workers than tx queues; in that case the
+	   * mapping between a tx queue and worker threads is 1:N.
+ */
+ virtio_log_debug (vif, "%s %u, %s", "tx-queue: number",
+ TX_QUEUE (i),
+ "no VPP worker thread is available");
+ continue;
+ }
+
if ((error = virtio_pci_vring_init (vm, vif, TX_QUEUE (i))))
{
- virtio_log_warning (vif, "%s (%u) %s", "error in txq-queue",
- TX_QUEUE (i), "initialization");
+ args->rv = VNET_API_ERROR_INIT_FAILED;
+ virtio_log_error (vif, "%s (%u) %s", "error in txq-queue",
+ TX_QUEUE (i), "initialization");
+ clib_error_return (error, "%s (%u) %s", "error in txq-queue",
+ TX_QUEUE (i), "initialization");
}
else
{
pool_foreach (vif, vim->interfaces, ({
if (vif->pci_addr.as_u32 == args->addr)
{
- args->rv = VNET_API_ERROR_INVALID_VALUE;
+ args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
args->error =
clib_error_return (error, "PCI address in use");
vlib_log (VLIB_LOG_LEVEL_ERR, vim->log_default, "%U: %s",
if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
&virtio_pci_irq_0_handler)))
{
+ args->rv = VNET_API_ERROR_INVALID_REGISTRATION;
virtio_log_error (vif,
"error encountered on pci register msix handler 0");
goto error;
if ((error = vlib_pci_register_msix_handler (vm, h, 1, 1,
&virtio_pci_irq_1_handler)))
{
+ args->rv = VNET_API_ERROR_INVALID_REGISTRATION;
virtio_log_error (vif,
"error encountered on pci register msix handler 1");
goto error;
if (error)
{
+ args->rv = VNET_API_ERROR_INVALID_REGISTRATION;
virtio_log_error (vif,
"error encountered on ethernet register interface");
goto error;
else
vnet_hw_interface_set_flags (vnm, vif->hw_if_index, 0);
- if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
+ virtio_pci_offloads (vm, vif, args->gso_enabled,
+ args->checksum_offload_enabled);
+
+ if ((vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ)) &&
+ (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_MQ)))
{
- if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) &&
- args->gso_enabled)
- {
- if (virtio_pci_enable_gso (vm, vif))
- {
- virtio_log_warning (vif, "gso is not enabled");
- }
- else
- {
- vif->gso_enabled = 1;
- hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
- }
- }
- if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_MQ))
- {
- if (virtio_pci_enable_multiqueue (vm, vif, vif->max_queue_pairs))
- virtio_log_warning (vif, "multiqueue is not set");
- }
+ if (virtio_pci_enable_multiqueue (vm, vif, vif->max_queue_pairs))
+ virtio_log_warning (vif, "multiqueue is not set");
}
return;
-
error:
virtio_pci_delete_if (vm, vif);
- args->rv = VNET_API_ERROR_INVALID_INTERFACE;
+ if (args->rv == 0)
+ args->rv = VNET_API_ERROR_INVALID_INTERFACE;
args->error = error;
}
return 0;
}
+/* Public entry point to switch the offload mode of a virtio interface
+ * at runtime.  Exactly one of the three flags is honoured, in priority
+ * order: gso_enabled, then checksum_offload_enabled, then
+ * offloads_disabled; if none is set, nothing is changed.  Returns
+ * VNET_API_ERROR_INVALID_INTERFACE for non-PCI interfaces, 0
+ * otherwise (virtio_pci_offloads itself always reports success). */
+int
+virtio_pci_enable_disable_offloads (vlib_main_t * vm, virtio_if_t * vif,
+				    int gso_enabled,
+				    int checksum_offload_enabled,
+				    int offloads_disabled)
+{
+  /* offload control goes through the PCI ctrl virtqueue only */
+  if (vif->type != VIRTIO_IF_TYPE_PCI)
+    return VNET_API_ERROR_INVALID_INTERFACE;
+
+  if (gso_enabled)
+    virtio_pci_offloads (vm, vif, 1, 0);
+  else if (checksum_offload_enabled)
+    virtio_pci_offloads (vm, vif, 0, 1);
+  else if (offloads_disabled)
+    virtio_pci_offloads (vm, vif, 0, 0);
+
+  return 0;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*