2 *------------------------------------------------------------------
3 * Copyright (c) 2017 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <sys/types.h>
23 #include <linux/if_tun.h>
25 #include <net/if_tun.h>
26 #endif /* __linux__ */
27 #include <sys/ioctl.h>
28 #include <sys/eventfd.h>
30 #include <vlib/vlib.h>
31 #include <vlib/pci/pci.h>
32 #include <vlib/unix/unix.h>
33 #include <vnet/ethernet/ethernet.h>
34 #include <vnet/ip/ip4_packet.h>
35 #include <vnet/ip/ip6_packet.h>
36 #include <vnet/devices/virtio/virtio.h>
37 #include <vnet/devices/virtio/virtio_inline.h>
38 #include <vnet/devices/virtio/pci.h>
39 #include <vnet/interface/rx_queue_funcs.h>
40 #include <vnet/interface/tx_queue_funcs.h>
/* Global virtio driver state: interface pool and logging class
   (registered in virtio_init below). */
42 virtio_main_t virtio_main;
/* Convenience wrapper around ioctl(): on failure, record a unix error
   (with the ioctl name stringified into the message) into a local
   clib_error_t *err that must be in scope at the expansion site.
   NOTE(review): the extraction is truncated here — the macro body's
   enclosing braces (and any failure-path statement after the error
   assignment) are missing from this view; verify against the full file. */
44 #define _IOCTL(fd,a,...) \
45 if (ioctl (fd, a, __VA_ARGS__) < 0) \
47 err = clib_error_return_unix (0, "ioctl(" #a ")"); \
52 call_read_ready (clib_file_t * uf)
54 vnet_main_t *vnm = vnet_get_main ();
57 CLIB_UNUSED (ssize_t size) = read (uf->file_descriptor, &b, sizeof (b));
58 vnet_hw_if_rx_queue_set_int_pending (vnm, uf->private_data);
65 virtio_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 idx, u16 sz)
67 vnet_virtio_vring_t *vring;
71 return clib_error_return (0, "ring size must be power of 2");
74 return clib_error_return (0, "ring size must be 32768 or lower");
81 vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (idx),
82 CLIB_CACHE_LINE_BYTES);
83 vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (idx));
84 clib_spinlock_init (&vring->lockp);
88 vec_validate_aligned (vif->rxq_vrings, RX_QUEUE_ACCESS (idx),
89 CLIB_CACHE_LINE_BYTES);
90 vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (idx));
92 i = sizeof (vnet_virtio_vring_desc_t) * sz;
93 i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
94 vring->desc = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
95 clib_memset (vring->desc, 0, i);
97 i = sizeof (vnet_virtio_vring_avail_t) + sz * sizeof (vring->avail->ring[0]);
98 i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
99 vring->avail = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
100 clib_memset (vring->avail, 0, i);
101 // tell kernel that we don't need interrupt
102 vring->avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
104 i = sizeof (vnet_virtio_vring_used_t) +
105 sz * sizeof (vnet_virtio_vring_used_elem_t);
106 i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
107 vring->used = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
108 clib_memset (vring->used, 0, i);
110 vring->queue_id = idx;
111 ASSERT (vring->buffers == 0);
112 vec_validate_aligned (vring->buffers, sz, CLIB_CACHE_LINE_BYTES);
116 clib_memset_u32 (vring->buffers, ~0, sz);
117 // tx path: suppress the interrupts from kernel
121 vring->call_fd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
123 vring->total_packets = 0;
124 vring->queue_size = sz;
125 vring->kick_fd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
126 virtio_log_debug (vif, "vring %u size %u call_fd %d kick_fd %d", idx,
127 vring->queue_size, vring->call_fd, vring->kick_fd);
133 virtio_free_buffers (vlib_main_t *vm, vnet_virtio_vring_t *vring)
135 u16 used = vring->desc_in_use;
136 u16 last = vring->last_used_idx;
137 u16 mask = vring->queue_size - 1;
141 vlib_buffer_free (vm, &vring->buffers[last & mask], 1);
148 virtio_vring_free_rx (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
150 vnet_virtio_vring_t *vring =
151 vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (idx));
153 clib_file_del_by_index (&file_main, vring->call_file_index);
154 close (vring->kick_fd);
155 close (vring->call_fd);
158 virtio_free_buffers (vm, vring);
159 clib_mem_free (vring->used);
162 clib_mem_free (vring->desc);
164 clib_mem_free (vring->avail);
165 vec_free (vring->buffers);
170 virtio_vring_free_tx (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
172 vnet_virtio_vring_t *vring =
173 vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (idx));
175 close (vring->kick_fd);
178 virtio_free_buffers (vm, vring);
179 clib_mem_free (vring->used);
182 clib_mem_free (vring->desc);
184 clib_mem_free (vring->avail);
185 vec_free (vring->buffers);
186 gro_flow_table_free (vring->flow_table);
187 virtio_vring_buffering_free (vm, vring->buffering);
188 clib_spinlock_free (&vring->lockp);
193 virtio_set_packet_coalesce (virtio_if_t * vif)
195 vnet_main_t *vnm = vnet_get_main ();
196 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
197 vnet_virtio_vring_t *vring;
198 vif->packet_coalesce = 1;
199 vec_foreach (vring, vif->txq_vrings)
201 gro_flow_table_init (&vring->flow_table,
202 vif->type & (VIRTIO_IF_TYPE_TAP |
203 VIRTIO_IF_TYPE_PCI), hw->tx_node_index);
208 virtio_set_packet_buffering (virtio_if_t * vif, u16 buffering_size)
210 vnet_main_t *vnm = vnet_get_main ();
211 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
212 vnet_virtio_vring_t *vring;
213 clib_error_t *error = 0;
215 vec_foreach (vring, vif->txq_vrings)
218 virtio_vring_buffering_init (&vring->buffering, hw->tx_node_index,
229 virtio_vring_fill (vlib_main_t *vm, virtio_if_t *vif,
230 vnet_virtio_vring_t *vring)
233 virtio_refill_vring_packed (vm, vif, vif->type, vring,
234 vif->virtio_net_hdr_sz,
235 virtio_input_node.index);
237 virtio_refill_vring_split (vm, vif, vif->type, vring,
238 vif->virtio_net_hdr_sz,
239 virtio_input_node.index);
243 virtio_vring_set_rx_queues (vlib_main_t *vm, virtio_if_t *vif)
245 vnet_main_t *vnm = vnet_get_main ();
246 vnet_virtio_vring_t *vring;
249 vnet_hw_if_set_input_node (vnm, vif->hw_if_index, virtio_input_node.index);
251 vec_foreach (vring, vif->rxq_vrings)
253 vring->queue_index = vnet_hw_if_register_rx_queue (
254 vnm, vif->hw_if_index, RX_QUEUE_ACCESS (vring->queue_id),
255 VNET_HW_IF_RXQ_THREAD_ANY);
256 vring->buffer_pool_index = vlib_buffer_pool_get_default_for_numa (
257 vm, vnet_hw_if_get_rx_queue_numa_node (vnm, vring->queue_index));
258 if (vif->type == VIRTIO_IF_TYPE_TAP || vif->type == VIRTIO_IF_TYPE_TUN)
262 .read_function = call_read_ready,
263 .flags = UNIX_FILE_EVENT_EDGE_TRIGGERED,
264 .file_descriptor = vring->call_fd,
265 .private_data = vring->queue_index,
266 .description = format (0, "%U vring %u", format_virtio_device_name,
267 vif->dev_instance, vring->queue_id),
270 vring->call_file_index = clib_file_add (&file_main, &f);
271 vnet_hw_if_set_rx_queue_file_index (vnm, vring->queue_index,
272 vring->call_file_index);
274 else if ((vif->type == VIRTIO_IF_TYPE_PCI) && (vif->support_int_mode) &&
275 (vif->msix_enabled == VIRTIO_MSIX_ENABLED))
279 vlib_pci_get_msix_file_index (vm, vif->pci_dev_handle, i + 1);
280 vnet_hw_if_set_rx_queue_file_index (vnm, vring->queue_index,
284 vnet_hw_if_set_rx_queue_mode (vnm, vring->queue_index,
285 VNET_HW_IF_RX_MODE_POLLING);
286 vring->mode = VNET_HW_IF_RX_MODE_POLLING;
287 virtio_vring_fill (vm, vif, vring);
289 vnet_hw_if_update_runtime_data (vnm, vif->hw_if_index);
293 virtio_vring_set_tx_queues (vlib_main_t *vm, virtio_if_t *vif)
295 vnet_main_t *vnm = vnet_get_main ();
296 vnet_virtio_vring_t *vring;
298 vec_foreach (vring, vif->txq_vrings)
300 vring->queue_index = vnet_hw_if_register_tx_queue (
301 vnm, vif->hw_if_index, TX_QUEUE_ACCESS (vring->queue_id));
304 if (vif->num_txqs == 0)
306 virtio_log_error (vif, "Interface %U has 0 txq",
307 format_vnet_hw_if_index_name, vnm, vif->hw_if_index);
311 for (u32 j = 0; j < vlib_get_n_threads (); j++)
313 u32 qi = vif->txq_vrings[j % vif->num_txqs].queue_index;
314 vnet_hw_if_tx_queue_assign_thread (vnm, qi, j);
317 vnet_hw_if_update_runtime_data (vnm, vif->hw_if_index);
321 virtio_set_net_hdr_size (virtio_if_t * vif)
323 if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF) ||
324 vif->features & VIRTIO_FEATURE (VIRTIO_F_VERSION_1))
325 vif->virtio_net_hdr_sz = sizeof (vnet_virtio_net_hdr_v1_t);
327 vif->virtio_net_hdr_sz = sizeof (vnet_virtio_net_hdr_t);
/*
 * CLI dump of virtio interface state for every interface in
 * `hw_if_indices` whose type matches `type`: host-side attributes
 * (TAP/TUN), feature/flag bits, then per-vring counters for RX, TX
 * and (PCI only) the control virtqueue; with `show_descr`, the full
 * descriptor tables as well.
 * NOTE(review): this view of the function is a truncated extraction —
 * braces, #undef lines and several statements are missing; comments
 * below annotate the visible structure only.
 */
331 virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
332 virtio_if_type_t type)
334 u32 i, j, hw_if_index;
336 vnet_main_t *vnm = &vnet_main;
337 virtio_main_t *mm = &virtio_main;
338 vnet_virtio_vring_t *vring;
/* feature-bit and flag-bit name tables built from the x-macro lists */
344 struct feat_struct *feat_entry;
346 static struct feat_struct feat_array[] = {
347 #define _(s,b) { .str = #s, .bit = b, },
348 foreach_virtio_net_features
353 struct feat_struct *flag_entry;
354 static struct feat_struct flags_array[] = {
355 #define _(b,e,s) { .bit = b, .str = s, },
356 foreach_virtio_if_flag
/* one pass per requested hw interface; skip type mismatches */
364 for (hw_if_index = 0; hw_if_index < vec_len (hw_if_indices); hw_if_index++)
366 vnet_hw_interface_t *hi =
367 vnet_get_hw_interface (vnm, hw_if_indices[hw_if_index]);
368 vif = pool_elt_at_index (mm->interfaces, hi->dev_instance);
369 if (vif->type != type)
371 vlib_cli_output (vm, "Interface: %U (ifindex %d)",
372 format_vnet_hw_if_index_name, vnm,
373 hw_if_indices[hw_if_index], vif->hw_if_index);
374 if (type == VIRTIO_IF_TYPE_PCI)
376 vlib_cli_output (vm, " PCI Address: %U", format_vlib_pci_addr,
/* host-side attributes only exist for TAP/TUN interfaces */
379 if (type & (VIRTIO_IF_TYPE_TAP | VIRTIO_IF_TYPE_TUN))
382 if (vif->host_if_name)
383 vlib_cli_output (vm, " name \"%s\"", vif->host_if_name);
385 vlib_cli_output (vm, " host-ns \"%s\"", vif->net_ns);
386 if (vif->host_mtu_size)
387 vlib_cli_output (vm, " host-mtu-size \"%d\"",
389 if (type == VIRTIO_IF_TYPE_TAP)
390 vlib_cli_output (vm, " host-mac-addr: %U",
391 format_ethernet_address, vif->host_mac_addr);
392 vlib_cli_output (vm, " host-carrier-up: %u", vif->host_carrier_up);
394 vec_foreach_index (i, vif->vhost_fds)
395 str = format (str, " %d", vif->vhost_fds[i]);
396 vlib_cli_output (vm, " vhost-fds%v", str);
398 vec_foreach_index (i, vif->tap_fds)
399 str = format (str, " %d", vif->tap_fds[i]);
400 vlib_cli_output (vm, " tap-fds%v", str);
403 vlib_cli_output (vm, " gso-enabled %d", vif->gso_enabled);
404 vlib_cli_output (vm, " csum-enabled %d", vif->csum_offload_enabled);
405 vlib_cli_output (vm, " packet-coalesce %d", vif->packet_coalesce);
406 vlib_cli_output (vm, " packet-buffering %d", vif->packet_buffering);
407 if (type & (VIRTIO_IF_TYPE_TAP | VIRTIO_IF_TYPE_PCI))
408 vlib_cli_output (vm, " Mac Address: %U", format_ethernet_address,
410 vlib_cli_output (vm, " Device instance: %u", vif->dev_instance);
/* decode flag bits through the flags_array name table */
411 vlib_cli_output (vm, " flags 0x%x", vif->flags);
412 flag_entry = (struct feat_struct *) &flags_array;
413 while (flag_entry->str)
415 if (vif->flags & (1ULL << flag_entry->bit))
416 vlib_cli_output (vm, " %s (%d)", flag_entry->str,
420 if (type == VIRTIO_IF_TYPE_PCI)
422 device_status (vm, vif);
/* decode local then remote feature bits through feat_array */
424 vlib_cli_output (vm, " features 0x%lx", vif->features);
425 feat_entry = (struct feat_struct *) &feat_array;
426 while (feat_entry->str)
428 if (vif->features & (1ULL << feat_entry->bit))
429 vlib_cli_output (vm, " %s (%d)", feat_entry->str,
433 vlib_cli_output (vm, " remote-features 0x%lx", vif->remote_features);
434 feat_entry = (struct feat_struct *) &feat_array;
435 while (feat_entry->str)
437 if (vif->remote_features & (1ULL << feat_entry->bit))
438 vlib_cli_output (vm, " %s (%d)", feat_entry->str,
442 vlib_cli_output (vm, " Number of RX Virtqueue %u", vif->num_rxqs);
443 vlib_cli_output (vm, " Number of TX Virtqueue %u", vif->num_txqs);
444 if (type == VIRTIO_IF_TYPE_PCI && vif->cxq_vring != NULL &&
445 vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
446 vlib_cli_output (vm, " Number of CTRL Virtqueue 1");
/* per-RX-vring state (packed-ring event fields vs split-ring
   avail/used fields are printed on alternate branches) */
447 vec_foreach_index (i, vif->rxq_vrings)
449 vring = vec_elt_at_index (vif->rxq_vrings, i);
450 vlib_cli_output (vm, " Virtqueue (RX) %d", vring->queue_id);
452 vm, " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
453 vring->queue_size, vring->last_used_idx, vring->desc_next,
458 " driver_event.flags 0x%x driver_event.off_wrap %d device_event.flags 0x%x device_event.off_wrap %d",
459 vring->driver_event->flags,
460 vring->driver_event->off_wrap,
461 vring->device_event->flags,
462 vring->device_event->off_wrap);
464 " avail wrap counter %d, used wrap counter %d",
465 vring->avail_wrap_counter,
466 vring->used_wrap_counter);
470 " avail.flags 0x%x avail.idx %d used.flags 0x%x used.idx %d",
471 vring->avail->flags, vring->avail->idx,
472 vring->used->flags, vring->used->idx);
473 if (type & (VIRTIO_IF_TYPE_TAP | VIRTIO_IF_TYPE_TUN))
475 vlib_cli_output (vm, " kickfd %d, callfd %d", vring->kick_fd,
/* optional full descriptor-table dump (show_descr) */
480 vlib_cli_output (vm, "\n descriptor table:\n");
482 " id addr len flags next/id user_addr\n");
484 " ===== ================== ===== ====== ======= ==================\n");
485 for (j = 0; j < vring->queue_size; j++)
489 vnet_virtio_vring_packed_desc_t *desc =
490 &vring->packed_desc[j];
492 " %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
495 desc->flags, desc->id, desc->addr);
499 vnet_virtio_vring_desc_t *desc = &vring->desc[j];
501 " %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
504 desc->flags, desc->next, desc->addr);
/* per-TX-vring state, plus GRO flow table / buffering summaries */
509 vec_foreach_index (i, vif->txq_vrings)
511 vring = vec_elt_at_index (vif->txq_vrings, i);
512 vlib_cli_output (vm, " Virtqueue (TX) %d", vring->queue_id);
514 vm, " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
515 vring->queue_size, vring->last_used_idx, vring->desc_next,
520 " driver_event.flags 0x%x driver_event.off_wrap %d device_event.flags 0x%x device_event.off_wrap %d",
521 vring->driver_event->flags,
522 vring->driver_event->off_wrap,
523 vring->device_event->flags,
524 vring->device_event->off_wrap);
526 " avail wrap counter %d, used wrap counter %d",
527 vring->avail_wrap_counter,
528 vring->used_wrap_counter);
532 " avail.flags 0x%x avail.idx %d used.flags 0x%x used.idx %d",
533 vring->avail->flags, vring->avail->idx,
534 vring->used->flags, vring->used->idx);
535 if (type & (VIRTIO_IF_TYPE_TAP | VIRTIO_IF_TYPE_TUN))
537 vlib_cli_output (vm, " kickfd %d, callfd %d", vring->kick_fd,
540 if (vring->flow_table)
542 vlib_cli_output (vm, " %U", gro_flow_table_format,
545 if (vif->packet_buffering)
547 vlib_cli_output (vm, " %U", virtio_vring_buffering_format,
552 vlib_cli_output (vm, "\n descriptor table:\n");
554 " id addr len flags next/id user_addr\n");
556 " ===== ================== ===== ====== ======== ==================\n");
557 for (j = 0; j < vring->queue_size; j++)
561 vnet_virtio_vring_packed_desc_t *desc =
562 &vring->packed_desc[j];
564 " %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
567 desc->flags, desc->id, desc->addr);
571 vnet_virtio_vring_desc_t *desc = &vring->desc[j];
573 " %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
576 desc->flags, desc->next, desc->addr);
/* control virtqueue (PCI with CTRL_VQ feature only) */
581 if (type == VIRTIO_IF_TYPE_PCI && vif->cxq_vring != NULL &&
582 vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
584 vring = vif->cxq_vring;
585 vlib_cli_output (vm, " Virtqueue (CTRL) %d", vring->queue_id);
587 vm, " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
588 vring->queue_size, vring->last_used_idx, vring->desc_next,
593 " driver_event.flags 0x%x driver_event.off_wrap %d device_event.flags 0x%x device_event.off_wrap %d",
594 vring->driver_event->flags,
595 vring->driver_event->off_wrap,
596 vring->device_event->flags,
597 vring->device_event->off_wrap);
599 " avail wrap counter %d, used wrap counter %d",
600 vring->avail_wrap_counter,
601 vring->used_wrap_counter);
606 " avail.flags 0x%x avail.idx %d used.flags 0x%x used.idx %d",
607 vring->avail->flags, vring->avail->idx,
608 vring->used->flags, vring->used->idx);
612 vlib_cli_output (vm, "\n descriptor table:\n");
614 " id addr len flags next/id user_addr\n");
616 " ===== ================== ===== ====== ======== ==================\n");
617 for (j = 0; j < vring->queue_size; j++)
621 vnet_virtio_vring_packed_desc_t *desc =
622 &vring->packed_desc[j];
624 " %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
627 desc->flags, desc->id, desc->addr);
631 vnet_virtio_vring_desc_t *desc = &vring->desc[j];
633 " %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
636 desc->flags, desc->next, desc->addr);
645 static clib_error_t *
646 virtio_init (vlib_main_t * vm)
648 virtio_main_t *vim = &virtio_main;
649 clib_error_t *error = 0;
651 vim->log_default = vlib_log_register_class ("virtio", 0);
652 vlib_log_debug (vim->log_default, "initialized");
657 VLIB_INIT_FUNCTION (virtio_init);
660 * fd.io coding-style-patch-verification: ON
663 * eval: (c-set-style "gnu")