2 *------------------------------------------------------------------
3 * Copyright (c) 2017 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #include <sys/types.h>
22 #include <linux/if_tun.h>
23 #include <sys/ioctl.h>
24 #include <linux/virtio_net.h>
25 #include <linux/vhost.h>
26 #include <sys/eventfd.h>
28 #include <vlib/vlib.h>
29 #include <vlib/pci/pci.h>
30 #include <vlib/unix/unix.h>
31 #include <vnet/ethernet/ethernet.h>
32 #include <vnet/ip/ip4_packet.h>
33 #include <vnet/ip/ip6_packet.h>
34 #include <vnet/devices/virtio/virtio.h>
35 #include <vnet/devices/virtio/pci.h>
/* Single global driver state; holds the pool of virtio interfaces that
 * call_read_ready() and virtio_show() index by dev_instance. */
37 virtio_main_t virtio_main;
/* Wrapper around ioctl(2): on failure, stores a unix clib error (with the
 * stringized request name) into a local `err` that must be in scope at every
 * expansion site.  NOTE(review): continuation lines are truncated in this
 * view — upstream the body is braced and ends with `goto error;`; confirm
 * against the original file before relying on control flow here. */
39 #define _IOCTL(fd,a,...) \
40 if (ioctl (fd, a, __VA_ARGS__) < 0) \
42 err = clib_error_return_unix (0, "ioctl(" #a ")"); \
/* clib_file_t read callback for a vring's "call" eventfd.
 * uf->private_data packs (dev_instance << 16 | queue id).  Drains the
 * eventfd counter (read result deliberately ignored) and flags the
 * device RX queue as having a pending interrupt so the input node runs.
 * NOTE(review): the return-type line and braces are elided from this view;
 * upstream this is a static clib_error_t * returning 0 — confirm. */
47 call_read_ready (clib_file_t * uf)
49   virtio_main_t *nm = &virtio_main;
50   vnet_main_t *vnm = vnet_get_main ();
/* low 16 bits of private_data = queue id */
51   u16 qid = uf->private_data & 0xFFFF;
/* high bits = interface dev_instance into the interfaces pool */
53     vec_elt_at_index (nm->interfaces, uf->private_data >> 16);
56   CLIB_UNUSED (ssize_t size) = read (uf->file_descriptor, &b, sizeof (b));
58   vnet_device_input_set_interrupt_pending (vnm, vif->hw_if_index, qid);
/* Create and register vring `idx` of size `sz` for interface `vif`
 * (tap/vhost-net backend):
 *   - validates sz (power of two, <= 32768),
 *   - picks the TX or RX vring vector from the queue index parity,
 *   - allocates desc/avail/used rings cache-line aligned and zeroed,
 *   - creates call/kick eventfds and registers the call fd with file_main,
 *   - programs the kernel vhost fd via VHOST_SET_VRING_* ioctls.
 * Returns 0 on success or a clib_error_t* on failure.
 * NOTE(review): many lines (braces, `goto error` paths, VRING_NUM/BASE
 * state setup, odd/even idx test) are elided from this view — do not infer
 * the full control flow from what is visible here. */
65 virtio_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 idx, u16 sz)
67   clib_error_t *err = 0;
68   virtio_vring_t *vring;
/* zero-initialized argument structs for the VHOST_* ioctls below */
69   struct vhost_vring_state state = { 0 };
70   struct vhost_vring_addr addr = { 0 };
71   struct vhost_vring_file file = { 0 };
72   clib_file_t t = { 0 };
/* virtio requires ring sizes to be powers of two, capped at 32768 */
76     return clib_error_return (0, "ring size must be power of 2");
79     return clib_error_return (0, "ring size must be 32768 or lower");
/* TX-queue branch: lock only needed when multiple worker threads exist */
86       vlib_thread_main_t *thm = vlib_get_thread_main ();
87       vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (idx),
88 CLIB_CACHE_LINE_BYTES);
89       vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (idx));
90       if (thm->n_vlib_mains > 1)
91 clib_spinlock_init (&vring->lockp);
/* RX-queue branch */
95       vec_validate_aligned (vif->rxq_vrings, RX_QUEUE_ACCESS (idx),
96 CLIB_CACHE_LINE_BYTES);
97       vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (idx));
/* descriptor table: sz descriptors, rounded up to a cache line */
99   i = sizeof (struct vring_desc) * sz;
100   i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
101   vring->desc = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
102   clib_memset (vring->desc, 0, i);
/* available ring: header plus sz ring entries */
104   i = sizeof (struct vring_avail) + sz * sizeof (vring->avail->ring[0]);
105   i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
106   vring->avail = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
107   clib_memset (vring->avail, 0, i);
108   // tell kernel that we don't need interrupt
109   vring->avail->flags = VIRTIO_RING_FLAG_MASK_INT;
/* used ring: header plus sz used elements */
111   i = sizeof (struct vring_used) + sz * sizeof (struct vring_used_elem);
112   i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
113   vring->used = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
114   clib_memset (vring->used, 0, i);
116   vring->queue_id = idx;
117   ASSERT (vring->buffers == 0);
118   vec_validate_aligned (vring->buffers, sz, CLIB_CACHE_LINE_BYTES);
/* call fd is non-blocking (polled via clib_file); kick fd is blocking */
121   vring->call_fd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
122   vring->kick_fd = eventfd (0, EFD_CLOEXEC);
/* register the call eventfd so call_read_ready() fires on kernel notify */
124   t.read_function = call_read_ready;
125   t.file_descriptor = vring->call_fd;
126   t.private_data = vif->dev_instance << 16 | idx;
127   t.description = format (0, "%U vring %u", format_virtio_device_name,
128 vif->dev_instance, idx);
129   vring->call_file_index = clib_file_add (&file_main, &t);
/* hand ring geometry, addresses and eventfds to the kernel vhost device */
133   _IOCTL (vif->fd, VHOST_SET_VRING_NUM, &state);
137   addr.desc_user_addr = pointer_to_uword (vring->desc);
138   addr.avail_user_addr = pointer_to_uword (vring->avail);
139   addr.used_user_addr = pointer_to_uword (vring->used);
140   _IOCTL (vif->fd, VHOST_SET_VRING_ADDR, &addr);
143   file.fd = vring->kick_fd;
144   _IOCTL (vif->fd, VHOST_SET_VRING_KICK, &file);
145   file.fd = vring->call_fd;
146   _IOCTL (vif->fd, VHOST_SET_VRING_CALL, &file);
/* attach the tap fd as the vhost-net backend for this vring */
147   file.fd = vif->tap_fd;
148   _IOCTL (vif->fd, VHOST_NET_SET_BACKEND, &file);
/* Return to the vlib buffer pool every buffer still posted on an RX vring,
 * walking from last_used_idx while desc_in_use remains.
 * NOTE(review): the loop construct and the updates to `used`/`last` are
 * elided from this view; only the per-slot free is visible. */
155 virtio_free_rx_buffers (vlib_main_t * vm, virtio_vring_t * vring)
157   u16 used = vring->desc_in_use;
158   u16 last = vring->last_used_idx;
/* ring size is a power of two, so (index & mask) wraps the ring */
159   u16 mask = vring->size - 1;
163       vlib_buffer_free (vm, &vring->buffers[last & mask], 1);
/* Tear down RX vring `idx`: unregister the call fd from file_main, close
 * both eventfds, release any posted buffers, then free the desc/avail/used
 * ring memory and the buffers vector.
 * NOTE(review): the null-checks guarding each free (and the enclosing
 * braces) are elided from this view — confirm against the original. */
170 virtio_vring_free_rx (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
172   virtio_vring_t *vring =
173     vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (idx));
175     clib_file_del_by_index (&file_main, vring->call_file_index);
176   close (vring->kick_fd);
177   close (vring->call_fd);
/* give outstanding RX buffers back to the pool before freeing the rings */
180       virtio_free_rx_buffers (vm, vring);
181       clib_mem_free (vring->used);
184     clib_mem_free (vring->desc);
186     clib_mem_free (vring->avail);
187   vec_free (vring->buffers);
/* Reclaim TX buffers the device has completed: walk the used ring from
 * last_used_idx for (used->idx - last) entries, freeing the vlib buffer
 * recorded for each used element's slot, then store the updated
 * desc_in_use / last_used_idx back into the vring.
 * NOTE(review): the loop header, `slot` extraction from e->id, and the
 * counter decrements are elided from this view. */
192 virtio_free_used_desc (vlib_main_t * vm, virtio_vring_t * vring)
194   u16 used = vring->desc_in_use;
195   u16 sz = vring->size;
197   u16 last = vring->last_used_idx;
/* u16 arithmetic: wraps correctly even when used->idx has rolled over */
198   u16 n_left = vring->used->idx - last;
205       struct vring_used_elem *e = &vring->used->ring[last & mask];
208       vlib_buffer_free (vm, &vring->buffers[slot], 1);
213   vring->desc_in_use = used;
214   vring->last_used_idx = last;
/* Tear down TX vring `idx`: mirror of virtio_vring_free_rx, but reclaims
 * completed TX buffers via virtio_free_used_desc and additionally frees
 * the per-vring spinlock (initialized only when multiple workers exist).
 * NOTE(review): the null-checks guarding each free are elided from view. */
218 virtio_vring_free_tx (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
220   virtio_vring_t *vring =
221     vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (idx));
223     clib_file_del_by_index (&file_main, vring->call_file_index);
224   close (vring->kick_fd);
225   close (vring->call_fd);
/* free buffers the device already consumed before releasing ring memory */
228       virtio_free_used_desc (vm, vring);
229       clib_mem_free (vring->used);
232     clib_mem_free (vring->desc);
234     clib_mem_free (vring->avail);
235   vec_free (vring->buffers);
236   clib_spinlock_free (&vring->lockp);
/* Bind RX vring `idx` to the buffer pool of the NUMA node that hosts the
 * input thread assigned to this queue, so refills allocate NUMA-local
 * buffers.  NOTE(review): the thread_index declaration and the
 * vlib_mains[...] expression are partially elided from this view. */
241 virtio_vring_set_numa_node (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
243   vnet_main_t *vnm = vnet_get_main ();
245   virtio_vring_t *vring =
246     vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (idx));
/* which worker thread polls this RX queue */
248     vnet_get_device_input_thread_index (vnm, vif->hw_if_index,
249 RX_QUEUE_ACCESS (idx));
250   vring->buffer_pool_index =
251     vlib_buffer_pool_get_default_for_numa (vm,
253 [thread_index]->numa_node);
/* Select the virtio-net header size for this interface: the v1 (modern)
 * header when VIRTIO_NET_F_MRG_RXBUF or VIRTIO_F_VERSION_1 was negotiated,
 * otherwise the legacy header.  NOTE(review): braces and the `else`
 * keyword are elided from this view — the two assignments are the if/else
 * arms upstream. */
257 virtio_set_net_hdr_size (virtio_if_t * vif)
259   if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF) ||
260       vif->features & VIRTIO_FEATURE (VIRTIO_F_VERSION_1))
261     vif->virtio_net_hdr_sz = sizeof (struct virtio_net_hdr_v1);
263     vif->virtio_net_hdr_sz = sizeof (struct virtio_net_hdr);
/* CLI helper: dump state for every interface in `hw_if_indices` whose type
 * matches `type` (TAP or PCI) — identity, flags, negotiated and remote
 * feature bits (decoded via the foreach_* macro tables), queue counts, and
 * per-vring counters for RX, TX and (if present) the control vring.  When
 * `show_descr` is set, also prints each vring's full descriptor table.
 * NOTE(review): roughly half the lines (braces, `continue`, some
 * vlib_cli_output call heads and argument tails) are elided from this
 * view; the structure described above follows the visible code only. */
267 virtio_show (vlib_main_t * vm, u32 * hw_if_indices, u8 show_descr, u32 type)
269   u32 i, j, hw_if_index;
271   vnet_main_t *vnm = &vnet_main;
272   virtio_main_t *mm = &virtio_main;
273   virtio_vring_t *vring;
/* name/bit tables built from the feature and flag x-macros below */
279   struct feat_struct *feat_entry;
281   static struct feat_struct feat_array[] = {
282 #define _(s,b) { .str = #s, .bit = b, },
283     foreach_virtio_net_features
288   struct feat_struct *flag_entry;
289   static struct feat_struct flags_array[] = {
290 #define _(b,e,s) { .bit = b, .str = s, },
291     foreach_virtio_if_flag
/* one report per requested hw interface */
299   for (hw_if_index = 0; hw_if_index < vec_len (hw_if_indices); hw_if_index++)
301       vnet_hw_interface_t *hi =
302 	vnet_get_hw_interface (vnm, hw_if_indices[hw_if_index]);
303       vif = pool_elt_at_index (mm->interfaces, hi->dev_instance);
/* skip interfaces of the other type (TAP vs PCI) */
304       if (vif->type != type)
306       vlib_cli_output (vm, "Interface: %U (ifindex %d)",
307 		       format_vnet_hw_if_index_name, vnm,
308 		       hw_if_indices[hw_if_index], vif->hw_if_index);
309       if (type == VIRTIO_IF_TYPE_PCI)
311 	  vlib_cli_output (vm, " PCI Address: %U", format_vlib_pci_addr,
314       if (type == VIRTIO_IF_TYPE_TAP)
316 	  if (vif->host_if_name)
317 	    vlib_cli_output (vm, " name \"%s\"", vif->host_if_name);
319 	    vlib_cli_output (vm, " host-ns \"%s\"", vif->net_ns);
320 	  vlib_cli_output (vm, " fd %d", vif->fd);
321 	  vlib_cli_output (vm, " tap-fd %d", vif->tap_fd);
322 	  vlib_cli_output (vm, " gso-enabled %d", vif->gso_enabled);
324       vlib_cli_output (vm, " Mac Address: %U", format_ethernet_address,
326       vlib_cli_output (vm, " Device instance: %u", vif->dev_instance);
/* decode interface flag bits by name */
327       vlib_cli_output (vm, " flags 0x%x", vif->flags);
328       flag_entry = (struct feat_struct *) &flags_array;
329       while (flag_entry->str)
331 	  if (vif->flags & (1ULL << flag_entry->bit))
332 	    vlib_cli_output (vm, " %s (%d)", flag_entry->str,
336       if (type == VIRTIO_IF_TYPE_PCI)
338 	  device_status (vm, vif);
/* decode negotiated feature bits by name */
340       vlib_cli_output (vm, " features 0x%lx", vif->features);
341       feat_entry = (struct feat_struct *) &feat_array;
342       while (feat_entry->str)
344 	  if (vif->features & (1ULL << feat_entry->bit))
345 	    vlib_cli_output (vm, " %s (%d)", feat_entry->str,
/* decode the peer's advertised feature bits by name */
349       vlib_cli_output (vm, " remote-features 0x%lx", vif->remote_features);
350       feat_entry = (struct feat_struct *) &feat_array;
351       while (feat_entry->str)
353 	  if (vif->remote_features & (1ULL << feat_entry->bit))
354 	    vlib_cli_output (vm, " %s (%d)", feat_entry->str,
358       vlib_cli_output (vm, " Number of RX Virtqueue %u", vif->num_rxqs);
359       vlib_cli_output (vm, " Number of TX Virtqueue %u", vif->num_txqs);
360       if (vif->cxq_vring != NULL
361 	  && vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
362 	vlib_cli_output (vm, " Number of CTRL Virtqueue 1");
/* per-RX-vring counters (plus descriptor table when show_descr) */
363       vec_foreach_index (i, vif->rxq_vrings)
365 	  vring = vec_elt_at_index (vif->rxq_vrings, i);
366 	  vlib_cli_output (vm, " Virtqueue (RX) %d", vring->queue_id);
368 	   " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
369 	   vring->size, vring->last_used_idx, vring->desc_next,
372 	     " avail.flags 0x%x avail.idx %d used.flags 0x%x used.idx %d",
373 	     vring->avail->flags, vring->avail->idx,
374 	     vring->used->flags, vring->used->idx);
/* eventfds only exist for the tap/vhost backend */
375 	  if (type == VIRTIO_IF_TYPE_TAP)
377 	      vlib_cli_output (vm, " kickfd %d, callfd %d", vring->kick_fd,
382 	      vlib_cli_output (vm, "\n descriptor table:\n");
384 	       " id addr len flags next user_addr\n");
386 	       " ===== ================== ===== ====== ===== ==================\n");
387 	      for (j = 0; j < vring->size; j++)
389 		  struct vring_desc *desc = &vring->desc[j];
391 		   " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
394 		   desc->flags, desc->next, desc->addr);
/* per-TX-vring counters (same layout as the RX dump) */
398       vec_foreach_index (i, vif->txq_vrings)
400 	  vring = vec_elt_at_index (vif->txq_vrings, i);
401 	  vlib_cli_output (vm, " Virtqueue (TX) %d", vring->queue_id);
403 	   " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
404 	   vring->size, vring->last_used_idx, vring->desc_next,
407 	     " avail.flags 0x%x avail.idx %d used.flags 0x%x used.idx %d",
408 	     vring->avail->flags, vring->avail->idx,
409 	     vring->used->flags, vring->used->idx);
410 	  if (type == VIRTIO_IF_TYPE_TAP)
412 	      vlib_cli_output (vm, " kickfd %d, callfd %d", vring->kick_fd,
417 	      vlib_cli_output (vm, "\n descriptor table:\n");
419 	       " id addr len flags next user_addr\n");
421 	       " ===== ================== ===== ====== ===== ==================\n");
422 	      for (j = 0; j < vring->size; j++)
424 		  struct vring_desc *desc = &vring->desc[j];
426 		   " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
429 		   desc->flags, desc->next, desc->addr);
/* control vring, when the CTRL_VQ feature was negotiated */
433       if (vif->cxq_vring != NULL
434 	  && vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
436 	  vring = vif->cxq_vring;
437 	  vlib_cli_output (vm, " Virtqueue (CTRL) %d", vring->queue_id);
439 	   " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
440 	   vring->size, vring->last_used_idx,
441 	   vring->desc_next, vring->desc_in_use);
443 	     " avail.flags 0x%x avail.idx %d used.flags 0x%x used.idx %d",
444 	     vring->avail->flags, vring->avail->idx,
445 	     vring->used->flags, vring->used->idx);
446 	  if (type == VIRTIO_IF_TYPE_TAP)
448 	      vlib_cli_output (vm, " kickfd %d, callfd %d", vring->kick_fd,
453 	      vlib_cli_output (vm, "\n descriptor table:\n");
455 	       " id addr len flags next user_addr\n");
457 	       " ===== ================== ===== ====== ===== ==================\n");
458 	      for (j = 0; j < vring->size; j++)
460 		  struct vring_desc *desc = &vring->desc[j];
462 		   " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
465 		   desc->flags, desc->next, desc->addr);
475 * fd.io coding-style-patch-verification: ON
478 * eval: (c-set-style "gnu")