lib/librte_vhost/vhost.c (DPDK 18.08)

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_rwlock.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

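/* Table of all vhost devices, indexed by vid. */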
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                    uint64_t iova, uint64_t *size, uint8_t perm)
{
        uint64_t vva, tmp_size;

        if (unlikely(!*size))
                return 0;

        tmp_size = *size;

        vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
        if (tmp_size == *size)
                return vva;

        iova += tmp_size;

        if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
                /*
                 * iotlb_lock is read-locked for a full burst,
                 * but it only protects the iotlb cache.
                 * In case of IOTLB miss, we might block on the socket,
                 * which could cause a deadlock with QEMU if an IOTLB update
                 * is being handled. We can safely unlock here to avoid it.
                 */
                vhost_user_iotlb_rd_unlock(vq);

                vhost_user_iotlb_pending_insert(vq, iova, perm);
                if (vhost_user_iotlb_miss(dev, iova, perm)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
                                iova);
                        vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
                }

                vhost_user_iotlb_rd_lock(vq);
        }

        return 0;
}

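/*
 * Close a virtqueue's kick fd, and also its call fd when the
 * device is being destroyed.
 */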
void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
        if ((vq->callfd >= 0) && (destroy != 0))
                close(vq->callfd);
        if (vq->kickfd >= 0)
                close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
        uint32_t i;

        vhost_backend_cleanup(dev);

        for (i = 0; i < dev->nr_vring; i++)
                cleanup_vq(dev->virtqueue[i], destroy);
}

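/*
 * Free all memory attached to a virtqueue: the shadow used ring,
 * the batch copy array, the IOTLB mempool and the virtqueue itself.
 */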
void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        if (vq_is_packed(dev))
                rte_free(vq->shadow_used_packed);
        else
                rte_free(vq->shadow_used_split);
        rte_free(vq->batch_copy_elems);
        rte_mempool_free(vq->iotlb_pool);
        rte_free(vq);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
        uint32_t i;

        for (i = 0; i < dev->nr_vring; i++)
                free_vq(dev, dev->virtqueue[i]);

        rte_free(dev);
}

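/*
 * Translate the split ring addresses (desc, avail, used) from guest
 * IOVA to host virtual addresses. Fail if any of the rings is not
 * mapped contiguously for its full size.
 */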
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t req_size, size;

        req_size = sizeof(struct vring_desc) * vq->size;
        size = req_size;
        vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.desc_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->desc || size != req_size)
                return -1;

        req_size = sizeof(struct vring_avail);
        req_size += sizeof(uint16_t) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                req_size += sizeof(uint16_t);
        size = req_size;
        vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.avail_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->avail || size != req_size)
                return -1;

        req_size = sizeof(struct vring_used);
        req_size += sizeof(struct vring_used_elem) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                req_size += sizeof(uint16_t);
        size = req_size;
        vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.used_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->used || size != req_size)
                return -1;

        return 0;
}

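/*
 * Translate the packed ring addresses: the descriptor ring and the
 * driver/device event suppression structures.
 */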
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t req_size, size;

        req_size = sizeof(struct vring_packed_desc) * vq->size;
        size = req_size;
        vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
                                &size, VHOST_ACCESS_RW);
        if (!vq->desc_packed || size != req_size)
                return -1;

        req_size = sizeof(struct vring_packed_desc_event);
        size = req_size;
        vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
                vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
                                &size, VHOST_ACCESS_RW);
        if (!vq->driver_event || size != req_size)
                return -1;

        req_size = sizeof(struct vring_packed_desc_event);
        size = req_size;
        vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
                vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
                                &size, VHOST_ACCESS_RW);
        if (!vq->device_event || size != req_size)
                return -1;

        return 0;
}

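/*
 * Translate the ring addresses and mark the virtqueue as accessible.
 * When VIRTIO_F_IOMMU_PLATFORM is not negotiated, the addresses set by
 * the frontend are already host virtual addresses, so no translation
 * is needed.
 */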
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{

        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                goto out;

        if (vq_is_packed(dev)) {
                if (vring_translate_packed(dev, vq) < 0)
                        return -1;
        } else {
                if (vring_translate_split(dev, vq) < 0)
                        return -1;
        }
out:
        vq->access_ok = 1;

        return 0;
}

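/*
 * Invalidate the cached ring addresses so they are translated again
 * before the next access. The IOTLB write lock is taken when an IOMMU
 * is in use to avoid racing with the datapath.
 */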
void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_lock(vq);

        vq->access_ok = 0;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_unlock(vq);
}

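/*
 * Initialize a virtqueue to its default state: uninitialized eventfds,
 * empty IOTLB cache and no backend attached.
 */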
static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to init vring, out of bound (%d)\n",
                                vring_idx);
                return;
        }

        vq = dev->virtqueue[vring_idx];

        memset(vq, 0, sizeof(struct vhost_virtqueue));

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

        vhost_user_iotlb_init(dev, vring_idx);
        /* Backends are set to -1 indicating an inactive device. */
        vq->backend = -1;

        TAILQ_INIT(&vq->zmbuf_list);
}

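/*
 * Reset a virtqueue to its default state while preserving its call fd,
 * which remains valid across a device reset.
 */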
static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;
        int callfd;

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to reset vring, out of bound (%d)\n",
                                vring_idx);
                return;
        }

        vq = dev->virtqueue[vring_idx];
        callfd = vq->callfd;
        init_vring_queue(dev, vring_idx);
        vq->callfd = callfd;
}

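/*
 * Allocate and initialize a new virtqueue, and account for it in
 * dev->nr_vring.
 */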
int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;

        vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
        if (vq == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to allocate memory for vring:%u.\n", vring_idx);
                return -1;
        }

        dev->virtqueue[vring_idx] = vq;
        init_vring_queue(dev, vring_idx);
        rte_spinlock_init(&vq->access_lock);
        vq->avail_wrap_counter = 1;
        vq->used_wrap_counter = 1;
        vq->signalled_used_valid = false;

        dev->nr_vring += 1;

        return 0;
}

/*
 * Reset some variables in the device structure, while keeping a few
 * others untouched, such as vid, ifname and nr_vring: they should
 * remain the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
        uint32_t i;

        dev->features = 0;
        dev->protocol_features = 0;
        dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

        for (i = 0; i < dev->nr_vring; i++)
                reset_vring_queue(dev, i);
}

/*
 * Invoked when a new vhost-user connection is established (when a new
 * virtio device is being attached).
 */
int
vhost_new_device(void)
{
        struct virtio_net *dev;
        int i;

        for (i = 0; i < MAX_VHOST_DEVICE; i++) {
                if (vhost_devices[i] == NULL)
                        break;
        }

        if (i == MAX_VHOST_DEVICE) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to find a free slot for new device.\n");
                return -1;
        }

        dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
        if (dev == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to allocate memory for new dev.\n");
                return -1;
        }

        vhost_devices[i] = dev;
        dev->vid = i;
        dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
        dev->slave_req_fd = -1;
        dev->vdpa_dev_id = -1;
        rte_spinlock_init(&dev->slave_req_lock);

        return i;
}

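/*
 * Notify the application and any attached vDPA device that a running
 * device is about to be destroyed, and clear its RUNNING flag.
 */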
void
vhost_destroy_device_notify(struct virtio_net *dev)
{
        struct rte_vdpa_device *vdpa_dev;
        int did;

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                did = dev->vdpa_dev_id;
                vdpa_dev = rte_vdpa_get_device(did);
                if (vdpa_dev && vdpa_dev->ops->dev_close)
                        vdpa_dev->ops->dev_close(dev->vid);
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }
}

/*
 * Invoked when the vhost-user connection is broken (when the virtio
 * device is being detached).
 */
void
vhost_destroy_device(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 1);
        free_device(dev);

        vhost_devices[vid] = NULL;
}

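/* Bind a vDPA device, identified by did, to this vhost device. */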
void
vhost_attach_vdpa_device(int vid, int did)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        if (rte_vdpa_get_device(did) == NULL)
                return;

        dev->vdpa_dev_id = did;
}

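/*
 * Unbind the vDPA device: disable the host notifiers and clear the
 * vDPA device id.
 */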
void
vhost_detach_vdpa_device(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_user_host_notifier_ctrl(vid, false);

        dev->vdpa_dev_id = -1;
}

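/*
 * Store the device's interface name (the vhost-user socket path),
 * truncating it to fit the ifname buffer.
 */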
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
        struct virtio_net *dev;
        unsigned int len;

        dev = get_device(vid);
        if (dev == NULL)
                return;

        len = if_len > sizeof(dev->ifname) ?
                sizeof(dev->ifname) : if_len;

        strncpy(dev->ifname, if_name, len);
        dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}

void
vhost_enable_dequeue_zero_copy(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        dev->dequeue_zero_copy = 1;
}

void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        if (enable)
                dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
        else
                dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}

int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -ENODEV;

        if (!(dev->flags & VIRTIO_DEV_READY))
                return -EAGAIN;

        if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
                return -ENOTSUP;

        *mtu = dev->mtu;

        return 0;
}

int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
        struct virtio_net *dev = get_device(vid);
        int numa_node;
        int ret;

        if (dev == NULL)
                return -1;

        ret = get_mempolicy(&numa_node, NULL, 0, dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to query numa node: %s\n",
                        vid, rte_strerror(errno));
                return -1;
        }

        return numa_node;
#else
        RTE_SET_USED(vid);
        return -1;
#endif
}

uint32_t
rte_vhost_get_queue_num(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return 0;

        return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return 0;

        return dev->nr_vring;
}

int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return -1;

        len = RTE_MIN(len, sizeof(dev->ifname));

        strncpy(buf, dev->ifname, len);
        buf[len - 1] = '\0';

        return 0;
}

int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
        struct virtio_net *dev;

        dev = get_device(vid);
        if (!dev)
                return -1;

        *features = dev->features;
        return 0;
}

int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
        struct virtio_net *dev;
        struct rte_vhost_memory *m;
        size_t size;

        dev = get_device(vid);
        if (!dev)
                return -1;

        size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
        m = malloc(sizeof(struct rte_vhost_memory) + size);
        if (!m)
                return -1;

        m->nregions = dev->mem->nregions;
        memcpy(m->regions, dev->mem->regions, size);
        *mem = m;

        return 0;
}

int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
                          struct rte_vhost_vring *vring)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return -1;

        if (vring_idx >= VHOST_MAX_VRING)
                return -1;

        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return -1;

        vring->desc  = vq->desc;
        vring->avail = vq->avail;
        vring->used  = vq->used;
        vring->log_guest_addr  = vq->log_guest_addr;

        vring->callfd  = vq->callfd;
        vring->kickfd  = vq->kickfd;
        vring->size    = vq->size;

        return 0;
}

int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return -1;

        if (vring_idx >= VHOST_MAX_VRING)
                return -1;

        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return -1;

        if (vq_is_packed(dev))
                vhost_vring_call_packed(dev, vq);
        else
                vhost_vring_call_split(dev, vq);

        return 0;
}

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return 0;

        vq = dev->virtqueue[queue_id];
        if (!vq->enabled)
                return 0;

        return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}

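/*
 * Enable or disable guest notifications for a split ring by toggling
 * VRING_USED_F_NO_NOTIFY in the used ring flags.
 */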
static inline void
vhost_enable_notify_split(struct vhost_virtqueue *vq, int enable)
{
        if (enable)
                vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
        else
                vq->used->flags |= VRING_USED_F_NO_NOTIFY;
}

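/*
 * Enable or disable guest notifications for a packed ring through the
 * device event suppression structure. When VIRTIO_RING_F_EVENT_IDX is
 * negotiated, descriptor-based event suppression is used instead.
 */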
static inline void
vhost_enable_notify_packed(struct virtio_net *dev,
                struct vhost_virtqueue *vq, int enable)
{
        uint16_t flags;

        if (!enable) {
                vq->device_event->flags = VRING_EVENT_F_DISABLE;
                return;
        }

        flags = VRING_EVENT_F_ENABLE;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
                flags = VRING_EVENT_F_DESC;
                vq->device_event->off_wrap = vq->last_avail_idx |
                        vq->avail_wrap_counter << 15;
        }

        rte_smp_wmb();

        vq->device_event->flags = flags;
}

int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
        struct virtio_net *dev = get_device(vid);
        struct vhost_virtqueue *vq;

        if (!dev)
                return -1;

        vq = dev->virtqueue[queue_id];

        if (vq_is_packed(dev))
                vhost_enable_notify_packed(dev, vq, enable);
        else
                vhost_enable_notify_split(vq, enable);

        return 0;
}

void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_log_write(dev, addr, len);
}

void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
                         uint64_t offset, uint64_t len)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (dev == NULL)
                return;

        if (vring_idx >= VHOST_MAX_VRING)
                return;
        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return;

        vhost_log_used_vring(dev, vq, offset, len);
}

uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (dev == NULL)
                return 0;

        if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, qid);
                return 0;
        }

        vq = dev->virtqueue[qid];
        if (vq == NULL)
                return 0;

        if (unlikely(vq->enabled == 0 || vq->avail == NULL))
                return 0;

        return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}

int rte_vhost_get_vdpa_device_id(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return -1;

        return dev->vdpa_dev_id;
}

int rte_vhost_get_log_base(int vid, uint64_t *log_base,
                uint64_t *log_size)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        *log_base = dev->log_base;
        *log_size = dev->log_size;

        return 0;
}

int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
                uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        *last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
        *last_used_idx = dev->virtqueue[queue_id]->last_used_idx;

        return 0;
}

int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
                uint16_t last_avail_idx, uint16_t last_used_idx)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
        dev->virtqueue[queue_id]->last_used_idx = last_used_idx;

        return 0;
}