/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  Malicious guests that have escaped will then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

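/* MTU bounds enforced by VHOST_USER_NET_SET_MTU; 68 is the minimum IPv4 link MTU. */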
#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
        [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
};

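/*
 * Return the block size reported by fstat() for the given fd (the hugepage
 * size for hugetlbfs-backed memory), or (uint64_t)-1 on failure.
 */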
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

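/* Unmap and close every region of the device's memory table. */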
static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * This function does nothing at the moment; it simply returns success.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * The master requests the set of features we support.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);
        return features;
}

/*
 * The master requests the number of queues we support.
 */
static uint32_t
vhost_user_get_queue_num(struct virtio_net *dev)
{
        uint32_t queue_num = 0;

        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
        return queue_num;
}

/*
 * We receive the set of features negotiated between us and the virtio device.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return 0;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return -1;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        VHOST_LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        free_vq(dev, vq);
                }
        }

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_features)
                vdpa_dev->ops->set_features(dev->vid);

        return 0;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        /* VIRTIO 1.0, 2.4 Virtqueues says:
         *
         *   Queue Size value is always a power of 2. The maximum Queue Size
         *   value is 32768.
         */
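        /* vq->size is a power of two iff (size & (size - 1)) == 0, given size > 0 */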
        if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid virtqueue size %u\n", vq->size);
                return -1;
        }

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is force disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
                TAILQ_INIT(&vq->zmbuf_list);
        }

        if (vq_is_packed(dev)) {
                vq->shadow_used_packed = rte_malloc(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_packed) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return -1;
                }

        } else {
                vq->shadow_used_split = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_split) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return -1;
                }
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return -1;
        }

        return 0;
}

/*
 * Reallocate virtio_dev and vhost_virtqueue data structures to place them on
 * the same NUMA node as the memory of the vring descriptor.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct zcopy_mbuf *new_zmbuf;
        struct vring_used_elem *new_shadow_used_split;
        struct vring_used_elem_packed *new_shadow_used_packed;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                TAILQ_INIT(&vq->zmbuf_list);

                new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
                        sizeof(struct zcopy_mbuf), 0, newnode);
                if (new_zmbuf) {
                        rte_free(vq->zmbufs);
                        vq->zmbufs = new_zmbuf;
                }

                if (vq_is_packed(dev)) {
                        new_shadow_used_packed = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem_packed),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_packed) {
                                rte_free(vq->shadow_used_packed);
                                vq->shadow_used_packed = new_shadow_used_packed;
                        }
                } else {
                        new_shadow_used_split = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_split) {
                                rte_free(vq->shadow_used_split);
                                vq->shadow_used_split = new_shadow_used_split;
                        }
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (qva >= r->guest_user_addr &&
                    qva <  r->guest_user_addr + r->size) {

                        if (unlikely(*len > r->guest_user_addr + r->size - qva))
                                *len = r->guest_user_addr + r->size - qva;

                        return qva - r->guest_user_addr +
                               r->host_user_addr;
                }
        }
        *len = 0;

        return 0;
}

/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t *size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra, size);
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;
        uint64_t len;

        if (vq_is_packed(dev)) {
                len = sizeof(struct vring_packed_desc) * vq->size;
                vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                        ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
                vq->log_guest_addr = 0;
                if (vq->desc_packed == NULL ||
                                len != sizeof(struct vring_packed_desc) *
                                vq->size) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to map desc_packed ring.\n",
                                dev->vid);
                        return dev;
                }

                dev = numa_realloc(dev, vq_index);
                vq = dev->virtqueue[vq_index];
                addr = &vq->ring_addrs;

                len = sizeof(struct vring_packed_desc_event);
                vq->driver_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->avail_user_addr, &len);
                if (vq->driver_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find driver area address.\n",
                                dev->vid);
                        return dev;
                }

                len = sizeof(struct vring_packed_desc_event);
                vq->device_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->used_user_addr, &len);
                if (vq->device_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find device area address.\n",
                                dev->vid);
                        return dev;
                }

                return dev;
        }

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        len = sizeof(struct vring_desc) * vq->size;
        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, &len);
        if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map desc ring.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, &len);
        if (vq->avail == 0 ||
                        len != sizeof(struct vring_avail) +
                        sizeof(uint16_t) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map avail ring.\n",
                        dev->vid);
                return dev;
        }

        len = sizeof(struct vring_used) +
                sizeof(struct vring_used_elem) * vq->size;
        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, &len);
        if (vq->used == 0 || len != sizeof(struct vring_used) +
                        sizeof(struct vring_used_elem) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map used ring.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        struct virtio_net *dev = *pdev;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is not
         * started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return -1;

                *pdev = dev;
        }

        return 0;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

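/*
 * Record one guest-physical to host-physical page mapping, growing the
 * guest_pages array as needed and merging with the previous entry when the
 * host-physical ranges are contiguous.
 */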
static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                dev->guest_pages = realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page));
                if (!dev->guest_pages) {
                        RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
                        return -1;
                }
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}

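/*
 * Walk a memory region page by page and record the guest-physical to
 * host-physical mapping of each page; the first chunk may be shorter so
 * that subsequent chunks start on a page boundary.
 */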
static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

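/*
 * Return true if the memory layout announced in the new SET_MEM_TABLE
 * message differs from the table currently mapped.
 */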
static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int populate;
        int fd;

        if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory.nregions);
                return -1;
        }

        if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);

                for (i = 0; i < memory.nregions; i++)
                        close(pmsg->fds[i]);

                return 0;
        }

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        /* Flush IOTLB cache as previous HVAs are now invalid */
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                for (i = 0; i < dev->nr_vring; i++)
                        vhost_user_iotlb_flush_all(dev->virtqueue[i]);

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = pmsg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;

                /* Check for memory_size + mmap_offset overflow */
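                /* In unsigned arithmetic, off >= -size means off + size would wrap past 2^64. */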
                if (mmap_offset >= -reg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap_offset (%#"PRIx64") and memory_size "
                                "(%#"PRIx64") overflow\n",
                                mmap_offset, reg->size);
                        goto err_mmap;
                }

                mmap_size = reg->size + mmap_offset;

                /* Without the MAP_ANONYMOUS flag, mmap() must be called with
                 * a length aligned to the hugepage size on older long-term
                 * Linux kernels (e.g. 2.6.32 and 3.2.72), or it fails with
                 * EINVAL.
                 *
                 * To avoid failure, make sure the length stays aligned here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | populate, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        if (add_guest_pages(dev, reg, alignment) < 0) {
                                RTE_LOG(ERR, VHOST_CONFIG,
                                        "adding guest pages to region %u failed.\n",
                                        i);
                                goto err_mmap;
                        }

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq->desc || vq->avail || vq->used) {
                        /*
                         * If the memory table got updated, the ring addresses
                         * need to be translated again as virtual addresses have
                         * changed.
                         */
                        vring_invalidate(dev, vq);

                        dev = translate_ring_addresses(dev, i);
                        if (!dev)
                                return -1;

                        *pdev = dev;
                }
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}

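/*
 * A ring is considered ready once its descriptor area(s) are mapped and
 * both the kick and call eventfds have been received.
 */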
static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        bool rings_ok;

        if (!vq)
                return false;

        if (vq_is_packed(dev))
                rings_ok = !!vq->desc_packed;
        else
                rings_ok = vq->desc && vq->avail && vq->used;

        return rings_ok &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(dev, vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

static void
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = *pdev;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
}

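/* Release all zero-copy mbufs still held by the ring, then the zmbuf array. */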
static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU will send us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        vhost_destroy_device_notify(dev);

        dev->flags &= ~VIRTIO_DEV_READY;
        dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;

        /* Here we are safe to get the last avail index */
        msg->payload.state.num = vq->last_avail_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * Based on the current QEMU vhost-user implementation, this message
         * is sent in vhost_vring_stop, and only there.
         * TODO: clean up the vring; it isn't usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        if (vq_is_packed(dev)) {
                rte_free(vq->shadow_used_packed);
                vq->shadow_used_packed = NULL;
        } else {
                rte_free(vq->shadow_used_split);
                vq->shadow_used_split = NULL;
        }

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

        return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends us a message to
 * enable the virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;
        int index = (int)msg->payload.state.index;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);

        dev->virtqueue[index]->enabled = enable;

        return 0;
}

static void
vhost_user_get_protocol_features(struct virtio_net *dev,
                                 struct VhostUserMsg *msg)
{
        uint64_t features, protocol_features;

        rte_vhost_driver_get_features(dev->ifname, &features);
        rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);

        /*
         * For now, the REPLY_ACK protocol feature is only mandatory for the
         * IOMMU feature. If IOMMU is explicitly disabled by the application,
         * also disable the REPLY_ACK feature to cope with older buggy QEMU
         * versions (from v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
}

static void
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRId32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;

        /* Don't allow mmap_offset to point outside the mmap region */
        if (off > size) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
                        off, size);
                return -1;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRId64", offset: %"PRId64"\n",
                size, off);

        /*
         * mmap from offset 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page size aligned.
         */
        addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free the previously mapped log memory in case
         * VHOST_USER_SET_LOG_BASE is received multiple times.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        return 0;
}

/*
 * A RARP packet is constructed and broadcast to notify switches about the
 * new location of the migrated VM, so that packets from outside will not
 * be lost after migration.
 *
 * However, we don't actually "send" a RARP packet here; instead, we set
 * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() is for making sure the mac is copied
         * before the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);
        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->migration_done)
                vdpa_dev->ops->migration_done(dev->vid);

        return 0;
}

static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}

static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return -1;
        }

        dev->slave_req_fd = fd;

        return 0;
}

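/*
 * Return 1 if the IOTLB update covers the start address of the desc, avail
 * or used ring, in which case the ring addresses can now be translated.
 */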
static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

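/*
 * Return 1 if the invalidated IOVA range overlaps any byte of the desc,
 * avail or used ring, in which case the cached ring addresses must be
 * dropped.
 */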
static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}

static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva, len;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                len = imsg->size;
                vva = qva_to_vva(dev, imsg->uaddr, &len);
                if (!vva)
                        return -1;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        len, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return -1;
        }

        return 0;
}

/* Return the number of bytes read on success, or a negative value on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
                msg->fds, VHOST_MEMORY_MAX_NREGIONS);
        if (ret <= 0)
                return ret;

        if (msg && msg->size) {
                if (msg->size > sizeof(msg->payload)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "invalid msg size: %d\n", msg->size);
                        return -1;
                }
                ret = read(sockfd, &msg->payload, msg->size);
                if (ret <= 0)
                        return ret;
                if (ret != (int)msg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "read control message failed\n");
                        return -1;
                }
        }

        return ret;
}

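/* Send a vhost-user message, optionally passing file descriptors along. */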
static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg, int *fds, int fd_num)
{
        if (!msg)
                return 0;

        return send_fd_message(sockfd, (char *)msg,
                VHOST_USER_HDR_SIZE + msg->size, fds, fd_num);
}

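/* Stamp the version and reply flags before answering the master. */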
static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        msg->flags &= ~VHOST_USER_VERSION_MASK;
        msg->flags &= ~VHOST_USER_NEED_REPLY;
        msg->flags |= VHOST_USER_VERSION;
        msg->flags |= VHOST_USER_REPLY_MASK;

        return send_vhost_message(sockfd, msg, NULL, 0);
}

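/*
 * Send a request on the slave channel. When a reply is expected, the
 * slave_req_lock is taken before sending; it is released here only on send
 * failure, otherwise it stays held until the reply is handled.
 */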
1460 static int
1461 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg,
1462                          int *fds, int fd_num)
1463 {
1464         int ret;
1465
1466         if (msg->flags & VHOST_USER_NEED_REPLY)
1467                 rte_spinlock_lock(&dev->slave_req_lock);
1468
1469         ret = send_vhost_message(dev->slave_req_fd, msg, fds, fd_num);
1470         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
1471                 rte_spinlock_unlock(&dev->slave_req_lock);
1472
1473         return ret;
1474 }
1475
1476 /*
1477  * Allocate a queue pair if it hasn't been allocated yet
1478  */
1479 static int
1480 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
1481 {
1482         uint16_t vring_idx;
1483
1484         switch (msg->request.master) {
1485         case VHOST_USER_SET_VRING_KICK:
1486         case VHOST_USER_SET_VRING_CALL:
1487         case VHOST_USER_SET_VRING_ERR:
1488                 vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1489                 break;
1490         case VHOST_USER_SET_VRING_NUM:
1491         case VHOST_USER_SET_VRING_BASE:
1492         case VHOST_USER_SET_VRING_ENABLE:
1493                 vring_idx = msg->payload.state.index;
1494                 break;
1495         case VHOST_USER_SET_VRING_ADDR:
1496                 vring_idx = msg->payload.addr.index;
1497                 break;
1498         default:
1499                 return 0;
1500         }
1501
1502         if (vring_idx >= VHOST_MAX_VRING) {
1503                 RTE_LOG(ERR, VHOST_CONFIG,
1504                         "invalid vring index: %u\n", vring_idx);
1505                 return -1;
1506         }
1507
1508         if (dev->virtqueue[vring_idx])
1509                 return 0;
1510
1511         return alloc_vring_queue(dev, vring_idx);
1512 }
1513
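/*
 * Take the access_lock of every allocated virtqueue, so that messages
 * which may change the rings or the memory table cannot race with the
 * datapath threads.
 */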
1514 static void
1515 vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
1516 {
1517         unsigned int i = 0;
1518         unsigned int vq_num = 0;
1519
1520         while (vq_num < dev->nr_vring) {
1521                 struct vhost_virtqueue *vq = dev->virtqueue[i];
1522
1523                 if (vq) {
1524                         rte_spinlock_lock(&vq->access_lock);
1525                         vq_num++;
1526                 }
1527                 i++;
1528         }
1529 }
1530
1531 static void
1532 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
1533 {
1534         unsigned int i = 0;
1535         unsigned int vq_num = 0;
1536
1537         while (vq_num < dev->nr_vring) {
1538                 struct vhost_virtqueue *vq = dev->virtqueue[i];
1539
1540                 if (vq) {
1541                         rte_spinlock_unlock(&vq->access_lock);
1542                         vq_num++;
1543                 }
1544                 i++;
1545         }
1546 }
1547
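/*
 * Read and dispatch one vhost-user request from the master.
 *
 * Returns -1 if the message could not be read or is malformed. Handler
 * failures are reported back to the master through the u64 status of
 * the reply when VHOST_USER_NEED_REPLY is set.
 */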
1548 int
1549 vhost_user_msg_handler(int vid, int fd)
1550 {
1551         struct virtio_net *dev;
1552         struct VhostUserMsg msg;
1553         struct rte_vdpa_device *vdpa_dev;
1554         int did = -1;
1555         int ret;
1556         int unlock_required = 0;
1557         uint32_t skip_master = 0;
1558
1559         dev = get_device(vid);
1560         if (dev == NULL)
1561                 return -1;
1562
1563         if (!dev->notify_ops) {
1564                 dev->notify_ops = vhost_driver_callback_get(dev->ifname);
1565                 if (!dev->notify_ops) {
1566                         RTE_LOG(ERR, VHOST_CONFIG,
1567                                 "failed to get callback ops for driver %s\n",
1568                                 dev->ifname);
1569                         return -1;
1570                 }
1571         }
1572
1573         ret = read_vhost_message(fd, &msg);
1574         if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
1575                 if (ret < 0)
1576                         RTE_LOG(ERR, VHOST_CONFIG,
1577                                 "vhost read message failed\n");
1578                 else if (ret == 0)
1579                         RTE_LOG(INFO, VHOST_CONFIG,
1580                                 "vhost peer closed\n");
1581                 else
1582                         RTE_LOG(ERR, VHOST_CONFIG,
1583                                 "vhost read incorrect message\n");
1584
1585                 return -1;
1586         }
1587
1588         ret = 0;
1589         if (msg.request.master != VHOST_USER_IOTLB_MSG)
1590                 RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
1591                         vhost_message_str[msg.request.master]);
1592         else
1593                 RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
1594                         vhost_message_str[msg.request.master]);
1595
1596         ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
1597         if (ret < 0) {
1598                 RTE_LOG(ERR, VHOST_CONFIG,
1599                         "failed to alloc queue\n");
1600                 return -1;
1601         }
1602
1603         /*
1604          * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
1605          * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
1606          * and the device is destroyed. destroy_device waits for the queues
1607          * to become inactive, so it is safe. Otherwise taking the
1608          * access_lock would cause a deadlock.
1609          */
1610         switch (msg.request.master) {
1611         case VHOST_USER_SET_FEATURES:
1612         case VHOST_USER_SET_PROTOCOL_FEATURES:
1613         case VHOST_USER_SET_OWNER:
1614         case VHOST_USER_SET_MEM_TABLE:
1615         case VHOST_USER_SET_LOG_BASE:
1616         case VHOST_USER_SET_LOG_FD:
1617         case VHOST_USER_SET_VRING_NUM:
1618         case VHOST_USER_SET_VRING_ADDR:
1619         case VHOST_USER_SET_VRING_BASE:
1620         case VHOST_USER_SET_VRING_KICK:
1621         case VHOST_USER_SET_VRING_CALL:
1622         case VHOST_USER_SET_VRING_ERR:
1623         case VHOST_USER_SET_VRING_ENABLE:
1624         case VHOST_USER_SEND_RARP:
1625         case VHOST_USER_NET_SET_MTU:
1626         case VHOST_USER_SET_SLAVE_REQ_FD:
1627                 vhost_user_lock_all_queue_pairs(dev);
1628                 unlock_required = 1;
1629                 break;
1630         default:
1631                 break;
1632
1633         }
1634
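        /*
         * Let the external backend (the vhost-crypto backend, for
         * instance) have a first look at the request. It may send its
         * own reply and/or ask that the generic handling below be
         * skipped entirely.
         */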
1635         if (dev->extern_ops.pre_msg_handle) {
1636                 uint32_t need_reply;
1637
1638                 ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
1639                                 (void *)&msg, &need_reply, &skip_master);
1640                 if (ret < 0)
1641                         goto skip_to_reply;
1642
1643                 if (need_reply)
1644                         send_vhost_reply(fd, &msg);
1645
1646                 if (skip_master)
1647                         goto skip_to_post_handle;
1648         }
1649
1650         switch (msg.request.master) {
1651         case VHOST_USER_GET_FEATURES:
1652                 msg.payload.u64 = vhost_user_get_features(dev);
1653                 msg.size = sizeof(msg.payload.u64);
1654                 send_vhost_reply(fd, &msg);
1655                 break;
1656         case VHOST_USER_SET_FEATURES:
1657                 ret = vhost_user_set_features(dev, msg.payload.u64);
1658                 if (ret)
1659                         return -1;
1660                 break;
1661
1662         case VHOST_USER_GET_PROTOCOL_FEATURES:
1663                 vhost_user_get_protocol_features(dev, &msg);
1664                 send_vhost_reply(fd, &msg);
1665                 break;
1666         case VHOST_USER_SET_PROTOCOL_FEATURES:
1667                 vhost_user_set_protocol_features(dev, msg.payload.u64);
1668                 break;
1669
1670         case VHOST_USER_SET_OWNER:
1671                 vhost_user_set_owner();
1672                 break;
1673         case VHOST_USER_RESET_OWNER:
1674                 vhost_user_reset_owner(dev);
1675                 break;
1676
1677         case VHOST_USER_SET_MEM_TABLE:
1678                 ret = vhost_user_set_mem_table(&dev, &msg);
1679                 break;
1680
1681         case VHOST_USER_SET_LOG_BASE:
1682                 vhost_user_set_log_base(dev, &msg);
1683
1684                 /* SET_LOG_BASE needs a reply carrying a u64 payload */
1685                 msg.size = sizeof(msg.payload.u64);
1686                 send_vhost_reply(fd, &msg);
1687                 break;
1688         case VHOST_USER_SET_LOG_FD:
1689                 close(msg.fds[0]);
1690                 RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
1691                 break;
1692
1693         case VHOST_USER_SET_VRING_NUM:
1694                 vhost_user_set_vring_num(dev, &msg);
1695                 break;
1696         case VHOST_USER_SET_VRING_ADDR:
1697                 vhost_user_set_vring_addr(&dev, &msg);
1698                 break;
1699         case VHOST_USER_SET_VRING_BASE:
1700                 vhost_user_set_vring_base(dev, &msg);
1701                 break;
1702
1703         case VHOST_USER_GET_VRING_BASE:
1704                 vhost_user_get_vring_base(dev, &msg);
1705                 msg.size = sizeof(msg.payload.state);
1706                 send_vhost_reply(fd, &msg);
1707                 break;
1708
1709         case VHOST_USER_SET_VRING_KICK:
1710                 vhost_user_set_vring_kick(&dev, &msg);
1711                 break;
1712         case VHOST_USER_SET_VRING_CALL:
1713                 vhost_user_set_vring_call(dev, &msg);
1714                 break;
1715
1716         case VHOST_USER_SET_VRING_ERR:
1717                 if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1718                         close(msg.fds[0]);
1719                 RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
1720                 break;
1721
1722         case VHOST_USER_GET_QUEUE_NUM:
1723                 msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
1724                 msg.size = sizeof(msg.payload.u64);
1725                 send_vhost_reply(fd, &msg);
1726                 break;
1727
1728         case VHOST_USER_SET_VRING_ENABLE:
1729                 vhost_user_set_vring_enable(dev, &msg);
1730                 break;
1731         case VHOST_USER_SEND_RARP:
1732                 vhost_user_send_rarp(dev, &msg);
1733                 break;
1734
1735         case VHOST_USER_NET_SET_MTU:
1736                 ret = vhost_user_net_set_mtu(dev, &msg);
1737                 break;
1738
1739         case VHOST_USER_SET_SLAVE_REQ_FD:
1740                 ret = vhost_user_set_req_fd(dev, &msg);
1741                 break;
1742
1743         case VHOST_USER_IOTLB_MSG:
1744                 ret = vhost_user_iotlb_msg(&dev, &msg);
1745                 break;
1746
1747         default:
1748                 ret = -1;
1749                 break;
1750         }
1751
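        /*
         * Give the external backend a chance to post-process the request
         * and to send its own reply, mirroring the pre-handling hook above.
         */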
1752 skip_to_post_handle:
1753         if (dev->extern_ops.post_msg_handle) {
1754                 uint32_t need_reply;
1755
1756                 ret = (*dev->extern_ops.post_msg_handle)(
1757                                 dev->vid, (void *)&msg, &need_reply);
1758                 if (ret < 0)
1759                         goto skip_to_reply;
1760
1761                 if (need_reply)
1762                         send_vhost_reply(fd, &msg);
1763         }
1764
1765 skip_to_reply:
1766         if (unlock_required)
1767                 vhost_user_unlock_all_queue_pairs(dev);
1768
1769         if (msg.flags & VHOST_USER_NEED_REPLY) {
1770                 msg.payload.u64 = !!ret;
1771                 msg.size = sizeof(msg.payload.u64);
1772                 send_vhost_reply(fd, &msg);
1773         }
1774
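        /*
         * Once all rings are fully set up, mark the device ready and, on
         * the first transition, hand it to the application through the
         * new_device() callback.
         */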
1775         if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
1776                 dev->flags |= VIRTIO_DEV_READY;
1777
1778                 if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
1779                         if (dev->dequeue_zero_copy) {
1780                                 RTE_LOG(INFO, VHOST_CONFIG,
1781                                                 "dequeue zero copy is enabled\n");
1782                         }
1783
1784                         if (dev->notify_ops->new_device(dev->vid) == 0)
1785                                 dev->flags |= VIRTIO_DEV_RUNNING;
1786                 }
1787         }
1788
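        /*
         * If a vDPA device is attached, configure it when the rings get
         * enabled, and try to install host notifiers so the guest can
         * kick the hardware directly instead of going through a relay.
         */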
1789         did = dev->vdpa_dev_id;
1790         vdpa_dev = rte_vdpa_get_device(did);
1791         if (vdpa_dev && virtio_is_ready(dev) &&
1792                         !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
1793                         msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
1794                 if (vdpa_dev->ops->dev_conf)
1795                         vdpa_dev->ops->dev_conf(dev->vid);
1796                 dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
1797                 if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
1798                         RTE_LOG(INFO, VHOST_CONFIG,
1799                                 "(%d) software relay is used for vDPA, performance may be low.\n",
1800                                 dev->vid);
1801                 }
1802         }
1803
1804         return 0;
1805 }
1806
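/*
 * Wait for the master's acknowledgement to a slave request that was sent
 * with VHOST_USER_NEED_REPLY, and release the lock taken by
 * send_vhost_slave_message(). A non-zero u64 payload in the reply means
 * the master failed to handle the request.
 */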
1807 static int process_slave_message_reply(struct virtio_net *dev,
1808                                        const VhostUserMsg *msg)
1809 {
1810         VhostUserMsg msg_reply;
1811         int ret;
1812
1813         if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
1814                 return 0;
1815
1816         if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
1817                 ret = -1;
1818                 goto out;
1819         }
1820
1821         if (msg_reply.request.slave != msg->request.slave) {
1822                 RTE_LOG(ERR, VHOST_CONFIG,
1823                         "Received unexpected msg type (%u), expected %u\n",
1824                         msg_reply.request.slave, msg->request.slave);
1825                 ret = -1;
1826                 goto out;
1827         }
1828
1829         ret = msg_reply.payload.u64 ? -1 : 0;
1830
1831 out:
1832         rte_spinlock_unlock(&dev->slave_req_lock);
1833         return ret;
1834 }
1835
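/*
 * Ask the master to translate an address that missed in the IOTLB cache.
 * The translation is not returned here; it arrives asynchronously as a
 * VHOST_USER_IOTLB_MSG update on the master socket.
 */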
1836 int
1837 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
1838 {
1839         int ret;
1840         struct VhostUserMsg msg = {
1841                 .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
1842                 .flags = VHOST_USER_VERSION,
1843                 .size = sizeof(msg.payload.iotlb),
1844                 .payload.iotlb = {
1845                         .iova = iova,
1846                         .perm = perm,
1847                         .type = VHOST_IOTLB_MISS,
1848                 },
1849         };
1850
1851         ret = send_vhost_message(dev->slave_req_fd, &msg, NULL, 0);
1852         if (ret < 0) {
1853                 RTE_LOG(ERR, VHOST_CONFIG,
1854                                 "Failed to send IOTLB miss message (%d)\n",
1855                                 ret);
1856                 return ret;
1857         }
1858
1859         return 0;
1860 }
1861
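/*
 * Tell the master about the host notifier area of one vring: pass the
 * mappable (fd, offset, size) triple when fd is valid, or the NOFD flag
 * to withdraw a previously installed notifier.
 */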
1862 static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
1863                                                     int index, int fd,
1864                                                     uint64_t offset,
1865                                                     uint64_t size)
1866 {
1867         int *fdp = NULL;
1868         size_t fd_num = 0;
1869         int ret;
1870         struct VhostUserMsg msg = {
1871                 .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
1872                 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
1873                 .size = sizeof(msg.payload.area),
1874                 .payload.area = {
1875                         .u64 = index & VHOST_USER_VRING_IDX_MASK,
1876                         .size = size,
1877                         .offset = offset,
1878                 },
1879         };
1880
1881         if (fd < 0) {
1882                 msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
1883         } else {
1884                 fdp = &fd;
1885                 fd_num = 1;
1886         }
1887
1888         ret = send_vhost_slave_message(dev, &msg, fdp, fd_num);
1889         if (ret < 0) {
1890                 RTE_LOG(ERR, VHOST_CONFIG,
1891                         "Failed to set host notifier (%d)\n", ret);
1892                 return ret;
1893         }
1894
1895         return process_slave_message_reply(dev, &msg);
1896 }
1897
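/*
 * Enable or disable the host notifiers of all rings of a vDPA device.
 * This requires the SLAVE_REQ, SLAVE_SEND_FD and HOST_NOTIFIER protocol
 * features; if installing any notifier fails, all of them are withdrawn
 * again to keep the rings in a consistent state.
 */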
1898 int vhost_user_host_notifier_ctrl(int vid, bool enable)
1899 {
1900         struct virtio_net *dev;
1901         struct rte_vdpa_device *vdpa_dev;
1902         int vfio_device_fd, did, ret = 0;
1903         uint64_t offset, size;
1904         unsigned int i;
1905
1906         dev = get_device(vid);
1907         if (!dev)
1908                 return -ENODEV;
1909
1910         did = dev->vdpa_dev_id;
1911         if (did < 0)
1912                 return -EINVAL;
1913
1914         if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
1915             !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
1916             !(dev->protocol_features &
1917                         (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
1918             !(dev->protocol_features &
1919                         (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
1920             !(dev->protocol_features &
1921                         (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
1922                 return -ENOTSUP;
1923
1924         vdpa_dev = rte_vdpa_get_device(did);
1925         if (!vdpa_dev)
1926                 return -ENODEV;
1927
1928         RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
1929         RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);
1930
1931         vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
1932         if (vfio_device_fd < 0)
1933                 return -ENOTSUP;
1934
1935         if (enable) {
1936                 for (i = 0; i < dev->nr_vring; i++) {
1937                         if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
1938                                         &size) < 0) {
1939                                 ret = -ENOTSUP;
1940                                 goto disable;
1941                         }
1942
1943                         if (vhost_user_slave_set_vring_host_notifier(dev, i,
1944                                         vfio_device_fd, offset, size) < 0) {
1945                                 ret = -EFAULT;
1946                                 goto disable;
1947                         }
1948                 }
1949         } else {
1950 disable:
1951                 for (i = 0; i < dev->nr_vring; i++) {
1952                         vhost_user_slave_set_vring_host_notifier(dev, i, -1,
1953                                         0, 0);
1954                 }
1955         }
1956
1957         return ret;
1958 }