/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        ETH_VHOST_IOMMU_SUPPORT,
        NULL
};
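
/*
 * Illustrative only (not part of the upstream source): a port backed by
 * this PMD is normally created from the kvargs above via the EAL --vdev
 * option, e.g.:
 *
 *   testpmd -l 0-3 -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=2,client=1'
 *
 * The socket path "/tmp/sock0" is a placeholder; any writable path works.
 */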

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};
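
/*
 * Note: the base address spells "VHOST" in ASCII; eth_dev_vhost_create()
 * overwrites the last octet with the port id, so port 3 is assigned
 * 56:48:4f:53:54:03.
 */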

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
        int vid;
        rte_atomic32_t started;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* "rx_" is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* "tx_" is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

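/*
 * Each entry above is exposed through xstats with an "rx_" or "tx_" prefix
 * (see vhost_dev_xstats_get_names() below), e.g. "rx_good_packets" or
 * "tx_size_64_packets".
 */
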
static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
        if (is_multicast_ether_addr(ea)) {
                if (is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count ; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
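                        /*
                         * 32 - clz(pkt_len) is the bit width of pkt_len
                         * (floor(log2) + 1); subtracting 5 turns that into
                         * the enum index of the matching size bin. E.g.
                         * pkt_len = 128: 32 - 24 - 5 = 3, i.e.
                         * VHOST_128_TO_255_PKT.
                         */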
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}
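
/*
 * A minimal sketch (an assumption for illustration, not upstream code) of
 * how an application drives the burst function above through the generic
 * ethdev API; port_id and the mempool behind the queue are assumed to be
 * configured elsewhere:
 *
 *   struct rte_mbuf *pkts[VHOST_MAX_PKT_BURST];
 *   uint16_t k, n = rte_eth_rx_burst(port_id, 0, pkts, VHOST_MAX_PKT_BURST);
 *
 *   for (k = 0; k < n; k++)
 *           rte_pktmbuf_free(pkts[k]);
 */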

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC 2863 (ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
         * are increased even when packets are not transmitted successfully.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (!dev->data->rx_queues || !dev->data->tx_queues)
                return;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}
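
/*
 * The loops above implement a simple handshake with the data path: the
 * control path publishes allow_queuing and then spins until while_queuing
 * drops to zero, so no rx/tx burst can still be inside the vhost library
 * when the device state changes. eth_vhost_rx()/eth_vhost_tx() set
 * while_queuing first, then re-check allow_queuing and bail out if it was
 * cleared in between.
 */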

static void
queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
{
        struct vhost_queue *vq;
        int i;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->vid = internal->vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
}

static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        internal->vid = vid;
        if (rte_atomic32_read(&internal->started) == 1)
                queue_setup(eth_dev, internal);
        else
                RTE_LOG(INFO, PMD, "RX/TX queues do not exist yet\n");

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        RTE_LOG(INFO, PMD, "New connection established\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
                                      NULL, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        vq = eth_dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        vq = eth_dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        vq->vid = -1;
                }
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "Connection closed\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
                                      NULL, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE,
                                      NULL, NULL);

        return 0;
}

static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                RTE_LOG(ERR, PMD, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                RTE_LOG(ERR, PMD, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}
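
/*
 * A minimal sketch (an assumption for illustration, not upstream code) of
 * draining queue events, e.g. from an RTE_ETH_EVENT_QUEUE_STATE callback;
 * port_id is assumed to be a valid vhost port:
 *
 *   struct rte_eth_vhost_queue_event ev;
 *
 *   while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *           printf("queue %u (%s) %s\n", ev.queue_id,
 *                  ev.rx ? "rx" : "tx",
 *                  ev.enable ? "enabled" : "disabled");
 */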

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_start(struct rte_eth_dev *eth_dev)
{
        struct pmd_internal *internal = eth_dev->data->dev_private;

        queue_setup(eth_dev, internal);
        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(eth_dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        eth_dev_stop(dev);

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        if (dev->data->rx_queues)
                for (i = 0; i < dev->data->nb_rx_queues; i++)
                        rte_free(dev->data->rx_queues[i]);

        if (dev->data->tx_queues)
                for (i = 0; i < dev->data->nb_tx_queues; i++)
                        rte_free(dev->data->tx_queues[i]);

        rte_free(dev->data->mac_addrs);
        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
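        /*
         * Virtqueues come in guest (RX, TX) pairs: host-side RX queue N
         * reads from guest TX ring N * 2 + 1, while eth_tx_queue_setup()
         * below writes to guest RX ring N * 2.
         */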
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                RTE_LOG(ERR, PMD, "Invalid device specified\n");
                return;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_missed_total += vq->stats.missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vHost does not hang on to mbufs: eth_vhost_tx() copies the packet
         * data and frees the mbuf, so there is nothing to clean up.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .rx_queue_count = eth_rx_queue_count,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
};

static struct rte_vdev_driver pmd_vhost_drv;

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* now do all data allocation - for eth_dev structure and internal
         * (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        /* We'll replace the 'data' originally allocated by eth_dev. So the
         * vhost PMD resources won't be shared between multiple processes.
         */
        rte_memcpy(data, eth_dev->data, sizeof(*data));
        eth_dev->data = data;

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        internal->vid = -1;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
                RTE_LOG(ERR, PMD, "Can't register callbacks\n");
                goto error;
        }

        if (rte_vhost_driver_start(iface_name) < 0) {
                RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
                        iface_name);
                goto error;
        }

        return data->port_id;

error:
        if (internal) {
                free(internal->iface_name);
                free(internal->dev_name);
        }
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);
        rte_free(data);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        uint16_t client_mode = 0;
        uint16_t dequeue_zero_copy = 0;
        uint16_t iommu_support = 0;

        RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
                rte_vdev_device_name(dev));

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
                flags);

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_close(eth_dev);

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1> "
        "iommu-support=<0|1>");