/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>      /* snprintf */
#include <stdlib.h>     /* free, strtoul */
#include <string.h>     /* memset, strcmp, strdup */
#include <errno.h>      /* errno checked in open_int() */
#include <limits.h>     /* USHRT_MAX, PATH_MAX */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"
#define ETH_VHOST_IOMMU_SUPPORT         "iommu-support"
#define VHOST_MAX_PKT_BURST 32

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        ETH_VHOST_IOMMU_SUPPORT,
        NULL
};

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

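/*
 * Per-queue state. allow_queuing/while_queuing form a lightweight
 * handshake with the control path: update_queuing_status() clears
 * allow_queuing and then spins until while_queuing drops to zero,
 * guaranteeing that no rx/tx burst still touches a vhost device that is
 * about to be detached.
 */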
struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint16_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        rte_atomic32_t dev_attached;
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
        rte_atomic32_t started;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct rte_eth_link pmd_link = {
        .link_speed = 10000,
        .link_duplex = ETH_LINK_FULL_DUPLEX,
        .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
        if (is_multicast_ether_addr(ea)) {
                if (is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
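                        /*
                         * Bucket by size class: index equals
                         * floor(log2(pkt_len)) - 4, so 65..127 maps to
                         * VHOST_65_TO_127_PKT, 128..255 to
                         * VHOST_128_TO_255_PKT, and so on. A length of
                         * exactly 64 is handled above because it would
                         * otherwise land in the 65..127 bucket.
                         */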
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

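/*
 * RX burst: drains the guest's TX virtqueue into mbufs allocated from
 * the queue's mempool. The re-check of allow_queuing after raising
 * while_queuing pairs with the handshake in update_queuing_status().
 */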
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;
        uint16_t nb_receive = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        while (nb_receive) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_receive,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
                                                  r->mb_pool, &bufs[nb_rx],
                                                  num);

                nb_rx += nb_pkts;
                nb_receive -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

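/*
 * TX burst: copies mbufs into the guest's RX virtqueue. Mbufs that were
 * enqueued are freed here; packets the guest could not accept are
 * counted as missed_pkts (reported as oerrors) and, per tx-burst
 * semantics, remain owned by the caller.
 */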
static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;
        uint16_t nb_send = nb_bufs;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Enqueue packets to guest RX queue */
        while (nb_send) {
                uint16_t nb_pkts;
                uint16_t num = (uint16_t)RTE_MIN(nb_send,
                                                 VHOST_MAX_PKT_BURST);

                nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
                                                  &bufs[nb_tx], num);

                nb_tx += nb_pkts;
                nb_send -= nb_pkts;
                if (nb_pkts < num)
                        break;
        }

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /*
         * Per RFC 2863 (ifHCOutMulticastPkts and ifHCOutBroadcastPkts),
         * the "multicast" and "broadcast" counters also include packets
         * that were not transmitted successfully, so count the missed
         * ones here as well.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

static void
update_queuing_status(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
        unsigned int i;
        int allow_queuing = 1;

        if (rte_atomic32_read(&internal->started) == 0 ||
            rte_atomic32_read(&internal->dev_attached) == 0)
                allow_queuing = 0;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, allow_queuing);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
}

static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }

        for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        rte_vhost_get_mtu(vid, &eth_dev->data->mtu);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        rte_atomic32_set(&internal->dev_attached, 1);
        update_queuing_status(eth_dev);

        RTE_LOG(INFO, PMD, "New connection established\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
                                      NULL, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

        rte_atomic32_set(&internal->dev_attached, 0);
        update_queuing_status(eth_dev);

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "Connection closed\n");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
                                      NULL, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE,
                                      NULL, NULL);

        return 0;
}

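/*
 * Callbacks registered with the vhost library. new_device() and
 * destroy_device() run when a vhost-user session is established or torn
 * down; vring_state_changed() records per-vring enable/disable
 * transitions that applications can drain through
 * rte_eth_vhost_get_queue_event().
 */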
static struct vhost_device_ops vhost_ops = {
        .new_device          = new_device,
        .destroy_device      = destroy_device,
        .vring_state_changed = vring_state_changed,
};

int
rte_eth_vhost_get_queue_event(uint16_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                RTE_LOG(ERR, PMD, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                RTE_LOG(ERR, PMD, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}
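
/*
 * Usage sketch (not part of the original file): an application that has
 * registered a callback for RTE_ETH_EVENT_QUEUE_STATE would typically
 * drain all pending changes in a loop:
 *
 *     struct rte_eth_vhost_queue_event ev;
 *
 *     while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
 *         printf("queue %u (%s) %s\n", ev.queue_id,
 *                ev.rx ? "rx" : "tx",
 *                ev.enable ? "enabled" : "disabled");
 */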

int
rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq)
                                vid = vq->vid;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 1);
        update_queuing_status(dev);

        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal = dev->data->dev_private;

        rte_atomic32_set(&internal->started, 0);
        update_queuing_status(dev);
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;
        unsigned int i;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        eth_dev_stop(dev);

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        for (i = 0; i < dev->data->nb_rx_queues; i++)
                rte_free(dev->data->rx_queues[i]);
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                rte_free(dev->data->tx_queues[i]);

        rte_free(dev->data->mac_addrs);
        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);

        dev->data->dev_private = NULL;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
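        /*
         * An ethdev RX queue maps to the guest's TX vring (odd virtqueue
         * indices): what the guest transmits is what the host receives.
         * eth_tx_queue_setup() below pairs TX queues with the guest's RX
         * vrings (even indices) the same way.
         */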
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                RTE_LOG(ERR, PMD, "Invalid device specified\n");
                return;
        }

        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_missed_total += vq->stats.missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;

        return 0;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
        /*
         * vhost does not hang onto mbufs: eth_vhost_tx() copies the
         * packet data into the vring and frees the mbuf, so there is
         * nothing to clean up.
         */
        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static uint32_t
eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
        struct vhost_queue *vq;

        vq = dev->data->rx_queues[rx_queue_id];
        if (vq == NULL)
                return 0;

        return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .tx_done_cleanup = eth_tx_done_cleanup,
        .rx_queue_count = eth_rx_queue_count,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
};

static struct rte_vdev_driver pmd_vhost_drv;

static int
eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
        int16_t queues, const unsigned int numa_node, uint64_t flags)
{
        const char *name = rte_vdev_device_name(dev);
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* now do all data allocation - for eth_dev structure and internal
         * (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        /* now put it all together
         * - store queue data in internal,
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal = eth_dev->data->dev_private;
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        /* We'll replace the 'data' originally allocated by the ethdev, so
         * the vhost PMD's resources won't be shared across multiple
         * processes.
         */
        rte_memcpy(data, eth_dev->data, sizeof(*data));
        eth_dev->data = data;

        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;
        data->dev_flags = RTE_ETH_DEV_INTR_LSC;

        eth_dev->dev_ops = &ops;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
                RTE_LOG(ERR, PMD, "Can't register callbacks\n");
                goto error;
        }

        if (rte_vhost_driver_start(iface_name) < 0) {
                RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
                        iface_name);
                goto error;
        }

        return data->port_id;

error:
        if (internal) {
                free(internal->iface_name);
                free(internal->dev_name);
        }
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);
        rte_free(data);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        int client_mode = 0;
        int dequeue_zero_copy = 0;
        int iommu_support = 0;

        RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
                rte_vdev_device_name(dev));

        kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
                                         &open_int, &iommu_support);
                if (ret < 0)
                        goto out_free;

                if (iommu_support)
                        flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
        }

        if (dev->device.numa_node == SOCKET_ID_ANY)
                dev->device.numa_node = rte_socket_id();

        /* propagate a creation failure instead of silently returning 0 */
        if (eth_dev_vhost_create(dev, iface_name, queues,
                                 dev->device.numa_node, flags) < 0)
                ret = -1;

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
        const char *name;
        struct rte_eth_dev *eth_dev = NULL;

        name = rte_vdev_device_name(dev);
        RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_close(eth_dev);

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1> "
        "iommu-support=<0|1>");
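
/*
 * Usage sketch (assumes a 17.11-era testpmd; exact flags vary by setup):
 *
 *     testpmd -l 0-3 -n 4 \
 *         --vdev 'net_vhost0,iface=/tmp/sock0,queues=1,client=1' -- -i
 *
 * "iface" names the vhost-user socket path and "queues" the number of
 * queue pairs; "client", "dequeue-zero-copy" and "iommu-support" map to
 * the RTE_VHOST_USER_* flags parsed in rte_pmd_vhost_probe() above.
 */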