8fde6030e7e6f947c1209d4525111dbc21418020
[deb_dpdk.git] / drivers / net / vhost / rte_eth_vhost.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 IGEL Co., Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_virtio_net.h>
#include <rte_spinlock.h>

#include "rte_eth_vhost.h"

#define ETH_VHOST_IFACE_ARG             "iface"
#define ETH_VHOST_QUEUES_ARG            "queues"
#define ETH_VHOST_CLIENT_ARG            "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY     "dequeue-zero-copy"

static const char *drivername = "VHOST PMD";

static const char *valid_arguments[] = {
        ETH_VHOST_IFACE_ARG,
        ETH_VHOST_QUEUES_ARG,
        ETH_VHOST_CLIENT_ARG,
        ETH_VHOST_DEQUEUE_ZERO_COPY,
        NULL
};
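
/*
 * Illustrative invocation of this PMD via devargs (socket path, core mask
 * and vdev index below are hypothetical, not taken from this file):
 *
 *   testpmd -c f -n 4 --vdev 'net_vhost0,iface=/tmp/sock0,queues=1' -- -i
 *
 * "iface" names the vhost-user socket and "queues" the number of queue
 * pairs; "client=1" would make the PMD connect to an existing socket
 * instead of creating one, and "dequeue-zero-copy=1" enables zero-copy
 * dequeue.
 */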

static struct ether_addr base_eth_addr = {
        .addr_bytes = {
                0x56 /* V */,
                0x48 /* H */,
                0x4F /* O */,
                0x53 /* S */,
                0x54 /* T */,
                0x00
        }
};
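
/*
 * The bytes above spell "VHOST"; eth_dev_vhost_create() overwrites the
 * last byte with the port id, so every vhost port gets a distinct MAC.
 */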

enum vhost_xstats_pkts {
        VHOST_UNDERSIZE_PKT = 0,
        VHOST_64_PKT,
        VHOST_65_TO_127_PKT,
        VHOST_128_TO_255_PKT,
        VHOST_256_TO_511_PKT,
        VHOST_512_TO_1023_PKT,
        VHOST_1024_TO_1522_PKT,
        VHOST_1523_TO_MAX_PKT,
        VHOST_BROADCAST_PKT,
        VHOST_MULTICAST_PKT,
        VHOST_UNICAST_PKT,
        VHOST_ERRORS_PKT,
        VHOST_ERRORS_FRAGMENTED,
        VHOST_ERRORS_JABBER,
        VHOST_UNKNOWN_PROTOCOL,
        VHOST_XSTATS_MAX,
};

struct vhost_stats {
        uint64_t pkts;
        uint64_t bytes;
        uint64_t missed_pkts;
        uint64_t xstats[VHOST_XSTATS_MAX];
};

struct vhost_queue {
        int vid;
        rte_atomic32_t allow_queuing;
        rte_atomic32_t while_queuing;
        struct pmd_internal *internal;
        struct rte_mempool *mb_pool;
        uint8_t port;
        uint16_t virtqueue_id;
        struct vhost_stats stats;
};

struct pmd_internal {
        char *dev_name;
        char *iface_name;
        uint16_t max_queues;
};

struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

static rte_atomic16_t nb_started_ports;
static pthread_t session_th;

static struct rte_eth_link pmd_link = {
                .link_speed = 10000,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = ETH_LINK_DOWN
};

struct rte_vhost_vring_state {
        rte_spinlock_t lock;

        bool cur[RTE_MAX_QUEUES_PER_PORT * 2];
        bool seen[RTE_MAX_QUEUES_PER_PORT * 2];
        unsigned int index;
        unsigned int max_vring;
};
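
/*
 * Each queue pair owns two vrings, hence the "* 2" sizing above. A
 * flattened vring index i decodes as queue_id = i / 2 and rx = i & 1,
 * as done in rte_eth_vhost_get_queue_event() below.
 */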

static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];

#define VHOST_XSTATS_NAME_SIZE 64

struct vhost_xstats_name_off {
        char name[VHOST_XSTATS_NAME_SIZE];
        uint64_t offset;
};

/* [rx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
        {"fragmented_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
        {"jabber_errors",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
        {"unknown_protos_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
};

/* [tx]_ is prepended to the name string here */
static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
        {"good_packets",
         offsetof(struct vhost_queue, stats.pkts)},
        {"total_bytes",
         offsetof(struct vhost_queue, stats.bytes)},
        {"missed_pkts",
         offsetof(struct vhost_queue, stats.missed_pkts)},
        {"broadcast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
        {"multicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
        {"unicast_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
        {"undersize_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
        {"size_64_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
        {"size_65_to_127_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
        {"size_128_to_255_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
        {"size_256_to_511_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
        {"size_512_to_1023_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
        {"size_1024_to_1522_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
        {"size_1523_to_max_packets",
         offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
        {"errors_with_bad_CRC",
         offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
};

#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
                                sizeof(vhost_rxport_stat_strings[0]))

#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
                                sizeof(vhost_txport_stat_strings[0]))

static void
vhost_dev_xstats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq = NULL;
        unsigned int i = 0;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                memset(&vq->stats, 0, sizeof(vq->stats));
        }
}

static int
vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
                           struct rte_eth_xstat_name *xstats_names,
                           unsigned int limit __rte_unused)
{
        unsigned int t = 0;
        int count = 0;
        int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (!xstats_names)
                return nstats;
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "rx_%s", vhost_rxport_stat_strings[t].name);
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                snprintf(xstats_names[count].name,
                         sizeof(xstats_names[count].name),
                         "tx_%s", vhost_txport_stat_strings[t].name);
                count++;
        }
        return count;
}

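/*
 * VHOST_UNICAST_PKT is not maintained on the data path; it is derived at
 * query time as total packets (plus missed packets on TX) minus the
 * broadcast and multicast counts, keeping the RX/TX burst functions cheap.
 */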
static int
vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
                     unsigned int n)
{
        unsigned int i;
        unsigned int t;
        unsigned int count = 0;
        struct vhost_queue *vq = NULL;
        unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;

        if (n < nxstats)
                return nxstats;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                vq = dev->data->rx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                vq = dev->data->tx_queues[i];
                if (!vq)
                        continue;
                vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
                                + vq->stats.missed_pkts
                                - (vq->stats.xstats[VHOST_BROADCAST_PKT]
                                + vq->stats.xstats[VHOST_MULTICAST_PKT]);
        }
        for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        vq = dev->data->rx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_rxport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
                xstats[count].value = 0;
                for (i = 0; i < dev->data->nb_tx_queues; i++) {
                        vq = dev->data->tx_queues[i];
                        if (!vq)
                                continue;
                        xstats[count].value +=
                                *(uint64_t *)(((char *)vq)
                                + vhost_txport_stat_strings[t].offset);
                }
                xstats[count].id = count;
                count++;
        }
        return count;
}

static inline void
vhost_count_multicast_broadcast(struct vhost_queue *vq,
                                struct rte_mbuf *mbuf)
{
        struct ether_addr *ea = NULL;
        struct vhost_stats *pstats = &vq->stats;

        ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
        if (is_multicast_ether_addr(ea)) {
                if (is_broadcast_ether_addr(ea))
                        pstats->xstats[VHOST_BROADCAST_PKT]++;
                else
                        pstats->xstats[VHOST_MULTICAST_PKT]++;
        }
}

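/*
 * Size-bucket accounting: for 64 < pkt_len < 1024 the xstats index is
 * derived from the position of the most significant bit, i.e.
 * index = 32 - clz(pkt_len) - 5. Worked example: pkt_len = 100 has 7
 * significant bits, so clz = 25 and index = 2 = VHOST_65_TO_127_PKT.
 */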
static void
vhost_update_packet_xstats(struct vhost_queue *vq,
                           struct rte_mbuf **bufs,
                           uint16_t count)
{
        uint32_t pkt_len = 0;
        uint64_t i = 0;
        uint64_t index;
        struct vhost_stats *pstats = &vq->stats;

        for (i = 0; i < count; i++) {
                pkt_len = bufs[i]->pkt_len;
                if (pkt_len == 64) {
                        pstats->xstats[VHOST_64_PKT]++;
                } else if (pkt_len > 64 && pkt_len < 1024) {
                        index = (sizeof(pkt_len) * 8)
                                - __builtin_clz(pkt_len) - 5;
                        pstats->xstats[index]++;
                } else {
                        if (pkt_len < 64)
                                pstats->xstats[VHOST_UNDERSIZE_PKT]++;
                        else if (pkt_len <= 1522)
                                pstats->xstats[VHOST_1024_TO_1522_PKT]++;
                        else if (pkt_len > 1522)
                                pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
                }
                vhost_count_multicast_broadcast(vq, bufs[i]);
        }
}

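/*
 * The burst functions below synchronize with destroy_device() without a
 * lock: each burst raises "while_queuing", re-checks "allow_queuing" (to
 * close the race with a concurrent clear), and drops "while_queuing" when
 * done; destroy_device() clears "allow_queuing" and spins until
 * "while_queuing" reads zero before tearing the device state down.
 */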
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_rx = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Dequeue packets from guest TX queue */
        nb_rx = rte_vhost_dequeue_burst(r->vid,
                        r->virtqueue_id, r->mb_pool, bufs, nb_bufs);

        r->stats.pkts += nb_rx;

        for (i = 0; likely(i < nb_rx); i++) {
                bufs[i]->port = r->port;
                r->stats.bytes += bufs[i]->pkt_len;
        }

        vhost_update_packet_xstats(r, bufs, nb_rx);

out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_rx;
}

static uint16_t
eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
        struct vhost_queue *r = q;
        uint16_t i, nb_tx = 0;

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                return 0;

        rte_atomic32_set(&r->while_queuing, 1);

        if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
                goto out;

        /* Enqueue packets to guest RX queue */
        nb_tx = rte_vhost_enqueue_burst(r->vid,
                        r->virtqueue_id, bufs, nb_bufs);

        r->stats.pkts += nb_tx;
        r->stats.missed_pkts += nb_bufs - nb_tx;

        for (i = 0; likely(i < nb_tx); i++)
                r->stats.bytes += bufs[i]->pkt_len;

        vhost_update_packet_xstats(r, bufs, nb_tx);

        /* According to RFC 2863, page 42 (ifHCOutMulticastPkts and
         * ifHCOutBroadcastPkts), the "multicast" and "broadcast" counters
         * must also include packets that were not transmitted successfully,
         * so count the dropped tail of the burst below as well.
         */
        for (i = nb_tx; i < nb_bufs; i++)
                vhost_count_multicast_broadcast(r, bufs[i]);

        for (i = 0; likely(i < nb_tx); i++)
                rte_pktmbuf_free(bufs[i]);
out:
        rte_atomic32_set(&r->while_queuing, 0);

        return nb_tx;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static inline struct internal_list *
find_internal_resource(char *ifname)
{
        int found = 0;
        struct internal_list *list;
        struct pmd_internal *internal;

        if (ifname == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internal = list->eth_dev->data->dev_private;
                if (!strcmp(internal->iface_name, ifname)) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

static int
new_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        struct pmd_internal *internal;
        struct vhost_queue *vq;
        unsigned i;
        char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
        int newnode;
#endif

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        internal = eth_dev->data->dev_private;

#ifdef RTE_LIBRTE_VHOST_NUMA
        newnode = rte_vhost_get_numa_node(vid);
        if (newnode >= 0)
                eth_dev->data->numa_node = newnode;
#endif

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = vid;
                vq->internal = internal;
                vq->port = eth_dev->data->port_id;
        }

        for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
                rte_vhost_enable_guest_notification(vid, i, 0);

        eth_dev->data->dev_link.link_status = ETH_LINK_UP;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 1);
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 1);
        }

        RTE_LOG(INFO, PMD, "Vhost device %d created\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        return 0;
}

static void
destroy_device(int vid)
{
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        struct internal_list *list;
        char ifname[PATH_MAX];
        unsigned i;
        struct rte_vhost_vring_state *state;

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return;
        }
        eth_dev = list->eth_dev;

        /* Wait until rx/tx_pkt_burst stops accessing vhost device */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 0);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                rte_atomic32_set(&vq->allow_queuing, 0);
                while (rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }

        eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                vq = eth_dev->data->rx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                vq = eth_dev->data->tx_queues[i];
                if (vq == NULL)
                        continue;
                vq->vid = -1;
        }

        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                state->cur[i] = false;
                state->seen[i] = false;
        }
        state->max_vring = 0;
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "Vhost device %d destroyed\n", vid);

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

static int
vring_state_changed(int vid, uint16_t vring, int enable)
{
        struct rte_vhost_vring_state *state;
        struct rte_eth_dev *eth_dev;
        struct internal_list *list;
        char ifname[PATH_MAX];

        rte_vhost_get_ifname(vid, ifname, sizeof(ifname));
        list = find_internal_resource(ifname);
        if (list == NULL) {
                RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname);
                return -1;
        }

        eth_dev = list->eth_dev;
        /* won't be NULL */
        state = vring_states[eth_dev->data->port_id];
        rte_spinlock_lock(&state->lock);
        state->cur[vring] = enable;
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);

        RTE_LOG(INFO, PMD, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");

        _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);

        return 0;
}

int
rte_eth_vhost_get_queue_event(uint8_t port_id,
                struct rte_eth_vhost_queue_event *event)
{
        struct rte_vhost_vring_state *state;
        unsigned int i;
        int idx;

        if (port_id >= RTE_MAX_ETHPORTS) {
                RTE_LOG(ERR, PMD, "Invalid port id\n");
                return -1;
        }

        state = vring_states[port_id];
        if (!state) {
                RTE_LOG(ERR, PMD, "Unused port\n");
                return -1;
        }

        rte_spinlock_lock(&state->lock);
        for (i = 0; i <= state->max_vring; i++) {
                idx = state->index++ % (state->max_vring + 1);

                if (state->cur[idx] != state->seen[idx]) {
                        state->seen[idx] = state->cur[idx];
                        event->queue_id = idx / 2;
                        event->rx = idx & 1;
                        event->enable = state->cur[idx];
                        rte_spinlock_unlock(&state->lock);
                        return 0;
                }
        }
        rte_spinlock_unlock(&state->lock);

        return -1;
}
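
/*
 * Minimal polling sketch (illustrative only; assumes "port_id" refers to a
 * vhost port), e.g. run from an RTE_ETH_EVENT_QUEUE_STATE callback:
 *
 *        struct rte_eth_vhost_queue_event event;
 *
 *        while (rte_eth_vhost_get_queue_event(port_id, &event) == 0)
 *                printf("queue %u (%s) %s\n", event.queue_id,
 *                       event.rx ? "rx" : "tx",
 *                       event.enable ? "enabled" : "disabled");
 */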

int
rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
{
        struct internal_list *list;
        struct rte_eth_dev *eth_dev;
        struct vhost_queue *vq;
        int vid = -1;

        if (!rte_eth_dev_is_valid_port(port_id))
                return -1;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                eth_dev = list->eth_dev;
                if (eth_dev->data->port_id == port_id) {
                        vq = eth_dev->data->rx_queues[0];
                        if (vq) {
                                vid = vq->vid;
                        }
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        return vid;
}

static void *
vhost_driver_session(void *param __rte_unused)
{
        static struct virtio_net_device_ops vhost_ops;

        /* set vhost arguments */
        vhost_ops.new_device = new_device;
        vhost_ops.destroy_device = destroy_device;
        vhost_ops.vring_state_changed = vring_state_changed;
        if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
                RTE_LOG(ERR, PMD, "Can't register callbacks\n");

        /* start event handling */
        rte_vhost_driver_session_start();

        return NULL;
}

static int
vhost_driver_session_start(void)
{
        int ret;

        ret = pthread_create(&session_th,
                        NULL, vhost_driver_session, NULL);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't create a thread\n");

        return ret;
}

static void
vhost_driver_session_stop(void)
{
        int ret;

        ret = pthread_cancel(session_th);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't cancel the thread\n");

        ret = pthread_join(session_th, NULL);
        if (ret)
                RTE_LOG(ERR, PMD, "Can't join the thread\n");
}
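
/*
 * A single session thread serves every vhost port: it is spawned when the
 * first port is created and cancelled when the last one is removed, with
 * nb_started_ports as the reference count (see eth_dev_vhost_create() and
 * rte_pmd_vhost_remove()).
 */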

static int
eth_dev_start(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internal *internal;
        struct internal_list *list;

        internal = dev->data->dev_private;
        if (!internal)
                return;

        rte_vhost_driver_unregister(internal->iface_name);

        list = find_internal_resource(internal->iface_name);
        if (!list)
                return;

        pthread_mutex_lock(&internal_list_lock);
        TAILQ_REMOVE(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);
        rte_free(list);

        free(internal->dev_name);
        free(internal->iface_name);
        rte_free(internal);
}

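/*
 * Virtqueue ids are from the guest's point of view: the PMD's RX queue
 * reads the guest TX vring (VIRTIO_TXQ) and the PMD's TX queue feeds the
 * guest RX vring (VIRTIO_RXQ), as set up in the two functions below.
 */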
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for rx queue\n");
                return -ENOMEM;
        }

        vq->mb_pool = mb_pool;
        vq->virtqueue_id = rx_queue_id * VIRTIO_QNUM + VIRTIO_TXQ;
        dev->data->rx_queues[rx_queue_id] = vq;

        return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct vhost_queue *vq;

        vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (vq == NULL) {
                RTE_LOG(ERR, PMD, "Failed to allocate memory for tx queue\n");
                return -ENOMEM;
        }

        vq->virtqueue_id = tx_queue_id * VIRTIO_QNUM + VIRTIO_RXQ;
        dev->data->tx_queues[tx_queue_id] = vq;

        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
             struct rte_eth_dev_info *dev_info)
{
        struct pmd_internal *internal;

        internal = dev->data->dev_private;
        if (internal == NULL) {
                RTE_LOG(ERR, PMD, "Invalid device specified\n");
                return;
        }

        dev_info->driver_name = drivername;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = internal->max_queues;
        dev_info->max_tx_queues = internal->max_queues;
        dev_info->min_rx_bufsize = 0;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        unsigned i;
        unsigned long rx_total = 0, tx_total = 0, tx_missed_total = 0;
        unsigned long rx_total_bytes = 0, tx_total_bytes = 0;
        struct vhost_queue *vq;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = vq->stats.pkts;
                rx_total += stats->q_ipackets[i];

                stats->q_ibytes[i] = vq->stats.bytes;
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                stats->q_opackets[i] = vq->stats.pkts;
                tx_missed_total += vq->stats.missed_pkts;
                tx_total += stats->q_opackets[i];

                stats->q_obytes[i] = vq->stats.bytes;
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->oerrors = tx_missed_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct vhost_queue *vq;
        unsigned i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (dev->data->rx_queues[i] == NULL)
                        continue;
                vq = dev->data->rx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (dev->data->tx_queues[i] == NULL)
                        continue;
                vq = dev->data->tx_queues[i];
                vq->stats.pkts = 0;
                vq->stats.bytes = 0;
                vq->stats.missed_pkts = 0;
        }
}

static void
eth_queue_release(void *q)
{
        rte_free(q);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

/**
 * Disable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_disable(uint64_t feature_mask)
{
        return rte_vhost_feature_disable(feature_mask);
}

/**
 * Enable features in feature_mask. Returns 0 on success.
 */
int
rte_eth_vhost_feature_enable(uint64_t feature_mask)
{
        return rte_vhost_feature_enable(feature_mask);
}

/* Returns currently supported vhost features */
uint64_t
rte_eth_vhost_feature_get(void)
{
        return rte_vhost_feature_get();
}

static const struct eth_dev_ops ops = {
        .dev_start = eth_dev_start,
        .dev_stop = eth_dev_stop,
        .dev_close = eth_dev_close,
        .dev_configure = eth_dev_configure,
        .dev_infos_get = eth_dev_info,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
        .rx_queue_release = eth_queue_release,
        .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
        .xstats_reset = vhost_dev_xstats_reset,
        .xstats_get = vhost_dev_xstats_get,
        .xstats_get_names = vhost_dev_xstats_get_names,
};

static int
eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
                     const unsigned numa_node, uint64_t flags)
{
        struct rte_eth_dev_data *data = NULL;
        struct pmd_internal *internal = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        struct ether_addr *eth_addr = NULL;
        struct rte_vhost_vring_state *vring_state = NULL;
        struct internal_list *list = NULL;

        RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
                numa_node);

        /* now do all data allocation - for eth_dev structure and
         * internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
        if (internal == NULL)
                goto error;

        list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
        if (list == NULL)
                goto error;

        /* reserve an ethdev entry */
        eth_dev = rte_eth_dev_allocate(name);
        if (eth_dev == NULL)
                goto error;

        eth_addr = rte_zmalloc_socket(name, sizeof(*eth_addr), 0, numa_node);
        if (eth_addr == NULL)
                goto error;
        *eth_addr = base_eth_addr;
        eth_addr->addr_bytes[5] = eth_dev->data->port_id;

        vring_state = rte_zmalloc_socket(name,
                        sizeof(*vring_state), 0, numa_node);
        if (vring_state == NULL)
                goto error;

        TAILQ_INIT(&eth_dev->link_intr_cbs);

        /* now put it all together
         * - store queue data in internal,
         * - store numa_node info in ethdev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         */
        internal->dev_name = strdup(name);
        if (internal->dev_name == NULL)
                goto error;
        internal->iface_name = strdup(iface_name);
        if (internal->iface_name == NULL)
                goto error;

        list->eth_dev = eth_dev;
        pthread_mutex_lock(&internal_list_lock);
        TAILQ_INSERT_TAIL(&internal_list, list, next);
        pthread_mutex_unlock(&internal_list_lock);

        rte_spinlock_init(&vring_state->lock);
        vring_states[eth_dev->data->port_id] = vring_state;

        data->dev_private = internal;
        data->port_id = eth_dev->data->port_id;
        memmove(data->name, eth_dev->data->name, sizeof(data->name));
        data->nb_rx_queues = queues;
        data->nb_tx_queues = queues;
        internal->max_queues = queues;
        data->dev_link = pmd_link;
        data->mac_addrs = eth_addr;

        /* We'll replace the 'data' originally allocated by eth_dev. So the
         * vhost PMD resources won't be shared between multiple processes.
         */
        eth_dev->data = data;
        eth_dev->dev_ops = &ops;
        eth_dev->driver = NULL;
        data->dev_flags =
                RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
        data->kdrv = RTE_KDRV_NONE;
        data->drv_name = internal->dev_name;
        data->numa_node = numa_node;

        /* finally assign rx and tx ops */
        eth_dev->rx_pkt_burst = eth_vhost_rx;
        eth_dev->tx_pkt_burst = eth_vhost_tx;

        if (rte_vhost_driver_register(iface_name, flags))
                goto error;

        /* We need only one message handling thread */
        if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
                if (vhost_driver_session_start())
                        goto error;
        }

        return data->port_id;

error:
        if (internal)
                free(internal->dev_name);
        rte_free(vring_state);
        rte_free(eth_addr);
        if (eth_dev)
                rte_eth_dev_release_port(eth_dev);
        rte_free(internal);
        rte_free(list);
        rte_free(data);

        return -1;
}

static inline int
open_iface(const char *key __rte_unused, const char *value, void *extra_args)
{
        const char **iface_name = extra_args;

        if (value == NULL)
                return -1;

        *iface_name = value;

        return 0;
}

static inline int
open_int(const char *key __rte_unused, const char *value, void *extra_args)
{
        uint16_t *n = extra_args;

        if (value == NULL || extra_args == NULL)
                return -EINVAL;

        *n = (uint16_t)strtoul(value, NULL, 0);
        if (*n == USHRT_MAX && errno == ERANGE)
                return -1;

        return 0;
}

static int
rte_pmd_vhost_probe(const char *name, const char *params)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;
        char *iface_name;
        uint16_t queues;
        uint64_t flags = 0;
        /* both are parsed by open_int(), which writes through a uint16_t */
        uint16_t client_mode = 0;
        uint16_t dequeue_zero_copy = 0;

        RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);

        kvlist = rte_kvargs_parse(params, valid_arguments);
        if (kvlist == NULL)
                return -1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
                                         &open_iface, &iface_name);
                if (ret < 0)
                        goto out_free;
        } else {
                ret = -1;
                goto out_free;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
                                         &open_int, &queues);
                if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT)
                        goto out_free;

        } else
                queues = 1;

        if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG,
                                         &open_int, &client_mode);
                if (ret < 0)
                        goto out_free;

                if (client_mode)
                        flags |= RTE_VHOST_USER_CLIENT;
        }

        if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
                ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
                                         &open_int, &dequeue_zero_copy);
                if (ret < 0)
                        goto out_free;

                if (dequeue_zero_copy)
                        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
        }
        if (eth_dev_vhost_create(name, iface_name, queues,
                                 rte_socket_id(), flags) < 0)
                ret = -1;

out_free:
        rte_kvargs_free(kvlist);
        return ret;
}

static int
rte_pmd_vhost_remove(const char *name)
{
        struct rte_eth_dev *eth_dev = NULL;
        unsigned int i;

        RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);

        /* find an ethdev entry */
        eth_dev = rte_eth_dev_allocated(name);
        if (eth_dev == NULL)
                return -ENODEV;

        eth_dev_stop(eth_dev);

        eth_dev_close(eth_dev);

        if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
                vhost_driver_session_stop();

        rte_free(vring_states[eth_dev->data->port_id]);
        vring_states[eth_dev->data->port_id] = NULL;

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
                rte_free(eth_dev->data->rx_queues[i]);
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                rte_free(eth_dev->data->tx_queues[i]);

        rte_free(eth_dev->data->mac_addrs);
        rte_free(eth_dev->data);

        rte_eth_dev_release_port(eth_dev);

        return 0;
}

static struct rte_vdev_driver pmd_vhost_drv = {
        .probe = rte_pmd_vhost_probe,
        .remove = rte_pmd_vhost_remove,
};

RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
        "iface=<ifc> "
        "queues=<int> "
        "client=<0|1> "
        "dequeue-zero-copy=<0|1>");