/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_vhost.h>

#include "main.h"

/*
 * A very simple vhost-user net driver implementation, without
 * any extra features enabled, such as TSO and mergeable Rx buffers.
 */

void
vs_vhost_net_setup(struct vhost_dev *dev)
{
        uint16_t i;
        int vid = dev->vid;
        struct vhost_queue *queue;

        RTE_LOG(INFO, VHOST_CONFIG,
                "setting builtin vhost-user net driver\n");

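        /*
         * The virtio-net header size depends on the negotiated features:
         * with mergeable Rx buffers the header carries an extra
         * num_buffers field.
         */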
        rte_vhost_get_negotiated_features(vid, &dev->features);
        if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
                dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        else
                dev->hdr_len = sizeof(struct virtio_net_hdr);

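        /* Cache the guest memory map, used below for GPA to VVA translation. */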
        rte_vhost_get_mem_table(vid, &dev->mem);

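        /* Reset our ring indexes and fetch each vring's addresses. */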
        dev->nr_vrings = rte_vhost_get_vring_num(vid);
        for (i = 0; i < dev->nr_vrings; i++) {
                queue = &dev->queues[i];

                queue->last_used_idx  = 0;
                queue->last_avail_idx = 0;
                rte_vhost_get_vhost_vring(vid, i, &queue->vr);
        }
}

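/*
 * The memory map returned by rte_vhost_get_mem_table() is allocated
 * by the vhost library and owned by the caller, so release it here.
 */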
void
vs_vhost_net_remove(struct vhost_dev *dev)
{
        free(dev->mem);
}

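/*
 * Copy one mbuf chain into the guest Rx descriptor chain headed by
 * desc_idx. Returns 0 on success, -1 if the chain is malformed or
 * too small to hold the packet.
 */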
static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
            struct rte_mbuf *m, uint16_t desc_idx)
{
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
        struct vring_desc *desc;
        uint64_t desc_addr;
        struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
        /* A counter to guard against a looping desc chain */
        uint16_t nr_desc = 1;

        desc = &vr->desc[desc_idx];
        desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
        /*
         * The check of 'desc_addr' is placed outside the 'unlikely' macro
         * to avoid a performance issue with some versions of gcc (4.8.4
         * and 5.3.0), which otherwise store the offset on the stack
         * instead of in a register.
         */
        if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
                return -1;

        rte_prefetch0((void *)(uintptr_t)desc_addr);

        /* write an all-zero virtio-net header: no offloads are used */
        *(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;

        desc_offset = dev->hdr_len;
        desc_avail  = desc->len - dev->hdr_len;

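        /*
         * Copy the mbuf chain into the desc chain, advancing through both
         * in lockstep and crossing segment boundaries as needed.
         */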
        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;
        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current mbuf, fetch next */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                /* done with current desc buf, fetch next */
                if (desc_avail == 0) {
                        if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
                                /* not enough room in the vring buffer */
                                return -1;
                        }
                        if (unlikely(desc->next >= vr->size ||
                                     ++nr_desc > vr->size))
                                return -1;

                        desc = &vr->desc[desc->next];
                        desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;

                        desc_offset = 0;
                        desc_avail  = desc->len;
                }

                cpy_len = RTE_MIN(desc_avail, mbuf_avail);
                rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
                        rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                        cpy_len);

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                desc_avail  -= cpy_len;
                desc_offset += cpy_len;
        }

        return 0;
}

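/*
 * Burst-enqueue host packets into the guest Rx vring of the given
 * queue. Returns the number of packets actually enqueued.
 */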
uint16_t
vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
                struct rte_mbuf **pkts, uint32_t count)
{
        struct vhost_queue *queue;
        struct rte_vhost_vring *vr;
        uint16_t avail_idx, free_entries, start_idx;
        uint16_t desc_indexes[MAX_PKT_BURST];
        uint16_t used_idx;
        uint32_t i;

        queue = &dev->queues[queue_id];
        vr    = &queue->vr;

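        /*
         * Snapshot the guest's avail index once; the 16-bit subtraction
         * handles index wrap-around naturally.
         */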
        avail_idx = *((volatile uint16_t *)&vr->avail->idx);
        start_idx = queue->last_used_idx;
        free_entries = avail_idx - start_idx;
        count = RTE_MIN(count, free_entries);
        count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
        if (count == 0)
                return 0;

        /* Retrieve all of the desc indexes first to avoid caching issues. */
        rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
        for (i = 0; i < count; i++) {
                used_idx = (start_idx + i) & (vr->size - 1);
                desc_indexes[i] = vr->avail->ring[used_idx];
                vr->used->ring[used_idx].id = desc_indexes[i];
                vr->used->ring[used_idx].len = pkts[i]->pkt_len +
                                               dev->hdr_len;
        }

        rte_prefetch0(&vr->desc[desc_indexes[0]]);
        for (i = 0; i < count; i++) {
                uint16_t desc_idx = desc_indexes[i];
                int err;

                err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
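                /*
                 * On failure the descriptor chain is still handed back to
                 * the guest, but with only the header length recorded.
                 */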
                if (unlikely(err)) {
                        used_idx = (start_idx + i) & (vr->size - 1);
                        vr->used->ring[used_idx].len = dev->hdr_len;
                }

                if (i + 1 < count)
                        rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);
        }

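        /*
         * Make sure all payload and used-ring writes are visible to the
         * guest before the used index is published.
         */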
        rte_smp_wmb();

        *(volatile uint16_t *)&vr->used->idx += count;
        queue->last_used_idx += count;

        rte_vhost_vring_call(dev->vid, queue_id);

        return count;
}

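/*
 * Copy the guest Tx descriptor chain headed by desc_idx into the given
 * mbuf, allocating extra segments from mbuf_pool as needed. Returns 0
 * on success, -1 on a malformed chain or allocation failure.
 */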
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
            struct rte_mbuf *m, uint16_t desc_idx,
            struct rte_mempool *mbuf_pool)
{
        struct vring_desc *desc;
        uint64_t desc_addr;
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
        struct rte_mbuf *cur = m, *prev = m;
        /* A counter to guard against a looping desc chain */
        uint32_t nr_desc = 1;

        desc = &vr->desc[desc_idx];
        if (unlikely(desc->len < dev->hdr_len) ||
                        (desc->flags & VRING_DESC_F_INDIRECT))
                return -1;

        desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
        if (unlikely(!desc_addr))
                return -1;

        /*
         * We support neither ANY_LAYOUT nor VERSION_1, meaning a Tx
         * packet from the guest must have at least 2 desc buffers:
         * the first stores the header and the others store the data.
         *
         * And since we don't support TSO, we can simply skip the
         * header.
         */
        desc = &vr->desc[desc->next];
        desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
        if (unlikely(!desc_addr))
                return -1;
        rte_prefetch0((void *)(uintptr_t)desc_addr);

        desc_offset = 0;
        desc_avail  = desc->len;
        nr_desc    += 1;

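        /*
         * Copy the desc chain into the mbuf, chaining in a freshly
         * allocated mbuf segment whenever the current one fills up.
         */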
        mbuf_offset = 0;
        mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
        while (1) {
                cpy_len = RTE_MIN(desc_avail, mbuf_avail);
                rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
                                                   mbuf_offset),
                        (void *)((uintptr_t)(desc_addr + desc_offset)),
                        cpy_len);

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                desc_avail  -= cpy_len;
                desc_offset += cpy_len;

                /* This desc buffer is exhausted, get the next one */
                if (desc_avail == 0) {
                        if ((desc->flags & VRING_DESC_F_NEXT) == 0)
                                break;

                        if (unlikely(desc->next >= vr->size ||
                                     ++nr_desc > vr->size))
                                return -1;
                        desc = &vr->desc[desc->next];

                        desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;
                        rte_prefetch0((void *)(uintptr_t)desc_addr);

                        desc_offset = 0;
                        desc_avail  = desc->len;
                }

                /*
                 * This mbuf is full, allocate a new one to hold
                 * more data.
                 */
                if (mbuf_avail == 0) {
                        cur = rte_pktmbuf_alloc(mbuf_pool);
                        if (unlikely(cur == NULL)) {
                                RTE_LOG(ERR, VHOST_DATA,
                                        "Failed to allocate memory for mbuf.\n");
                                return -1;
                        }

                        prev->next = cur;
                        prev->data_len = mbuf_offset;
                        m->nb_segs += 1;
                        m->pkt_len += mbuf_offset;
                        prev = cur;

                        mbuf_offset = 0;
                        mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
                }
        }

        prev->data_len = mbuf_offset;
        m->pkt_len    += mbuf_offset;

        return 0;
}

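/*
 * Burst-dequeue guest Tx packets from the given queue into host mbufs.
 * Returns the number of packets actually dequeued.
 */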
uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
        struct vhost_queue *queue;
        struct rte_vhost_vring *vr;
        uint32_t desc_indexes[MAX_PKT_BURST];
        uint32_t used_idx;
        uint32_t i = 0;
        uint16_t free_entries;
        uint16_t avail_idx;

        queue = &dev->queues[queue_id];
        vr    = &queue->vr;

        free_entries = *((volatile uint16_t *)&vr->avail->idx) -
                        queue->last_avail_idx;
        if (free_entries == 0)
                return 0;

        /* Prefetch available and used ring */
        avail_idx = queue->last_avail_idx & (vr->size - 1);
        used_idx  = queue->last_used_idx  & (vr->size - 1);
        rte_prefetch0(&vr->avail->ring[avail_idx]);
        rte_prefetch0(&vr->used->ring[used_idx]);

        count = RTE_MIN(count, MAX_PKT_BURST);
        count = RTE_MIN(count, free_entries);

        if (unlikely(count == 0))
                return 0;

        /*
         * Retrieve all of the head indexes first and pre-update used entries
         * to avoid caching issues.
         */
        for (i = 0; i < count; i++) {
                avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
                used_idx  = (queue->last_used_idx  + i) & (vr->size - 1);
                desc_indexes[i] = vr->avail->ring[avail_idx];

                vr->used->ring[used_idx].id  = desc_indexes[i];
                vr->used->ring[used_idx].len = 0;
        }

        /* Prefetch the first descriptor */
        rte_prefetch0(&vr->desc[desc_indexes[0]]);
        for (i = 0; i < count; i++) {
                int err;

                if (likely(i + 1 < count))
                        rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);

                pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
                if (unlikely(pkts[i] == NULL)) {
                        RTE_LOG(ERR, VHOST_DATA,
                                "Failed to allocate memory for mbuf.\n");
                        break;
                }

                err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
                if (unlikely(err)) {
                        rte_pktmbuf_free(pkts[i]);
                        break;
                }
        }

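        /*
         * Publish the i used entries: the barriers order our used-ring
         * writes and buffer reads before the used index update becomes
         * visible to the guest.
         */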
        queue->last_avail_idx += i;
        queue->last_used_idx += i;
        rte_smp_wmb();
        rte_smp_rmb();

        vr->used->idx += i;

        rte_vhost_vring_call(dev->vid, queue_id);

        return i;
}