/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";

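/* Per-port private data. Each virtual device owns a writable copy of the
 * dev_ops table (so tests can swap individual callbacks per port), a pair
 * of rings that stand in for the RX/TX paths of a real NIC, and a counter
 * used by the failing TX burst function.
 */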
struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};

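/* Selectable eth_dev_ops implementations: for each operation the tests can
 * install either a stub that succeeds or one that reports failure, via the
 * virtual_ethdev_*_fn_set_success() helpers further down.
 */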
static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev)
{
	eth_dev->data->dev_started = 0;

	return -1;
}
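
/* Stop the device: mark the link down and drain any mbufs still queued on
 * the RX and TX rings so they are returned to their mempool.
 */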
static void
virtual_ethdev_stop(struct rte_eth_dev *eth_dev)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));

	return 0;
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* Free any mbufs still queued on the TX ring; rte_ring_dequeue()
	 * returns -ENOENT once the ring is empty.
	 */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}

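/* Default operations: every callback starts out as the "success" variant.
 * Each port gets its own writable copy of this table in its private data
 * (see virtual_ethdev_create()).
 */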
static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};

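/* The setters below flip a single dev_ops callback between its success and
 * failure variant for one port. They mutate the port's private copy of the
 * ops table, so other virtual ports are unaffected.
 */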
void
virtual_ethdev_start_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}

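/* Burst functions backed by the private rings: RX pops mbufs that a test
 * previously injected with virtual_ethdev_add_mbufs_to_rx_queue(), while TX
 * pushes mbufs where virtual_ethdev_get_mbufs_from_tx_queue() can collect
 * them. Both update the internal rte_eth_stats counters.
 */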
static uint16_t
virtual_ethdev_rx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts, NULL);

	/* increment ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increment ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue,
				(void **)bufs, nb_pkts, NULL);

	/* increment opackets count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

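/* Failing TX burst: the first (nb_pkts - tx_burst_fail_count) packets are
 * treated as transmitted and freed; once the configured failure count
 * covers the whole burst, nothing is sent at all. Configure the count with
 * virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count().
 */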
static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opackets count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the "transmitted" packets in the burst */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}

void
virtual_ethdev_rx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint16_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint16_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint16_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

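/* Set the link status and fire the LSC callback, mimicking the interrupt a
 * real NIC would raise on a link state change.
 */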
void
virtual_ethdev_simulate_link_status_interrupt(uint16_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC,
				      NULL, NULL);
}

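/* Test-harness accessors: inject mbufs into the RX ring (to be returned by
 * the next RX burst) and drain mbufs that the device "transmitted" onto the
 * TX ring. Both return the number of mbufs actually moved.
 */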
int
virtual_ethdev_add_mbufs_to_rx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint16_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length, NULL);
}

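/* Create a virtual ethdev backed by a dummy PCI device/driver pair and the
 * two rings above. Returns the new port id on success or -1 on failure.
 *
 * A minimal usage sketch (hypothetical test flow; the MAC value and the
 * "default_conf" name are illustrative, not part of this file):
 *
 *	struct ether_addr mac = { .addr_bytes = {0x02, 0, 0, 0, 0, 1} };
 *	int port = virtual_ethdev_create("net_virtual0", &mac,
 *			rte_socket_id(), 1);
 *	rte_eth_dev_configure(port, 1, 1, &default_conf);
 *	virtual_ethdev_tx_burst_fn_set_success(port, 0);
 *	... exercise the code under test against `port` ...
 */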
int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0,
			socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST,
			socket_id, 0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST,
			socket_id, 0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_dev->device.name = eth_dev->data->name;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	pci_dev->device.driver = &pci_drv->driver;
	eth_dev->device = &pci_dev->device;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions so individual ops can be
	 * overridden per port without affecting other virtual devices.
	 */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(id_table);
	if (dev_private != NULL) {
		rte_ring_free(dev_private->rx_queue);
		rte_ring_free(dev_private->tx_queue);
	}
	rte_free(dev_private);

	return -1;
}