/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_ring.h>

#include "virtual_pmd.h"

#define MAX_PKT_BURST 512

static const char *virtual_ethdev_driver_name = "Virtual PMD";
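
/*
 * Per-device test state. The two rte_rings stand in for hardware descriptor
 * rings: packets "received" on the port are dequeued from rx_queue and
 * packets "transmitted" are enqueued onto tx_queue, so tests can inject and
 * inspect traffic without real hardware. tx_burst_fail_count lets a test
 * force the TX burst path to report part of a burst as failed.
 */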

struct virtual_ethdev_private {
	struct eth_dev_ops dev_ops;
	struct rte_eth_stats eth_stats;

	struct rte_ring *rx_queue;
	struct rte_ring *tx_queue;

	int tx_burst_fail_count;
};

struct virtual_ethdev_queue {
	int port_id;
	int queue_id;
};
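
/*
 * Most dev_ops callbacks below come in a _success/_fail pair. The default
 * ops table installs the _success variants; the virtual_ethdev_*_fn_set_*
 * helpers further down swap in the _fail variant so a test can exercise the
 * error paths of the code under test (e.g. the bonding driver, which is what
 * the bonded_eth_dev parameter names refer to).
 */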

static int
virtual_ethdev_start_success(struct rte_eth_dev *eth_dev __rte_unused)
{
	eth_dev->data->dev_started = 1;

	return 0;
}

static int
virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev __rte_unused)
{
	eth_dev->data->dev_started = 0;

	return -1;
}

static void virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused)
{
	void *pkt = NULL;
	struct virtual_ethdev_private *prv = eth_dev->data->dev_private;

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_started = 0;

	while (rte_ring_dequeue(prv->rx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	while (rte_ring_dequeue(prv->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);
}

static void
virtual_ethdev_close(struct rte_eth_dev *dev __rte_unused)
{}

static int
virtual_ethdev_configure_success(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
virtual_ethdev_configure_fail(struct rte_eth_dev *dev __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_info_get(struct rte_eth_dev *dev __rte_unused,
		struct rte_eth_dev_info *dev_info)
{
	dev_info->driver_name = virtual_ethdev_driver_name;
	dev_info->max_mac_addrs = 1;

	dev_info->max_rx_pktlen = (uint32_t)2048;

	dev_info->max_rx_queues = (uint16_t)128;
	dev_info->max_tx_queues = (uint16_t)512;

	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

static int
virtual_ethdev_rx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t rx_queue_id, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	struct virtual_ethdev_queue *rx_q;

	rx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (rx_q == NULL)
		return -1;

	rx_q->port_id = dev->data->port_id;
	rx_q->queue_id = rx_queue_id;

	dev->data->rx_queues[rx_queue_id] = rx_q;

	return 0;
}

static int
virtual_ethdev_rx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t rx_queue_id __rte_unused, uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool __rte_unused)
{
	return -1;
}

static int
virtual_ethdev_tx_queue_setup_success(struct rte_eth_dev *dev,
		uint16_t tx_queue_id, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct virtual_ethdev_queue *tx_q;

	tx_q = (struct virtual_ethdev_queue *)rte_zmalloc_socket(NULL,
			sizeof(struct virtual_ethdev_queue), 0, socket_id);

	if (tx_q == NULL)
		return -1;

	tx_q->port_id = dev->data->port_id;
	tx_q->queue_id = tx_queue_id;

	dev->data->tx_queues[tx_queue_id] = tx_q;

	return 0;
}

static int
virtual_ethdev_tx_queue_setup_fail(struct rte_eth_dev *dev __rte_unused,
		uint16_t tx_queue_id __rte_unused, uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_rx_queue_release(void *q __rte_unused)
{
}

static void
virtual_ethdev_tx_queue_release(void *q __rte_unused)
{
}

static int
virtual_ethdev_link_update_success(struct rte_eth_dev *bonded_eth_dev,
		int wait_to_complete __rte_unused)
{
	if (!bonded_eth_dev->data->dev_started)
		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}

static int
virtual_ethdev_link_update_fail(struct rte_eth_dev *bonded_eth_dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return -1;
}

static void
virtual_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;

	if (stats)
		rte_memcpy(stats, &dev_private->eth_stats, sizeof(*stats));
}

static void
virtual_ethdev_stats_reset(struct rte_eth_dev *dev)
{
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	void *pkt = NULL;

	/* Drain any packets still queued on the TX ring */
	while (rte_ring_dequeue(dev_private->tx_queue, &pkt) != -ENOENT)
		rte_pktmbuf_free(pkt);

	/* Reset internal statistics */
	memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats));
}

static void
virtual_ethdev_promiscuous_mode_enable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_promiscuous_mode_disable(struct rte_eth_dev *dev __rte_unused)
{}

static void
virtual_ethdev_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		__rte_unused struct ether_addr *addr)
{
}

static const struct eth_dev_ops virtual_ethdev_default_dev_ops = {
	.dev_configure = virtual_ethdev_configure_success,
	.dev_start = virtual_ethdev_start_success,
	.dev_stop = virtual_ethdev_stop,
	.dev_close = virtual_ethdev_close,
	.dev_infos_get = virtual_ethdev_info_get,
	.rx_queue_setup = virtual_ethdev_rx_queue_setup_success,
	.tx_queue_setup = virtual_ethdev_tx_queue_setup_success,
	.rx_queue_release = virtual_ethdev_rx_queue_release,
	.tx_queue_release = virtual_ethdev_tx_queue_release,
	.link_update = virtual_ethdev_link_update_success,
	.mac_addr_set = virtual_ethdev_mac_address_set,
	.stats_get = virtual_ethdev_stats_get,
	.stats_reset = virtual_ethdev_stats_reset,
	.promiscuous_enable = virtual_ethdev_promiscuous_mode_enable,
	.promiscuous_disable = virtual_ethdev_promiscuous_mode_disable
};
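
/*
 * virtual_ethdev_create() copies this table into each device's private data
 * and points eth_dev->dev_ops at that copy, so the helpers below can replace
 * individual callbacks on one virtual port without affecting any other
 * virtual port created by the same test.
 */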

void
virtual_ethdev_start_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_start = virtual_ethdev_start_success;
	else
		dev_ops->dev_start = virtual_ethdev_start_fail;
}

void
virtual_ethdev_configure_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->dev_configure = virtual_ethdev_configure_success;
	else
		dev_ops->dev_configure = virtual_ethdev_configure_fail;
}

void
virtual_ethdev_rx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_success;
	else
		dev_ops->rx_queue_setup = virtual_ethdev_rx_queue_setup_fail;
}

void
virtual_ethdev_tx_queue_setup_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_success;
	else
		dev_ops->tx_queue_setup = virtual_ethdev_tx_queue_setup_fail;
}

void
virtual_ethdev_link_update_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private = dev->data->dev_private;
	struct eth_dev_ops *dev_ops = &dev_private->dev_ops;

	if (success)
		dev_ops->link_update = virtual_ethdev_link_update_success;
	else
		dev_ops->link_update = virtual_ethdev_link_update_fail;
}
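
/*
 * Burst callbacks. Unlike the dev_ops above, these are installed directly on
 * the rte_eth_dev (rx_pkt_burst/tx_pkt_burst), which is why their fn_set
 * helpers below patch the device structure rather than the ops table. The
 * success variants simply move mbufs between the caller and the device's
 * backing rings and update the software statistics counters.
 */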

static uint16_t
virtual_ethdev_rx_burst_success(void *queue __rte_unused,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_queue *pq_map;
	struct virtual_ethdev_private *dev_private;

	int rx_count, i;

	pq_map = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[pq_map->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **)bufs,
			nb_pkts);

	/* increments ipackets count */
	dev_private->eth_stats.ipackets += rx_count;

	/* increments ibytes count */
	for (i = 0; i < rx_count; i++)
		dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]);

	return rx_count;
}

static uint16_t
virtual_ethdev_rx_burst_fail(void *queue __rte_unused,
		struct rte_mbuf **bufs __rte_unused,
		uint16_t nb_pkts __rte_unused)
{
	return 0;
}

static uint16_t
virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct virtual_ethdev_queue *tx_q = (struct virtual_ethdev_queue *)queue;

	struct rte_eth_dev *vrtl_eth_dev;
	struct virtual_ethdev_private *dev_private;

	int i;

	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (!vrtl_eth_dev->data->dev_link.link_status)
		nb_pkts = 0;
	else
		nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs,
				nb_pkts);

	/* increment opacket count */
	dev_private->eth_stats.opackets += nb_pkts;

	/* increment obytes count */
	for (i = 0; i < nb_pkts; i++)
		dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]);

	return nb_pkts;
}

static uint16_t
virtual_ethdev_tx_burst_fail(void *queue, struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_eth_dev *vrtl_eth_dev = NULL;
	struct virtual_ethdev_queue *tx_q = NULL;
	struct virtual_ethdev_private *dev_private = NULL;

	int i;

	tx_q = (struct virtual_ethdev_queue *)queue;
	vrtl_eth_dev = &rte_eth_devices[tx_q->port_id];
	dev_private = vrtl_eth_dev->data->dev_private;

	if (dev_private->tx_burst_fail_count < nb_pkts) {
		int successfully_txd = nb_pkts - dev_private->tx_burst_fail_count;

		/* increment opacket count */
		dev_private->eth_stats.opackets += successfully_txd;

		/* free the packets counted as transmitted */
		for (i = 0; i < successfully_txd; i++) {
			if (bufs[i] != NULL)
				rte_pktmbuf_free(bufs[i]);

			bufs[i] = NULL;
		}

		return successfully_txd;
	}

	return 0;
}
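
/*
 * With tx_burst_fail_count set to N, the fail variant above treats the last
 * N packets of each burst as failed: it reports (nb_pkts - N) packets as
 * transmitted and frees them, leaving the rest with the caller. If N is at
 * least the burst size the whole burst is rejected. Note that packets
 * "transmitted" by this variant are freed rather than queued on the TX ring,
 * so they cannot be read back via virtual_ethdev_get_mbufs_from_tx_queue().
 */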

void
virtual_ethdev_rx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	if (success)
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	else
		vrtl_eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_fail;
}

void
virtual_ethdev_tx_burst_fn_set_success(uint8_t port_id, uint8_t success)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;

	if (success)
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;
	else
		vrtl_eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_fail;

	dev_private->tx_burst_fail_count = 0;
}

void
virtual_ethdev_tx_burst_fn_set_tx_pkt_fail_count(uint8_t port_id,
		uint8_t packet_fail_count)
{
	struct virtual_ethdev_private *dev_private = NULL;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	dev_private->tx_burst_fail_count = packet_fail_count;
}

void
virtual_ethdev_set_link_status(uint8_t port_id, uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;
}

void
virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id,
		uint8_t link_status)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	vrtl_eth_dev->data->dev_link.link_status = link_status;

	_rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

int
virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];
	struct virtual_ethdev_private *dev_private =
			vrtl_eth_dev->data->dev_private;

	return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst,
			burst_length);
}

int
virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id,
		struct rte_mbuf **pkt_burst, int burst_length)
{
	struct virtual_ethdev_private *dev_private;
	struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id];

	dev_private = vrtl_eth_dev->data->dev_private;
	return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst,
			burst_length);
}
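
/*
 * The two helpers above are the test harness's window into the device:
 * virtual_ethdev_add_mbufs_to_rx_queue() stages packets that the next
 * rte_eth_rx_burst() on the port will return, and
 * virtual_ethdev_get_mbufs_from_tx_queue() retrieves whatever the code under
 * test handed to rte_eth_tx_burst(). Both are bounded by the backing ring,
 * which is created with MAX_PKT_BURST entries.
 */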

static uint8_t
get_number_of_sockets(void)
{
	int sockets = 0;
	int i;
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();

	for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++) {
		if (sockets < ms[i].socket_id)
			sockets = ms[i].socket_id;
	}

	/* Number of sockets = maximum socket_id + 1 */
	return ++sockets;
}

int
virtual_ethdev_create(const char *name, struct ether_addr *mac_addr,
		uint8_t socket_id, uint8_t isr_support)
{
	struct rte_pci_device *pci_dev = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct eth_driver *eth_drv = NULL;
	struct rte_pci_driver *pci_drv = NULL;
	struct rte_pci_id *id_table = NULL;
	struct virtual_ethdev_private *dev_private = NULL;
	char name_buf[RTE_RING_NAMESIZE];

	/* now do all data allocation - for eth_dev structure, dummy pci driver
	 * and internal (dev_private) data
	 */

	if (socket_id >= get_number_of_sockets())
		goto err;

	pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
	if (pci_dev == NULL)
		goto err;

	eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
	if (eth_drv == NULL)
		goto err;

	pci_drv = rte_zmalloc_socket(name, sizeof(*pci_drv), 0, socket_id);
	if (pci_drv == NULL)
		goto err;

	id_table = rte_zmalloc_socket(name, sizeof(*id_table), 0, socket_id);
	if (id_table == NULL)
		goto err;
	id_table->device_id = 0xBEEF;

	dev_private = rte_zmalloc_socket(name, sizeof(*dev_private), 0, socket_id);
	if (dev_private == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name);
	dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->rx_queue == NULL)
		goto err;

	snprintf(name_buf, sizeof(name_buf), "%s_txQ", name);
	dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id,
			0);
	if (dev_private->tx_queue == NULL)
		goto err;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto err;

	pci_dev->device.numa_node = socket_id;
	pci_drv->driver.name = virtual_ethdev_driver_name;
	pci_drv->id_table = id_table;

	if (isr_support)
		pci_drv->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_drv->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	eth_drv->pci_drv = (struct rte_pci_driver)(*pci_drv);
	eth_dev->driver = eth_drv;

	eth_dev->data->nb_rx_queues = (uint16_t)1;
	eth_dev->data->nb_tx_queues = (uint16_t)1;

	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	eth_dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;

	eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL)
		goto err;

	memcpy(eth_dev->data->mac_addrs, mac_addr,
			sizeof(*eth_dev->data->mac_addrs));

	eth_dev->data->dev_started = 0;
	eth_dev->data->promiscuous = 0;
	eth_dev->data->scattered_rx = 0;
	eth_dev->data->all_multicast = 0;

	eth_dev->data->dev_private = dev_private;

	/* Copy default device operation functions */
	dev_private->dev_ops = virtual_ethdev_default_dev_ops;
	eth_dev->dev_ops = &dev_private->dev_ops;

	eth_dev->pci_dev = pci_dev;
	eth_dev->pci_dev->device.driver = &eth_drv->pci_drv.driver;

	eth_dev->rx_pkt_burst = virtual_ethdev_rx_burst_success;
	eth_dev->tx_pkt_burst = virtual_ethdev_tx_burst_success;

	return eth_dev->data->port_id;

err:
	rte_free(pci_dev);
	rte_free(pci_drv);
	rte_free(eth_drv);
	rte_free(id_table);
	rte_free(dev_private);

	return -1;
}
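
/*
 * Illustrative usage only (not part of this file): a test could create and
 * drive a virtual port roughly as follows, assuming a test-local mempool
 * "mp" and MAC address "mac" (hypothetical names):
 *
 *	int port = virtual_ethdev_create("virt_eth0", &mac, rte_socket_id(), 1);
 *	struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
 *
 *	virtual_ethdev_add_mbufs_to_rx_queue(port, &pkt, 1);
 *	virtual_ethdev_simulate_link_status_interrupt(port, 1);
 *
 *	// ... exercise the code under test, then check what it transmitted:
 *	struct rte_mbuf *out[MAX_PKT_BURST];
 *	int n = virtual_ethdev_get_mbufs_from_tx_queue(port, out, MAX_PKT_BURST);
 */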