/*
 * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <sys/eventfd.h>
#include <sys/param.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_pause.h>
#define MAX_QUEUES 128

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE  RTE_MBUF_DEFAULT_BUF_SIZE

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

#define BURST_RX_WAIT_US 15 /* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4  /* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE 0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX           1
#define DEVICE_SAFE_REMOVE  2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;
static int dequeue_zero_copy;

static int builtin_net_driver;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by user. */
static char *socket_files;
static int nb_sockets;
/* empty vmdq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
    .rxmode = {
        .mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
        .header_split   = 0, /**< Header Split disabled */
        .hw_ip_checksum = 0, /**< IP checksum offload disabled */
        .hw_vlan_filter = 0, /**< VLAN filtering disabled */
        /*
         * VLAN strip is necessary for 1G NICs such as I350;
         * it fixes a bug where IPv4 forwarding in the guest could not
         * forward packets from one virtio device to another.
         */
        .hw_vlan_strip  = 1, /**< VLAN strip enabled. */
        .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
        .hw_strip_crc   = 1, /**< CRC stripped by hardware */
    },

    .txmode = {
        .mq_mode = ETH_MQ_TX_NONE,
    },
    .rx_adv_conf = {
        /*
         * should be overridden separately in code with
         * appropriate values
         */
        .vmdq_rx_conf = {
            .nb_queue_pools = ETH_8_POOLS,
            .enable_default_pool = 0,
            .default_pool = 0,
            .nb_pool_maps = 0,
            .pool_map = {{0, 0},},
        },
    },
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
    1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
    1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
    1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
    1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
    1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
    1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
    1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
    1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
    TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];
/* Used for queueing bursts of TX packets. */
struct mbuf_table {
    unsigned len;
    unsigned txq_id;
    struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
                 / US_PER_S * BURST_TX_DRAIN_US)
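
/*
 * Worked example (illustrative, not from the source): on a machine with a
 * 2.0 GHz TSC, MBUF_TABLE_DRAIN_TSC = ceil(2e9 / 1e6) * 100
 * = 2000 * 100 = 200000 cycles, i.e. the TX table is force-drained roughly
 * every 100 us of wall-clock time.
 */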
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
    struct rte_eth_vmdq_rx_conf conf;
    struct rte_eth_vmdq_rx_conf *def_conf =
        &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
    unsigned i;

    memset(&conf, 0, sizeof(conf));
    conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
    conf.nb_pool_maps = num_devices;
    conf.enable_loop_back = def_conf->enable_loop_back;
    conf.rx_mode = def_conf->rx_mode;

    for (i = 0; i < conf.nb_pool_maps; i++) {
        conf.pool_map[i].vlan_id = vlan_tags[i];
        conf.pool_map[i].pools = (1UL << i);
    }

    (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
    (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
           sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
    return 0;
}
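
/*
 * Example of the resulting mapping (illustrative): with num_devices == 8,
 * pool 0 receives VLAN 1000, pool 1 VLAN 1001, ..., pool 7 VLAN 1007; each
 * pool_map entry enables exactly one pool via the (1UL << i) bitmask.
 */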
/*
 * Validate the device number against the max pool number obtained from
 * dev_info. If the device number is invalid, give the error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
    if (num_devices > max_nb_devices) {
        RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
        return -1;
    }
    return 0;
}
/*
 * Initialises a given port using global settings and with the rx buffers
 * coming from the mbuf_pool passed as parameter.
 */
static inline int
port_init(uint16_t port)
{
    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf;
    struct rte_eth_rxconf *rxconf;
    struct rte_eth_txconf *txconf;
    int16_t rx_rings, tx_rings;
    uint16_t rx_ring_size, tx_ring_size;
    int retval;
    uint16_t q;

    /*
     * The max pool number from dev_info will be used to validate the pool
     * number specified in the command line.
     */
    rte_eth_dev_info_get(port, &dev_info);

    rxconf = &dev_info.default_rxconf;
    txconf = &dev_info.default_txconf;
    rxconf->rx_drop_en = 1;

    /* Enable vlan offload */
    txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

    /* Configure the number of supported virtio devices based on VMDQ limits */
    num_devices = dev_info.max_vmdq_pools;

    rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
    tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

    /*
     * When dequeue zero copy is enabled, the guest Tx used vring will be
     * updated only when the corresponding mbuf is freed. Thus, the nb_tx_desc
     * (tx_ring_size here) must be small enough so that the driver will
     * hit the free threshold easily and free mbufs timely. Otherwise,
     * the guest Tx vring would be starved.
     */
    if (dequeue_zero_copy)
        tx_ring_size = 64;

    tx_rings = (uint16_t)rte_lcore_count();
    retval = validate_num_devices(MAX_DEVICES);
    if (retval < 0)
        return retval;

    /* Get port configuration. */
    retval = get_eth_conf(&port_conf, num_devices);
    if (retval < 0)
        return retval;
    /* NIC queues are divided into pf queues and vmdq queues. */
    num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
    queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
    num_vmdq_queues = num_devices * queues_per_pool;
    num_queues = num_pf_queues + num_vmdq_queues;
    vmdq_queue_base = dev_info.vmdq_queue_base;
    vmdq_pool_base = dev_info.vmdq_pool_base;
    printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
        num_pf_queues, num_devices, queues_per_pool);

    if (port >= rte_eth_dev_count())
        return -1;

    rx_rings = (uint16_t)dev_info.max_rx_queues;
    /* Configure ethernet device. */
    retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
    if (retval != 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
            port, strerror(-retval));
        return retval;
    }

    retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
        &tx_ring_size);
    if (retval != 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
            "for port %u: %s.\n", port, strerror(-retval));
        return retval;
    }
    if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
        RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
            "for Rx queues on port %u.\n", port);
        return -1;
    }
    /* Setup the queues. */
    for (q = 0; q < rx_rings; q++) {
        retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
                    rte_eth_dev_socket_id(port),
                    rxconf,
                    mbuf_pool);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup rx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }
    for (q = 0; q < tx_rings; q++) {
        retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
                    rte_eth_dev_socket_id(port),
                    txconf);
        if (retval < 0) {
            RTE_LOG(ERR, VHOST_PORT,
                "Failed to setup tx queue %u of port %u: %s.\n",
                q, port, strerror(-retval));
            return retval;
        }
    }

    /* Start the device. */
    retval = rte_eth_dev_start(port);
    if (retval < 0) {
        RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
            port, strerror(-retval));
        return retval;
    }

    if (promiscuous)
        rte_eth_promiscuous_enable(port);

    rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
    RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
    RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
        " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
        port,
        vmdq_ports_eth_addr[port].addr_bytes[0],
        vmdq_ports_eth_addr[port].addr_bytes[1],
        vmdq_ports_eth_addr[port].addr_bytes[2],
        vmdq_ports_eth_addr[port].addr_bytes[3],
        vmdq_ports_eth_addr[port].addr_bytes[4],
        vmdq_ports_eth_addr[port].addr_bytes[5]);

    return 0;
}
/*
 * Set socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
    /* reject overly long socket paths */
    if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
        return -1;

    socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
    snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
    nb_sockets++;

    return 0;
}
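
/*
 * Storage layout note (descriptive, not in the original comments): the
 * socket paths live in one flat buffer of nb_sockets fixed-size PATH_MAX
 * slots, which is why later code indexes it as socket_files + i * PATH_MAX.
 */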
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
    char *end = NULL;
    unsigned long pm;

    /* parse hexadecimal string */
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    return pm;
}

/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
    char *end = NULL;
    unsigned long num;

    /* parse unsigned int string */
    num = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;

    if (num > max_valid_value)
        return -1;

    return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
    RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
    "    --vm2vm [0|1|2]\n"
    "    --rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
    "    --socket-file <path>\n"
    "    -p PORTMASK: Set mask for ports to be used by application\n"
    "    --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
    "    --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
    "    --rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. This has effect only if rx retries are enabled\n"
    "    --rx-retry-num [0-N]: the number of retries on rx. This has effect only if rx retries are enabled\n"
    "    --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
    "    --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
    "    --socket-file: The path of the socket file.\n"
    "    --tx-csum [0|1]: disable/enable TX checksum offload.\n"
    "    --tso [0|1]: disable/enable TCP segmentation offload (TSO).\n"
    "    --client: register a vhost-user socket as client mode.\n"
    "    --dequeue-zero-copy: enables dequeue zero copy\n",
           prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
    int opt, ret;
    int option_index;
    unsigned i;
    const char *prgname = argv[0];
    static struct option long_option[] = {
        {"vm2vm", required_argument, NULL, 0},
        {"rx-retry", required_argument, NULL, 0},
        {"rx-retry-delay", required_argument, NULL, 0},
        {"rx-retry-num", required_argument, NULL, 0},
        {"mergeable", required_argument, NULL, 0},
        {"stats", required_argument, NULL, 0},
        {"socket-file", required_argument, NULL, 0},
        {"tx-csum", required_argument, NULL, 0},
        {"tso", required_argument, NULL, 0},
        {"client", no_argument, &client_mode, 1},
        {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
        {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
        {NULL, 0, 0, 0},
    };

    /* Parse command line */
    while ((opt = getopt_long(argc, argv, "p:P",
            long_option, &option_index)) != EOF) {
        switch (opt) {
        /* Portmask */
        case 'p':
            enabled_port_mask = parse_portmask(optarg);
            if (enabled_port_mask == 0) {
                RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
                us_vhost_usage(prgname);
                return -1;
            }
            break;

        case 'P':
            promiscuous = 1;
            vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
                ETH_VMDQ_ACCEPT_BROADCAST |
                ETH_VMDQ_ACCEPT_MULTICAST;
            break;
        case 0:
            /* Enable/disable vm2vm comms. */
            if (!strncmp(long_option[option_index].name, "vm2vm",
                MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for "
                        "vm2vm [0|1|2]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                vm2vm_mode = (vm2vm_type)ret;
            }

            /* Enable/disable retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_retry = ret;
            }

            /* Enable/disable TX checksum offload. */
            if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tx_csum = ret;
            }

            /* Enable/disable TSO offload. */
            if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_tso = ret;
            }

            /* Specify the retry delay time (in microseconds) on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_delay_time = ret;
            }

            /* Specify the number of retries on RX. */
            if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                burst_rx_retry_num = ret;
            }

            /* Enable/disable RX mergeable buffers. */
            if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                mergeable = !!ret;
                if (ret) {
                    vmdq_conf_default.rxmode.jumbo_frame = 1;
                    vmdq_conf_default.rxmode.max_rx_pkt_len
                        = JUMBO_FRAME_MAX_SIZE;
                }
            }

            /* Enable/disable stats. */
            if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for stats [0..N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                }
                enable_stats = ret;
            }

            /* Set socket file path. */
            if (!strncmp(long_option[option_index].name,
                "socket-file", MAX_LONG_OPT_SZ)) {
                if (us_vhost_parse_socket_path(optarg) == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for socket name (Max %d characters)\n",
                        PATH_MAX);
                    us_vhost_usage(prgname);
                    return -1;
                }
            }

            break;

        /* Invalid option - print options. */
        default:
            us_vhost_usage(prgname);
            return -1;
        }
    }
    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
        if (enabled_port_mask & (1 << i))
            ports[num_ports++] = i;
    }

    if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }

    return 0;
}
/*
 * Update the global vars NUM_PORTS and PORTS according to the number of
 * system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
    unsigned valid_num_ports = num_ports;
    unsigned portid;

    if (num_ports > nb_ports) {
        RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
            num_ports, nb_ports);
        num_ports = nb_ports;
    }

    for (portid = 0; portid < num_ports; portid++) {
        if (ports[portid] >= nb_ports) {
            RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
                ports[portid], (nb_ports - 1));
            ports[portid] = INVALID_PORT_ID;
            valid_num_ports--;
        }
    }
    return valid_num_ports;
}
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct ether_addr *mac)
{
    struct vhost_dev *vdev;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->ready == DEVICE_RX &&
            is_same_ether_addr(mac, &vdev->mac_address))
            return vdev;
    }

    return NULL;
}
/*
 * This function learns the MAC address of the device and registers this along with a
 * vlan tag to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    int i, ret;

    /* Learn MAC address of guest device from packet */
    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    if (find_vhost_dev(&pkt_hdr->s_addr)) {
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) device is using a registered MAC!\n",
            vdev->vid);
        return -1;
    }

    for (i = 0; i < ETHER_ADDR_LEN; i++)
        vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

    /* vlan_tag currently uses the device_id. */
    vdev->vlan_tag = vlan_tags[vdev->vid];

    /* Print out VMDQ registration info. */
    RTE_LOG(INFO, VHOST_DATA,
        "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
        vdev->vid,
        vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
        vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
        vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
        vdev->vlan_tag);

    /* Register the MAC address. */
    ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
                (uint32_t)vdev->vid + vmdq_pool_base);
    if (ret)
        RTE_LOG(ERR, VHOST_DATA,
            "(%d) failed to add device MAC address to VMDQ\n",
            vdev->vid);

    rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

    /* Set device as ready for RX. */
    vdev->ready = DEVICE_RX;

    return 0;
}
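
/*
 * Descriptive note (inferred from the code above, not an original comment):
 * the switch is self-learning in one direction only -- a guest's MAC is
 * captured from the source address of the first packet it transmits, so a
 * device that never sends cannot be reached until it does.
 */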
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding
 * buffers to the RX queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
    unsigned i = 0;
    unsigned rx_count;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

    if (vdev->ready == DEVICE_RX) {
        /* clear MAC and VLAN settings */
        rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
        for (i = 0; i < 6; i++)
            vdev->mac_address.addr_bytes[i] = 0;

        vdev->vlan_tag = 0;

        /* Clear out the receive buffers */
        rx_count = rte_eth_rx_burst(ports[0],
                (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

        while (rx_count) {
            for (i = 0; i < rx_count; i++)
                rte_pktmbuf_free(pkts_burst[i]);

            rx_count = rte_eth_rx_burst(ports[0],
                    (uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
        }

        vdev->ready = DEVICE_MAC_LEARNING;
    }
}
static __rte_always_inline void
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
        struct rte_mbuf *m)
{
    uint16_t ret;

    if (builtin_net_driver) {
        ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
    } else {
        ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
    }

    if (enable_stats) {
        rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
        rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
        src_vdev->stats.tx_total++;
        src_vdev->stats.tx += ret;
    }
}
/*
 * Check if the packet destination MAC address is for a local device. If so
 * then put the packet on that device's RX queue. If not then return.
 */
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
    struct ether_hdr *pkt_hdr;
    struct vhost_dev *dst_vdev;

    pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return -1;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC are the same. Dropping packet.\n",
            vdev->vid);
        return 0;
    }

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is local\n", dst_vdev->vid);

    if (unlikely(dst_vdev->remove)) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) device is marked for removal\n", dst_vdev->vid);
        return 0;
    }

    virtio_xmit(dst_vdev, vdev, m);
    return 0;
}
/*
 * Check if the destination MAC of a packet belongs to a local VM; if so,
 * return its vlan tag and the length offset.
 */
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
    uint32_t *offset, uint16_t *vlan_tag)
{
    struct vhost_dev *dst_vdev;
    struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return 0;

    if (vdev->vid == dst_vdev->vid) {
        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "(%d) TX: src and dst MAC are the same. Dropping packet.\n",
            vdev->vid);
        return -1;
    }

    /*
     * HW vlan strip will reduce the packet length by the length of the
     * vlan tag, so the packet length needs to be restored by adding it
     * back.
     */
    *offset = VLAN_HLEN;
    *vlan_tag = vlan_tags[vdev->vid];

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
        vdev->vid, dst_vdev->vid, *vlan_tag);

    return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
    if (ol_flags & PKT_TX_IPV4)
        return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
    else /* assume ethertype == ETHER_TYPE_IPv6 */
        return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
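
/*
 * Background (a documented fact about the DPDK offload contract, not stated
 * in this file): when TCP segmentation is offloaded, the TCP checksum field
 * must be pre-filled with the pseudo-header checksum; the NIC then completes
 * the full checksum for each segment. rte_ipv4_phdr_cksum() and
 * rte_ipv6_phdr_cksum() compute exactly that value, which
 * virtio_tx_offload() below writes into the header.
 */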
static void virtio_tx_offload(struct rte_mbuf *m)
{
    void *l3_hdr;
    struct ipv4_hdr *ipv4_hdr = NULL;
    struct tcp_hdr *tcp_hdr = NULL;
    struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

    l3_hdr = (char *)eth_hdr + m->l2_len;

    if (m->ol_flags & PKT_TX_IPV4) {
        ipv4_hdr = l3_hdr;
        ipv4_hdr->hdr_checksum = 0;
        m->ol_flags |= PKT_TX_IP_CKSUM;
    }

    tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
    tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
    while (n--)
        rte_pktmbuf_free(pkts[n]);
}

static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
    uint16_t count;

    count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
                 tx_q->m_table, tx_q->len);
    if (unlikely(count < tx_q->len))
        free_pkts(&tx_q->m_table[count], tx_q->len - count);

    tx_q->len = 0;
}
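
/*
 * Design note (inferred from the code above): rte_eth_tx_burst() may accept
 * fewer packets than requested; anything the NIC did not take is freed
 * rather than re-queued, i.e. the switch trades packet loss for bounded
 * latency on the TX path.
 */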
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
    struct mbuf_table *tx_q;
    unsigned offset = 0;
    const uint16_t lcore_id = rte_lcore_id();
    struct ether_hdr *nh;

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
        struct vhost_dev *vdev2;

        TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
            if (vdev2 != vdev)
                virtio_xmit(vdev2, vdev, m);
        }
        goto queue2nic;
    }

    /* check if destination is local VM */
    if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
        rte_pktmbuf_free(m);
        return;
    }

    if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
        if (unlikely(find_local_dest(vdev, m, &offset,
                         &vlan_tag) != 0)) {
            rte_pktmbuf_free(m);
            return;
        }
    }

    RTE_LOG_DP(DEBUG, VHOST_DATA,
        "(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

    /* Add packet to the port tx queue */
    tx_q = &lcore_tx_queue[lcore_id];

    nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
    if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
        /* Guest has inserted the vlan tag. */
        struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
        uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
        if ((vm2vm_mode == VM2VM_HARDWARE) &&
            (vh->vlan_tci != vlan_tag_be))
            vh->vlan_tci = vlan_tag_be;
    } else {
        m->ol_flags |= PKT_TX_VLAN_PKT;

        /*
         * Find the right seg to adjust the data len when offset is
         * bigger than tail room size.
         */
        if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
            if (likely(offset <= rte_pktmbuf_tailroom(m)))
                m->data_len += offset;
            else {
                struct rte_mbuf *seg = m;

                while ((seg->next != NULL) &&
                    (offset > rte_pktmbuf_tailroom(seg)))
                    seg = seg->next;

                seg->data_len += offset;
            }
            m->pkt_len += offset;
        }

        m->vlan_tci = vlan_tag;
    }

    if (m->ol_flags & PKT_TX_TCP_SEG)
        virtio_tx_offload(m);

    tx_q->m_table[tx_q->len++] = m;
    if (enable_stats) {
        vdev->stats.tx_total++;
        vdev->stats.tx++;
    }

    if (unlikely(tx_q->len == MAX_PKT_BURST))
        do_drain_mbuf_table(tx_q);
}
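
/*
 * Routing summary (descriptive, not an original comment): broadcast frames
 * are replicated to every other vhost device and also sent out of the NIC;
 * in VM2VM_SOFTWARE mode, unicast frames to a known local MAC short-circuit
 * through virtio_tx_local(); in VM2VM_HARDWARE mode the frame always leaves
 * the port, and the external L2 switch (with VT loopback enabled in main())
 * hairpins it back to the destination pool.
 */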
static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
    static uint64_t prev_tsc;
    uint64_t cur_tsc;

    if (tx_q->len == 0)
        return;

    cur_tsc = rte_rdtsc();
    if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
        prev_tsc = cur_tsc;

        RTE_LOG_DP(DEBUG, VHOST_DATA,
            "TX queue drained after timeout with burst size %u\n",
            tx_q->len);
        do_drain_mbuf_table(tx_q);
    }
}
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
    uint16_t rx_count, enqueue_count;
    struct rte_mbuf *pkts[MAX_PKT_BURST];

    rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
                    pkts, MAX_PKT_BURST);
    if (!rx_count)
        return;
    /*
     * When "enable_retry" is set, here we wait and retry when there
     * are not enough free slots in the queue to hold @rx_count packets,
     * to diminish packet loss.
     */
    if (enable_retry &&
        unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
            VIRTIO_RXQ))) {
        uint32_t retry;
        for (retry = 0; retry < burst_rx_retry_num; retry++) {
            rte_delay_us(burst_rx_delay_time);
            if (rx_count <= rte_vhost_avail_entries(vdev->vid,
                    VIRTIO_RXQ))
                break;
        }
    }

    if (builtin_net_driver) {
        enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                        pkts, rx_count);
    } else {
        enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
                        pkts, rx_count);
    }
    if (enable_stats) {
        rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
        rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
    }

    free_pkts(pkts, rx_count);
}
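
/*
 * Worked example of the retry budget (using the defaults defined above):
 * burst_rx_retry_num (4) attempts spaced burst_rx_delay_time (15 us) apart
 * give a worst-case wait of about 60 us per burst before the packets are
 * enqueued anyway; whatever still does not fit is simply dropped.
 */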
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
    struct rte_mbuf *pkts[MAX_PKT_BURST];
    uint16_t count;
    uint16_t i;

    if (builtin_net_driver) {
        count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
                    pkts, MAX_PKT_BURST);
    } else {
        count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
                    mbuf_pool, pkts, MAX_PKT_BURST);
    }

    /* setup VMDq for the first packet */
    if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
        if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
            free_pkts(pkts, count);
    }

    for (i = 0; i < count; ++i)
        virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of the packets to the guest virtio Rx ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx()
 *
 *      Which drains the guest virtio Tx queue and delivers all of the
 *      packets to the target, which could be another vhost device, or
 *      the physical eth dev. The route is done in function
 *      "virtio_tx_route".
 * }
 */
static int
switch_worker(void *arg __rte_unused)
{
    unsigned i;
    unsigned lcore_id = rte_lcore_id();
    struct vhost_dev *vdev;
    struct mbuf_table *tx_q;

    RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

    tx_q = &lcore_tx_queue[lcore_id];
    for (i = 0; i < rte_lcore_count(); i++) {
        if (lcore_ids[i] == lcore_id) {
            tx_q->txq_id = i;
            break;
        }
    }

    while (1) {
        drain_mbuf_table(tx_q);

        /*
         * Inform the configuration core that we have exited the
         * linked list and that no devices are in use if requested.
         */
        if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
            lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

        /*
         * Process vhost devices
         */
        TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
                  lcore_vdev_entry) {
            if (unlikely(vdev->remove)) {
                unlink_vmdq(vdev);
                vdev->ready = DEVICE_SAFE_REMOVE;
                continue;
            }

            if (likely(vdev->ready == DEVICE_RX))
                drain_eth_rx(vdev);

            if (likely(!vdev->remove))
                drain_virtio_tx(vdev);
        }
    }

    return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
 * of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
    struct vhost_dev *vdev = NULL;
    int lcore;

    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->vid == vid)
            break;
    }
    if (!vdev)
        return;
    /* set the remove flag. */
    vdev->remove = 1;
    while (vdev->ready != DEVICE_SAFE_REMOVE)
        rte_pause();

    if (builtin_net_driver)
        vs_vhost_net_remove(vdev);

    TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
             lcore_vdev_entry);
    TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

    /* Set the dev_removal_flag on each lcore. */
    RTE_LCORE_FOREACH_SLAVE(lcore)
        lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

    /*
     * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
     * we can be sure that they can no longer access the device removed
     * from the linked lists and that the devices are no longer in use.
     */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
            rte_pause();
    }

    lcore_info[vdev->coreid].device_num--;

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been removed from data core\n",
        vdev->vid);

    rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the
 * main linked list and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
    int lcore, core_add = 0;
    uint32_t device_num_min = num_devices;
    struct vhost_dev *vdev;

    vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
    if (vdev == NULL) {
        RTE_LOG(INFO, VHOST_DATA,
            "(%d) couldn't allocate memory for vhost dev\n",
            vid);
        return -1;
    }
    vdev->vid = vid;

    if (builtin_net_driver)
        vs_vhost_net_setup(vdev);

    TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
    vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

    /* reset ready flag */
    vdev->ready = DEVICE_MAC_LEARNING;
    vdev->remove = 0;

    /* Find a suitable lcore to add the device. */
    RTE_LCORE_FOREACH_SLAVE(lcore) {
        if (lcore_info[lcore].device_num < device_num_min) {
            device_num_min = lcore_info[lcore].device_num;
            core_add = lcore;
        }
    }
    vdev->coreid = core_add;

    TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
              lcore_vdev_entry);
    lcore_info[vdev->coreid].device_num++;

    /* Disable notifications. */
    rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
    rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

    RTE_LOG(INFO, VHOST_DATA,
        "(%d) device has been added to data core %d\n",
        vid, vdev->coreid);

    return 0;
}
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has fully completed.
 */
static const struct vhost_device_ops virtio_net_device_ops =
{
    .new_device =  new_device,
    .destroy_device = destroy_device,
};
/*
 * This is a thread that wakes up periodically to print stats if the user
 * has enabled them.
 */
static void
print_stats(void)
{
    struct vhost_dev *vdev;
    uint64_t tx_dropped, rx_dropped;
    uint64_t tx, tx_total, rx, rx_total;
    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

    while (1) {
        sleep(enable_stats);

        /* Clear screen and move to top left */
        printf("%s%s\n", clr, top_left);
        printf("Device statistics =================================\n");

        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            tx_total   = vdev->stats.tx_total;
            tx         = vdev->stats.tx;
            tx_dropped = tx_total - tx;

            rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
            rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
            rx_dropped = rx_total - rx;

            printf("Statistics for device %d\n"
                "-----------------------\n"
                "TX total:       %" PRIu64 "\n"
                "TX dropped:     %" PRIu64 "\n"
                "TX successful:  %" PRIu64 "\n"
                "RX total:       %" PRIu64 "\n"
                "RX dropped:     %" PRIu64 "\n"
                "RX successful:  %" PRIu64 "\n",
                vdev->vid,
                tx_total, tx_dropped, tx,
                rx_total, rx_dropped, rx);
        }

        printf("===================================================\n");
    }
}
static void
unregister_drivers(int socket_num)
{
    int i, ret;

    for (i = 0; i < socket_num; i++) {
        ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
        if (ret != 0)
            RTE_LOG(ERR, VHOST_CONFIG,
                "Fail to unregister vhost driver for %s.\n",
                socket_files + i * PATH_MAX);
    }
}
/* When we receive an INT signal, unregister the vhost driver */
static void
sigint_handler(__rte_unused int signum)
{
    /* Unregister vhost driver. */
    unregister_drivers(nb_sockets);

    exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how
 * many mbuf entries are enough for our use. FYI, here are some
 * guidelines:
 *
 * - Each rx queue would reserve @nr_rx_desc mbufs at queue setup stage
 *
 * - For each switch core (a CPU core that does the packet switching), we
 *   also need to reserve some mbufs for receiving the packets from the
 *   virtio Tx queue. How many is enough depends on the usage. It's
 *   normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So, we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should serve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, we have allocated
 *   enough mbufs to fill up the mbuf cache.
 */
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
    uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
    uint32_t nr_mbufs;
    uint32_t nr_mbufs_per_core;
    uint32_t mtu = 1500;

    if (mergeable)
        mtu = 9000;
    if (enable_tso)
        mtu = 64 * 1024;

    nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
            (mbuf_size - RTE_PKTMBUF_HEADROOM);
    nr_mbufs_per_core += nr_rx_desc;
    nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

    nr_mbufs  = nr_queues * nr_rx_desc;
    nr_mbufs += nr_mbufs_per_core * nr_switch_core;
    nr_mbufs *= nr_port;

    mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
                        nr_mbuf_cache, 0, mbuf_size,
                        rte_socket_id());
    if (mbuf_pool == NULL)
        rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
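
/*
 * Worked sizing example (illustrative only; assumes MAX_PKT_BURST == 32 and
 * the default mbuf data size of 2176 bytes with a 128-byte headroom): with
 * mtu = 1500, one switch core needs (1500 + 2176) * 32 / (2176 - 128) = 57
 * mbufs for virtio dequeue, plus nr_rx_desc (1024) for the NIC, i.e. about
 * 1081 mbufs per core, before the per-queue reservation of
 * nr_queues * nr_rx_desc is added on top.
 */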
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
    unsigned lcore_id, core_id = 0;
    unsigned nb_ports, valid_num_ports;
    int ret, i;
    uint16_t portid;
    static pthread_t tid;
    char thread_name[RTE_MAX_THREAD_NAME_LEN];
    uint64_t flags = 0;

    signal(SIGINT, sigint_handler);

    /* init EAL */
    ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;

    /* parse app arguments */
    ret = us_vhost_parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid argument\n");

    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
        TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

        if (rte_lcore_is_enabled(lcore_id))
            lcore_ids[core_id++] = lcore_id;
    }

    if (rte_lcore_count() > RTE_MAX_LCORE)
        rte_exit(EXIT_FAILURE, "Not enough cores\n");

    /* Get the number of physical ports. */
    nb_ports = rte_eth_dev_count();

    /*
     * Update the global var NUM_PORTS and global array PORTS
     * according to the number of system ports, and get the value of
     * VALID_NUM_PORTS.
     */
    valid_num_ports = check_ports_num(nb_ports);

    if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
        RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
            "but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }
    /*
     * FIXME: here we are trying to allocate mbufs big enough for
     * @MAX_QUEUES, but the truth is we're never going to use that
     * many queues here. We probably should only do allocation for
     * those queues we are going to use.
     */
    create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
             MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

    if (vm2vm_mode == VM2VM_HARDWARE) {
        /* Enable VT loop back to let the L2 switch do it. */
        vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
        RTE_LOG(DEBUG, VHOST_CONFIG,
            "Enable loop back for L2 switch in vmdq.\n");
    }

    /* initialize all ports */
    for (portid = 0; portid < nb_ports; portid++) {
        /* skip ports that are not enabled */
        if ((enabled_port_mask & (1 << portid)) == 0) {
            RTE_LOG(INFO, VHOST_PORT,
                "Skipping disabled port %d\n", portid);
            continue;
        }
        if (port_init(portid) != 0)
            rte_exit(EXIT_FAILURE,
                "Cannot initialize network ports\n");
    }

    /* Enable stats if the user option is set. */
    if (enable_stats) {
        ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
        if (ret != 0)
            rte_exit(EXIT_FAILURE,
                "Cannot create print-stats thread\n");

        /* Set thread_name for aid in debugging. */
        snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
        ret = rte_thread_setname(tid, thread_name);
        if (ret != 0)
            RTE_LOG(DEBUG, VHOST_CONFIG,
                "Cannot set print-stats name\n");
    }
    /* Launch all data cores. */
    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_remote_launch(switch_worker, NULL, lcore_id);

    if (client_mode)
        flags |= RTE_VHOST_USER_CLIENT;

    if (dequeue_zero_copy)
        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

    /* Register vhost user driver to handle vhost messages. */
    for (i = 0; i < nb_sockets; i++) {
        char *file = socket_files + i * PATH_MAX;
        ret = rte_vhost_driver_register(file, flags);
        if (ret != 0) {
            unregister_drivers(i);
            rte_exit(EXIT_FAILURE,
                "vhost driver register failure.\n");
        }

        if (builtin_net_driver)
            rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);

        if (mergeable == 0) {
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }

        if (enable_tx_csum == 0) {
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_CSUM);
        }

        if (enable_tso == 0) {
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_HOST_TSO4);
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_HOST_TSO6);
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_GUEST_TSO4);
            rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_GUEST_TSO6);
        }

        if (promiscuous) {
            rte_vhost_driver_enable_features(file,
                1ULL << VIRTIO_NET_F_CTRL_RX);
        }

        ret = rte_vhost_driver_callback_register(file,
            &virtio_net_device_ops);
        if (ret != 0) {
            rte_exit(EXIT_FAILURE,
                "failed to register vhost driver callbacks.\n");
        }

        if (rte_vhost_driver_start(file) < 0) {
            rte_exit(EXIT_FAILURE,
                "failed to start vhost driver.\n");
        }
    }

    RTE_LCORE_FOREACH_SLAVE(lcore_id)
        rte_eal_wait_lcore(lcore_id);

    return 0;
}