/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>

#include "main.h"
#ifndef MAX_QUEUES
#define MAX_QUEUES 128
#endif

/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1

#define MBUF_CACHE_SIZE	128
#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE

#define MAX_PKT_BURST 32		/* Max burst size for RX/TX */
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4		/* Number of retries on RX. */

#define JUMBO_FRAME_MAX_SIZE    0x2600

/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX			1
#define DEVICE_SAFE_REMOVE	2

/* Configurable number of RX/TX ring descriptors */
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512

#define INVALID_PORT_ID 0xFF

/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64

/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;

/*
 * Enable VM2VM communications. If this is disabled then the MAC address
 * compare is skipped.
 */
typedef enum {
	VM2VM_DISABLED = 0,
	VM2VM_SOFTWARE = 1,
	VM2VM_HARDWARE = 2,
	VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;

/* Enable stats. */
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;

/* Disable TX checksum offload */
static uint32_t enable_tx_csum;

/* Disable TSO offload */
static uint32_t enable_tso;

static int client_mode;
static int dequeue_zero_copy;

/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;

/* Socket file paths. Can be set by user. */
static char *socket_files;
static int nb_sockets;
/* Empty VMDQ configuration structure. Filled in programmatically. */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		/*
		 * VLAN strip is necessary for 1G NICs such as the I350;
		 * it fixes a bug where IPv4 forwarding in the guest could
		 * not forward packets from one virtio dev to another.
		 */
		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.rx_adv_conf = {
		/*
		 * should be overridden separately in code with
		 * appropriate values
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;

const uint16_t vlan_tags[] = {
	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
	1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};

/* ethernet addresses of ports */
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];

static struct vhost_dev_tailq_list vhost_dev_list =
	TAILQ_HEAD_INITIALIZER(vhost_dev_list);

static struct lcore_info lcore_info[RTE_MAX_LCORE];

/* Used for queueing bursts of TX packets. */
struct mbuf_table {
	unsigned len;
	unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* TX queue for each data core. */
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];

#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
				 / US_PER_S * BURST_TX_DRAIN_US)
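/*
 * A quick sanity check of the arithmetic above (illustrative numbers):
 * with a 2.4 GHz TSC, (2400000000 + 1000000 - 1) / 1000000 = 2400 cycles
 * per microsecond, so the drain period is 2400 * 100 = 240000 TSC cycles,
 * i.e. roughly 100 us. The "+ US_PER_S - 1" rounds the per-microsecond
 * cycle count up, so the period never undershoots on clocks that are not
 * an exact multiple of 1 MHz.
 */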
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;
	conf.rx_mode = def_conf->rx_mode;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
	return 0;
}
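/*
 * For example, with num_devices = 8 the loop above yields the VMDQ map
 * vlan 1000 -> pool 0, vlan 1001 -> pool 1, ..., vlan 1007 -> pool 7;
 * pool_map[i].pools is a bitmask of pools, hence the (1UL << i).
 */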
/*
 * Validate the device number against the max pool number obtained from
 * dev_info. If the device number is invalid, print an error message and
 * return -1. Each device must have its own pool.
 */
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
	if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
	}

	return 0;
}
/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
	int16_t rx_rings, tx_rings;
	uint16_t rx_ring_size, tx_ring_size;
	int retval;
	uint16_t q;

	/*
	 * The max pool number from dev_info is used to validate the pool
	 * number specified in the command line.
	 */
	rte_eth_dev_info_get(port, &dev_info);

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;
	rxconf->rx_drop_en = 1;

	/* Enable vlan offload */
	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;

	/* Configure the number of supported virtio devices based on VMDQ limits */
	num_devices = dev_info.max_vmdq_pools;

	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;

	/*
	 * When dequeue zero copy is enabled, the guest Tx used vring is
	 * updated only when the corresponding mbuf is freed. Thus, nb_tx_desc
	 * (tx_ring_size here) must be small enough so that the driver will
	 * hit the free threshold easily and free mbufs timely. Otherwise,
	 * the guest Tx vring would be starved.
	 */
	if (dequeue_zero_copy)
		tx_ring_size = 64;

	tx_rings = (uint16_t)rte_lcore_count();

	retval = validate_num_devices(MAX_DEVICES);
	if (retval < 0)
		return retval;

	/* Get port configuration. */
	retval = get_eth_conf(&port_conf, num_devices);
	if (retval < 0)
		return retval;

	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	num_vmdq_queues = num_devices * queues_per_pool;
	num_queues = num_pf_queues + num_vmdq_queues;
	vmdq_queue_base = dev_info.vmdq_queue_base;
	vmdq_pool_base = dev_info.vmdq_pool_base;
	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
		num_pf_queues, num_devices, queues_per_pool);

	if (port >= rte_eth_dev_count())
		return -1;

	if (enable_tx_csum == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);

	if (enable_tso == 0) {
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
	}

	rx_rings = (uint16_t)dev_info.max_rx_queues;
	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}

	/* Setup the queues. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf, mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}

	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}

	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
		(unsigned)port,
		vmdq_ports_eth_addr[port].addr_bytes[0],
		vmdq_ports_eth_addr[port].addr_bytes[1],
		vmdq_ports_eth_addr[port].addr_bytes[2],
		vmdq_ports_eth_addr[port].addr_bytes[3],
		vmdq_ports_eth_addr[port].addr_bytes[4],
		vmdq_ports_eth_addr[port].addr_bytes[5]);

	return 0;
}
/*
 * Set the socket file path.
 */
static int
us_vhost_parse_socket_path(const char *q_arg)
{
	char *old;

	/* Reject paths that do not fit, including the terminating NUL. */
	if (strnlen(q_arg, PATH_MAX) >= PATH_MAX)
		return -1;

	old = socket_files;
	socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
	if (socket_files == NULL) {
		free(old);
		return -1;
	}

	snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
	nb_sockets++;

	return 0;
}
/*
 * Parse the portmask provided at run time.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	errno = 0;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return 0;

	return pm;
}
/*
 * Parse num options at run time.
 */
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
	char *end = NULL;
	unsigned long num;

	errno = 0;

	/* parse unsigned int string */
	num = strtoul(q_arg, &end, 10);
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
		return -1;

	if (num > max_valid_value)
		return -1;

	return num;
}
/*
 * Display usage
 */
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
	"		--vm2vm [0|1|2]\n"
	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
	"		--socket-file <path>\n"
	"		-p PORTMASK: Set mask for ports to be used by application\n"
	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
	"		--rx-retry [0|1]: disable/enable(default) retries on RX. Enable retry if destination queue is full\n"
	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Takes effect only if RX retries are enabled\n"
	"		--rx-retry-num [0-N]: the number of retries on RX. Takes effect only if RX retries are enabled\n"
	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
	"		--socket-file: The path of the socket file.\n"
	"		--tx-csum [0|1]: disable/enable TX checksum offload.\n"
	"		--tso [0|1]: disable/enable TCP segmentation offload.\n"
	"		--client: register a vhost-user socket as client mode.\n"
	"		--dequeue-zero-copy: enable dequeue zero copy\n",
	       prgname);
}
/*
 * Parse the arguments given in the command line of the application.
 */
static int
us_vhost_parse_args(int argc, char **argv)
{
	int opt, ret;
	int option_index;
	unsigned i;
	const char *prgname = argv[0];
	static struct option long_option[] = {
		{"vm2vm", required_argument, NULL, 0},
		{"rx-retry", required_argument, NULL, 0},
		{"rx-retry-delay", required_argument, NULL, 0},
		{"rx-retry-num", required_argument, NULL, 0},
		{"mergeable", required_argument, NULL, 0},
		{"stats", required_argument, NULL, 0},
		{"socket-file", required_argument, NULL, 0},
		{"tx-csum", required_argument, NULL, 0},
		{"tso", required_argument, NULL, 0},
		{"client", no_argument, &client_mode, 1},
		{"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
		{NULL, 0, 0, 0},
	};

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P",
			long_option, &option_index)) != EOF) {
		switch (opt) {
		/* Portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			break;

		case 'P':
			promiscuous = 1;
			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
				ETH_VMDQ_ACCEPT_BROADCAST |
				ETH_VMDQ_ACCEPT_MULTICAST;
			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
			break;

		case 0:
			/* Enable/disable vm2vm comms. */
			if (!strncmp(long_option[option_index].name, "vm2vm",
				MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for "
						"vm2vm [0|1|2]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				vm2vm_mode = (vm2vm_type)ret;
			}

			/* Enable/disable retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_retry = ret;
			}

			/* Enable/disable TX checksum offload. */
			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tx_csum = ret;
			}

			/* Enable/disable TSO offload. */
			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_tso = ret;
			}

			/* Specify the retry delay time (in microseconds) on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_delay_time = ret;
			}

			/* Specify the number of retries on RX. */
			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				burst_rx_retry_num = ret;
			}

			/* Enable/disable RX mergeable buffers. */
			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, 1);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				mergeable = !!ret;
				if (ret) {
					vmdq_conf_default.rxmode.jumbo_frame = 1;
					vmdq_conf_default.rxmode.max_rx_pkt_len
						= JUMBO_FRAME_MAX_SIZE;
				}
			}

			/* Enable/disable stats. */
			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
				ret = parse_num_opt(optarg, INT32_MAX);
				if (ret == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
						"Invalid argument for stats [0..N]\n");
					us_vhost_usage(prgname);
					return -1;
				}
				enable_stats = ret;
			}

			/* Set socket file path. */
			if (!strncmp(long_option[option_index].name,
						"socket-file", MAX_LONG_OPT_SZ)) {
				if (us_vhost_parse_socket_path(optarg) == -1) {
					RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid argument for socket name (Max %d characters)\n",
					PATH_MAX);
					us_vhost_usage(prgname);
					return -1;
				}
			}

			break;

		/* Invalid option - print options. */
		default:
			us_vhost_usage(prgname);
			return -1;
		}
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (enabled_port_mask & (1 << i))
			ports[num_ports++] = (uint8_t)i;
	}

	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	return 0;
}
/*
 * Update the global vars num_ports and ports according to the number of
 * system ports, and return the number of valid ports.
 */
static unsigned check_ports_num(unsigned nb_ports)
{
	unsigned valid_num_ports = num_ports;
	unsigned portid;

	if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}

	for (portid = 0; portid < num_ports; portid++) {
		if (ports[portid] >= nb_ports) {
			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
				ports[portid], (nb_ports - 1));
			ports[portid] = INVALID_PORT_ID;
			valid_num_ports--;
		}
	}
	return valid_num_ports;
}
static inline struct vhost_dev *__attribute__((always_inline))
find_vhost_dev(struct ether_addr *mac)
{
	struct vhost_dev *vdev;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
	}

	return NULL;
}
/*
 * This function learns the MAC address of the device and registers it,
 * along with a vlan tag, to a VMDQ.
 */
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) device is using a registered MAC!\n",
			vdev->vid);
		return -1;
	}

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];

	/* Print out VMDQ registration info. */
	RTE_LOG(INFO, VHOST_DATA,
		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
		vdev->vid,
		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
		vdev->vlan_tag);

	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);

	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;

	return 0;
}
/*
 * Removes the MAC address and vlan tag from VMDQ. Ensures that nothing is
 * adding buffers to the RX queue before disabling RX on the device.
 */
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
	unsigned i = 0;
	unsigned rx_count;
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];

	if (vdev->ready == DEVICE_RX) {
		/* Clear MAC and VLAN settings. */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;

		/* Clear out the receive buffers. */
		rx_count = rte_eth_rx_burst(ports[0],
			(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
				(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}

		vdev->ready = DEVICE_MAC_LEARNING;
	}
}
static inline void __attribute__((always_inline))
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
	uint16_t ret;

	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}
/*
 * Check if the packet destination MAC address is for a local device. If so,
 * put the packet on that device's RX queue. If not, return.
 */
static inline int __attribute__((always_inline))
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return -1;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MACs are the same; dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}

	virtio_xmit(dst_vdev, vdev, m);
	return 0;
}
/*
 * Check if the destination MAC of a packet is one of the local VMs,
 * and if so, fetch its vlan tag and the length offset to restore.
 */
static inline int __attribute__((always_inline))
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
	if (!dst_vdev)
		return 0;

	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MACs are the same; dropping packet.\n",
			vdev->vid);
		return -1;
	}

	/*
	 * HW vlan strip reduces the packet length by the size of the vlan
	 * tag, so the length must be restored by adding it back.
	 */
	*offset  = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
		vdev->vid, dst_vdev->vid, *vlan_tag);

	return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
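/*
 * The offload contract assumed above: when PKT_TX_TCP_SEG is set, DPDK
 * PMDs expect m->l2_len/l3_len/l4_len (and tso_segsz) to be valid, the
 * IPv4 header checksum to be zeroed, and the TCP checksum to be seeded
 * with the pseudo-header checksum, which is what get_psd_sum() computes;
 * the NIC then writes the final checksums for each segment it emits.
 */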
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}

static inline void __attribute__((always_inline))
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */
static inline void __attribute__((always_inline))
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	uint32_t offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
		struct vhost_dev *vdev2;

		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
			virtio_xmit(vdev2, vdev, m);
		}
		goto queue2nic;
	}

	/* Check if the destination is a local VM. */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}

	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}

	RTE_LOG(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is external\n", vdev->vid);

queue2nic:

	/* Add packet to the port tx queue. */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;

		/*
		 * Find the right seg to adjust the data len when the offset
		 * is bigger than the tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;

				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;

				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}

		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
	if (enable_stats) {
		vdev->stats.tx_total++;
		vdev->stats.tx++;
	}

	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}
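/*
 * Packets headed for the NIC are thus flushed on two triggers: right
 * above, when an lcore's table fills up to MAX_PKT_BURST, and in
 * drain_mbuf_table() below, when MBUF_TABLE_DRAIN_TSC elapses while
 * packets are still queued.
 */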
static inline void __attribute__((always_inline))
drain_mbuf_table(struct mbuf_table *tx_q)
{
	static uint64_t prev_tsc;
	uint64_t cur_tsc;

	if (tx_q->len == 0)
		return;

	cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
		do_drain_mbuf_table(tx_q);
	}
}
static inline void __attribute__((always_inline))
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
	if (!rx_count)
		return;

	/*
	 * When "enable_retry" is set, we wait and retry here when there
	 * are not enough free slots in the queue to hold @rx_count
	 * packets, to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}

	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}

	free_pkts(pkts, rx_count);
}
static inline void __attribute__((always_inline))
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;

	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
					pkts, MAX_PKT_BURST);

	/* Setup VMDq for the first packet. */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
			free_pkts(pkts, count);
	}

	for (i = 0; i < count; ++i)
		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
/*
 * Main worker of vhost-switch. For each vhost device it does:
 *
 *    - drain_eth_rx(), which drains the host eth Rx queue linked to the
 *      vhost device and delivers the packets to the guest virtio Rx ring
 *      associated with this vhost device.
 *
 *    - drain_virtio_tx(), which drains the guest virtio Tx queue and
 *      delivers the packets to the target, which could be another vhost
 *      device or the physical eth dev. The routing is done in
 *      virtio_tx_route().
 */
static int
switch_worker(void *arg __rte_unused)
{
	unsigned i;
	unsigned lcore_id = rte_lcore_id();
	struct vhost_dev *vdev;
	struct mbuf_table *tx_q;

	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
			tx_q->txq_id = i;
			break;
		}
	}

	while (1) {
		drain_mbuf_table(tx_q);

		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use, if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;

		/* Process vhost devices. */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
				vdev->ready = DEVICE_SAFE_REMOVE;
				continue;
			}

			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
		}
	}

	return 0;
}
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the lcore
 * dev_removal_flag. The device is made volatile to avoid re-ordering of
 * dev->remove=1, which can cause an infinite loop in the rte_pause loop.
 */
static void
destroy_device(int vid)
{
	struct vhost_dev *vdev = NULL;
	int lcore;

	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->vid == vid)
			break;
	}
	if (!vdev)
		return;
	/* Set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE)
		rte_pause();

	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
		     lcore_vdev_entry);
	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);

	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/*
	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
	 * we can be sure that they can no longer access the device removed
	 * from the linked lists and that the devices are no longer in use.
	 */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}

	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n", vdev->vid);

	rte_free(vdev);
}
/*
 * A new device is added to a data core. First the device is added to the
 * main linked list and then allocated to a specific data core.
 */
static int
new_device(int vid)
{
	int lcore, core_add = 0;
	uint32_t device_num_min = num_devices;
	struct vhost_dev *vdev;

	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
			"(%d) couldn't allocate memory for vhost dev\n", vid);
		return -1;
	}
	vdev->vid = vid;

	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;

	/* Reset the ready flag. */
	vdev->ready = DEVICE_MAC_LEARNING;
	vdev->remove = 0;

	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
			device_num_min = lcore_info[lcore].device_num;
			core_add = lcore;
		}
	}
	vdev->coreid = core_add;

	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
			  lcore_vdev_entry);
	lcore_info[vdev->coreid].device_num++;

	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
		vid, vdev->coreid);

	return 0;
}
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has fully completed.
 */
static const struct virtio_net_device_ops virtio_net_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};
/*
 * This thread wakes up periodically and prints stats if the user has
 * enabled them.
 */
static void
print_stats(void)
{
	struct vhost_dev *vdev;
	uint64_t tx_dropped, rx_dropped;
	uint64_t tx, tx_total, rx, rx_total;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	while (1) {
		sleep(enable_stats);

		/* Clear screen and move to top left */
		printf("%s%s\n", clr, top_left);
		printf("Device statistics =================================\n");

		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
			tx_total   = vdev->stats.tx_total;
			tx         = vdev->stats.tx;
			tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;

			printf("Statistics for device %d\n"
				"-----------------------\n"
				"TX total:       %" PRIu64 "\n"
				"TX dropped:     %" PRIu64 "\n"
				"TX successful:  %" PRIu64 "\n"
				"RX total:       %" PRIu64 "\n"
				"RX dropped:     %" PRIu64 "\n"
				"RX successful:  %" PRIu64 "\n",
				vdev->vid,
				tx_total, tx_dropped, tx,
				rx_total, rx_dropped, rx);
		}

		printf("===================================================\n");
	}
}
static void
unregister_drivers(int socket_num)
{
	int i, ret;

	for (i = 0; i < socket_num; i++) {
		ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);
	}
}

/* When we receive a SIGINT signal, unregister the vhost driver. */
static void
sigint_handler(__rte_unused int signum)
{
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);

	exit(0);
}
/*
 * While creating an mbuf pool, one key thing is to figure out how many
 * mbuf entries are enough for our use. Here are some guidelines:
 *
 * - Each RX queue reserves @nr_rx_desc mbufs at queue setup stage.
 *
 * - For each switch core (a CPU core that does the packet switching),
 *   we also need to reserve some mbufs for receiving the packets from
 *   the virtio Tx queue. How many is enough depends on the usage; it is
 *   normally a simple calculation like the following:
 *
 *       MAX_PKT_BURST * max packet size / mbuf size
 *
 *   So we definitely need to allocate more mbufs when TSO is enabled.
 *
 * - Similarly, for each switching core, we should reserve @nr_rx_desc
 *   mbufs for receiving the packets from the physical NIC device.
 *
 * - We also need to make sure, for each switch core, that we have
 *   allocated enough mbufs to fill up the mbuf cache.
 */
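/*
 * A worked example of the formula below (illustrative numbers): with
 * mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE (2048 + 128 bytes of headroom =
 * 2176), mtu = 1500 and MAX_PKT_BURST = 32, the per-core reservation is
 * (1500 + 2176) * 32 / (2176 - 128) = 57 mbufs, plus nr_rx_desc, clamped
 * up to at least the mbuf cache size. With TSO (mtu = 64K) the same
 * formula gives about (65536 + 2176) * 32 / 2048 = 1058 mbufs per core.
 */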
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
	uint32_t nr_mbufs;
	uint32_t nr_mbufs_per_core;
	uint32_t mtu = 1500;

	/* Larger worst-case packets require more mbufs per burst. */
	if (mergeable)
		mtu = JUMBO_FRAME_MAX_SIZE;
	if (enable_tso)
		mtu = 64 * 1024;

	nr_mbufs_per_core = (mtu + mbuf_size) * MAX_PKT_BURST /
			(mbuf_size - RTE_PKTMBUF_HEADROOM);
	nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);

	nr_mbufs  = nr_queues * nr_rx_desc;
	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
	nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */
int
main(int argc, char *argv[])
{
	unsigned lcore_id, core_id = 0;
	unsigned nb_ports, valid_num_ports;
	int ret, i;
	uint8_t portid;
	static pthread_t tid;
	char thread_name[RTE_MAX_THREAD_NAME_LEN];
	uint64_t flags = 0;

	signal(SIGINT, sigint_handler);

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	argc -= ret;
	argv += ret;

	/* parse app arguments */
	ret = us_vhost_parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
	}

	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");

	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count();

	/*
	 * Update the global var num_ports and global array ports,
	 * and get the value of valid_num_ports according to the number
	 * of system ports.
	 */
	valid_num_ports = check_ports_num(nb_ports);

	if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}

	/*
	 * FIXME: here we are trying to allocate mbufs big enough for
	 * @MAX_QUEUES, but the truth is we're never going to use that
	 * many queues here. We probably should only do allocation for
	 * those queues we are going to use.
	 */
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);

	if (vm2vm_mode == VM2VM_HARDWARE) {
		/* Enable VT loopback to let the L2 switch do it. */
		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"Enable loop back for L2 switch in vmdq.\n");
	}

	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot initialize network ports\n");
	}

	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot create print-stats thread\n");

		/* Set thread_name for aid in debugging. */
		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
		ret = rte_thread_setname(tid, thread_name);
		if (ret != 0)
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Cannot set print-stats name\n");
	}

	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (mergeable == 0)
		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;

	if (dequeue_zero_copy)
		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		ret = rte_vhost_driver_register
			(socket_files + i * PATH_MAX, flags);
		if (ret != 0) {
			unregister_drivers(i);
			rte_exit(EXIT_FAILURE,
				 "vhost driver register failure.\n");
		}
	}

	rte_vhost_driver_callback_register(&virtio_net_device_ops);

	/* Start processing vhost-user messages; this call blocks. */
	rte_vhost_driver_session_start();

	return 0;
}