/*
 *------------------------------------------------------------------
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. To avoid corrupting memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 40 in case the code
 * goes into the inner loop for a maximum of 64k frames, which may require
 * additional array entries.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
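
/*
 * Illustrative arithmetic (assuming VHOST_USER_COPY_ARRAY_N is 4096, its
 * value in vhost_user.h at the time of writing): the threshold is
 * 4096 - 40 = 4056 entries, leaving 40 spare slots to absorb the copy
 * orders queued by one more pass through the inner descriptor loop before
 * the flush happens.
 */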

extern vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error      \
  _(NONE, "no error")  \
  _(NOT_READY, "vhost vring not ready")  \
  _(DOWN, "vhost interface is down")  \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")  \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
  VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
					      hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
		hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return clib_atomic_test_and_set (vui->vring_locks[qid]);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  clib_atomic_release (vui->vring_locks[qid]);
}
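
/*
 * A sketch of the usage pattern followed by the TX path below (not an
 * additional API):
 *
 *   if (PREDICT_FALSE (vui->use_tx_spinlock))
 *     vhost_user_vring_lock (vui, qid);
 *   ... enqueue onto vui->vrings[qid] ...
 *   vhost_user_vring_unlock (vui, qid);
 *
 * The lock only matters when several worker threads can share one queue.
 */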

static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
	return 1;
      while (PREDICT_TRUE (copy_len >= 4))
	{
	  dst0 = dst2;
	  dst1 = dst3;
	  if (PREDICT_FALSE
	      (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
	    return 1;

	  CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
	  CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

	  clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
	  clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

	  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
	  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
	return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
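
/*
 * Note on the loop structure above: the copies are software-pipelined two
 * ahead. Each iteration maps the guest destinations and prefetches the
 * sources for entries 2 and 3 while copying entries 0 and 1, which were
 * mapped in the previous iteration, hiding most of the map and prefetch
 * latency behind the memcpy work.
 */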

static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
			      virtio_net_hdr_t * hdr)
{
  /* checksum offload */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
	{
	  if ((b->flags & VNET_BUFFER_F_IS_IP4) &&
	      (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
	    }
	  else if ((b->flags & VNET_BUFFER_F_IS_IP6) &&
		   (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
	    }
	}
      else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
	       (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
	{
	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
	  hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
	}
    }
}
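
/*
 * Example header produced above for a large IPv4 TCP packet with GSO and an
 * MSS of 1448 (the numbers are illustrative only):
 *   hdr->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM
 *   hdr->csum_start  = l4_hdr_offset (34 for Ethernet + 20-byte IPv4)
 *   hdr->csum_offset = 16 (offsetof (tcp_header_t, checksum))
 *   hdr->gso_size    = 1448
 *   hdr->gso_type    = VIRTIO_NET_HDR_GSO_TCPV4
 * The guest driver then performs the segmentation and checksum fill-in.
 */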

VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
						   vlib_node_runtime_t *
						   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
					       thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }

  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
	}

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}

      desc_table = rxvq->desc;
      desc_head = desc_index =
	rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
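
      /*
       * qsz_mask is queue_size - 1; virtio split-ring queue sizes are
       * powers of two, so the free-running 16-bit last_avail_idx is masked
       * rather than taken modulo to find the ring slot.
       */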

      /* Go deeper in case of indirect descriptor.
       * I don't know of any driver providing indirect descriptors for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
	{
	  if (PREDICT_FALSE
	      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  if (PREDICT_FALSE
	      (!(desc_table =
		 map_guest_mem (vui, rxvq->desc[desc_index].addr,
				&map_hint))))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
	// Get a header from the header array
	virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
	tx_headers_len++;
	hdr->hdr.flags = 0;
	hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
	hdr->num_buffers = 1;	//This is local, no need to check

	/* Does the guest support csum offload? */
	if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM))
	  vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

	// Prepare a copy order executed later for the header
	vhost_copy_t *cpy = &cpu->copy[copy_len];
	copy_len++;
	cpy->len = vui->virtio_net_hdr_sz;
	cpy->dst = buffer_map_addr;
	cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
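
      /*
       * Inner loop: walk the guest descriptor chain and the vlib buffer
       * chain in lockstep, queueing one copy order per overlapping span.
       * buffer_len tracks the room left in the current guest descriptor,
       * bytes_left the data left in the current vlib buffer.
       */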
      current_b0 = b0;
      while (1)
	{
	  if (buffer_len == 0)
	    {			//Get new output
	      if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
		{
		  //Next one is chained
		  desc_index = desc_table[desc_index].next;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
		{
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &cpu->tx_headers[tx_headers_len - 1];

		  //Move from available to used buffer
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
		    desc_head;
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
		    desc_len;
		  vhost_user_log_dirty_ring (vui, rxvq,
					     ring[rxvq->last_used_idx &
						  rxvq->qsz_mask]);

		  rxvq->last_avail_idx++;
		  rxvq->last_used_idx++;
		  hdr->num_buffers++;
		  desc_len = 0;

		  if (PREDICT_FALSE
		      (rxvq->last_avail_idx == rxvq->avail->idx))
		    {
		      //Dequeue queued descriptors for this packet
		      rxvq->last_used_idx -= hdr->num_buffers - 1;
		      rxvq->last_avail_idx -= hdr->num_buffers - 1;
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  desc_table = rxvq->desc;
		  desc_head = desc_index =
		    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
		  if (PREDICT_FALSE
		      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
		    {
		      //It is seriously unlikely that a driver will put an
		      //indirect descriptor after a non-indirect one.
		      if (PREDICT_FALSE
			  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
			  goto done;
			}
		      if (PREDICT_FALSE
			  (!(desc_table =
			     map_guest_mem (vui,
					    rxvq->desc[desc_index].addr,
					    &map_hint))))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
			  goto done;
			}
		      desc_index = 0;
		    }
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  {
	    vhost_copy_t *cpy = &cpu->copy[copy_len];
	    copy_len++;
	    cpy->len = bytes_left;
	    cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	    cpy->dst = buffer_map_addr;
	    cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	      current_b0->current_length - bytes_left;

	    bytes_left -= cpy->len;
	    buffer_len -= cpy->len;
	    buffer_map_addr += cpy->len;
	    desc_len += cpy->len;

	    CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  // Check if vlib buffer has more data. If not, get more or break.
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  //End of packet, we just have to close the descriptor
		  break;
		}
	    }
	}

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
				 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;
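
      /*
       * last_used_idx is only a shadow copy at this point; the guest does
       * not see the completed buffers until rxvq->used->idx is updated
       * after the deferred memory copies below.
       */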
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
	}

      n_left--;			//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and memory corruption
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
						 &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_BARRIER ();
	  rxvq->used->idx = rxvq->last_used_idx;
	  vhost_user_log_dirty_ring (vui, rxvq, idx);
	}

      buffers++;
    }
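
  /*
   * Both flush points (the periodic one above and the final one below)
   * follow the same pattern: a memory barrier orders the used-ring entry
   * writes ahead of the used->idx store, so the guest never observes an
   * index covering entries that are still being written.
   */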
done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
					 &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets
   * that have already been processed than to drop them and fetch
   * more fresh packets with a good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffers to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, rxvq);
    }
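
  /*
   * Interrupt coalescing example (illustrative numbers, assuming the
   * default vum->coalesce_frames of 32): a guest that keeps interrupts
   * enabled receives at most one eventfd kick per 32 enqueued packets
   * rather than one per frame.
   */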
  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters
	 + VNET_INTERFACE_COUNTER_DROP,
	 thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
				     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
	{
	  // We cannot support interrupt mode if the driver opts out
	  return clib_error_return (0, "Driver does not support interrupt");
	}
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
	{
	  vum->ifq_count++;
	  // Start the timer if this is the first encounter on an interrupt
	  // interface/queue
	  if ((vum->ifq_count == 1) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_START_TIMER, 0);
	}
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
	   (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
	  vum->ifq_count)
	{
	  vum->ifq_count--;
	  // Stop the timer if there is no more interrupt interface/queue
	  if ((vum->ifq_count == 0) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_STOP_TIMER, 0);
	}
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
	   (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
		  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}
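
/*
 * Note on the used->flags handling above: setting VRING_USED_F_NO_NOTIFY
 * tells the guest driver that it need not kick the backend when it enqueues
 * TX buffers, which is exactly the polling case; clearing the flag
 * re-enables guest notifications for the interrupt and adaptive modes.
 */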

static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
				 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}

VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */