/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gro_func.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>
#define foreach_virtio_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring") \
_(INDIRECT_DESC_ALLOC_FAILED, "indirect descriptor allocation failed - packet drop") \
_(OUT_OF_ORDER, "out-of-order buffers in used ring") \
_(GSO_PACKET_DROP, "gso disabled on itf -- gso packet drop") \
_(CSUM_OFFLOAD_PACKET_DROP, "checksum offload disabled on itf -- csum offload packet drop")
typedef enum
{
#define _(f,s) VIRTIO_TX_ERROR_##f,
  foreach_virtio_tx_func_error
#undef _
    VIRTIO_TX_N_ERROR,
} virtio_tx_func_error_t;
static char *virtio_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_tx_func_error
#undef _
};
static u8 *
format_virtio_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);

  s = format (s, "VIRTIO interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}
typedef struct
{
  u32 buffer_index;
  u32 sw_if_index;
  vlib_buffer_t buffer;
  generic_header_offset_t gho;
} virtio_tx_trace_t;
static u8 *
format_virtio_tx_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  virtio_tx_trace_t *t = va_arg (*va, virtio_tx_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "%U ", format_generic_header_offset, &t->gho);
  s = format (s, "%Ubuffer 0x%x: %U",
              format_white_space, indent,
              t->buffer_index, format_vnet_buffer, &t->buffer);

  s = format (s, "\n%U%U", format_white_space, indent,
              format_ethernet_header_with_length, t->buffer.pre_data,
              sizeof (t->buffer.pre_data));
  return s;
}
static_always_inline void
virtio_interface_drop_inline (vlib_main_t * vm, uword node_index,
                              u32 * buffers, u16 n,
                              virtio_tx_func_error_t error)
{
  vlib_error_count (vm, node_index, error, n);
  vlib_buffer_free (vm, buffers, n);
}
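
/*
 * Reset a (possibly wrapping) region of the vring's buffer-index ring to
 * ~0, the "no buffer" sentinel used by the free-descriptor search below.
 */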
static_always_inline void
virtio_memset_ring_u32 (u32 * ring, u32 start, u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      clib_memset_u32 (ring + start, ~0, n_buffers);
    }
  else
    {
      clib_memset_u32 (ring + start, ~0, ring_size - start);
      clib_memset_u32 (ring, ~0, n_buffers - (ring_size - start));
    }
}
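
/*
 * Walk the used ring and release the tx buffers the device has consumed.
 * Consecutive ("in-order") completions are freed in bulk from the buffer
 * ring; out-of-order completions (seen with some vhost backends) are freed
 * one at a time, their slots invalidated with ~0 and counted against the
 * OUT_OF_ORDER error.
 */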
static_always_inline void
virtio_free_used_device_desc (vlib_main_t * vm, virtio_vring_t * vring,
                              uword node_index)
{
  u16 used = vring->desc_in_use;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = vring->used->idx - last;
  u16 out_of_order_count = 0;

  if (n_left == 0)
    return;

  while (n_left)
    {
      struct vring_used_elem *e = &vring->used->ring[last & mask];
      u16 slot, n_buffers;
      slot = n_buffers = e->id;

      while (e->id == (n_buffers & mask))
        {
          n_left--;
          last++;
          n_buffers++;
          struct vring_desc *d = &vring->desc[e->id];
          u16 next;
          while (d->flags & VRING_DESC_F_NEXT)
            {
              n_buffers++;
              next = d->next;
              d = &vring->desc[next];
            }
          if (n_left == 0)
            break;
          e = &vring->used->ring[last & mask];
        }
      vlib_buffer_free_from_ring (vm, vring->buffers, slot,
                                  sz, (n_buffers - slot));
      virtio_memset_ring_u32 (vring->buffers, slot, sz, (n_buffers - slot));
      used -= (n_buffers - slot);

      if (n_left > 0)
        {
          vlib_buffer_free (vm, &vring->buffers[e->id], 1);
          vring->buffers[e->id] = ~0;
          used--;
          last++;
          n_left--;
          out_of_order_count++;
          vring->flags |= VRING_TX_OUT_OF_ORDER;
        }
    }

  /*
   * Some vhost-backends give buffers back in out-of-order fashion in used
   * ring. It impacts the overall virtio-performance.
   */
  if (out_of_order_count)
    vlib_error_count (vm, node_index, VIRTIO_TX_ERROR_OUT_OF_ORDER,
                      out_of_order_count);

  vring->desc_in_use = used;
  vring->last_used_idx = last;
}
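
/*
 * Translate VPP checksum-offload buffer flags into a virtio_net_hdr:
 * csum_start points at the l4 header, csum_offset at the checksum field
 * inside it, and the device computes the l4 checksum on transmit.
 */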
static_always_inline void
set_checksum_offsets (vlib_buffer_t * b, struct virtio_net_hdr_v1 *hdr,
                      int is_l2)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      generic_header_offset_t gho = { 0 };
      vnet_generic_header_offset_parser (b, &gho, is_l2, 1 /* ip4 */ ,
                                         0 /* ip6 */ );
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;      // 0x22
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }

      /*
       * virtio devices do not support IP4 checksum offload. So driver takes
       * care of it while doing tx.
       */
      ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      generic_header_offset_t gho = { 0 };
      vnet_generic_header_offset_parser (b, &gho, is_l2, 0 /* ip4 */ ,
                                         1 /* ip6 */ );
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;      // 0x36
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
    }
}
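
/*
 * Fill in the GSO fields of the virtio_net_hdr (segment size, total header
 * length, TCPv4/TCPv6 segmentation type) in addition to the checksum
 * offsets, so the device or backend can resegment the packet on transmit.
 */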
static_always_inline void
set_gso_offsets (vlib_buffer_t * b, struct virtio_net_hdr_v1 *hdr, int is_l2)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      generic_header_offset_t gho = { 0 };
      vnet_generic_header_offset_parser (b, &gho, is_l2, 1 /* ip4 */ ,
                                         0 /* ip6 */ );
      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
      hdr->gso_size = vnet_buffer2 (b)->gso_size;
      hdr->hdr_len = gho.hdr_sz;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;      // 0x22
      hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      /*
       * virtio devices do not support IP4 checksum offload. So driver takes
       * care of it while doing tx.
       */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      generic_header_offset_t gho = { 0 };
      vnet_generic_header_offset_parser (b, &gho, is_l2, 0 /* ip4 */ ,
                                         1 /* ip6 */ );
      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
      hdr->gso_size = vnet_buffer2 (b)->gso_size;
      hdr->hdr_len = gho.hdr_sz;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;      // 0x36
      hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
    }
}
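
/*
 * Enqueue one buffer (or buffer chain) at descriptor slot 'next'.
 * Returns the number of descriptors consumed: 0 means the packet was
 * dropped (offload not permitted, or indirect descriptor allocation
 * failed); a value larger than free_desc_count asks the caller to reclaim
 * more used descriptors and retry.
 */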
static_always_inline u16
add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
                    virtio_if_type_t type, virtio_vring_t * vring,
                    u32 bi, u16 free_desc_count,
                    u16 avail, u16 next, u16 mask, int do_gso,
                    int csum_offload, uword node_index)
{
  u16 n_added = 0;
  int hdr_sz = vif->virtio_net_hdr_sz;
  struct vring_desc *d;
  d = &vring->desc[next];
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  struct virtio_net_hdr_v1 *hdr = vlib_buffer_get_current (b) - hdr_sz;
  int is_l2 = (type & (VIRTIO_IF_TYPE_TAP | VIRTIO_IF_TYPE_PCI));

  clib_memset (hdr, 0, hdr_sz);

  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (do_gso)
        set_gso_offsets (b, hdr, is_l2);
      else
        {
          virtio_interface_drop_inline (vm, node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_GSO_PACKET_DROP);
          return n_added;
        }
    }
  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
                       VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
    {
      if (csum_offload)
        set_checksum_offsets (b, hdr, is_l2);
      else
        {
          virtio_interface_drop_inline (vm, node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP);
          return n_added;
        }
    }

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    {
      d->addr =
        ((type == VIRTIO_IF_TYPE_PCI) ? vlib_buffer_get_current_pa (vm,
                                                                    b) :
         pointer_to_uword (vlib_buffer_get_current (b))) - hdr_sz;
      d->len = b->current_length + hdr_sz;
      d->flags = 0;
    }
  else if (vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC))
    {
      /*
       * We are using single vlib_buffer_t for indirect descriptor(s)
       * chain. Single descriptor is 16 bytes and vlib_buffer_t
       * has 2048 bytes space. So maximum long chain can have 128
       * (=2048/16) indirect descriptors.
       * It can easily support 65535 bytes of Jumbo frames with
       * each data buffer size of 512 bytes minimum.
       */
      u32 indirect_buffer = 0;
      if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
        {
          virtio_interface_drop_inline (vm, node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED);
          return n_added;
        }

      vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
      indirect_desc->current_data = 0;
      indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
      indirect_desc->next_buffer = bi;
      bi = indirect_buffer;

      struct vring_desc *id =
        (struct vring_desc *) vlib_buffer_get_current (indirect_desc);
      u32 count = 1;
      if (type == VIRTIO_IF_TYPE_PCI)
        {
          d->addr = vlib_physmem_get_pa (vm, id);
          id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;

          /*
           * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
           * should be presented in separate descriptor and data will start
           * from next descriptor.
           */
          if (PREDICT_TRUE
              (vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)))
            id->len = b->current_length + hdr_sz;
          else
            {
              id->len = hdr_sz;
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
        }
      else                      /* VIRTIO_IF_TYPE_[TAP | TUN] */
        {
          d->addr = pointer_to_uword (id);
          /* first buffer in chain */
          id->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
          id->len = b->current_length + hdr_sz;

          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = pointer_to_uword (vlib_buffer_get_current (b));
              id->len = b->current_length;
            }
        }
      id->flags = 0;
      id->next = 0;
      d->len = count * sizeof (struct vring_desc);
      d->flags = VRING_DESC_F_INDIRECT;
    }
  else if (type == VIRTIO_IF_TYPE_PCI)
    {
      u16 count = next;
      vlib_buffer_t *b_temp = b;
      u16 n_buffers_in_chain = 1;

      /*
       * Check the length of the chain for the required number of
       * descriptors. Return from here, retry to get more descriptors,
       * if chain length is greater than available descriptors.
       */
      while (b_temp->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          n_buffers_in_chain++;
          b_temp = vlib_get_buffer (vm, b_temp->next_buffer);
        }

      if (n_buffers_in_chain > free_desc_count)
        return n_buffers_in_chain;

      d->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
      d->len = b->current_length + hdr_sz;

      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          d->flags = VRING_DESC_F_NEXT;
          vring->buffers[count] = bi;
          b->flags &=
            ~(VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID);
          bi = b->next_buffer;
          b->next_buffer = 0;
          n_added++;
          count = (count + 1) & mask;
          d->next = count;
          d = &vring->desc[count];
          b = vlib_get_buffer (vm, bi);
          d->addr = vlib_buffer_get_current_pa (vm, b);
          d->len = b->current_length;
        }
      d->flags = 0;
      vring->buffers[count] = bi;
      vring->avail->ring[avail & mask] = next;
      n_added++;
      return n_added;
    }
  else
    {
      ASSERT (0);
    }
  vring->buffers[next] = bi;
  vring->avail->ring[avail & mask] = next;
  n_added++;
  return n_added;
}
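
/*
 * Scan the buffer ring for a contiguous run of free slots (marked ~0),
 * using 'next' as a hint for where to start. Needed only when the backend
 * has returned descriptors out of order, so desc_next no longer bounds
 * the free region.
 */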
static_always_inline void
virtio_find_free_desc (virtio_vring_t * vring, u16 size, u16 mask,
                       u16 req, u16 next, u32 * first_free_desc_index,
                       u16 * free_desc_count)
{
  u16 start = 0;
  /* next is used as hint: from where to start looking */
  for (u16 i = 0; i < size; i++, next++)
    {
      if (vring->buffers[next & mask] == ~0)
        {
          if (*first_free_desc_index == ~0)
            {
              *first_free_desc_index = (next & mask);
              start = i;
              (*free_desc_count)++;
              req--;
              if (req == 0)
                break;
            }
          else
            {
              if (start + *free_desc_count == i)
                {
                  (*free_desc_count)++;
                  req--;
                  if (req == 0)
                    break;
                }
              else
                break;
            }
        }
    }
}
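
/*
 * Main tx routine: optionally coalesces packets (GRO), reclaims used
 * descriptors, then enqueues each frame buffer into the avail ring and
 * kicks the device when new descriptors have been posted. On ring
 * exhaustion it retries a bounded number of times after freeing more used
 * descriptors, then drops the remainder with NO_FREE_SLOTS.
 */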
static_always_inline uword
virtio_interface_tx_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame, virtio_if_t * vif,
                                virtio_if_type_t type, int do_gso,
                                int csum_offload, int do_gro)
{
  u16 n_left = frame->n_vectors;
  virtio_vring_t *vring;
  u16 qid = vm->thread_index % vif->num_txqs;
  vring = vec_elt_at_index (vif->txq_vrings, qid);
  u16 used, next, avail;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u16 retry_count = 2;
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 to[GRO_TO_VECTOR_SIZE (n_left)];

  clib_spinlock_lock_if_init (&vring->lockp);

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0 &&
      (vring->last_kick_avail_idx != vring->avail->idx))
    virtio_kick (vm, vring, vif);

  if (do_gro)
    {
      n_left = vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
      buffers = to;
    }

retry:
  /* free consumed buffers */
  virtio_free_used_device_desc (vm, vring, node->node_index);

  used = vring->desc_in_use;
  next = vring->desc_next;
  avail = vring->avail->idx;

  u16 free_desc_count = 0;

  if (PREDICT_FALSE (vring->flags & VRING_TX_OUT_OF_ORDER))
    {
      u32 first_free_desc_index = ~0;

      virtio_find_free_desc (vring, sz, mask, n_left, next,
                             &first_free_desc_index, &free_desc_count);

      if (free_desc_count)
        next = first_free_desc_index;
    }
  else
    free_desc_count = sz - used;

  while (n_left && free_desc_count)
    {
      u16 n_added = 0;
      virtio_tx_trace_t *t;

      vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[0]);
      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t = vlib_add_trace (vm, node, b0, sizeof (t[0]));
          t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
          t->buffer_index = buffers[0];
          if (type == VIRTIO_IF_TYPE_TUN)
            {
              int is_ip4 = 0, is_ip6 = 0;

              switch (((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0)
                {
                case 0x40:
                  is_ip4 = 1;
                  break;
                case 0x60:
                  is_ip6 = 1;
                  break;
                }
              vnet_generic_header_offset_parser (b0, &t->gho, 0, is_ip4,
                                                 is_ip6);
            }
          else
            vnet_generic_header_offset_parser (b0, &t->gho, 1,
                                               b0->flags &
                                               VNET_BUFFER_F_IS_IP4,
                                               b0->flags &
                                               VNET_BUFFER_F_IS_IP6);

          clib_memcpy_fast (&t->buffer, b0,
                            sizeof (*b0) - sizeof (b0->pre_data));
          clib_memcpy_fast (t->buffer.pre_data, vlib_buffer_get_current (b0),
                            sizeof (t->buffer.pre_data));
        }
      n_added =
        add_buffer_to_slot (vm, vif, type, vring, buffers[0],
                            free_desc_count, avail, next, mask, do_gso,
                            csum_offload, node->node_index);

      if (PREDICT_FALSE (n_added == 0))
        {
          buffers++;
          n_left--;
          continue;
        }
      else if (PREDICT_FALSE (n_added > free_desc_count))
        break;

      avail++;
      next = (next + n_added) & mask;
      used += n_added;
      buffers++;
      n_left--;
      free_desc_count -= n_added;
    }

  if (n_left != frame->n_vectors)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      vring->avail->idx = avail;
      vring->desc_next = next;
      vring->desc_in_use = used;
      if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0)
        virtio_kick (vm, vring, vif);
    }

  if (n_left)
    {
      if (retry_count--)
        goto retry;

      virtio_interface_drop_inline (vm, node->node_index,
                                    buffers, n_left,
                                    VIRTIO_TX_ERROR_NO_FREE_SLOTS);
    }

  clib_spinlock_unlock_if_init (&vring->lockp);

  return frame->n_vectors - n_left;
}
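
/*
 * Select a specialization of the gso tx routine from the negotiated hw
 * interface capabilities, so the per-packet branches on do_gso and
 * csum_offload are resolved at compile time.
 */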
static_always_inline uword
virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif,
                            virtio_if_type_t type)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);

  if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
    return virtio_interface_tx_gso_inline (vm, node, frame, vif, type,
                                           1 /* do_gso */ ,
                                           1 /* checksum offload */ ,
                                           vif->packet_coalesce);
  else if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
    return virtio_interface_tx_gso_inline (vm, node, frame, vif, type,
                                           0 /* no do_gso */ ,
                                           1 /* checksum offload */ ,
                                           0 /* no do_gro */ );
  else
    return virtio_interface_tx_gso_inline (vm, node, frame, vif, type,
                                           0 /* no do_gso */ ,
                                           0 /* no checksum offload */ ,
                                           0 /* no do_gro */ );
}
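
/*
 * Device-class tx entry point: dispatch on the interface type so each
 * variant (TAP, PCI, TUN) gets its own specialized inline instantiation.
 */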
VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
                                               vlib_node_runtime_t * node,
                                               vlib_frame_t * frame)
{
  virtio_main_t *nm = &virtio_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);

  if (vif->type == VIRTIO_IF_TYPE_TAP)
    return virtio_interface_tx_inline (vm, node, frame, vif,
                                       VIRTIO_IF_TYPE_TAP);
  else if (vif->type == VIRTIO_IF_TYPE_PCI)
    return virtio_interface_tx_inline (vm, node, frame, vif,
                                       VIRTIO_IF_TYPE_PCI);
  else if (vif->type == VIRTIO_IF_TYPE_TUN)
    return virtio_interface_tx_inline (vm, node, frame, vif,
                                       VIRTIO_IF_TYPE_TUN);
  else
    ASSERT (0);

  return 0;
}
static void
virtio_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                                u32 node_index)
{
  virtio_main_t *apm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      vif->per_interface_next_index = node_index;
      return;
    }

  vif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), virtio_input_node.index,
                        node_index);
}
static void
virtio_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}
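
/*
 * On the avail ring, VIRTIO_RING_FLAG_MASK_INT acts as
 * VRING_AVAIL_F_NO_INTERRUPT: set while polling (no rx interrupts needed),
 * cleared in interrupt mode. Packet coalescing (GRO) is enabled only in
 * poll mode, where the flow table is serviced regularly.
 */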
static clib_error_t *
virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                 vnet_hw_interface_rx_mode mode)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);

  if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
    {
      vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
      return clib_error_return (0, "interrupt mode is not supported");
    }

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      /* only enable packet coalesce in poll mode */
      gro_flow_table_set_is_enable (vring->flow_table, 1);
      vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
    }
  else
    {
      gro_flow_table_set_is_enable (vring->flow_table, 0);
      vring->avail->flags &= ~VIRTIO_RING_FLAG_MASK_INT;
    }

  return 0;
}
static clib_error_t *
virtio_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    {
      vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, vif->hw_if_index,
                                   VNET_HW_INTERFACE_FLAG_LINK_UP);
    }
  else
    {
      vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, vif->hw_if_index, 0);
    }

  return 0;
}
static clib_error_t *
virtio_subif_add_del_function (vnet_main_t * vnm,
                               u32 hw_if_index,
                               struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (virtio_device_class) = {
  .name = "virtio",
  .format_device_name = format_virtio_device_name,
  .format_device = format_virtio_device,
  .format_tx_trace = format_virtio_tx_trace,
  .tx_function_n_errors = VIRTIO_TX_N_ERROR,
  .tx_function_error_strings = virtio_tx_func_error_strings,
  .rx_redirect_to_node = virtio_set_interface_next_node,
  .clear_counters = virtio_clear_hw_interface_counters,
  .admin_up_down_function = virtio_interface_admin_up_down,
  .subif_add_del_function = virtio_subif_add_del_function,
  .rx_mode_change_function = virtio_interface_rx_mode_change,
};
/* *INDENT-ON* */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */