/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <sys/types.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gro_func.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip_psh_cksum.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>

#define VIRTIO_TX_MAX_CHAIN_LEN 127
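/*
 * Note: the cap above presumably follows from the indirect descriptor
 * scheme used later in this file, where a descriptor table carved out of a
 * single 2048-byte vlib_buffer_t can hold at most 128 16-byte descriptors.
 */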
#define foreach_virtio_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring") \
_(INDIRECT_DESC_ALLOC_FAILED, "indirect descriptor allocation failed - packet drop") \
_(OUT_OF_ORDER, "out-of-order buffers in used ring") \
_(GSO_PACKET_DROP, "gso disabled on itf -- gso packet drop") \
_(CSUM_OFFLOAD_PACKET_DROP, "checksum offload disabled on itf -- csum offload packet drop")
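/*
 * The _() entries above are expanded twice below: once into the
 * virtio_tx_func_error_t enum and once into the error string table that is
 * referenced by the device class registration at the end of this file.
 */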
typedef enum
{
#define _(f,s) VIRTIO_TX_ERROR_##f,
  foreach_virtio_tx_func_error
#undef _
    VIRTIO_TX_N_ERROR,
} virtio_tx_func_error_t;

static char *virtio_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_tx_func_error
#undef _
};
static u8 *
format_virtio_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);
  virtio_main_t *vim = &virtio_main;
  virtio_if_t *vif = vec_elt_at_index (vim->interfaces, dev_instance);
  vnet_virtio_vring_t *vring = 0;

  s = format (s, "VIRTIO interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
      s = format (s, "\n%U RX QUEUE : Total Packets", format_white_space,
                  indent + 2);
      vec_foreach (vring, vif->rxq_vrings)
        {
          s = format (s, "\n%U %8u : %llu", format_white_space, indent + 4,
                      RX_QUEUE_ACCESS (vring->queue_id), vring->total_packets);
        }
      s = format (s, "\n%U TX QUEUE : Total Packets", format_white_space,
                  indent + 2);
      vec_foreach (vring, vif->txq_vrings)
        {
          s = format (s, "\n%U %8u : %llu", format_white_space, indent + 4,
                      TX_QUEUE_ACCESS (vring->queue_id), vring->total_packets);
        }
    }
  return s;
}
typedef struct
{
  u32 buffer_index;
  u32 sw_if_index;
  generic_header_offset_t gho;
  vlib_buffer_t buffer;
} virtio_tx_trace_t;

static u8 *
format_virtio_tx_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  virtio_tx_trace_t *t = va_arg (*va, virtio_tx_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "%Ubuffer 0x%x: %U\n", format_white_space, indent,
              t->buffer_index, format_vnet_buffer_no_chain, &t->buffer);
  s = format (s, "%U%U\n", format_white_space, indent,
              format_generic_header_offset, &t->gho);
  s = format (s, "%U%U", format_white_space, indent,
              format_ethernet_header_with_length, t->buffer.pre_data,
              sizeof (t->buffer.pre_data));
  return s;
}
static void
virtio_tx_trace (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b0,
                 u32 bi, int is_tun)
{
  virtio_tx_trace_t *t;
  t = vlib_add_trace (vm, node, b0, sizeof (t[0]));
  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
  t->buffer_index = bi;
  clib_memset (&t->gho, 0, sizeof (t->gho));
  if (is_tun)
    {
      int is_ip4 = 0, is_ip6 = 0;

      switch (((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0)
        {
        case 0x40:
          is_ip4 = 1;
          break;
        case 0x60:
          is_ip6 = 1;
          break;
        }
      vnet_generic_header_offset_parser (b0, &t->gho, 0, is_ip4, is_ip6);
    }
  else
    vnet_generic_header_offset_parser (b0, &t->gho, 1,
                                       b0->flags & VNET_BUFFER_F_IS_IP4,
                                       b0->flags & VNET_BUFFER_F_IS_IP6);

  clib_memcpy_fast (&t->buffer, b0, sizeof (*b0) - sizeof (b0->pre_data));
  clib_memcpy_fast (t->buffer.pre_data, vlib_buffer_get_current (b0),
                    sizeof (t->buffer.pre_data));
}
static void
virtio_interface_drop_inline (vlib_main_t *vm, virtio_if_t *vif,
                              uword node_index, u32 *buffers, u16 n,
                              virtio_tx_func_error_t error)
{
  vlib_error_count (vm, node_index, error, n);
  vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
                                   VNET_INTERFACE_COUNTER_DROP,
                                 vm->thread_index, vif->sw_if_index, n);
  vlib_buffer_free (vm, buffers, n);
}
static void
virtio_memset_ring_u32 (u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    clib_memset_u32 (ring + start, ~0, n_buffers);
  else
    {
      clib_memset_u32 (ring + start, ~0, ring_size - start);
      clib_memset_u32 (ring, ~0, n_buffers - (ring_size - start));
    }
}
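/*
 * Example: with ring_size = 256, start = 250 and n_buffers = 10, the helper
 * above resets slots 250..255 and then wraps around to reset slots 0..3.
 */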
static void
virtio_free_used_device_desc_split (vlib_main_t *vm,
                                    vnet_virtio_vring_t *vring,
                                    uword node_index)
{
  u16 used = vring->desc_in_use;
  u16 sz = vring->queue_size;
  u16 mask = sz - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = vring->used->idx - last;
  u16 out_of_order_count = 0;

  if (n_left == 0)
    return;

  while (n_left)
    {
      vnet_virtio_vring_used_elem_t *e = &vring->used->ring[last & mask];
      u16 slot, n_buffers;
      slot = n_buffers = e->id;

      while (e->id == (n_buffers & mask))
        {
          n_left--;
          last++;
          n_buffers++;
          vnet_virtio_vring_desc_t *d = &vring->desc[e->id];
          u16 next;
          while (d->flags & VRING_DESC_F_NEXT)
            {
              n_buffers++;
              next = d->next;
              d = &vring->desc[next];
            }
          if (n_left == 0)
            break;
          e = &vring->used->ring[last & mask];
        }
      vlib_buffer_free_from_ring (vm, vring->buffers, slot,
                                  sz, (n_buffers - slot));
      virtio_memset_ring_u32 (vring->buffers, slot, sz, (n_buffers - slot));
      used -= (n_buffers - slot);

      if (n_left > 0)
        {
          vlib_buffer_free (vm, &vring->buffers[e->id], 1);
          vring->buffers[e->id] = ~0;
          used--;
          last++;
          n_left--;
          out_of_order_count++;
          vring->flags |= VRING_TX_OUT_OF_ORDER;
        }
    }

  /*
   * Some vhost-backends give buffers back in out-of-order fashion in used ring.
   * It impacts the overall virtio-performance.
   */
  if (out_of_order_count)
    vlib_error_count (vm, node_index, VIRTIO_TX_ERROR_OUT_OF_ORDER,
                      out_of_order_count);

  vring->desc_in_use = used;
  vring->last_used_idx = last;
}
static void
virtio_free_used_device_desc_packed (vlib_main_t *vm,
                                     vnet_virtio_vring_t *vring,
                                     uword node_index)
{
  vnet_virtio_vring_packed_desc_t *d;
  u16 sz = vring->queue_size;
  u16 last = vring->last_used_idx;
  u16 n_buffers = 0, start;
  u16 flags;

  if (vring->desc_in_use == 0)
    return;

  d = &vring->packed_desc[last];
  flags = d->flags;
  start = d->id;
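  /*
   * In a packed ring, the device marks a descriptor as consumed by making
   * both VRING_DESC_F_AVAIL (bit 7) and VRING_DESC_F_USED (bit 15) of the
   * descriptor flags match the current used wrap counter, which is what
   * the loop below checks for.
   */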
  while ((flags & VRING_DESC_F_AVAIL) == (vring->used_wrap_counter << 7) &&
         (flags & VRING_DESC_F_USED) == (vring->used_wrap_counter << 15))
    {
      last++;
      n_buffers++;
      if (last >= sz)
        {
          last = 0;
          vring->used_wrap_counter ^= 1;
        }
      d = &vring->packed_desc[last];
      flags = d->flags;
    }

  vlib_buffer_free_from_ring (vm, vring->buffers, start, sz, n_buffers);
  virtio_memset_ring_u32 (vring->buffers, start, sz, n_buffers);
  vring->desc_in_use -= n_buffers;
  vring->last_used_idx = last;
}
static void
virtio_free_used_device_desc (vlib_main_t *vm, vnet_virtio_vring_t *vring,
                              uword node_index, int packed)
{
  if (packed)
    virtio_free_used_device_desc_packed (vm, vring, node_index);
  else
    virtio_free_used_device_desc_split (vm, vring, node_index);
}
static void
set_checksum_offsets (vlib_buffer_t *b, vnet_virtio_net_hdr_v1_t *hdr,
                      const int is_l2)
{
  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
  i16 l4_hdr_offset = vnet_buffer (b)->l4_hdr_offset - b->current_data;
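  /*
   * virtio_net_hdr contract: the device computes a checksum over the packet
   * starting at csum_start and stores it at csum_start + csum_offset, so the
   * code below points csum_start at the l4 header and csum_offset at the
   * TCP/UDP checksum field.
   */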
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = l4_hdr_offset; // 0x22;

      /*
       * virtio devices do not support IP4 checksum offload. So driver takes
       * care of it while doing tx.
       */
      ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);

      /*
       * virtio devices assume the l4 header is set to the checksum of the
       * l3 pseudo-header, so we compute it before tx-ing
       */
      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          tcp_header_t *tcp =
            (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
          tcp->checksum = ip4_pseudo_header_cksum (ip4);
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          udp_header_t *udp =
            (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
          udp->checksum = ip4_pseudo_header_cksum (ip4);
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      ip6_header_t *ip6;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = l4_hdr_offset; // 0x36;
      ip6 = (ip6_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);

      /*
       * virtio devices assume the l4 header is set to the checksum of the
       * l3 pseudo-header, so we compute it before tx-ing
       */
      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
        {
          tcp_header_t *tcp =
            (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
          tcp->checksum = ip6_pseudo_header_cksum (ip6);
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
        {
          udp_header_t *udp =
            (udp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
          udp->checksum = ip6_pseudo_header_cksum (ip6);
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
    }
}
static void
set_gso_offsets (vlib_buffer_t *b, vnet_virtio_net_hdr_v1_t *hdr,
                 const int is_l2)
{
  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
  i16 l4_hdr_offset = vnet_buffer (b)->l4_hdr_offset - b->current_data;
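  /*
   * For GSO the device additionally needs gso_size (the MSS of each
   * resulting segment) and hdr_len (the total l2/l3/l4 header length that
   * has to be replicated in front of every segment); both are filled in
   * below.
   */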
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
      hdr->gso_size = vnet_buffer2 (b)->gso_size;
      hdr->hdr_len = l4_hdr_offset + vnet_buffer2 (b)->gso_l4_hdr_sz;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = l4_hdr_offset; // 0x22;
      hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      ip4 = (ip4_header_t *) (b->data + vnet_buffer (b)->l3_hdr_offset);
      /*
       * virtio devices do not support IP4 checksum offload. So driver takes care
       * of it while doing tx.
       */
      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
      hdr->gso_size = vnet_buffer2 (b)->gso_size;
      hdr->hdr_len = l4_hdr_offset + vnet_buffer2 (b)->gso_l4_hdr_sz;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = l4_hdr_offset; // 0x36;
      hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
    }
}
static u16
add_buffer_to_slot (vlib_main_t *vm, vlib_node_runtime_t *node,
                    virtio_if_t *vif, vnet_virtio_vring_t *vring, u32 bi,
                    u16 free_desc_count, u16 avail, u16 next, u16 mask,
                    int hdr_sz, int do_gso, int csum_offload, int is_pci,
                    int is_tun, int is_indirect, int is_any_layout)
{
  u16 n_added = 0;
  vnet_virtio_vring_desc_t *d;
  int is_l2 = !is_tun;
  d = &vring->desc[next];
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
  u32 drop_inline = ~0;

  clib_memset_u8 (hdr, 0, hdr_sz);

  vring->total_packets++;
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (do_gso)
        set_gso_offsets (b, hdr, is_l2);
      else
        {
          drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
          goto done;
        }
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
    {
      if (csum_offload)
        set_checksum_offsets (b, hdr, is_l2);
      else
        {
          drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
          goto done;
        }
    }

  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
    {
      virtio_tx_trace (vm, node, b, bi, is_tun);
    }

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    {
      d->addr = ((is_pci) ? vlib_buffer_get_current_pa (vm, b) :
                 pointer_to_uword (vlib_buffer_get_current (b))) - hdr_sz;
      d->len = b->current_length + hdr_sz;
      d->flags = 0;
    }
  else if (is_indirect)
    {
      /*
       * We are using single vlib_buffer_t for indirect descriptor(s)
       * chain. Single descriptor is 16 bytes and vlib_buffer_t
       * has 2048 bytes space. So maximum long chain can have 128
       * (=2048/16) indirect descriptors.
       * It can easily support 65535 bytes of Jumbo frames with
       * each data buffer size of 512 bytes minimum.
       */
      u32 indirect_buffer = 0;
      if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
        {
          drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
          goto done;
        }

      vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
      indirect_desc->current_data = 0;
      indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
      indirect_desc->next_buffer = bi;
      bi = indirect_buffer;

      vnet_virtio_vring_desc_t *id =
        (vnet_virtio_vring_desc_t *) vlib_buffer_get_current (indirect_desc);
      u16 count = 1;
      if (is_pci)
        {
          d->addr = vlib_physmem_get_pa (vm, id);
          id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;

          /*
           * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
           * should be presented in separate descriptor and data will start
           * from next descriptor.
           */
          if (is_any_layout)
            id->len = b->current_length + hdr_sz;
          else
            {
              id->len = hdr_sz;
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
              if (PREDICT_FALSE (count == VIRTIO_TX_MAX_CHAIN_LEN))
                {
                  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
                    vlib_error_count (vm, node->node_index,
                                      VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
                  break;
                }
            }
        }
      else /* VIRTIO_IF_TYPE_[TAP | TUN] */
        {
          d->addr = pointer_to_uword (id);
          /* first buffer in chain */
          id->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
          id->len = b->current_length + hdr_sz;

          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = pointer_to_uword (vlib_buffer_get_current (b));
              id->len = b->current_length;
              if (PREDICT_FALSE (count == VIRTIO_TX_MAX_CHAIN_LEN))
                {
                  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
                    vlib_error_count (vm, node->node_index,
                                      VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
                  break;
                }
            }
        }
      id->flags = 0;
      id->next = 0;
      d->len = count * sizeof (vnet_virtio_vring_desc_t);
      d->flags = VRING_DESC_F_INDIRECT;
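      /*
       * The ring descriptor itself now points at the indirect table: its
       * len field carries the table size in bytes (count descriptors of 16
       * bytes each) and only VRING_DESC_F_INDIRECT is set on it.
       */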
    }
  else /* does not support indirect descriptors */
    {
      vlib_buffer_t *b_temp = b;
      u16 n_buffers_in_chain = 1;

      /*
       * Check the length of the chain for the required number of
       * descriptors. Return from here, retry to get more descriptors,
       * if chain length is greater than available descriptors.
       */
      while (b_temp->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          n_buffers_in_chain++;
          b_temp = vlib_get_buffer (vm, b_temp->next_buffer);
        }

      if (n_buffers_in_chain > free_desc_count)
        return n_buffers_in_chain;

      d->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
      d->len = b->current_length + hdr_sz;
      u16 count = next;

      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          d->flags = VRING_DESC_F_NEXT;
          vring->buffers[count] = bi;
          b->flags &=
            ~(VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID);
          bi = b->next_buffer;
          count = (count + 1) & mask;
          d->next = count;
          d = &vring->desc[count];
          b = vlib_get_buffer (vm, bi);
          d->addr = vlib_buffer_get_current_pa (vm, b);
          d->len = b->current_length;
        }
      d->flags = 0;
      vring->buffers[count] = bi;
      vring->avail->ring[avail & mask] = next;
      n_added = n_buffers_in_chain;
      goto done;
    }

  vring->buffers[next] = bi;
  vring->avail->ring[avail & mask] = next;
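  /*
   * Only the head descriptor index is published via the avail ring; the
   * device finds the rest of a chain by following VRING_DESC_F_NEXT links
   * or by walking the indirect table.
   */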
  n_added++;

done:
  if (drop_inline != ~0)
    virtio_interface_drop_inline (vm, vif, node->node_index, &bi, 1,
                                  drop_inline);

  return n_added;
}
static u16
add_buffer_to_slot_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
                           virtio_if_t *vif, vnet_virtio_vring_t *vring,
                           u32 bi, u16 next, int hdr_sz, int do_gso,
                           int csum_offload, int is_pci, int is_tun,
                           int is_indirect, int is_any_layout)
{
  u16 n_added = 0, flags = 0;
  int is_l2 = !is_tun;
  vnet_virtio_vring_packed_desc_t *d = &vring->packed_desc[next];
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
  u32 drop_inline = ~0;

  clib_memset (hdr, 0, hdr_sz);
  vring->total_packets++;

  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (do_gso)
        set_gso_offsets (b, hdr, is_l2);
      else
        {
          drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
          goto done;
        }
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD)
    {
      if (csum_offload)
        set_checksum_offsets (b, hdr, is_l2);
      else
        {
          drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
          goto done;
        }
    }
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
    {
      virtio_tx_trace (vm, node, b, bi, is_tun);
    }

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    {
      d->addr =
        ((is_pci) ? vlib_buffer_get_current_pa (vm, b) :
         pointer_to_uword (vlib_buffer_get_current (b))) - hdr_sz;
      d->len = b->current_length + hdr_sz;
    }
  else if (is_indirect)
    {
      /*
       * We are using single vlib_buffer_t for indirect descriptor(s)
       * chain. Single descriptor is 16 bytes and vlib_buffer_t
       * has 2048 bytes space. So maximum long chain can have 128
       * (=2048/16) indirect descriptors.
       * It can easily support 65535 bytes of Jumbo frames with
       * each data buffer size of 512 bytes minimum.
       */
      u32 indirect_buffer = 0;
      if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
        {
          drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
          goto done;
        }

      vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
      indirect_desc->current_data = 0;
      indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
      indirect_desc->next_buffer = bi;
      bi = indirect_buffer;
      vnet_virtio_vring_packed_desc_t *id =
        (vnet_virtio_vring_packed_desc_t *) vlib_buffer_get_current (
          indirect_desc);
      u16 count = 1;
      if (is_pci)
        {
          d->addr = vlib_physmem_get_pa (vm, id);
          id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;

          /*
           * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
           * should be presented in separate descriptor and data will start
           * from next descriptor.
           */
          if (is_any_layout)
            id->len = b->current_length + hdr_sz;
          else
            {
              id->len = hdr_sz;
              count++;
              id++;
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
              if (PREDICT_FALSE (count == VIRTIO_TX_MAX_CHAIN_LEN))
                {
                  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
                    vlib_error_count (vm, node->node_index,
                                      VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
                  break;
                }
            }
        }
      d->len = count * sizeof (vnet_virtio_vring_packed_desc_t);
      flags = VRING_DESC_F_INDIRECT;
    }

  if (vring->avail_wrap_counter)
    {
      flags |= VRING_DESC_F_AVAIL;
      flags &= ~VRING_DESC_F_USED;
    }
  else
    {
      flags &= ~VRING_DESC_F_AVAIL;
      flags |= VRING_DESC_F_USED;
    }
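  /*
   * A packed descriptor is handed to the device by setting
   * VRING_DESC_F_AVAIL equal to the driver's avail wrap counter and
   * VRING_DESC_F_USED to the opposite value; the device later flips
   * F_USED to match once it has consumed the descriptor.
   */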
  vring->buffers[next] = bi;
  d->id = next;
  d->flags = flags;
  n_added++;

done:
  if (drop_inline != ~0)
    virtio_interface_drop_inline (vm, vif, node->node_index, &bi, 1,
                                  drop_inline);

  return n_added;
}
static u16
virtio_interface_tx_packed_gso_inline (
  vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif,
  virtio_if_type_t type, vnet_virtio_vring_t *vring, u32 *buffers, u16 n_left,
  const int do_gso, const int csum_offload)
{
  int is_pci = (type == VIRTIO_IF_TYPE_PCI);
  int is_tun = (type == VIRTIO_IF_TYPE_TUN);
  int is_indirect =
    ((vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) != 0);
  int is_any_layout =
    ((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
  const int hdr_sz = vif->virtio_net_hdr_sz;
  u16 sz = vring->queue_size;
  u16 used, next, n_buffers = 0, n_buffers_left = 0;
  u16 n_vectors = n_left;

  used = vring->desc_in_use;
  next = vring->desc_next;

  if (vif->packet_buffering)
    {
      n_buffers = n_buffers_left = virtio_vring_n_buffers (vring->buffering);

      while (n_buffers_left && used < sz)
        {
          u16 n_added = 0;

          u32 bi = virtio_vring_buffering_read_from_front (vring->buffering);
          if (bi == ~0)
            break;

          n_added = add_buffer_to_slot_packed (
            vm, node, vif, vring, bi, next, hdr_sz, do_gso, csum_offload,
            is_pci, is_tun, is_indirect, is_any_layout);
          n_buffers_left--;
          if (PREDICT_FALSE (n_added == 0))
            continue;

          used++;
          next++;
          if (next >= sz)
            {
              next = 0;
              vring->avail_wrap_counter ^= 1;
            }
          virtio_txq_clear_scheduled (vring);
        }
    }
  while (n_left && used < sz)
    {
      u16 n_added = 0;

      n_added = add_buffer_to_slot_packed (
        vm, node, vif, vring, buffers[0], next, hdr_sz, do_gso, csum_offload,
        is_pci, is_tun, is_indirect, is_any_layout);
      buffers++;
      n_left--;
      if (PREDICT_FALSE (n_added == 0))
        continue;

      used++;
      next++;
      if (next >= sz)
        {
          next = 0;
          vring->avail_wrap_counter ^= 1;
        }
    }

  if (n_left != n_vectors || n_buffers != n_buffers_left)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      vring->desc_next = next;
      vring->desc_in_use = used;
      CLIB_MEMORY_BARRIER ();
      if (vring->device_event->flags != VRING_EVENT_F_DISABLE)
        virtio_kick (vm, vring, vif);
    }

  return n_left;
}
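/*
 * When the backend completes buffers out of order (see VRING_TX_OUT_OF_ORDER
 * above), free descriptors are no longer one contiguous region, so the helper
 * below scans the buffers array for ~0 entries, starting from the
 * next-descriptor hint, to find a usable run of free slots.
 */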
static void
virtio_find_free_desc (vnet_virtio_vring_t *vring, u16 size, u16 mask, u16 req,
                       u16 next, u32 *first_free_desc_index,
                       u16 *free_desc_count)
{
  u16 start = 0;
  /* next is used as hint: from where to start looking */
  for (u16 i = 0; i < size; i++, next++)
    {
      if (vring->buffers[next & mask] == ~0)
        {
          if (*first_free_desc_index == ~0)
            {
              *first_free_desc_index = (next & mask);
              start = i;
              (*free_desc_count)++;
              req--;
              if (req == 0)
                break;
            }
          else if (start + *free_desc_count == i)
            {
              (*free_desc_count)++;
              req--;
              if (req == 0)
                break;
            }
          else
            break;
        }
    }
}
static u16
virtio_interface_tx_split_gso_inline (vlib_main_t *vm,
                                      vlib_node_runtime_t *node,
                                      virtio_if_t *vif, virtio_if_type_t type,
                                      vnet_virtio_vring_t *vring, u32 *buffers,
                                      u16 n_left, int do_gso, int csum_offload)
{
  u16 used, next, avail, n_buffers = 0, n_buffers_left = 0;
  int is_pci = (type == VIRTIO_IF_TYPE_PCI);
  int is_tun = (type == VIRTIO_IF_TYPE_TUN);
  int is_indirect =
    ((vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) != 0);
  int is_any_layout =
    ((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
  u16 sz = vring->queue_size;
  int hdr_sz = vif->virtio_net_hdr_sz;
  u16 mask = sz - 1;
  u16 n_vectors = n_left;

  used = vring->desc_in_use;
  next = vring->desc_next;
  avail = vring->avail->idx;

  u16 free_desc_count = 0;

  if (PREDICT_FALSE (vring->flags & VRING_TX_OUT_OF_ORDER))
    {
      u32 first_free_desc_index = ~0;

      virtio_find_free_desc (vring, sz, mask, n_left, next,
                             &first_free_desc_index, &free_desc_count);
      if (free_desc_count)
        next = first_free_desc_index;
    }
  else
    free_desc_count = sz - used;
  if (vif->packet_buffering)
    {
      n_buffers = n_buffers_left = virtio_vring_n_buffers (vring->buffering);

      while (n_buffers_left && free_desc_count)
        {
          u16 n_added = 0;

          u32 bi = virtio_vring_buffering_read_from_front (vring->buffering);
          if (bi == ~0)
            break;

          n_added = add_buffer_to_slot (vm, node, vif, vring, bi,
                                        free_desc_count, avail, next, mask,
                                        hdr_sz, do_gso, csum_offload, is_pci,
                                        is_tun, is_indirect, is_any_layout);
          if (PREDICT_FALSE (n_added == 0))
            {
              n_buffers_left--;
              continue;
            }
          else if (PREDICT_FALSE (n_added > free_desc_count))
            break;

          avail++;
          next = (next + n_added) & mask;
          used += n_added;
          n_buffers_left--;
          free_desc_count -= n_added;

          virtio_txq_clear_scheduled (vring);
        }
    }

  while (n_left && free_desc_count)
    {
      u16 n_added = 0;

      n_added =
        add_buffer_to_slot (vm, node, vif, vring, buffers[0], free_desc_count,
                            avail, next, mask, hdr_sz, do_gso, csum_offload,
                            is_pci, is_tun, is_indirect, is_any_layout);

      if (PREDICT_FALSE (n_added == 0))
        {
          buffers++;
          n_left--;
          continue;
        }
      else if (PREDICT_FALSE (n_added > free_desc_count))
        break;

      avail++;
      next = (next + n_added) & mask;
      used += n_added;
      buffers++;
      n_left--;
      free_desc_count -= n_added;
    }

  if (n_left != n_vectors || n_buffers != n_buffers_left)
    {
      clib_atomic_store_seq_cst (&vring->avail->idx, avail);
      vring->desc_next = next;
      vring->desc_in_use = used;
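      /*
       * Kick the backend only if it has not set VRING_USED_F_NO_NOTIFY;
       * honouring that flag avoids an unnecessary notification (typically an
       * eventfd write or doorbell) per burst.
       */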
      if ((clib_atomic_load_seq_cst (&vring->used->flags) &
           VRING_USED_F_NO_NOTIFY) == 0)
        virtio_kick (vm, vring, vif);
    }

  return n_left;
}
static u16
virtio_interface_tx_gso_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                virtio_if_t *vif, virtio_if_type_t type,
                                vnet_virtio_vring_t *vring, u32 *buffers,
                                u16 n_left, int packed, int do_gso,
                                int csum_offload)
{
  if (packed)
    return virtio_interface_tx_packed_gso_inline (vm, node, vif, type, vring,
                                                   buffers, n_left,
                                                   do_gso, csum_offload);
  else
    return virtio_interface_tx_split_gso_inline (vm, node, vif, type, vring,
                                                 buffers, n_left,
                                                 do_gso, csum_offload);
}
static u16
virtio_interface_tx_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                            virtio_if_t *vif, vnet_virtio_vring_t *vring,
                            virtio_if_type_t type, u32 *buffers, u16 n_left,
                            int packed)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);

  if (hw->caps & VNET_HW_IF_CAP_TCP_GSO)
    return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
                                           buffers, n_left, packed,
                                           1 /* do_gso */ ,
                                           1 /* checksum offload */ );
  else if (hw->caps & VNET_HW_IF_CAP_L4_TX_CKSUM)
    return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
                                           buffers, n_left, packed,
                                           0 /* no do_gso */ ,
                                           1 /* checksum offload */ );
  else
    return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
                                           buffers, n_left, packed,
                                           0 /* no do_gso */ ,
                                           0 /* no checksum offload */ );
}
VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
                                               vlib_node_runtime_t * node,
                                               vlib_frame_t * frame)
{
  virtio_main_t *nm = &virtio_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  u16 qid = tf->queue_id;
  vnet_virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, qid);
  u16 n_left = frame->n_vectors;
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 to[GRO_TO_VECTOR_SIZE (n_left)];
  int packed = vif->is_packed;
  u16 n_vectors = frame->n_vectors;

  if (tf->shared_queue)
    clib_spinlock_lock (&vring->lockp);

  if (vif->packet_coalesce)
    {
      n_vectors = n_left =
        vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
      buffers = to;
      virtio_txq_clear_scheduled (vring);
    }

  u16 retry_count = 2;
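  /*
   * The retry loop below first reclaims descriptors the device has already
   * consumed, then tries to enqueue the frame; if the ring is still full it
   * retries (up to retry_count times) before buffering or dropping whatever
   * is left.
   */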
retry:
  /* free consumed buffers */
  virtio_free_used_device_desc (vm, vring, node->node_index, packed);

  if (vif->type == VIRTIO_IF_TYPE_TAP)
    n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_TAP,
                                         &buffers[n_vectors - n_left],
                                         n_left, packed);
  else if (vif->type == VIRTIO_IF_TYPE_PCI)
    n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_PCI,
                                         &buffers[n_vectors - n_left],
                                         n_left, packed);
  else if (vif->type == VIRTIO_IF_TYPE_TUN)
    n_left = virtio_interface_tx_inline (vm, node, vif, vring,
                                         VIRTIO_IF_TYPE_TUN,
                                         &buffers[n_vectors - n_left],
                                         n_left, packed);
  else
    ASSERT (0);

  if (n_left && retry_count--)
    goto retry;

  if (vif->packet_buffering && n_left)
    {
      u16 n_buffered = virtio_vring_buffering_store_packets (
        vring->buffering, &buffers[n_vectors - n_left], n_left);
      n_left -= n_buffered;
    }
  if (n_left)
    virtio_interface_drop_inline (vm, vif, node->node_index,
                                  &buffers[n_vectors - n_left], n_left,
                                  VIRTIO_TX_ERROR_NO_FREE_SLOTS);

  if (tf->shared_queue)
    clib_spinlock_unlock (&vring->lockp);

  return frame->n_vectors - n_left;
}
static void
virtio_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                                u32 node_index)
{
  virtio_main_t *apm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      vif->per_interface_next_index = node_index;
      return;
    }

  vif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), virtio_input_node.index,
                        node_index);
}
static void
virtio_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

static void
virtio_set_rx_interrupt (virtio_if_t *vif, vnet_virtio_vring_t *vring)
{
  if (vif->is_packed)
    vring->driver_event->flags &= ~VRING_EVENT_F_DISABLE;
  else
    vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}

static void
virtio_set_rx_polling (virtio_if_t *vif, vnet_virtio_vring_t *vring)
{
  if (vif->is_packed)
    vring->driver_event->flags |= VRING_EVENT_F_DISABLE;
  else
    vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
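/*
 * Interrupt suppression lives in different places for the two ring layouts:
 * packed rings use the driver event structure (VRING_EVENT_F_DISABLE), while
 * split rings set VRING_AVAIL_F_NO_INTERRUPT in the avail ring flags.
 */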
static clib_error_t *
virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                 vnet_hw_if_rx_mode mode)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  vnet_virtio_vring_t *rx_vring = vec_elt_at_index (vif->rxq_vrings, qid);

  if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
    {
      virtio_set_rx_polling (vif, rx_vring);
      return clib_error_return (0, "interrupt mode is not supported");
    }

  if (mode == VNET_HW_IF_RX_MODE_POLLING)
    virtio_set_rx_polling (vif, rx_vring);
  else
    virtio_set_rx_interrupt (vif, rx_vring);

  rx_vring->mode = mode;

  return 0;
}
static clib_error_t *
virtio_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    {
      vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, vif->hw_if_index,
                                   VNET_HW_INTERFACE_FLAG_LINK_UP);
    }
  else
    {
      vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, vif->hw_if_index, 0);
    }

  return 0;
}
VNET_DEVICE_CLASS (virtio_device_class) = {
  .name = "virtio",
  .format_device_name = format_virtio_device_name,
  .format_device = format_virtio_device,
  .format_tx_trace = format_virtio_tx_trace,
  .tx_function_n_errors = VIRTIO_TX_N_ERROR,
  .tx_function_error_strings = virtio_tx_func_error_strings,
  .rx_redirect_to_node = virtio_set_interface_next_node,
  .clear_counters = virtio_clear_hw_interface_counters,
  .admin_up_down_function = virtio_interface_admin_up_down,
  .rx_mode_change_function = virtio_interface_rx_mode_change,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */