/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <sys/types.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/vnet.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gso.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>

#define foreach_virtio_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring") \
_(INDIRECT_DESC_ALLOC_FAILED, "indirect descriptor allocation failed - packet drop") \
_(OUT_OF_ORDER, "out-of-order buffers in used ring") \
_(GSO_PACKET_DROP, "gso disabled on itf -- gso packet drop") \
_(CSUM_OFFLOAD_PACKET_DROP, "checksum offload disabled on itf -- csum offload packet drop")

typedef enum
{
#define _(f,s) VIRTIO_TX_ERROR_##f,
  foreach_virtio_tx_func_error
#undef _
    VIRTIO_TX_N_ERROR,
} virtio_tx_func_error_t;

static char *virtio_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_tx_func_error
#undef _
};

static u8 *
format_virtio_device (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  u32 indent = format_get_indent (s);

  s = format (s, "VIRTIO interface");
  if (verbose)
    {
      s = format (s, "\n%U instance %u", format_white_space, indent + 2,
                  dev_instance);
    }
  return s;
}

static u8 *
format_virtio_tx_trace (u8 * s, va_list * args)
{
  s = format (s, "Unimplemented...");
  return s;
}

static_always_inline void
virtio_interface_drop_inline (vlib_main_t * vm, uword node_index,
                              u32 * buffers, u16 n,
                              virtio_tx_func_error_t error)
{
  vlib_error_count (vm, node_index, error, n);
  vlib_buffer_free (vm, buffers, n);
}

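/* Mark a range of tx ring slots as free (~0), handling ring wrap-around. */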
static_always_inline void
virtio_memset_ring_u32 (u32 * ring, u32 start, u32 ring_size, u32 n_buffers)
{
  ASSERT (n_buffers <= ring_size);

  if (PREDICT_TRUE (start + n_buffers <= ring_size))
    {
      clib_memset_u32 (ring + start, ~0, n_buffers);
    }
  else
    {
      clib_memset_u32 (ring + start, ~0, ring_size - start);
      clib_memset_u32 (ring, ~0, n_buffers - (ring_size - start));
    }
}

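/*
 * Walk the used ring and reclaim descriptors the device has completed:
 * contiguous runs of buffers are freed in bulk, out-of-order completions
 * one at a time, and the reclaimed slots are reset to ~0 in vring->buffers.
 */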
static_always_inline void
virtio_free_used_device_desc (vlib_main_t * vm, virtio_vring_t * vring,
                              uword node_index)
{
  u16 used = vring->desc_in_use;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u16 last = vring->last_used_idx;
  u16 n_left = vring->used->idx - last;
  u16 out_of_order_count = 0;

  if (n_left == 0)
    return;

  while (n_left)
    {
      struct vring_used_elem *e = &vring->used->ring[last & mask];
      u16 slot, n_buffers;
      slot = n_buffers = e->id;

      while (e->id == (n_buffers & mask))
        {
          n_left--;
          last++;
          n_buffers++;
          if (n_left == 0)
            break;
          e = &vring->used->ring[last & mask];
        }
      vlib_buffer_free_from_ring (vm, vring->buffers, slot,
                                  sz, (n_buffers - slot));
      virtio_memset_ring_u32 (vring->buffers, slot, sz, (n_buffers - slot));
      used -= (n_buffers - slot);

      if (n_left > 0)
        {
          vlib_buffer_free (vm, &vring->buffers[e->id], 1);
          vring->buffers[e->id] = ~0;
          used--;
          last++;
          n_left--;
          out_of_order_count++;
          vring->flags |= VRING_TX_OUT_OF_ORDER;
        }
    }

  /*
   * Some vhost-backends give buffers back in out-of-order fashion in used ring.
   * It impacts the overall virtio-performance.
   */
  if (out_of_order_count)
    vlib_error_count (vm, node_index, VIRTIO_TX_ERROR_OUT_OF_ORDER,
                      out_of_order_count);

  vring->desc_in_use = used;
  vring->last_used_idx = last;
}

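/*
 * Translate the buffer's checksum-offload flags into virtio_net_hdr
 * csum_start/csum_offset. The IPv4 header checksum is computed here
 * because virtio has no IP checksum offload.
 */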
static_always_inline void
set_checksum_offsets (vlib_buffer_t * b, struct virtio_net_hdr_v1 *hdr)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	// 0x22;
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }

      /*
       * virtio devices do not support IP4 checksum offload. So driver takes care
       * of it while doing tx.
       */
      ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	// 0x36;
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
        }
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        {
          hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
        }
    }
}

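/*
 * Fill the virtio_net_hdr for a GSO (TSO) packet: gso type and size,
 * header length, and TCP checksum offsets. The IPv4 header checksum is
 * again computed here since the device will not do it for us.
 */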
static_always_inline void
set_gso_offsets (vlib_buffer_t * b, struct virtio_net_hdr_v1 *hdr)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      ip4_header_t *ip4;
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
      hdr->gso_size = vnet_buffer2 (b)->gso_size;
      hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	// 0x22;
      hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      /*
       * virtio devices do not support IP4 checksum offload. So driver takes care
       * of it while doing tx.
       */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
      hdr->gso_size = vnet_buffer2 (b)->gso_size;
      hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;	// 0x36;
      hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
    }
}

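/*
 * Enqueue one vlib buffer into the next avail-ring slot. Unchained buffers
 * use a single descriptor; chained buffers are described by an indirect
 * descriptor table built inside a freshly allocated vlib buffer. Returns
 * the number of slots consumed (0 if the packet was dropped).
 */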
static_always_inline u16
add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
                    virtio_vring_t * vring, u32 bi, u16 avail, u16 next,
                    u16 mask, int do_gso, int csum_offload, uword node_index)
{
  u16 n_added = 0;
  int hdr_sz = vif->virtio_net_hdr_sz;
  struct vring_desc *d;
  d = &vring->desc[next];
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  struct virtio_net_hdr_v1 *hdr = vlib_buffer_get_current (b) - hdr_sz;

  clib_memset (hdr, 0, hdr_sz);

  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (do_gso)
        set_gso_offsets (b, hdr);
      else
        {
          virtio_interface_drop_inline (vm, node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_GSO_PACKET_DROP);
          return n_added;
        }
    }
  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
                       VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
    {
      if (csum_offload)
        set_checksum_offsets (b, hdr);
      else
        {
          virtio_interface_drop_inline (vm, node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP);
          return n_added;
        }
    }

  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    {
      d->addr =
        ((vif->type == VIRTIO_IF_TYPE_PCI) ? vlib_buffer_get_current_pa (vm,
                                                                         b) :
         pointer_to_uword (vlib_buffer_get_current (b))) - hdr_sz;
      d->len = b->current_length + hdr_sz;
      d->flags = 0;
    }
  else
    {
      /*
       * We are using single vlib_buffer_t for indirect descriptor(s)
       * chain. Single descriptor is 16 bytes and vlib_buffer_t
       * has 2048 bytes space. So maximum long chain can have 128
       * (=2048/16) indirect descriptors.
       * It can easily support 65535 bytes of Jumbo frames with
       * each data buffer size of 512 bytes minimum.
       */
      u32 indirect_buffer = 0;
      if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
        {
          virtio_interface_drop_inline (vm, node_index, &bi, 1,
                                        VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED);
          return n_added;
        }

      vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
      indirect_desc->current_data = 0;
      indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
      indirect_desc->next_buffer = bi;
      bi = indirect_buffer;

      struct vring_desc *id =
        (struct vring_desc *) vlib_buffer_get_current (indirect_desc);
      u32 count = 1;
      if (vif->type == VIRTIO_IF_TYPE_PCI)
        {
          d->addr = vlib_physmem_get_pa (vm, id);
          id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;

          /*
           * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
           * should be presented in separate descriptor and data will start
           * from next descriptor.
           */
          if (PREDICT_TRUE
              (vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)))
            id->len = b->current_length + hdr_sz;
          else
            {
              id->len = hdr_sz;
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = vlib_buffer_get_current_pa (vm, b);
              id->len = b->current_length;
            }
        }
      else			/* VIRTIO_IF_TYPE_[TAP | TUN] */
        {
          d->addr = pointer_to_uword (id);
          /* first buffer in chain */
          id->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
          id->len = b->current_length + hdr_sz;

          while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              id->flags = VRING_DESC_F_NEXT;
              id->next = count;
              count++;
              id++;
              b = vlib_get_buffer (vm, b->next_buffer);
              id->addr = pointer_to_uword (vlib_buffer_get_current (b));
              id->len = b->current_length;
            }
        }
      id->flags = 0;
      id->next = 0;
      d->len = count * sizeof (struct vring_desc);
      d->flags = VRING_DESC_F_INDIRECT;
    }

  vring->buffers[next] = bi;
  vring->avail->ring[avail & mask] = next;
  n_added++;
  return n_added;
}

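/*
 * When completions come back out of order, free descriptors are no longer
 * contiguous; scan vring->buffers, starting from the hint 'next', for a
 * contiguous run of free slots (at most 'req' of them).
 */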
static_always_inline void
virtio_find_free_desc (virtio_vring_t * vring, u16 size, u16 mask,
                       u16 req, u16 next, u32 * first_free_desc_index,
                       u16 * free_desc_count)
{
  u16 start = 0;
  /* next is used as hint: from where to start looking */
  for (u16 i = 0; i < size; i++, next++)
    {
      if (vring->buffers[next & mask] == ~0)
        {
          if (*first_free_desc_index == ~0)
            {
              *first_free_desc_index = (next & mask);
              start = i;
              (*free_desc_count)++;
              req--;
              if (req == 0)
                break;
            }
          else
            {
              if (start + *free_desc_count == i)
                {
                  (*free_desc_count)++;
                  req--;
                  if (req == 0)
                    break;
                }
              else
                break;
            }
        }
    }
}

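/*
 * Per-frame tx path: pick the tx vring for this thread, reclaim completed
 * descriptors, fill avail-ring slots for as many buffers as fit, then
 * publish the new avail index and kick the device if notifications are
 * enabled. Buffers that still do not fit after retrying are dropped with
 * the NO_FREE_SLOTS error.
 */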
static_always_inline uword
virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif,
                            int do_gso, int csum_offload)
{
  u16 n_left = frame->n_vectors;
  virtio_vring_t *vring;
  u16 qid = vm->thread_index % vif->num_txqs;
  vring = vec_elt_at_index (vif->txq_vrings, qid);
  u16 used, next, avail;
  u16 sz = vring->size;
  u16 mask = sz - 1;
  u16 retry_count = 2;
  u32 *buffers = vlib_frame_vector_args (frame);

  clib_spinlock_lock_if_init (&vring->lockp);

  if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0 &&
      (vring->last_kick_avail_idx != vring->avail->idx))
    virtio_kick (vm, vring, vif);

retry:
  /* free consumed buffers */
  virtio_free_used_device_desc (vm, vring, node->node_index);

  used = vring->desc_in_use;
  next = vring->desc_next;
  avail = vring->avail->idx;

  u16 free_desc_count = 0;

  if (PREDICT_FALSE (vring->flags & VRING_TX_OUT_OF_ORDER))
    {
      u32 first_free_desc_index = ~0;

      virtio_find_free_desc (vring, sz, mask, n_left, next,
                             &first_free_desc_index, &free_desc_count);

      if (free_desc_count)
        next = first_free_desc_index;
    }
  else
    free_desc_count = sz - used;

  while (n_left && free_desc_count)
    {
      u16 n_added = 0;
      n_added =
        add_buffer_to_slot (vm, vif, vring, buffers[0], avail, next, mask,
                            do_gso, csum_offload, node->node_index);

      if (PREDICT_FALSE (n_added == 0))
        {
          buffers++;
          n_left--;
          continue;
        }

      avail += n_added;
      next = (next + n_added) & mask;
      used += n_added;
      buffers++;
      n_left--;
      free_desc_count--;
    }

  if (n_left != frame->n_vectors)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      vring->avail->idx = avail;
      vring->desc_next = next;
      vring->desc_in_use = used;
      if ((vring->used->flags & VIRTIO_RING_FLAG_MASK_INT) == 0)
        virtio_kick (vm, vring, vif);
    }

  if (n_left)
    {
      if (retry_count--)
        goto retry;

      virtio_interface_drop_inline (vm, node->node_index, buffers, n_left,
                                    VIRTIO_TX_ERROR_NO_FREE_SLOTS);
    }

  clib_spinlock_unlock_if_init (&vring->lockp);

  return frame->n_vectors - n_left;
}

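/*
 * Device-class tx node: dispatch to the specialized inline variant based on
 * the GSO / L4 checksum-offload capabilities of the hw interface.
 */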
VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
                                               vlib_node_runtime_t * node,
                                               vlib_frame_t * frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  virtio_main_t *nm = &virtio_main;
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);

  if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
    return virtio_interface_tx_inline (vm, node, frame, vif, 1 /* do_gso */ ,
                                       1);
  else if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
    return virtio_interface_tx_inline (vm, node, frame, vif,
                                       0 /* no do_gso */ , 1);
  else
    return virtio_interface_tx_inline (vm, node, frame, vif,
                                       0 /* no do_gso */ , 0);
}

static void
virtio_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                                u32 node_index)
{
  virtio_main_t *apm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (apm->interfaces, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      vif->per_interface_next_index = node_index;
      return;
    }

  vif->per_interface_next_index =
    vlib_node_add_next (vlib_get_main (), virtio_input_node.index,
                        node_index);
}

static void
virtio_clear_hw_interface_counters (u32 instance)
{
  /* Nothing for now */
}

static clib_error_t *
virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
                                 vnet_hw_interface_rx_mode mode)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);

  if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
    {
      vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
      return clib_error_return (0, "interrupt mode is not supported");
    }

  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    vring->avail->flags |= VIRTIO_RING_FLAG_MASK_INT;
  else
    vring->avail->flags &= ~VIRTIO_RING_FLAG_MASK_INT;

  return 0;
}

static clib_error_t *
virtio_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  virtio_main_t *mm = &virtio_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    {
      vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, vif->hw_if_index,
                                   VNET_HW_INTERFACE_FLAG_LINK_UP);
    }
  else
    {
      vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, vif->hw_if_index, 0);
    }

  return 0;
}

static clib_error_t *
virtio_subif_add_del_function (vnet_main_t * vnm,
                               u32 hw_if_index,
                               struct vnet_sw_interface_t *st, int is_add)
{
  /* Nothing for now */
  return 0;
}

VNET_DEVICE_CLASS (virtio_device_class) = {
  .name = "virtio",
  .format_device_name = format_virtio_device_name,
  .format_device = format_virtio_device,
  .format_tx_trace = format_virtio_tx_trace,
  .tx_function_n_errors = VIRTIO_TX_N_ERROR,
  .tx_function_error_strings = virtio_tx_func_error_strings,
  .rx_redirect_to_node = virtio_set_interface_next_node,
  .clear_counters = virtio_clear_hw_interface_counters,
  .admin_up_down_function = virtio_interface_admin_up_down,
  .subif_add_del_function = virtio_subif_add_del_function,
  .rx_mode_change_function = virtio_interface_rx_mode_change,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */