/*
 *------------------------------------------------------------------
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

#include <vnet/gso/hdr_offset_parser.h>
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy orders to be executed later. However, the static
 * array in which we keep the copy orders is limited to VHOST_USER_COPY_ARRAY_N
 * entries. To avoid corrupting memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 40 in case the code
 * goes into the inner loop for a maximum 64k frame which may require
 * more array entries. We subtract 200 because our default buffer size is
 * 2048 and the default desc len is likely 1536. While it takes fewer than 40
 * vlib buffers for a jumbo frame, it may take twice as many descriptors
 * for the same jumbo frame. Use 200 for the extra headroom.
 */

#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)
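
/*
 * Rough worst-case arithmetic behind the threshold above (illustrative
 * only, using the defaults quoted in the comment): a 64K jumbo frame held
 * in 2048-byte vlib buffers needs about 32 source buffers, i.e. fewer than
 * 40 copy entries, but the same frame spread over ~1536-byte guest
 * descriptors can consume roughly twice as many entries, hence the 200
 * entries of headroom.  The TX loops below flush the pending copy orders
 * whenever the threshold is crossed, along the lines of:
 *
 *   if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
 *     {
 *       if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
 *                                              &map_hint)))
 *         vlib_error_count (vm, node->node_index,
 *                           VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
 *       copy_len = 0;
 *     }
 */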

extern vnet_device_class_t vhost_user_device_class;

#define foreach_vhost_user_tx_func_error				\
  _(NONE, "no error")							\
  _(NOT_READY, "vhost vring not ready")					\
  _(DOWN, "vhost interface is down")					\
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")	\
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")	\
  _(MMAP_FAIL, "mmap failure")						\
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;
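
/* Human-readable counter strings for the TX errors above; these are
   registered as tx_function_error_strings in the device class below. */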
static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}
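
/*
 * Renumbering only changes what is displayed: the mapping from the real
 * device instance to the instance printed by
 * format_vhost_user_interface_name is kept in
 * show_dev_instance_by_real_dev_instance.
 */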
static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
					      hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
		hi->dev_instance, new_dev_instance);

  return 0;
}
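
/*
 * Fill in a per-packet TX trace record for the split ring: note which
 * descriptor the packet starts at and whether that descriptor is
 * indirect, chained or standalone.
 */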
static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}
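
/*
 * Execute the copy orders queued in cpy[]: map each guest destination
 * address (re-using map_hint across calls), handle two entries per loop
 * iteration with a prefetch of the next pair, and log dirty pages for
 * live migration.  Returns non-zero if a guest address fails to map.
 */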
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
	return 1;
      while (PREDICT_TRUE (copy_len >= 4))
	{
	  dst0 = dst2;
	  dst1 = dst3;
	  if (PREDICT_FALSE
	      (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
	    return 1;
	  clib_prefetch_load ((void *) cpy[2].src);
	  clib_prefetch_load ((void *) cpy[3].src);
	  clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
	  clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);
	  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
	  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
	return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
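
/*
 * Translate the vlib buffer offload flags into a virtio_net_hdr: compute
 * the IPv4 header checksum when requested, set VIRTIO_NET_HDR_F_NEEDS_CSUM
 * with the proper csum_start/csum_offset for TCP or UDP, and fill in the
 * GSO type/size when the guest negotiated TSO4/TSO6/UFO.
 */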
static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
			      virtio_net_hdr_t * hdr)
{
  generic_header_offset_t gho = { 0 };
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  int is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;
  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;

  ASSERT (!(is_ip4 && is_ip6));
  vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4, is_ip6);
  if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
    {
      ip4_header_t *ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }

  /* checksum offload */
  if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
    }
  else if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
	{
	  if (is_ip4 &&
	      (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO4)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
	    }
	  else if (is_ip6 &&
		   (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
	    }
	}
      else if ((vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_UFO)) &&
	       (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
	{
	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
	  hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
	}
    }
}
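
/*
 * Hand the processed packed-ring descriptors back to the guest: flip the
 * AVAIL/USED flag bits according to the used wrap counter, writing the head
 * descriptor's flags last so the guest never sees a partially completed
 * chain, then send an interrupt (call) when the coalesce threshold is
 * reached.
 */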
static_always_inline void
vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_intf_t * vui,
				vhost_user_vring_t * rxvq,
				u16 * n_descs_processed, u8 chained,
				vlib_frame_t * frame, u32 n_left)
{
  u16 desc_idx, flags;
  vring_packed_desc_t *desc_table = rxvq->packed_desc;
  u16 last_used_idx = rxvq->last_used_idx;

  if (PREDICT_FALSE (*n_descs_processed == 0))
    return;

  if (rxvq->used_wrap_counter)
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
      (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
  else
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
      ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);

  vhost_user_advance_last_used_idx (rxvq);

  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
    {
      if (rxvq->used_wrap_counter)
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
	  (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      else
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
	  ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      vhost_user_advance_last_used_idx (rxvq);
    }

  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;

  *n_descs_processed = 0;

  if (chained)
    {
      vring_packed_desc_t *desc_table = rxvq->packed_desc;

      while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
	     VRING_DESC_F_NEXT)
	vhost_user_advance_last_used_idx (rxvq);

      /* Advance past the current chained table entries */
      vhost_user_advance_last_used_idx (rxvq);
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      vhost_user_main_t *vum = &vhost_user_main;

      rxvq->n_since_last_int += frame->n_vectors - n_left;
      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, rxvq);
    }
}
static_always_inline void
vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vlib_buffer_t * b,
			    vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
  vring_packed_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->packed_desc[desc_current];
  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}
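
/*
 * TX path for the packed ring layout: walk the frame's buffers, queue
 * header and data copy orders into cpu->copy, flush them with
 * vhost_user_tx_copy when the threshold is crossed, and mark the consumed
 * descriptors available to the guest.
 */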
static_always_inline uword
vhost_user_device_class_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
				vlib_frame_t *frame, vhost_user_intf_t *vui,
				vhost_user_vring_t *rxvq)
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  vring_packed_desc_t *desc_table;
  u16 desc_head, desc_index, desc_len;
  u16 n_descs_processed;
  u8 indirect, chained;

  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  n_descs_processed = 0;

      vlib_buffer_t *b0, *current_b0;
      uword buffer_map_addr;
      u32 total_desc_len = 0;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,

      desc_table = rxvq->packed_desc;
      desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
      if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;

      /*
       * Go deeper in case of indirect descriptor.
       * To test it, turn off mrg_rxbuf.
       */
      if (desc_table[desc_head].flags & VRING_DESC_F_INDIRECT)
	  if (PREDICT_FALSE (desc_table[desc_head].len <
			     sizeof (vring_packed_desc_t)))
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	  n_entries = desc_table[desc_head].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
	  if (PREDICT_FALSE (desc_table == 0))
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      else if (rxvq->packed_desc[desc_head].flags & VRING_DESC_F_NEXT)

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      /* Get a header from the header array */
      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
      hdr->num_buffers = 1;

      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD);

      /* Guest supports csum offload and buffer requires checksum offload? */
	  (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
	vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

      /* Prepare a copy order executed later for the header */
      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[copy_len];
      cpy->len = vui->virtio_net_hdr_sz;
      cpy->dst = buffer_map_addr;
      cpy->src = (uword) hdr;

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
	  /*
	   * Next one is chained.
	   * Test it with both indirect and mrg_rxbuf off.
	   */
	  if (PREDICT_FALSE (!(desc_table[desc_index].flags &
	      /*
	       * Last descriptor in chain.
	       * Dequeue queued descriptors for this packet.
	       */
	      vhost_user_dequeue_chained_descs (rxvq,
	      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	    vhost_user_advance_last_avail_idx (rxvq);
	    desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
	    buffer_map_addr = desc_table[desc_index].addr;
	    buffer_len = desc_table[desc_index].len;
	    total_desc_len += desc_len;
	  /*
	   * Test it with mrg_rxbuf off.
	   */
	  if (PREDICT_TRUE (n_entries > 0))
	      /* Dequeue queued descriptors for this packet */
	      vhost_user_dequeue_chained_descs (rxvq,
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	    total_desc_len += desc_len;
	    desc_index = (desc_index + 1) & rxvq->qsz_mask;
	    buffer_map_addr = desc_table[desc_index].addr;
	    buffer_len = desc_table[desc_index].len;
	  else if (vui->virtio_net_hdr_sz == 12)
	      /*
	       * This is the default setting for the guest VM.
	       */
	      virtio_net_hdr_mrg_rxbuf_t *hdr =
		&cpu->tx_headers[tx_headers_len - 1];

	      desc_table[desc_index].len = desc_len;
	      vhost_user_advance_last_avail_idx (rxvq);
	      desc_head = desc_index =
		rxvq->last_avail_idx & rxvq->qsz_mask;

	      if (PREDICT_FALSE (!vhost_user_packed_desc_available
		  /* Dequeue queued descriptors for this packet */
		  vhost_user_dequeue_descs (rxvq, hdr,
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;

	      buffer_map_addr = desc_table[desc_index].addr;
	      buffer_len = desc_table[desc_index].len;
	      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;

	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  cpy->len = bytes_left;
	  cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	  cpy->dst = buffer_map_addr;
	  cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	    current_b0->current_length - bytes_left;

	  bytes_left -= cpy->len;
	  buffer_len -= cpy->len;
	  buffer_map_addr += cpy->len;
	  desc_len += cpy->len;

	  clib_prefetch_load (&rxvq->packed_desc);

	  /* Check if vlib buffer has more data. If not, get more or break */
	  if (PREDICT_TRUE (!bytes_left))
	      (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;

      /* Move from available to used ring */
      total_desc_len += desc_len;
      rxvq->packed_desc[desc_head].len = total_desc_len;

      vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and memory corruption.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

	  /* give buffers back to driver */
	  vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
					  chained, frame, n_left);

  if (PREDICT_TRUE (copy_len))
      if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
	  vlib_error_count (vm, node->node_index,
			    VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

      vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
				      chained, frame, n_left);
  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh packets that
   * are likely to be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)

  clib_spinlock_unlock (&rxvq->vring_lock);

  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters +
	 VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
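
/*
 * Device-class TX function (split ring): take the per-queue spinlock when
 * the queue is shared between workers, dispatch to the packed-ring variant
 * when negotiated, otherwise build copy orders against the avail/used rings
 * and kick the guest subject to interrupt coalescing.
 */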
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
						    vlib_node_runtime_t *
						    node, vlib_frame_t * frame)
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  vhost_user_vring_t *rxvq;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);

  if (PREDICT_FALSE (!vui->admin_up))
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;

  if (PREDICT_FALSE (!vui->is_ready))
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;

  qid = VHOST_VRING_IDX_RX (tf->queue_id);
  rxvq = &vui->vrings[qid];
  ASSERT (tf->queue_id == rxvq->qid);

  if (PREDICT_FALSE (rxvq->avail == 0))
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;

  if (tf->shared_queue)
    clib_spinlock_lock (&rxvq->vring_lock);

  if (vhost_user_is_packed_ring_supported (vui))
    return (vhost_user_device_class_packed (vm, node, frame, vui, rxvq));

  error = VHOST_USER_TX_FUNC_ERROR_NONE;

      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;

      desc_table = rxvq->desc;
      desc_head = desc_index =
	rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
	      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	       map_guest_mem (vui, rxvq->desc[desc_index].addr,
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;
      // Get a header from the header array
      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
      hdr->num_buffers = 1;	// This is local, no need to check

      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD);

      /* Guest supports csum offload and buffer requires checksum offload? */
	  && (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
	vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

      // Prepare a copy order executed later for the header
      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[copy_len];
      cpy->len = vui->virtio_net_hdr_sz;
      cpy->dst = buffer_map_addr;
      cpy->src = (uword) hdr;

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;

	  if (desc_table[desc_index].flags & VRING_DESC_F_NEXT)
	      // Next one is chained
	      desc_index = desc_table[desc_index].next;
	      buffer_map_addr = desc_table[desc_index].addr;
	      buffer_len = desc_table[desc_index].len;
	  else if (vui->virtio_net_hdr_sz == 12)	// MRG is available
	      virtio_net_hdr_mrg_rxbuf_t *hdr =
		&cpu->tx_headers[tx_headers_len - 1];

	      // Move from available to used buffer
	      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
	      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
	      vhost_user_log_dirty_ring (vui, rxvq,
					 ring[rxvq->last_used_idx &
	      rxvq->last_avail_idx++;
	      rxvq->last_used_idx++;

		  (rxvq->last_avail_idx == rxvq->avail->idx))
		  // Dequeue queued descriptors for this packet
		  rxvq->last_used_idx -= hdr->num_buffers - 1;
		  rxvq->last_avail_idx -= hdr->num_buffers - 1;
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;

	      desc_table = rxvq->desc;
	      desc_head = desc_index =
		rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
		  (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
		  // It is seriously unlikely that a driver will put an
		  // indirect descriptor after a non-indirect descriptor.
		      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
		      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
			   rxvq->desc[desc_index].addr,
		      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;

	      buffer_map_addr = desc_table[desc_index].addr;
	      buffer_len = desc_table[desc_index].len;
	      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;

	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  cpy->len = bytes_left;
	  cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	  cpy->dst = buffer_map_addr;
	  cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	    current_b0->current_length - bytes_left;

	  bytes_left -= cpy->len;
	  buffer_len -= cpy->len;
	  buffer_map_addr += cpy->len;
	  desc_len += cpy->len;

	  clib_prefetch_load (&rxvq->desc);

	  // Check if vlib buffer has more data. If not, get more or break.
	  if (PREDICT_TRUE (!bytes_left))
	      (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;

      // Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
				 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];

      n_left--;	// At the end for error counting when 'goto done' is invoked
      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and memory corruption.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

	  /* give buffers back to driver */
	  CLIB_MEMORY_BARRIER ();
	  rxvq->used->idx = rxvq->last_used_idx;
	  vhost_user_log_dirty_ring (vui, rxvq, idx);

  // Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
      vlib_error_count (vm, node->node_index,
			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);
  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh packets that
   * are likely to be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, rxvq);

  clib_spinlock_unlock (&rxvq->vring_lock);

  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters
	 + VNET_INTERFACE_COUNTER_DROP,
	 thread_index, vui->sw_if_index, n_left);

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
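
/*
 * Called when the RX mode (polling / interrupt / adaptive) of a queue
 * changes: keep the per-thread polling queue count and the global
 * interrupt-queue count in sync, and start or stop the interrupt
 * coalescing timer process accordingly.
 */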
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
				     u32 qid, vnet_hw_if_rx_mode mode)
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if (mode == txvq->mode)

  if ((mode != VNET_HW_IF_RX_MODE_POLLING) &&
      (mode != VNET_HW_IF_RX_MODE_ADAPTIVE) &&
      (mode != VNET_HW_IF_RX_MODE_INTERRUPT))
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
      return clib_error_return (0, "unsupported");

  if (txvq->thread_index == ~0)
    return clib_error_return (0, "Queue initialization is not finished yet");

  cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
  if ((mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
      if (txvq->kickfd_idx == ~0)
	  // We cannot support interrupt mode if the driver opts out
	  return clib_error_return (0, "Driver does not support interrupt");

      if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
	  ASSERT (cpu->polling_q_count != 0);
	  if (cpu->polling_q_count)
	    cpu->polling_q_count--;

      // Start the timer if this is the first encounter on interrupt
      if ((vum->ifq_count == 1) &&
	  ((vum->coalesce_time > 0.0) || (vum->coalesce_frames > 0)))
	vlib_process_signal_event (vm,
				   vhost_user_send_interrupt_node.index,
				   VHOST_USER_EVENT_START_TIMER, 0);
  else if (mode == VNET_HW_IF_RX_MODE_POLLING)
      if (((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
	   (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)) && vum->ifq_count)
	  cpu->polling_q_count++;

	  // Stop the timer if there is no more interrupt interface/queue
	  if (vum->ifq_count == 0)
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_STOP_TIMER, 0);

  vhost_user_set_operation_mode (vui, txvq);
static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
				 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}
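
/*
 * Device class registration: ties the TX function, error counters, name
 * formatting/renumbering and the admin/RX-mode callbacks above to the
 * "vhost-user" interface class.
 */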
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */