/*
 *------------------------------------------------------------------
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

#include <vnet/gso/hdr_offset_parser.h>
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order not to corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. A margin of 40 would cover the
 * inner loop for a maximum 64k frame, which may require more array entries.
 * We subtract 200 instead because our default buffer size is 2048 while the
 * default desc len is likely 1536: a jumbo frame takes fewer than 40 vlib
 * buffers, but may take twice as many descriptors for the same frame. The
 * 200 provides the extra head room.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)
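/*
 * Illustrative guard, not in the original source: assuming the default
 * VHOST_USER_COPY_ARRAY_N of 4096 (the actual value lives in vhost_user.h),
 * the threshold above works out to 3896 entries, leaving 200 spare for the
 * worst case. A 64k jumbo frame split into 2048-byte vlib buffers needs
 * about 32 copy entries, but the same frame split into 1536-byte guest
 * descriptors needs about 43, hence the doubled headroom. A compile-time
 * check along these lines would catch a copy array too small for it:
 */
STATIC_ASSERT (VHOST_USER_COPY_ARRAY_N > 200,
	       "copy array smaller than TX copy threshold headroom");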
extern vnet_device_class_t vhost_user_device_class;
#define foreach_vhost_user_tx_func_error      \
  _(NONE, "no error")  \
  _(NOT_READY, "vhost vring not ready") \
  _(DOWN, "vhost interface is down") \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")
typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
  VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;
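/*
 * Added note: the X-macro above expands once per error, e.g.
 *   VHOST_USER_TX_FUNC_ERROR_NONE,
 *   VHOST_USER_TX_FUNC_ERROR_NOT_READY,
 *   ...
 * for the enum, and to the corresponding strings for the table below, so
 * both always stay in lockstep with foreach_vhost_user_tx_func_error.
 */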
static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};
static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}
static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
					      hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
		hi->dev_instance, new_dev_instance);

  return 0;
}
/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return clib_atomic_test_and_set (vui->vring_locks[qid]);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  clib_atomic_release (vui->vring_locks[qid]);
}
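/*
 * Usage sketch (illustrative only, not part of the original file): the TX
 * path takes the per-vring spinlock before enqueueing and releases it once
 * descriptors have been handed back, e.g.:
 *
 *   if (PREDICT_FALSE (vui->use_tx_spinlock))
 *     vhost_user_vring_lock (vui, qid);
 *   ... enqueue on vui->vrings[qid] ...
 *   vhost_user_vring_unlock (vui, qid);
 *
 * The lock is only needed when there are more worker threads than TX
 * queues, forcing several threads to share a queue.
 */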
static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
	return 1;
      while (PREDICT_TRUE (copy_len >= 4))
	{
	  dst0 = dst2;
	  dst1 = dst3;
	  if (PREDICT_FALSE
	      (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
	    return 1;

	  CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
	  CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

	  clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
	  clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

	  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
	  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
	return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
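/*
 * Added commentary on the loop above: the quad case is software pipelined.
 * dst2/dst3 are mapped one iteration ahead of the dst0/dst1 copies, and the
 * next two sources are prefetched, so map_guest_mem latency overlaps the
 * memcpy work; only two entries actually retire per iteration. The scalar
 * tail loop drains whatever is left.
 */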
static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
			      virtio_net_hdr_t * hdr)
{
  generic_header_offset_t gho = { 0 };
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  int is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;

  ASSERT (!(is_ip4 && is_ip6));
  vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4, is_ip6);
  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
    {
      ip4_header_t *ip4;

      ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }

  /* checksum offload */
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
      udp_header_t *udp =
	(udp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
      udp->checksum = 0;
    }
  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
      tcp_header_t *tcp =
	(tcp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
      tcp->checksum = 0;
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
	{
	  if (is_ip4 &&
	      (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
	    }
	  else if (is_ip6 &&
		   (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
	    }
	}
      else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
	       (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
	{
	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
	  hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
	}
    }
}
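/*
 * Worked example (added, values assumed): for a TSO4 packet with a 14-byte
 * ethernet header and a 20-byte IPv4 header, the parser yields
 * l4_hdr_offset = 34, so the function above produces
 *   hdr->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM
 *   hdr->csum_start  = 34
 *   hdr->csum_offset = 16   (offsetof (tcp_header_t, checksum))
 *   hdr->gso_type    = VIRTIO_NET_HDR_GSO_TCPV4
 *   hdr->gso_size    = vnet_buffer2 (b)->gso_size, typically the MSS
 * and the guest side finishes the TCP checksum and segmentation.
 */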
static_always_inline void
vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_vring_t * rxvq,
				u16 * n_descs_processed, u8 chained,
				vlib_frame_t * frame, u32 n_left)
{
  u16 desc_idx, flags;
  vring_packed_desc_t *desc_table = rxvq->packed_desc;
  u16 last_used_idx = rxvq->last_used_idx;

  if (PREDICT_FALSE (*n_descs_processed == 0))
    return;

  if (rxvq->used_wrap_counter)
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
      (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
  else
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
      ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);

  vhost_user_advance_last_used_idx (rxvq);

  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
    {
      if (rxvq->used_wrap_counter)
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
	  (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      else
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
	  ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
      vhost_user_advance_last_used_idx (rxvq);
    }

  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;

  *n_descs_processed = 0;

  if (chained)
    {
      vring_packed_desc_t *desc_table = rxvq->packed_desc;

      while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
	     VIRTQ_DESC_F_NEXT)
	vhost_user_advance_last_used_idx (rxvq);

      /* Advance past the current chained table entries */
      vhost_user_advance_last_used_idx (rxvq);
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      vhost_user_main_t *vum = &vhost_user_main;

      rxvq->n_since_last_int += frame->n_vectors - n_left;
      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, rxvq);
    }
}
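/*
 * Added commentary: in a packed ring a descriptor is returned to the driver
 * by setting its AVAIL and USED flag bits equal to the device's current
 * wrap counter. The loop above flips every descriptor in the batch except
 * the head; the head's flags (saved in 'flags') are written last, so the
 * driver never observes a partially returned batch.
 */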
static_always_inline void
vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vlib_buffer_t * b,
			    vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
  vring_packed_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->packed_desc[desc_current];
  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}
static_always_inline uword
vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  vring_packed_desc_t *desc_table;
  u32 or_flags;
  u16 desc_head, desc_index, desc_len;
  u16 n_descs_processed;
  u8 indirect, chained;

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
					       thread_index));
  rxvq = &vui->vrings[qid];

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  n_descs_processed = 0;

  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;
      u32 total_desc_len = 0;
      u16 n_entries = 0;

      indirect = 0;
      chained = 0;
      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,
				      rxvq);
	}

      desc_table = rxvq->packed_desc;
      desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
      if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}
      /*
       * Go deeper in case of indirect descriptor.
       * To test it, turn off mrg_rxbuf.
       */
      if (desc_table[desc_head].flags & VIRTQ_DESC_F_INDIRECT)
	{
	  indirect = 1;
	  if (PREDICT_FALSE (desc_table[desc_head].len <
			     sizeof (vring_packed_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  n_entries = desc_table[desc_head].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
				      &map_hint);
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}
      else if (rxvq->packed_desc[desc_head].flags & VIRTQ_DESC_F_NEXT)
	chained = 1;

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      /* Get a header from the header array */
      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
      tx_headers_len++;
      hdr->hdr.flags = 0;
      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
      hdr->num_buffers = 1;

      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
	(b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
	(b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);

      /* Guest supports csum offload and buffer requires checksum offload? */
      if (or_flags &&
	  (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
	vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

      /* Prepare a copy order executed later for the header */
      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[copy_len];
      copy_len++;
      cpy->len = vui->virtio_net_hdr_sz;
      cpy->dst = buffer_map_addr;
      cpy->src = (uword) hdr;

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
	{
	  if (buffer_len == 0)
	    {
	      /* Get new output */
	      if (chained)
		{
		  /*
		   * Next one is chained
		   * Test it with both indirect and mrg_rxbuf off
		   */
		  if (PREDICT_FALSE (!(desc_table[desc_index].flags &
				       VIRTQ_DESC_F_NEXT)))
		    {
		      /*
		       * Last descriptor in chain.
		       * Dequeue queued descriptors for this packet
		       */
		      vhost_user_dequeue_chained_descs (rxvq,
							&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }
		  vhost_user_advance_last_avail_idx (rxvq);
		  desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
		  n_descs_processed++;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		  total_desc_len += desc_len;
		  desc_len = 0;
		}
	      else if (indirect)
		{
		  /*
		   * Indirect table
		   * Test it with mrg_rxbuf off
		   */
		  if (PREDICT_TRUE (n_entries > 0))
		    n_entries--;
		  else
		    {
		      /* Dequeue queued descriptors for this packet */
		      vhost_user_dequeue_chained_descs (rxvq,
							&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
		      goto done;
		    }
		  total_desc_len += desc_len;
		  desc_index = (desc_index + 1) & rxvq->qsz_mask;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		  desc_len = 0;
		}
	      else if (vui->virtio_net_hdr_sz == 12)
		{
		  /*
		   * MRG is available
		   * This is the default setting for the guest VM
		   */
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &cpu->tx_headers[tx_headers_len - 1];

		  desc_table[desc_index].len = desc_len;
		  vhost_user_advance_last_avail_idx (rxvq);
		  desc_head = desc_index =
		    rxvq->last_avail_idx & rxvq->qsz_mask;
		  hdr->num_buffers++;
		  n_descs_processed++;
		  desc_len = 0;

		  if (PREDICT_FALSE (!vhost_user_packed_desc_available
				     (rxvq, desc_index)))
		    {
		      /* Dequeue queued descriptors for this packet */
		      vhost_user_dequeue_descs (rxvq, hdr,
						&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  cpy->len = bytes_left;
	  cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	  cpy->dst = buffer_map_addr;
	  cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	    current_b0->current_length - bytes_left;

	  bytes_left -= cpy->len;
	  buffer_len -= cpy->len;
	  buffer_map_addr += cpy->len;
	  desc_len += cpy->len;

	  CLIB_PREFETCH (&rxvq->packed_desc, CLIB_CACHE_LINE_BYTES, LOAD);

	  /* Check if vlib buffer has more data. If not, get more or break */
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  /* End of packet */
		  break;
		}
	    }
	}

      /* Move from available to used ring */
      total_desc_len += desc_len;
      rxvq->packed_desc[desc_head].len = total_desc_len;

      vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);
      n_descs_processed++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];

      n_left--;

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
	{
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
						 &map_hint)))
	    vlib_error_count (vm, node->node_index,
			      VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	  copy_len = 0;

	  /* give buffers back to driver */
	  vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed,
					  chained, frame, n_left);
	}

      buffers++;
    }

done:
  if (PREDICT_TRUE (copy_len))
    {
      if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
					     &map_hint)))
	vlib_error_count (vm, node->node_index,
			  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

      vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed, chained,
				      frame, n_left);
    }

  /*
   * When n_left is set, error is always set to something too.
   * In case error is due to lack of remaining buffers, we go back up and
   * retry.
   * The idea is that it is better to waste some time on packets
   * that have already been processed than to drop them and get
   * fresh packets with a good likelihood that they will be dropped too.
   * This technique also gives more time to the VM driver to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  vhost_user_vring_unlock (vui, qid);

  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters +
	 VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}
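/*
 * Added note: this packed-ring variant is invoked from the device-class TX
 * function below after the vring spinlock has (optionally) been taken, so
 * it unlocks the vring and frees the frame's buffers itself before
 * returning.
 */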
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
						   vlib_node_runtime_t *
						   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  u32 or_flags;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
					       thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }

  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

  if (vhost_user_is_packed_ring_supported (vui))
    return (vhost_user_device_class_packed (vm, node, frame));

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
	}

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}

      desc_table = rxvq->desc;
      desc_head = desc_index =
	rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
	{
	  if (PREDICT_FALSE
	      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  if (PREDICT_FALSE
	      (!(desc_table =
		 map_guest_mem (vui, rxvq->desc[desc_index].addr,
				&map_hint))))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
	// Get a header from the header array
	virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
	tx_headers_len++;
	hdr->hdr.flags = 0;
	hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
	hdr->num_buffers = 1;	//This is local, no need to check

	or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
	  (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
	  (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);

	/* Guest supports csum offload and buffer requires checksum offload? */
	if (or_flags
	    && (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
	  vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

	// Prepare a copy order executed later for the header
	ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	vhost_copy_t *cpy = &cpu->copy[copy_len];
	copy_len++;
	cpy->len = vui->virtio_net_hdr_sz;
	cpy->dst = buffer_map_addr;
	cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
	{
	  if (buffer_len == 0)
	    {			//Get new output
	      if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
		{
		  //Next one is chained
		  desc_index = desc_table[desc_index].next;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
		{
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &cpu->tx_headers[tx_headers_len - 1];

		  //Move from available to used buffer
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
		    desc_head;
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
		    desc_len;
		  vhost_user_log_dirty_ring (vui, rxvq,
					     ring[rxvq->last_used_idx &
						  rxvq->qsz_mask]);

		  rxvq->last_avail_idx++;
		  rxvq->last_used_idx++;
		  hdr->num_buffers++;
		  desc_len = 0;

		  if (PREDICT_FALSE
		      (rxvq->last_avail_idx == rxvq->avail->idx))
		    {
		      //Dequeue queued descriptors for this packet
		      rxvq->last_used_idx -= hdr->num_buffers - 1;
		      rxvq->last_avail_idx -= hdr->num_buffers - 1;
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  desc_table = rxvq->desc;
		  desc_head = desc_index =
		    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
		  if (PREDICT_FALSE
		      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
		    {
		      //It is seriously unlikely that a driver will put indirect descriptor
		      //after non-indirect descriptor.
		      if (PREDICT_FALSE
			  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
			  goto done;
			}
		      if (PREDICT_FALSE
			  (!(desc_table =
			     map_guest_mem (vui,
					    rxvq->desc[desc_index].addr,
					    &map_hint))))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
			  goto done;
			}
		      desc_index = 0;
		    }
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  {
	    ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	    vhost_copy_t *cpy = &cpu->copy[copy_len];
	    copy_len++;
	    cpy->len = bytes_left;
	    cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	    cpy->dst = buffer_map_addr;
	    cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	      current_b0->current_length - bytes_left;

	    bytes_left -= cpy->len;
	    buffer_len -= cpy->len;
	    buffer_map_addr += cpy->len;
	    desc_len += cpy->len;

	    CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  // Check if vlib buffer has more data. If not, get more or break.
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  //End of packet
		  break;
		}
	    }
	}

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
				 ring[rxvq->last_used_idx & rxvq->qsz_mask]);

      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
	}

      n_left--;			//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
						 &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_BARRIER ();
	  rxvq->used->idx = rxvq->last_used_idx;
	  vhost_user_log_dirty_ring (vui, rxvq, idx);
	}

      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
					 &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case error is due to lack of remaining buffers, we go back up and
   * retry.
   * The idea is that it is better to waste some time on packets
   * that have already been processed than to drop them and get
   * fresh packets with a good likelihood that they will be dropped too.
   * This technique also gives more time to the VM driver to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, rxvq);
    }

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters
	 + VNET_INTERFACE_COUNTER_DROP,
	 thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}
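/*
 * Added note: every buffer in the frame is freed here regardless of errors;
 * its data has either already been copied into guest memory or never will
 * be. Drops are accounted both as node errors and in the interface drop
 * counter above.
 */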
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
				     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
	{
	  // We cannot support interrupt mode if the driver opts out
	  return clib_error_return (0, "Driver does not support interrupt");
	}
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
	{
	  vum->ifq_count++;
	  // Start the timer if this is the first encounter on interrupt
	  // interface/queue
	  if ((vum->ifq_count == 1) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_START_TIMER, 0);
	}
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
	   (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
	  vum->ifq_count)
	{
	  vum->ifq_count--;
	  // Stop the timer if there is no more interrupt interface/queue
	  if ((vum->ifq_count == 0) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_STOP_TIMER, 0);
	}
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
	   (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
		  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  return 0;
}
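/*
 * Added note: VRING_USED_F_NO_NOTIFY is the standing hint to the guest
 * driver that kicks are unnecessary. Polling mode sets it because VPP scans
 * the ring anyway; interrupt and adaptive modes clear it so the guest kicks
 * VPP through the kickfd when it posts buffers.
 */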
static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
				 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */