/*
 *------------------------------------------------------------------
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ip/ip.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value controls up to how many
 * packets will be discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers gets under this threshold,
 * RX node will start discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drop in the VM.
 * This value controls the number of copy operations that are stacked
 * before copy is done for all and descriptors are given back to
 * the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64

vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error") \
  _(NO_BUFFER, "no available buffer") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;

static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};

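/*
 * Fill a per-packet trace record: note whether the first descriptor was
 * indirect, chained or single, and copy the virtio-net header out of guest
 * memory when it can be mapped.
 */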
static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
                     vhost_user_intf_t * vui, u16 qid,
                     vlib_buffer_t * b, vhost_user_vring_t * txvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = txvq->last_avail_idx;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
    }
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}

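/*
 * Execute the copy orders accumulated in cpy[]: map each guest source
 * address, prefetch ahead while at least 4 copies are pending, and memcpy
 * the data into the vlib buffers. Returns 0 on success, 1 if a guest
 * address could not be mapped.
 */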
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
                       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
        return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
        return 1;
      while (PREDICT_TRUE (copy_len >= 4))
        {
          src0 = src2;
          src1 = src3;
          if (PREDICT_FALSE
              (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
            return 1;
          if (PREDICT_FALSE
              (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
            return 1;
          CLIB_PREFETCH (src2, 64, LOAD);
          CLIB_PREFETCH (src3, 64, LOAD);
          clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
          clib_memcpy ((void *) cpy[1].dst, src1, cpy[1].len);
          copy_len -= 2;
          cpy += 2;
        }
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
        return 1;
      clib_memcpy ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
                              vhost_user_intf_t * vui,
                              vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor
   * (it is the same whether it is a shallow descriptor, chained, or indirect).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == txvq->last_avail_idx)
        goto out;

      u16 desc_chain_head =
        txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
      txvq->last_avail_idx++;
      txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
        desc_chain_head;
      txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq,
                                 ring[txvq->last_used_idx & txvq->qsz_mask]);
      txvq->last_used_idx++;
      discarded_packets++;
    }

out:
  CLIB_MEMORY_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}

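/*
 * Note on the index arithmetic used above: last_avail_idx and last_used_idx
 * are free-running 16-bit counters, so a ring slot is always obtained by
 * masking, e.g. with a 256-entry vring qsz_mask is 255 and index 0x0102
 * selects slot 2. Only used->idx is published to the guest, and only after
 * a memory barrier.
 */
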
/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static __clib_unused void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
                                 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}

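/*
 * Per-queue receive routine. From VPP's point of view RX means draining the
 * guest's TX vring (txvq): descriptors are parsed and copy orders are queued
 * first, the actual memory copies are executed in batches later, and the
 * descriptors are then returned to the guest through the used ring.
 */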
static __clib_unused u32
vhost_user_if_input (vlib_main_t * vm,
                     vhost_user_main_t * vum,
                     vhost_user_intf_t * vui,
                     u16 qid, vlib_node_runtime_t * node,
                     vnet_hw_interface_rx_mode mode)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 map_hint = 0;
  u16 thread_index = vm->thread_index;
  u16 copy_len = 0;

  {
    /* do we have pending interrupts ? */
    vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
    f64 now = vlib_time_now (vm);

    if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
      vhost_user_send_call (vm, txvq);

    if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
      vhost_user_send_call (vm, rxvq);
  }

  /*
   * For adaptive mode, we try to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupt.
   */
  if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
           VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
          !(node->flags &
            VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
        /* Tell driver we want notification */
        txvq->used->flags = 0;
      else
        /* Tell driver we don't want notification */
        txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    return 0;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    return 0;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
                                    VHOST_USER_DOWN_DISCARD_COUNT);
      return 0;
    }

  if (PREDICT_FALSE (n_left == (txvq->qsz_mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. In case packets are bigger, we will just yield at some point
   * in the loop and come back later. This is not an issue as for big packets,
   * the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len < n_left + 1 ||
                     vum->cpus[thread_index].rx_buffers_len < 40))
    {
      u32 curr_len = vum->cpus[thread_index].rx_buffers_len;
      vum->cpus[thread_index].rx_buffers_len +=
        vlib_buffer_alloc_from_free_list (vm,
                                          vum->cpus[thread_index].rx_buffers +
                                          curr_len,
                                          VHOST_USER_RX_BUFFERS_N - curr_len,
                                          VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

      if (PREDICT_FALSE
          (vum->cpus[thread_index].rx_buffers_len <
           VHOST_USER_RX_BUFFER_STARVATION))
        {
          /* In case of buffer starvation, discard some packets from the queue
           * and log the event.
           * We keep doing best effort for the remaining packets. */
          u32 flush = (n_left + 1 > vum->cpus[thread_index].rx_buffers_len) ?
            n_left + 1 - vum->cpus[thread_index].rx_buffers_len : 1;
          flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

          n_left -= flush;
          vlib_increment_simple_counter (vnet_main.
                                         interface_main.sw_if_counters +
                                         VNET_INTERFACE_COUNTER_DROP,
                                         vlib_get_thread_index (),
                                         vui->sw_if_index, flush);

          vlib_error_count (vm, vhost_user_input_node.index,
                            VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
        }
    }

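  /*
   * Main loop: parse guest descriptors and build the frame. Memory copies
   * are only scheduled here (into the per-thread copy[] array); they are
   * executed in batches further down so that descriptor parsing and data
   * copying do not interleave.
   */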
  while (n_left > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b_head, *b_current;
          u32 bi_current;
          u16 desc_current;
          u32 desc_data_offset;
          vring_desc_t *desc_table = txvq->desc;

          if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len <= 1))
            {
              /* Not enough rx_buffers
               * Note: We yield on 1 so we don't need to do an additional
               * check for the next buffer prefetch.
               */
              n_left = 0;
              break;
            }

          desc_current =
            txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
          vum->cpus[thread_index].rx_buffers_len--;
          bi_current = (vum->cpus[thread_index].rx_buffers)
            [vum->cpus[thread_index].rx_buffers_len];
          b_head = b_current = vlib_get_buffer (vm, bi_current);
          to_next[0] = bi_current;	//We do that now so we can forget about bi_current
          to_next++;
          n_left_to_next--;

          vlib_prefetch_buffer_with_index
            (vm, (vum->cpus[thread_index].rx_buffers)
             [vum->cpus[thread_index].rx_buffers_len - 1], LOAD);

          /* Just preset the used descriptor id and length for later */
          txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
            desc_current;
          txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].len = 0;
          vhost_user_log_dirty_ring (vui, txvq,
                                     ring[txvq->last_used_idx &
                                          txvq->qsz_mask]);

          /* The buffer should already be initialized */
          b_head->total_length_not_including_first_buffer = 0;
          b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

          if (PREDICT_FALSE (n_trace))
            {
              //TODO: next_index is not exactly known at that point
              vlib_trace_buffer (vm, node, next_index, b_head,
                                 /* follow_chain */ 0);
              vhost_trace_t *t0 =
                vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
              vhost_user_rx_trace (t0, vui, qid, b_head, txvq);
              n_trace--;
              vlib_set_trace_count (vm, node, n_trace);
            }

          /* This depends on the setup but is very consistent,
           * so I think the CPU branch predictor will do a pretty good job
           * at optimizing the decision. */
          if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
            {
              desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
                                          &map_hint);
              desc_current = 0;
              if (PREDICT_FALSE (desc_table == 0))
                {
                  vlib_error_count (vm, node->node_index,
                                    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
                  goto out;
                }
            }

          if (PREDICT_TRUE (vui->is_any_layout) ||
              (!(desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT)))
            {
              /* ANYLAYOUT or single buffer */
              desc_data_offset = vui->virtio_net_hdr_sz;
            }
          else
            {
              /* CSR case without ANYLAYOUT, skip 1st buffer */
              desc_data_offset = desc_table[desc_current].len;
            }

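          /*
           * Walk the descriptor chain, allocating additional vlib buffers
           * as needed and queuing one copy order per (descriptor, buffer)
           * overlap. The loop exits through "out" at the end of the chain,
           * or through "stop" when no buffers are left at all.
           */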
          while (1)
            {
              /* Get more input if necessary. Or end of packet. */
              if (desc_data_offset == desc_table[desc_current].len)
                {
                  if (PREDICT_FALSE (desc_table[desc_current].flags &
                                     VIRTQ_DESC_F_NEXT))
                    {
                      desc_current = desc_table[desc_current].next;
                      desc_data_offset = 0;
                    }
                  else
                    {
                      goto out;
                    }
                }

              /* Get more output if necessary. Or end of packet. */
              if (PREDICT_FALSE
                  (b_current->current_length == VLIB_BUFFER_DATA_SIZE))
                {
                  if (PREDICT_FALSE
                      (vum->cpus[thread_index].rx_buffers_len == 0))
                    {
                      /* Cancel speculation */
                      to_next--;
                      n_left_to_next++;

                      /*
                       * Checking if there are some left buffers.
                       * If not, just rewind the used buffers and stop.
                       * Note: Scheduled copies are not cancelled. This is
                       * not an issue as they would still be valid. Useless,
                       * but valid.
                       */
                      vhost_user_input_rewind_buffers (vm,
                                                       &vum->cpus
                                                       [thread_index],
                                                       b_head);
                      n_left = 0;	// Stop processing
                      goto stop;
                    }

                  /* Get next output */
                  vum->cpus[thread_index].rx_buffers_len--;
                  u32 bi_next =
                    (vum->cpus[thread_index].rx_buffers)[vum->cpus
                                                         [thread_index].rx_buffers_len];
                  b_current->next_buffer = bi_next;
                  b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
                  bi_current = bi_next;
                  b_current = vlib_get_buffer (vm, bi_current);
                }

              /* Prepare a copy order executed later for the data */
              vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
              copy_len++;
              u32 desc_data_l =
                desc_table[desc_current].len - desc_data_offset;
              cpy->len = VLIB_BUFFER_DATA_SIZE - b_current->current_length;
              cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
              cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
                                  b_current->current_length);
              cpy->src = desc_table[desc_current].addr + desc_data_offset;

              desc_data_offset += cpy->len;

              b_current->current_length += cpy->len;
              b_head->total_length_not_including_first_buffer += cpy->len;
            }

        out:
          CLIB_PREFETCH (&n_left, sizeof (n_left), LOAD);

          n_rx_bytes += b_head->total_length_not_including_first_buffer;
          n_rx_packets++;

          b_head->total_length_not_including_first_buffer -=
            b_head->current_length;

          /* consume the descriptor and return it as used */
          txvq->last_avail_idx++;
          txvq->last_used_idx++;

          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);

          vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
          vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
          b_head->error = 0;

          {
            u32 next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

            /* redirect if feature path enabled */
            vnet_feature_start_device_input_x1 (vui->sw_if_index, &next0,
                                                b_head);

            u32 bi = to_next[-1];	//Cannot use to_next[-1] in the macro
            vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                             to_next, n_left_to_next,
                                             bi, next0);
          }

          n_left--;

          /*
           * Although separating memory copies from virtio ring parsing
           * is beneficial, it is still worth performing the copies from
           * time to time in order to free some space in the ring.
           */
          if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
            {
              if (PREDICT_FALSE
                  (vhost_user_input_copy (vui, vum->cpus[thread_index].copy,
                                          copy_len, &map_hint)))
                {
                  vlib_error_count (vm, node->node_index,
                                    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
                }
              copy_len = 0;

              /* give buffers back to driver */
              CLIB_MEMORY_BARRIER ();
              txvq->used->idx = txvq->last_used_idx;
              vhost_user_log_dirty_ring (vui, txvq, idx);
            }
        }
    stop:
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do the memory copies */
  if (PREDICT_FALSE
      (vhost_user_input_copy (vui, vum->cpus[thread_index].copy,
                              copy_len, &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
                        VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
        vhost_user_send_call (vm, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX,
     vlib_get_thread_index (), vui->sw_if_index, n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (thread_index, n_rx_packets);

  return n_rx_packets;
}

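/*
 * Input node dispatch function. For every (device, queue) assigned to this
 * thread, run the receive routine either when an interrupt is pending or
 * when the node is in polling state.
 */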
VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_device_input_runtime_t *rt =
    (vnet_device_input_runtime_t *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  vec_foreach (dq, rt->devices_and_queues)
  {
    if (clib_smp_swap (&dq->interrupt_pending, 0) ||
        (node->state == VLIB_NODE_STATE_POLLING))
      {
        vui =
          pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
        n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id, node,
                                             dq->mode);
      }
  }

  return n_rx_packets;
}

#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/* *INDENT-ON* */
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */