2 *------------------------------------------------------------------
5 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at:
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *------------------------------------------------------------------
20 #include <fcntl.h> /* for open */
21 #include <sys/ioctl.h>
22 #include <sys/socket.h>
25 #include <sys/types.h>
26 #include <sys/uio.h> /* for iovec */
27 #include <netinet/in.h>
30 #include <linux/if_arp.h>
31 #include <linux/if_tun.h>
33 #include <vlib/vlib.h>
34 #include <vlib/unix/unix.h>
36 #include <vnet/ethernet/ethernet.h>
37 #include <vnet/devices/devices.h>
38 #include <vnet/feature/feature.h>
39 #include <vnet/udp/udp_packet.h>
41 #include <vnet/devices/virtio/vhost_user.h>
42 #include <vnet/devices/virtio/vhost_user_inline.h>
45 * When an RX queue is down but active, received packets
46 * must be discarded. This value caps how many packets
47 * are discarded during each round.
49 #define VHOST_USER_DOWN_DISCARD_COUNT 256
52 * When the number of available buffers falls below this threshold,
53 * the RX node starts discarding packets.
55 #define VHOST_USER_RX_BUFFER_STARVATION 32
58 * On the receive side, the host should free descriptors as soon
59 * as possible in order to avoid TX drop in the VM.
60 * This value controls how many copy operations are stacked up before
61 * they are all performed and the descriptors are handed back to the driver.
63 * The value 64 was obtained by testing (48 and 128 were not as good).
65 #define VHOST_USER_RX_COPY_THRESHOLD 64
67 extern vlib_node_registration_t vhost_user_input_node;
69 #define foreach_vhost_user_input_func_error \
70 _(NO_ERROR, "no error") \
71 _(NO_BUFFER, "no available buffer") \
72 _(MMAP_FAIL, "mmap failure") \
73 _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
74 _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
75 _(NOT_READY, "vhost interface not ready or down") \
76 _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")
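/*
 * The list above is an X-macro: each _(f, s) entry expands once into the
 * error enum below and once into the error string table. For instance,
 * _(NO_BUFFER, "no available buffer") becomes
 * VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER in the enum and the string
 * "no available buffer" in vhost_user_input_func_error_strings.
 */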
80 #define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
81 foreach_vhost_user_input_func_error
83 VHOST_USER_INPUT_FUNC_N_ERROR,
84 } vhost_user_input_func_error_t;
86 static __clib_unused char *vhost_user_input_func_error_strings[] = {
88 foreach_vhost_user_input_func_error
92 static_always_inline void
93 vhost_user_rx_trace (vhost_trace_t * t,
94 vhost_user_intf_t * vui, u16 qid,
95 vlib_buffer_t * b, vhost_user_vring_t * txvq,
98 vhost_user_main_t *vum = &vhost_user_main;
99 u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
100 vring_desc_t *hdr_desc = 0;
101 virtio_net_hdr_mrg_rxbuf_t *hdr;
104 clib_memset (t, 0, sizeof (*t));
105 t->device_index = vui - vum->vhost_user_interfaces;
108 hdr_desc = &txvq->desc[desc_current];
109 if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
111 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
112 /* The header is in the first descriptor of the indirect table */
113 hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
115 if (txvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
117 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
119 if (!(txvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
120 !(txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
122 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
125 t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
127 if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
129 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
133 u32 len = vui->virtio_net_hdr_sz;
134 memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
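/*
 * Drain the copy operations scheduled by the input loop. The loop below is
 * a small software pipeline: while the current pair of sources is being
 * copied, the next pair is mapped and prefetched, hiding part of the
 * guest-memory translation and cache-miss cost. Returns non-zero if a
 * guest-memory mapping fails (the caller logs it as an MMAP_FAIL error).
 */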
138 static_always_inline u32
139 vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
140 u16 copy_len, u32 * map_hint)
142 void *src0, *src1, *src2, *src3;
143 if (PREDICT_TRUE (copy_len >= 4))
145 if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
147 if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
150 while (PREDICT_TRUE (copy_len >= 4))
156 (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
159 (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
162 CLIB_PREFETCH (src2, 64, LOAD);
163 CLIB_PREFETCH (src3, 64, LOAD);
165 clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
166 clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
173 if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
175 clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
183 * Try to discard packets from the tx ring (VPP RX path).
184 * Returns the number of discarded packets.
186 static_always_inline u32
187 vhost_user_rx_discard_packet (vlib_main_t * vm,
188 vhost_user_intf_t * vui,
189 vhost_user_vring_t * txvq, u32 discard_max)
192 * On the RX side, each packet corresponds to exactly one descriptor
193 * (whether it is a simple descriptor, a chain, or an indirect one).
194 * Therefore, discarding a packet is equivalent to discarding a descriptor.
196 u32 discarded_packets = 0;
197 u32 avail_idx = txvq->avail->idx;
198 u16 mask = txvq->qsz_mask;
199 u16 last_avail_idx = txvq->last_avail_idx;
200 u16 last_used_idx = txvq->last_used_idx;
201 while (discarded_packets != discard_max)
203 if (avail_idx == last_avail_idx)
206 u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
208 txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
209 txvq->used->ring[last_used_idx & mask].len = 0;
210 vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
216 txvq->last_avail_idx = last_avail_idx;
217 txvq->last_used_idx = last_used_idx;
218 CLIB_MEMORY_STORE_BARRIER ();
219 txvq->used->idx = txvq->last_used_idx;
220 vhost_user_log_dirty_ring (vui, txvq, idx);
221 return discarded_packets;
225 * In case of overflow, we need to rewind the array of allocated buffers.
227 static_always_inline void
228 vhost_user_input_rewind_buffers (vlib_main_t * vm,
229 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
231 u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
232 vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
233 b_current->current_length = 0;
234 b_current->flags = 0;
235 while (b_current != b_head)
237 cpu->rx_buffers_len++;
238 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
239 b_current = vlib_get_buffer (vm, bi_current);
240 b_current->current_length = 0;
241 b_current->flags = 0;
243 cpu->rx_buffers_len++;
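/*
 * Translate the offload hints carried in the virtio_net_hdr into vlib
 * buffer metadata: parse the Ethernet header (including up to two VLAN
 * tags) to locate the L3/L4 headers, set the corresponding checksum-offload
 * flags, and, when the header requests GSO, record gso_size and the L4
 * header size so that downstream nodes can resegment the packet.
 */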
246 static_always_inline void
247 vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
248 virtio_net_hdr_t * hdr)
252 ethernet_header_t *eh = (ethernet_header_t *) b0_data;
253 u16 ethertype = clib_net_to_host_u16 (eh->type);
254 u16 l2hdr_sz = sizeof (ethernet_header_t);
256 if (ethernet_frame_is_tagged (ethertype))
258 ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
260 ethertype = clib_net_to_host_u16 (vlan->type);
261 l2hdr_sz += sizeof (*vlan);
262 if (ethertype == ETHERNET_TYPE_VLAN)
265 ethertype = clib_net_to_host_u16 (vlan->type);
266 l2hdr_sz += sizeof (*vlan);
269 vnet_buffer (b0)->l2_hdr_offset = 0;
270 vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
271 vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
272 b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
273 VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
274 VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
276 if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
278 ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
279 l4_proto = ip4->protocol;
280 b0->flags |= VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
282 else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
284 ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
285 l4_proto = ip6->protocol;
286 b0->flags |= VNET_BUFFER_F_IS_IP6;
289 if (l4_proto == IP_PROTOCOL_TCP)
291 tcp_header_t *tcp = (tcp_header_t *)
292 (b0_data + vnet_buffer (b0)->l4_hdr_offset);
293 l4_hdr_sz = tcp_header_bytes (tcp);
294 b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
296 else if (l4_proto == IP_PROTOCOL_UDP)
298 l4_hdr_sz = sizeof (udp_header_t);
299 b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
302 if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
304 vnet_buffer2 (b0)->gso_size = hdr->gso_size;
305 vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
306 b0->flags |= VNET_BUFFER_F_GSO;
308 else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
310 vnet_buffer2 (b0)->gso_size = hdr->gso_size;
311 vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
312 b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
314 else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
316 vnet_buffer2 (b0)->gso_size = hdr->gso_size;
317 vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
318 b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
322 static_always_inline void
323 vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_intf_t * vui,
324 vhost_user_vring_t * txvq,
325 vhost_user_vring_t * rxvq)
327 f64 now = vlib_time_now (vm);
329 if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
330 vhost_user_send_call (vm, vui, txvq);
332 if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
333 vhost_user_send_call (vm, vui, rxvq);
336 static_always_inline void
337 vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
338 vhost_user_intf_t * vui,
339 u32 * current_config_index, u32 * next_index,
340 u32 ** to_next, u32 * n_left_to_next)
342 vnet_feature_main_t *fm = &feature_main;
343 u8 feature_arc_idx = fm->device_input_feature_arc_index;
345 if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
347 vnet_feature_config_main_t *cm;
348 cm = &fm->feature_config_mains[feature_arc_idx];
349 *current_config_index = vec_elt (cm->config_index_by_sw_if_index,
351 vnet_get_config_data (&cm->config_main, current_config_index,
355 vlib_get_new_next_frame (vm, node, *next_index, *to_next, *n_left_to_next);
357 if (*next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
359 /* give some hints to ethernet-input */
360 vlib_next_frame_t *nf;
362 ethernet_input_frame_t *ef;
363 nf = vlib_node_runtime_get_next_frame (vm, node, *next_index);
364 f = vlib_get_frame (vm, nf->frame);
365 f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
367 ef = vlib_frame_scalar_args (f);
368 ef->sw_if_index = vui->sw_if_index;
369 ef->hw_if_index = vui->hw_if_index;
370 vlib_frame_no_append (f);
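/*
 * With ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX set and the frame marked
 * no-append, ethernet-input can assume every buffer in this frame comes
 * from the same vhost-user interface and take its fast path instead of
 * checking sw_if_index per packet.
 */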
374 static_always_inline u32
375 vhost_user_if_input (vlib_main_t * vm,
376 vhost_user_main_t * vum,
377 vhost_user_intf_t * vui,
378 u16 qid, vlib_node_runtime_t * node,
379 vnet_hw_if_rx_mode mode, u8 enable_csum)
381 vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
382 vnet_feature_main_t *fm = &feature_main;
383 u16 n_rx_packets = 0;
386 u32 n_left_to_next, *to_next;
387 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
388 u32 n_trace = vlib_get_trace_count (vm, node);
389 u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
391 vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
393 u8 feature_arc_idx = fm->device_input_feature_arc_index;
394 u32 current_config_index = ~(u32) 0;
395 u16 mask = txvq->qsz_mask;
397 /* The descriptor table is not ready yet */
398 if (PREDICT_FALSE (txvq->avail == 0))
402 /* do we have pending interrupts ? */
403 vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
404 vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);
408 * Adaptive mode is optimized to reduce interrupts.
409 * If the scheduler switches the input node to polling due
410 * to a burst of traffic, we tell the driver not to interrupt.
411 * When the traffic subsides and the scheduler switches the node back to
412 * interrupt mode, we tell the driver that we want interrupts again.
414 if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
417 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
419 VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
420 /* Tell driver we want notification */
421 txvq->used->flags = 0;
423 /* Tell driver we don't want notification */
424 txvq->used->flags = VRING_USED_F_NO_NOTIFY;
427 if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
430 n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);
433 if (PREDICT_FALSE (n_left == 0))
436 if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
439 * Discard input packets if the interface is admin down or the vring is
440 * not enabled.
441 * "For example, for a networking device, in the disabled state
442 * client must not supply any new RX packets, but must process
443 * and discard any TX packets."
445 vhost_user_rx_discard_packet (vm, vui, txvq,
446 VHOST_USER_DOWN_DISCARD_COUNT);
450 if (PREDICT_FALSE (n_left == (mask + 1)))
453 * Informational error logging when VPP is not
454 * receiving packets fast enough.
456 vlib_error_count (vm, node->node_index,
457 VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
460 if (n_left > VLIB_FRAME_SIZE)
461 n_left = VLIB_FRAME_SIZE;
464 * For small packets (<2kB), we will not need more than one vlib buffer
465 * per packet. If packets are bigger, we will simply yield at some point
466 * in the loop and come back later. This is not an issue because, for big
467 * packets, the processing cost really comes from the memory copy.
468 * The assumption is that big packets will fit in 40 buffers.
470 if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
471 cpu->rx_buffers_len < 40))
473 u32 curr_len = cpu->rx_buffers_len;
474 cpu->rx_buffers_len +=
475 vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
476 VHOST_USER_RX_BUFFERS_N - curr_len);
479 (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
481 /* In case of buffer starvation, discard some packets from the queue.
483 * We keep doing best effort for the remaining packets. */
484 u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
485 n_left + 1 - cpu->rx_buffers_len : 1;
486 flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);
489 vlib_increment_simple_counter (vnet_main.
490 interface_main.sw_if_counters +
491 VNET_INTERFACE_COUNTER_DROP,
492 vm->thread_index, vui->sw_if_index,
495 vlib_error_count (vm, vhost_user_input_node.index,
496 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
500 vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
501 &next_index, &to_next, &n_left_to_next);
503 u16 last_avail_idx = txvq->last_avail_idx;
504 u16 last_used_idx = txvq->last_used_idx;
508 vlib_buffer_t *b_head, *b_current;
511 u32 desc_data_offset;
512 vring_desc_t *desc_table = txvq->desc;
514 if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
516 /* Not enough rx_buffers.
517 * Note: We yield on 1 so we don't need to do an additional
518 * check for the next buffer prefetch.
524 desc_current = txvq->avail->ring[last_avail_idx & mask];
525 cpu->rx_buffers_len--;
526 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
527 b_head = b_current = vlib_get_buffer (vm, bi_current);
528 to_next[0] = bi_current; //We do that now so we can forget about bi_current
532 vlib_prefetch_buffer_with_index
533 (vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);
535 /* Just preset the used descriptor id and length for later */
536 txvq->used->ring[last_used_idx & mask].id = desc_current;
537 txvq->used->ring[last_used_idx & mask].len = 0;
538 vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
540 /* The buffer should already be initialized */
541 b_head->total_length_not_including_first_buffer = 0;
542 b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
545 (n_trace > 0 && vlib_trace_buffer (vm, node, next_index, b_head,
546 /* follow_chain */ 0)))
549 vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
550 vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
552 vlib_set_trace_count (vm, node, n_trace);
555 /* This depends on the setup but is very consistent,
556 * so the CPU branch predictor should do a pretty good job
557 * of optimizing the decision. */
558 if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
560 desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
563 if (PREDICT_FALSE (desc_table == 0))
565 vlib_error_count (vm, node->node_index,
566 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
571 desc_data_offset = vui->virtio_net_hdr_sz;
575 virtio_net_hdr_mrg_rxbuf_t *hdr;
579 hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint);
580 if (PREDICT_FALSE (hdr == 0))
582 vlib_error_count (vm, node->node_index,
583 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
586 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
588 if ((desc_data_offset == desc_table[desc_current].len) &&
589 (desc_table[desc_current].flags & VRING_DESC_F_NEXT))
591 current = desc_table[desc_current].next;
592 b_data = map_guest_mem (vui, desc_table[current].addr,
594 if (PREDICT_FALSE (b_data == 0))
596 vlib_error_count (vm, node->node_index,
597 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL,
603 b_data = (u8 *) hdr + desc_data_offset;
605 vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
611 /* Get more input if necessary. Or end of packet. */
612 if (desc_data_offset == desc_table[desc_current].len)
614 if (PREDICT_FALSE (desc_table[desc_current].flags &
617 desc_current = desc_table[desc_current].next;
618 desc_data_offset = 0;
626 /* Get more output if necessary. Or end of packet. */
627 if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
629 if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
631 /* Cancel speculation */
636 * Check whether there are any buffers left.
637 * If not, just rewind the used buffers and stop.
638 * Note: scheduled copies are not cancelled. This is
639 * not an issue, as they remain valid; they are merely useless.
642 vhost_user_input_rewind_buffers (vm, cpu, b_head);
647 /* Get next output */
648 cpu->rx_buffers_len--;
649 u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
650 b_current->next_buffer = bi_next;
651 b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
652 bi_current = bi_next;
653 b_current = vlib_get_buffer (vm, bi_current);
656 /* Prepare a copy order executed later for the data */
657 ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
658 vhost_copy_t *cpy = &cpu->copy[copy_len];
660 u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
661 cpy->len = buffer_data_size - b_current->current_length;
662 cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
663 cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
664 b_current->current_length);
665 cpy->src = desc_table[desc_current].addr + desc_data_offset;
667 desc_data_offset += cpy->len;
669 b_current->current_length += cpy->len;
670 b_head->total_length_not_including_first_buffer += cpy->len;
675 n_rx_bytes += b_head->total_length_not_including_first_buffer;
678 b_head->total_length_not_including_first_buffer -=
679 b_head->current_length;
681 /* consume the descriptor and return it as used */
685 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
687 vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
688 vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
691 if (current_config_index != ~(u32) 0)
693 b_head->current_config_index = current_config_index;
694 vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
700 * Although it is beneficial to separate memory copies from virtio ring
701 * parsing, we perform the pending copies from time to time in order to
702 * free some space in the ring.
704 if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
706 if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
707 copy_len, &map_hint)))
709 vlib_error_count (vm, node->node_index,
710 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
714 /* give buffers back to driver */
715 CLIB_MEMORY_STORE_BARRIER ();
716 txvq->used->idx = last_used_idx;
717 vhost_user_log_dirty_ring (vui, txvq, idx);
721 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
723 txvq->last_used_idx = last_used_idx;
724 txvq->last_avail_idx = last_avail_idx;
726 /* Do the memory copies */
727 if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
730 vlib_error_count (vm, node->node_index,
731 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
734 /* give buffers back to driver */
735 CLIB_MEMORY_STORE_BARRIER ();
736 txvq->used->idx = txvq->last_used_idx;
737 vhost_user_log_dirty_ring (vui, txvq, idx);
739 /* interrupt (call) handling */
740 if ((txvq->callfd_idx != ~0) &&
741 !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
743 txvq->n_since_last_int += n_rx_packets;
745 if (txvq->n_since_last_int > vum->coalesce_frames)
746 vhost_user_send_call (vm, vui, txvq);
749 /* increase rx counters */
750 vlib_increment_combined_counter
751 (vnet_main.interface_main.combined_sw_if_counters
752 + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
753 n_rx_packets, n_rx_bytes);
755 vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);
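/*
 * Packed-ring descriptors are returned to the driver by flipping their
 * AVAIL/USED flag bits: both bits are set when the device's used wrap
 * counter is 1 and cleared when it is 0, which is how the packed virtqueue
 * format marks a descriptor as used. last_used_idx is advanced once per
 * descriptor so that the wrap counter toggles at ring wrap-around.
 */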
761 static_always_inline void
762 vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
763 vhost_user_vring_t * txvq, u16 desc_head,
764 u16 n_descs_processed)
766 vring_packed_desc_t *desc_table = txvq->packed_desc;
768 u16 mask = txvq->qsz_mask;
770 for (desc_idx = 0; desc_idx < n_descs_processed; desc_idx++)
772 if (txvq->used_wrap_counter)
773 desc_table[(desc_head + desc_idx) & mask].flags |=
774 (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
776 desc_table[(desc_head + desc_idx) & mask].flags &=
777 ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
778 vhost_user_advance_last_used_idx (txvq);
782 static_always_inline void
783 vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
784 u16 qid, vhost_user_vring_t * txvq,
787 vhost_user_main_t *vum = &vhost_user_main;
788 vring_packed_desc_t *hdr_desc;
789 virtio_net_hdr_mrg_rxbuf_t *hdr;
792 clib_memset (t, 0, sizeof (*t));
793 t->device_index = vui - vum->vhost_user_interfaces;
796 hdr_desc = &txvq->packed_desc[desc_current];
797 if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
799 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
800 /* The header is in the first descriptor of the indirect table */
801 hdr_desc = map_guest_mem (vui, txvq->packed_desc[desc_current].addr,
804 if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
805 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
807 if (!(txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
808 !(txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
809 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
811 t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
813 if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
814 t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
817 u32 len = vui->virtio_net_hdr_sz;
818 clib_memcpy_fast (&t->hdr, hdr,
819 len > hdr_desc->len ? hdr_desc->len : len);
823 static_always_inline u32
824 vhost_user_rx_discard_packet_packed (vlib_main_t * vm,
825 vhost_user_intf_t * vui,
826 vhost_user_vring_t * txvq,
829 u32 discarded_packets = 0;
830 u16 mask = txvq->qsz_mask;
831 u16 desc_current, desc_head;
833 desc_head = desc_current = txvq->last_used_idx & mask;
836 * On the RX side, each packet corresponds to exactly one descriptor
837 * (whether it is a simple descriptor, a chain, or an indirect one).
838 * Therefore, discarding a packet is equivalent to discarding a descriptor.
840 while ((discarded_packets != discard_max) &&
841 vhost_user_packed_desc_available (txvq, desc_current))
843 vhost_user_advance_last_avail_idx (txvq);
845 desc_current = (desc_current + 1) & mask;
848 if (PREDICT_TRUE (discarded_packets))
849 vhost_user_mark_desc_consumed (vui, txvq, desc_head, discarded_packets);
850 return (discarded_packets);
853 static_always_inline u32
854 vhost_user_input_copy_packed (vhost_user_intf_t * vui, vhost_copy_t * cpy,
855 u16 copy_len, u32 * map_hint)
857 void *src0, *src1, *src2, *src3, *src4, *src5, *src6, *src7;
859 u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
861 if (PREDICT_TRUE (copy_len >= 8))
863 src4 = map_guest_mem (vui, cpy[0].src, map_hint);
864 src5 = map_guest_mem (vui, cpy[1].src, map_hint);
865 src6 = map_guest_mem (vui, cpy[2].src, map_hint);
866 src7 = map_guest_mem (vui, cpy[3].src, map_hint);
867 bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
868 if (PREDICT_FALSE (bad))
870 CLIB_PREFETCH (src4, 64, LOAD);
871 CLIB_PREFETCH (src5, 64, LOAD);
872 CLIB_PREFETCH (src6, 64, LOAD);
873 CLIB_PREFETCH (src7, 64, LOAD);
875 while (PREDICT_TRUE (copy_len >= 8))
882 src4 = map_guest_mem (vui, cpy[4].src, map_hint);
883 src5 = map_guest_mem (vui, cpy[5].src, map_hint);
884 src6 = map_guest_mem (vui, cpy[6].src, map_hint);
885 src7 = map_guest_mem (vui, cpy[7].src, map_hint);
886 bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
887 if (PREDICT_FALSE (bad))
890 CLIB_PREFETCH (src4, 64, LOAD);
891 CLIB_PREFETCH (src5, 64, LOAD);
892 CLIB_PREFETCH (src6, 64, LOAD);
893 CLIB_PREFETCH (src7, 64, LOAD);
895 clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
896 clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
897 clib_memcpy_fast ((void *) cpy[2].dst, src2, cpy[2].len);
898 clib_memcpy_fast ((void *) cpy[3].dst, src3, cpy[3].len);
907 if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
909 rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
912 clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
919 static_always_inline u32
920 vhost_user_do_offload (vhost_user_intf_t * vui,
921 vring_packed_desc_t * desc_table, u16 desc_current,
922 u16 mask, vlib_buffer_t * b_head, u32 * map_hint)
924 u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
925 virtio_net_hdr_mrg_rxbuf_t *hdr;
927 u32 desc_data_offset = vui->virtio_net_hdr_sz;
929 hdr = map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
930 if (PREDICT_FALSE (hdr == 0))
931 rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
932 else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
934 if (desc_data_offset == desc_table[desc_current].len)
936 desc_current = (desc_current + 1) & mask;
938 map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
939 if (PREDICT_FALSE (b_data == 0))
940 rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
942 vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
946 b_data = (u8 *) hdr + desc_data_offset;
947 vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
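/*
 * Compute how many vlib buffers a descriptor payload of desc_len bytes
 * needs. The common 2048-byte buffer size is special-cased with shifts:
 * for example, a 5000-byte payload gives 5000 >> 11 = 2 full buffers plus
 * one more for the 904-byte remainder, i.e. 3 buffers. Other buffer sizes
 * fall back to an integer division.
 */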
954 static_always_inline u32
955 vhost_user_compute_buffers_required (u32 desc_len, u32 buffer_data_size)
958 u32 buffers_required;
960 if (PREDICT_TRUE (buffer_data_size == 2048))
962 buffers_required = desc_len >> 11;
963 if ((desc_len & 2047) != 0)
965 return (buffers_required);
968 if (desc_len < buffer_data_size)
971 result = div (desc_len, buffer_data_size);
973 buffers_required = result.quot + 1;
975 buffers_required = result.quot;
977 return (buffers_required);
980 static_always_inline u32
981 vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
982 vhost_user_vring_t * txvq,
983 u32 buffer_data_size, u16 desc_current,
986 vring_packed_desc_t *desc_table = txvq->packed_desc;
988 u16 desc_data_offset = vui->virtio_net_hdr_sz;
989 u16 desc_idx = desc_current;
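/* Each packed descriptor is 16 bytes, so len >> 4 is the number of
 * descriptors in the indirect table. */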
992 n_descs = desc_table[desc_idx].len >> 4;
993 desc_table = map_guest_mem (vui, desc_table[desc_idx].addr, map_hint);
994 if (PREDICT_FALSE (desc_table == 0))
997 for (desc_idx = 0; desc_idx < n_descs; desc_idx++)
998 desc_len += desc_table[desc_idx].len;
1000 if (PREDICT_TRUE (desc_len > desc_data_offset))
1001 desc_len -= desc_data_offset;
1003 return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
1006 static_always_inline u32
1007 vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
1008 vhost_user_vring_t * txvq,
1009 u32 buffer_data_size, u16 * current,
1012 vring_packed_desc_t *desc_table = txvq->packed_desc;
1014 u16 mask = txvq->qsz_mask;
1016 while (desc_table[*current].flags & VRING_DESC_F_NEXT)
1018 desc_len += desc_table[*current].len;
1020 *current = (*current + 1) & mask;
1021 vhost_user_advance_last_avail_idx (txvq);
1023 desc_len += desc_table[*current].len;
1025 *current = (*current + 1) & mask;
1026 vhost_user_advance_last_avail_idx (txvq);
1028 if (PREDICT_TRUE (desc_len > vui->virtio_net_hdr_sz))
1029 desc_len -= vui->virtio_net_hdr_sz;
1031 return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
1034 static_always_inline void
1035 vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
1036 u16 * desc_idx, vlib_buffer_t * b_head,
1037 vlib_buffer_t ** b_current, u32 ** next,
1038 vlib_buffer_t *** b, u32 * bi_current,
1039 vhost_cpu_t * cpu, u16 * copy_len,
1040 u32 * buffers_used, u32 buffers_required,
1041 u32 * desc_data_offset, u32 buffer_data_size,
1046 while (*desc_data_offset < desc_table[*desc_idx].len)
1048 /* Get more output if necessary. Or end of packet. */
1049 if (PREDICT_FALSE ((*b_current)->current_length == buffer_data_size))
1051 /* Get next output */
1052 u32 bi_next = **next;
1054 (*b_current)->next_buffer = bi_next;
1055 (*b_current)->flags |= VLIB_BUFFER_NEXT_PRESENT;
1056 *bi_current = bi_next;
1060 ASSERT (*buffers_used <= buffers_required);
1063 /* Prepare a copy order executed later for the data */
1064 ASSERT (*copy_len < VHOST_USER_COPY_ARRAY_N);
1065 vhost_copy_t *cpy = &cpu->copy[*copy_len];
1067 desc_data_l = desc_table[*desc_idx].len - *desc_data_offset;
1068 cpy->len = buffer_data_size - (*b_current)->current_length;
1069 cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
1070 cpy->dst = (uword) (vlib_buffer_get_current (*b_current) +
1071 (*b_current)->current_length);
1072 cpy->src = desc_table[*desc_idx].addr + *desc_data_offset;
1074 *desc_data_offset += cpy->len;
1076 (*b_current)->current_length += cpy->len;
1077 b_head->total_length_not_including_first_buffer += cpy->len;
1079 *desc_idx = (*desc_idx + 1) & mask;
1080 *desc_data_offset = 0;
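/*
 * Packed-ring input path. Unlike the split-ring path above, it first walks
 * the available descriptors to compute how many vlib buffers the whole
 * burst needs, allocates them in a single call, then assembles the packets,
 * marks the descriptors as consumed, and finally frees any buffers that
 * were over-allocated.
 */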
1083 static_always_inline u32
1084 vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
1085 vhost_user_intf_t * vui, u16 qid,
1086 vlib_node_runtime_t * node,
1087 vnet_hw_if_rx_mode mode, u8 enable_csum)
1089 vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
1090 vnet_feature_main_t *fm = &feature_main;
1091 u8 feature_arc_idx = fm->device_input_feature_arc_index;
1092 u16 n_rx_packets = 0;
1095 u32 buffers_required = 0;
1096 u32 n_left_to_next, *to_next;
1097 u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
1098 u32 n_trace = vlib_get_trace_count (vm, node);
1099 u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
1101 vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
1103 u32 current_config_index = ~0;
1104 u16 mask = txvq->qsz_mask;
1105 u16 desc_current, desc_head, last_used_idx;
1106 vring_packed_desc_t *desc_table = 0;
1107 u32 n_descs_processed = 0;
1111 u32 buffers_used = 0;
1112 u16 current, n_descs_to_process;
1114 /* The descriptor table is not ready yet */
1115 if (PREDICT_FALSE (txvq->packed_desc == 0))
1118 /* do we have pending interrupts ? */
1119 vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
1120 vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);
1123 * Adaptive mode is optimized to reduce interrupts.
1124 * If the scheduler switches the input node to polling due
1125 * to a burst of traffic, we tell the driver not to interrupt.
1126 * When the traffic subsides and the scheduler switches the node back to
1127 * interrupt mode, we tell the driver that we want interrupts again.
1129 if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
1132 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
1134 VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
1135 /* Tell driver we want notification */
1136 txvq->used_event->flags = 0;
1138 /* Tell driver we don't want notification */
1139 txvq->used_event->flags = VRING_EVENT_F_DISABLE;
1142 last_used_idx = txvq->last_used_idx & mask;
1143 desc_head = desc_current = last_used_idx;
1145 if (vhost_user_packed_desc_available (txvq, desc_current) == 0)
1148 if (PREDICT_FALSE (!vui->admin_up || !vui->is_ready || !(txvq->enabled)))
1151 * Discard input packets if the interface is admin down, not ready, or the
1152 * vring is not enabled.
1153 * "For example, for a networking device, in the disabled state
1154 * client must not supply any new RX packets, but must process
1155 * and discard any TX packets."
1157 rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq,
1158 VHOST_USER_DOWN_DISCARD_COUNT);
1159 vlib_error_count (vm, vhost_user_input_node.index,
1160 VHOST_USER_INPUT_FUNC_ERROR_NOT_READY, rv);
1164 vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
1165 &next_index, &to_next, &n_left_to_next);
1168 * Compute n_left and total buffers needed
1170 desc_table = txvq->packed_desc;
1171 current = desc_current;
1172 while (vhost_user_packed_desc_available (txvq, current) &&
1173 (n_left < VLIB_FRAME_SIZE))
1175 if (desc_table[current].flags & VRING_DESC_F_INDIRECT)
1178 vhost_user_compute_indirect_desc_len (vui, txvq, buffer_data_size,
1179 current, &map_hint);
1181 current = (current + 1) & mask;
1182 vhost_user_advance_last_avail_idx (txvq);
1187 vhost_user_compute_chained_desc_len (vui, txvq, buffer_data_size,
1192 /* Something is broken if we need more than 10000 buffers */
1193 if (PREDICT_FALSE ((buffers_required == 0) || (buffers_required > 10000)))
1195 rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
1196 vlib_error_count (vm, vhost_user_input_node.index,
1197 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
1201 vec_validate (cpu->to_next_list, buffers_required);
1202 rv = vlib_buffer_alloc (vm, cpu->to_next_list, buffers_required);
1203 if (PREDICT_FALSE (rv != buffers_required))
1205 vlib_buffer_free (vm, cpu->to_next_list, rv);
1206 rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
1207 vlib_error_count (vm, vhost_user_input_node.index,
1208 VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
1212 next = cpu->to_next_list;
1213 vec_validate (cpu->rx_buffers_pdesc, buffers_required);
1214 vlib_get_buffers (vm, next, cpu->rx_buffers_pdesc, buffers_required);
1215 b = cpu->rx_buffers_pdesc;
1216 n_descs_processed = n_left;
1220 vlib_buffer_t *b_head, *b_current;
1222 u32 desc_data_offset;
1223 u16 desc_idx = desc_current;
1226 desc_table = txvq->packed_desc;
1227 to_next[0] = bi_current = next[0];
1228 b_head = b_current = b[0];
1231 ASSERT (buffers_used <= buffers_required);
1236 /* The buffer should already be initialized */
1237 b_head->total_length_not_including_first_buffer = 0;
1238 b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1239 desc_data_offset = vui->virtio_net_hdr_sz;
1240 n_descs_to_process = 1;
1242 if (desc_table[desc_idx].flags & VRING_DESC_F_INDIRECT)
1244 n_descs = desc_table[desc_idx].len >> 4;
1245 desc_table = map_guest_mem (vui, desc_table[desc_idx].addr,
1248 if (PREDICT_FALSE (desc_table == 0) ||
1251 (vhost_user_do_offload
1252 (vui, desc_table, desc_idx, mask, b_head,
1253 &map_hint) != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))))
1255 vlib_error_count (vm, node->node_index,
1256 VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
1266 vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
1267 &b_current, &next, &b, &bi_current,
1268 cpu, &copy_len, &buffers_used,
1269 buffers_required, &desc_data_offset,
1270 buffer_data_size, mask);
1278 rv = vhost_user_do_offload (vui, desc_table, desc_idx, mask,
1280 if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
1282 vlib_error_count (vm, node->node_index, rv, 1);
1292 * For chained descriptors, we process the whole chain in a single while
1293 * loop, so count how many descriptors are in the chain.
1295 n_descs_to_process = 1;
1296 while (desc_table[desc_idx].flags & VRING_DESC_F_NEXT)
1298 vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
1299 &b_current, &next, &b, &bi_current,
1300 cpu, &copy_len, &buffers_used,
1301 buffers_required, &desc_data_offset,
1302 buffer_data_size, mask);
1303 n_descs_to_process++;
1305 vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
1306 &b_current, &next, &b, &bi_current,
1307 cpu, &copy_len, &buffers_used,
1308 buffers_required, &desc_data_offset,
1309 buffer_data_size, mask);
1312 n_rx_bytes += b_head->total_length_not_including_first_buffer;
1315 b_head->total_length_not_including_first_buffer -=
1316 b_head->current_length;
1318 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
1320 vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
1321 vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
1324 if (current_config_index != ~0)
1326 b_head->current_config_index = current_config_index;
1327 vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
1331 ASSERT (n_left >= n_descs_to_process);
1332 n_left -= n_descs_to_process;
1334 /* advance to the next descriptor */
1335 desc_current = (desc_current + n_descs_to_process) & mask;
1338 * Although it is beneficial to separate memory copies from virtio ring
1339 * parsing, we perform the pending copies from time to time in order to
1340 * free some space in the ring.
1342 if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
1344 rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len,
1346 if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
1347 vlib_error_count (vm, node->node_index, rv, 1);
1351 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1353 /* Do the memory copies */
1354 rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len, &map_hint);
1355 if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
1356 vlib_error_count (vm, node->node_index, rv, 1);
1358 /* Must do the tracing before giving buffers back to driver */
1359 if (PREDICT_FALSE (n_trace))
1361 u32 left = n_rx_packets;
1363 b = cpu->rx_buffers_pdesc;
1364 while (n_trace && left)
1368 (vm, node, next_index, b[0], /* follow_chain */ 0)))
1371 t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
1372 vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
1373 last_used_idx = (last_used_idx + 1) & mask;
1375 vlib_set_trace_count (vm, node, n_trace);
1383 * Give buffers back to driver.
1385 vhost_user_mark_desc_consumed (vui, txvq, desc_head, n_descs_processed);
1387 /* interrupt (call) handling */
1388 if ((txvq->callfd_idx != ~0) &&
1389 (txvq->avail_event->flags != VRING_EVENT_F_DISABLE))
1391 txvq->n_since_last_int += n_rx_packets;
1392 if (txvq->n_since_last_int > vum->coalesce_frames)
1393 vhost_user_send_call (vm, vui, txvq);
1396 /* increase rx counters */
1397 vlib_increment_combined_counter
1398 (vnet_main.interface_main.combined_sw_if_counters
1399 + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
1400 n_rx_packets, n_rx_bytes);
1402 vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);
1404 if (PREDICT_FALSE (buffers_used < buffers_required))
1405 vlib_buffer_free (vm, next, buffers_required - buffers_used);
1408 return n_rx_packets;
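/*
 * Node dispatch function: for every interface/queue pair assigned to this
 * thread (and, in interrupt mode, only when an interrupt is pending), it
 * selects the packed-ring or split-ring input routine and passes
 * enable_csum according to whether VIRTIO_NET_F_CSUM was negotiated, so
 * the checksum-offload parsing can be optimized out of the non-csum path.
 */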
1411 VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
1412 vlib_node_runtime_t * node,
1413 vlib_frame_t * frame)
1415 vhost_user_main_t *vum = &vhost_user_main;
1416 uword n_rx_packets = 0;
1417 vhost_user_intf_t *vui;
1418 vnet_device_input_runtime_t *rt =
1419 (vnet_device_input_runtime_t *) node->runtime_data;
1420 vnet_device_and_queue_t *dq;
1422 vec_foreach (dq, rt->devices_and_queues)
1424 if ((node->state == VLIB_NODE_STATE_POLLING) ||
1425 clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
1428 pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
1429 if (vhost_user_is_packed_ring_supported (vui))
1431 if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
1432 n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
1436 n_rx_packets += vhost_user_if_input_packed (vm, vum, vui,
1442 if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
1443 n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
1446 n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id,
1452 return n_rx_packets;
1456 VLIB_REGISTER_NODE (vhost_user_input_node) = {
1457 .type = VLIB_NODE_TYPE_INPUT,
1458 .name = "vhost-user-input",
1459 .sibling_of = "device-input",
1460 .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
1462 /* Will be enabled if/when hardware is detected. */
1463 .state = VLIB_NODE_STATE_DISABLED,
1465 .format_buffer = format_ethernet_header_with_length,
1466 .format_trace = format_vhost_trace,
1468 .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
1469 .error_strings = vhost_user_input_func_error_strings,
1474 * fd.io coding-style-patch-verification: ON
1477 * eval: (c-set-style "gnu")