/*
 *------------------------------------------------------------------
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <fcntl.h>		/* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>		/* for iovec */
#include <netinet/in.h>

#include <linux/if_arp.h>
#include <linux/if_tun.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>

#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/interface/rx_queue_funcs.h>

#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>

#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
/*
 * When an RX queue is down but active, received packets
 * must be discarded. This value controls up to how many
 * packets will be discarded during each round.
 */
#define VHOST_USER_DOWN_DISCARD_COUNT 256

/*
 * When the number of available buffers gets under this threshold,
 * the RX node will start discarding packets.
 */
#define VHOST_USER_RX_BUFFER_STARVATION 32

/*
 * On the receive side, the host should free descriptors as soon
 * as possible in order to avoid TX drops in the VM.
 * This value controls the number of copy operations that are stacked
 * before the copies are executed and the descriptors are given back
 * to the guest.
 * The value 64 was obtained by testing (48 and 128 were not as good).
 */
#define VHOST_USER_RX_COPY_THRESHOLD 64
extern vlib_node_registration_t vhost_user_input_node;

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error") \
  _(NO_BUFFER, "no available buffer") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(NOT_READY, "vhost interface not ready or down") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;
static __clib_unused char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};
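
/*
 * Record per-packet trace metadata for the split ring: which descriptor
 * layout the packet used (single, chained, or indirect) and a copy of
 * the virtio net header when it can be mapped from guest memory.
 */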
static_always_inline void
vhost_user_rx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * txvq,
		     u16 last_avail_idx)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
  vnet_virtio_vring_desc_t *hdr_desc = 0;
  vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->desc[desc_current];
  if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* The header is the first descriptor of the indirect table */
      hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
    }
  if (txvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;

  if (!(txvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
    }
}
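
/*
 * Execute the copy orders accumulated during ring parsing. Guest
 * addresses are translated with map_guest_mem(); the next two sources
 * are mapped and prefetched while the previous two are being copied.
 * Returns non-zero if a guest mapping failed.
 */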
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
	return 1;

      while (PREDICT_TRUE (copy_len >= 4))
	{
	  src0 = src2;
	  src1 = src3;

	  if (PREDICT_FALSE
	      (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
	    return 1;

	  clib_prefetch_load (src2);
	  clib_prefetch_load (src3);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	return 1;
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
/**
 * Try to discard packets from the tx ring (VPP RX path).
 * Returns the number of discarded packets.
 */
static_always_inline u32
vhost_user_rx_discard_packet (vlib_main_t * vm,
			      vhost_user_intf_t * vui,
			      vhost_user_vring_t * txvq, u32 discard_max)
{
  /*
   * On the RX side, each packet corresponds to one descriptor
   * (the same holds for simple, chained, and indirect descriptors).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  u32 discarded_packets = 0;
  u32 avail_idx = txvq->avail->idx;
  u16 mask = txvq->qsz_mask;
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
	break;

      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      last_avail_idx++;
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);
      last_used_idx++;
      discarded_packets++;
    }

  txvq->last_avail_idx = last_avail_idx;
  txvq->last_used_idx = last_used_idx;
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);
  return discarded_packets;
}
/*
 * In case of overflow, we need to rewind the array of allocated buffers.
 */
static_always_inline void
vhost_user_input_rewind_buffers (vlib_main_t * vm,
				 vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}
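
/*
 * Translate the virtio net header of a received packet into vlib buffer
 * metadata: l2/l3/l4 header offsets, checksum offload flags and, when
 * the guest requested GSO, gso_size and gso_l4_hdr_sz. b0_data must
 * point to the start of the ethernet header.
 */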
static_always_inline void
vhost_user_handle_rx_offload (vlib_buffer_t *b0, u8 *b0_data,
			      vnet_virtio_net_hdr_t *hdr)
{
  u8 l4_proto = 0;
  u8 l4_hdr_sz = 0;
  ethernet_header_t *eh = (ethernet_header_t *) b0_data;
  u16 ethertype = clib_net_to_host_u16 (eh->type);
  u16 l2hdr_sz = sizeof (ethernet_header_t);
  vnet_buffer_oflags_t oflags = 0;

  if (ethernet_frame_is_tagged (ethertype))
    {
      ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

      ethertype = clib_net_to_host_u16 (vlan->type);
      l2hdr_sz += sizeof (*vlan);
      if (ethertype == ETHERNET_TYPE_VLAN)
	{
	  vlan++;
	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	}
    }
  vnet_buffer (b0)->l2_hdr_offset = 0;
  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
  vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
  b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		VNET_BUFFER_F_L4_HDR_OFFSET_VALID);

  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
    {
      ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip4->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP4;
      oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
    }
  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
    {
      ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
      l4_proto = ip6->protocol;
      b0->flags |= VNET_BUFFER_F_IS_IP6;
    }

  if (l4_proto == IP_PROTOCOL_TCP)
    {
      tcp_header_t *tcp = (tcp_header_t *)
	(b0_data + vnet_buffer (b0)->l4_hdr_offset);
      l4_hdr_sz = tcp_header_bytes (tcp);
      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
    }
  else if (l4_proto == IP_PROTOCOL_UDP)
    {
      l4_hdr_sz = sizeof (udp_header_t);
      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
    }

  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= VNET_BUFFER_F_GSO;
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
    }
  else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
    {
      vnet_buffer2 (b0)->gso_size = hdr->gso_size;
      vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
      b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
    }

  if (oflags)
    vnet_buffer_offload_flags_set (b0, oflags);
}
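
/*
 * Send a pending interrupt (call) to the guest for either vring when
 * packets are outstanding and the interrupt coalescing deadline has
 * passed.
 */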
static_always_inline void
vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq,
			       vhost_user_vring_t * rxvq)
{
  f64 now = vlib_time_now (vm);

  if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
    vhost_user_send_call (vm, vui, txvq);

  if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
    vhost_user_send_call (vm, vui, rxvq);
}
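
/*
 * Allocate the next frame for this node. When the frame goes straight
 * to ethernet-input (no input feature redirected it), mark it as
 * single-interface so ethernet-input can use its fast path.
 */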
static_always_inline void
vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			      vhost_user_intf_t * vui,
			      u32 * current_config_index, u32 * next_index,
			      u32 ** to_next, u32 * n_left_to_next)
{
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;

  if (PREDICT_FALSE (vnet_have_features (feature_arc_idx, vui->sw_if_index)))
    {
      vnet_feature_config_main_t *cm;
      cm = &fm->feature_config_mains[feature_arc_idx];
      *current_config_index = vec_elt (cm->config_index_by_sw_if_index,
				       vui->sw_if_index);
      vnet_get_config_data (&cm->config_main, current_config_index,
			    next_index, 0);
    }

  vlib_get_new_next_frame (vm, node, *next_index, *to_next, *n_left_to_next);

  if (*next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)
    {
      /* give some hints to ethernet-input */
      vlib_next_frame_t *nf;
      vlib_frame_t *f;
      ethernet_input_frame_t *ef;
      nf = vlib_node_runtime_get_next_frame (vm, node, *next_index);
      f = vlib_get_frame (vm, nf->frame);
      f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

      ef = vlib_frame_scalar_args (f);
      ef->sw_if_index = vui->sw_if_index;
      ef->hw_if_index = vui->hw_if_index;
      vlib_frame_no_append (f);
    }
}
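
/*
 * Input routine for a split (legacy) virtio ring. The main loop walks
 * the guest avail ring, chains vlib buffers as needed, and queues copy
 * orders that are executed in batches of VHOST_USER_RX_COPY_THRESHOLD
 * so that used entries can be returned to the guest early.
 */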
static_always_inline u32
vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
		     vhost_user_intf_t *vui, u16 qid,
		     vlib_node_runtime_t *node, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u32 current_config_index = ~(u32) 0;
  u16 mask = txvq->qsz_mask;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->avail == 0))
    goto done;

  /* do we have pending interrupts ? */
  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
  vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);

  /*
   * For adaptive mode, the node is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupts.
   */
  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used->flags = VRING_USED_F_NO_NOTIFY;
    }

  if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
    goto done;

  n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);

  /* nothing to do */
  if (PREDICT_FALSE (n_left == 0))
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      vhost_user_rx_discard_packet (vm, vui, txvq,
				    VHOST_USER_DOWN_DISCARD_COUNT);
      goto done;
    }

  if (PREDICT_FALSE (n_left == (mask + 1)))
    {
      /*
       * Informational error logging when VPP is not
       * receiving packets fast enough.
       */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
    }

  if (n_left > VLIB_FRAME_SIZE)
    n_left = VLIB_FRAME_SIZE;

  /*
   * For small packets (<2kB), we will not need more than one vlib buffer
   * per packet. In case packets are bigger, we will just yield at some
   * point in the loop and come back later. This is not an issue as for
   * big packets, the processing cost really comes from the memory copy.
   * The assumption is that big packets will fit in 40 buffers.
   */
  if (PREDICT_FALSE (cpu->rx_buffers_len < n_left + 1 ||
		     cpu->rx_buffers_len < 40))
    {
      u32 curr_len = cpu->rx_buffers_len;
      cpu->rx_buffers_len +=
	vlib_buffer_alloc (vm, cpu->rx_buffers + curr_len,
			   VHOST_USER_RX_BUFFERS_N - curr_len);

      if (PREDICT_FALSE
	  (cpu->rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION))
	{
	  /* In case of buffer starvation, discard some packets from the
	   * queue and log the event.
	   * We keep doing best effort for the remaining packets. */
	  u32 flush = (n_left + 1 > cpu->rx_buffers_len) ?
	    n_left + 1 - cpu->rx_buffers_len : 1;
	  flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);

	  n_left -= flush;
	  vlib_increment_simple_counter (vnet_main.
					 interface_main.sw_if_counters +
					 VNET_INTERFACE_COUNTER_DROP,
					 vm->thread_index, vui->sw_if_index,
					 flush);

	  vlib_error_count (vm, vhost_user_input_node.index,
			    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
	}
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);
  u16 last_avail_idx = txvq->last_avail_idx;
  u16 last_used_idx = txvq->last_used_idx;

  while (n_left > 0)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u16 desc_current;
      u32 desc_data_offset;
      vnet_virtio_vring_desc_t *desc_table = txvq->desc;

      if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
	{
	  /* Not enough rx_buffers
	   * Note: We yield on 1 so we don't need to do an additional
	   * check for the next buffer prefetch.
	   */
	  n_left = 0;
	  break;
	}

      desc_current = txvq->avail->ring[last_avail_idx & mask];
      cpu->rx_buffers_len--;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_head = b_current = vlib_get_buffer (vm, bi_current);
      to_next[0] = bi_current;	// We do that now so we can forget about bi_current
      to_next++;
      n_left_to_next--;

      vlib_prefetch_buffer_with_index
	(vm, cpu->rx_buffers[cpu->rx_buffers_len - 1], LOAD);

      /* Just preset the used descriptor id and length for later */
      txvq->used->ring[last_used_idx & mask].id = desc_current;
      txvq->used->ring[last_used_idx & mask].len = 0;
      vhost_user_log_dirty_ring (vui, txvq, ring[last_used_idx & mask]);

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_FALSE
	  (n_trace > 0 && vlib_trace_buffer (vm, node, next_index, b_head,
					     /* follow_chain */ 0)))
	{
	  vhost_trace_t *t0 =
	    vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
	  vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
	  n_trace--;
	  vlib_set_trace_count (vm, node, n_trace);
	}

      /* This depends on the setup but is very consistent,
       * so the CPU branch predictor should do a pretty good job
       * at optimizing the decision. */
      if (txvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
	{
	  desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
				      &map_hint);
	  desc_current = 0;
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	}

      desc_data_offset = vui->virtio_net_hdr_sz;

      if (enable_csum)
	{
	  vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
	  u8 *b_data;
	  u16 current;

	  hdr = map_guest_mem (vui, desc_table[desc_current].addr, &map_hint);
	  if (PREDICT_FALSE (hdr == 0))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      goto out;
	    }
	  if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
	    {
	      if ((desc_data_offset == desc_table[desc_current].len) &&
		  (desc_table[desc_current].flags & VRING_DESC_F_NEXT))
		{
		  current = desc_table[desc_current].next;
		  b_data = map_guest_mem (vui, desc_table[current].addr,
					  &map_hint);
		  if (PREDICT_FALSE (b_data == 0))
		    {
		      vlib_error_count (vm, node->node_index,
					VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL,
					1);
		      goto out;
		    }
		}
	      else
		b_data = (u8 *) hdr + desc_data_offset;

	      vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	    }
	}

      while (1)
	{
	  /* Get more input if necessary. Or end of packet. */
	  if (desc_data_offset == desc_table[desc_current].len)
	    {
	      if (PREDICT_FALSE (desc_table[desc_current].flags &
				 VRING_DESC_F_NEXT))
		{
		  desc_current = desc_table[desc_current].next;
		  desc_data_offset = 0;
		}
	      else
		goto out;
	    }

	  /* Get more output if necessary. Or end of packet. */
	  if (PREDICT_FALSE (b_current->current_length == buffer_data_size))
	    {
	      if (PREDICT_FALSE (cpu->rx_buffers_len == 0))
		{
		  /* Cancel speculation */
		  to_next--;
		  n_left_to_next++;

		  /*
		   * Checking if there are some left buffers.
		   * If not, just rewind the used buffers and stop.
		   * Note: Scheduled copies are not cancelled. This is
		   * not an issue as they would still be valid. Useless,
		   * but valid.
		   */
		  vhost_user_input_rewind_buffers (vm, cpu, b_head);
		  goto stop;
		}

	      /* Get next output */
	      cpu->rx_buffers_len--;
	      u32 bi_next = cpu->rx_buffers[cpu->rx_buffers_len];
	      b_current->next_buffer = bi_next;
	      b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
	      bi_current = bi_next;
	      b_current = vlib_get_buffer (vm, bi_current);
	    }

	  /* Prepare a copy order executed later for the data */
	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
	  cpy->len = buffer_data_size - b_current->current_length;
	  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
	  cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
			      b_current->current_length);
	  cpy->src = desc_table[desc_current].addr + desc_data_offset;

	  desc_data_offset += cpy->len;

	  b_current->current_length += cpy->len;
	  b_head->total_length_not_including_first_buffer += cpy->len;
	}

    out:

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      /* consume the descriptor and return it as used */
      last_avail_idx++;
      last_used_idx++;

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
      b_head->error = 0;

      if (current_config_index != ~(u32) 0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

      n_left--;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
						    copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_STORE_BARRIER ();
	  txvq->used->idx = last_used_idx;
	  vhost_user_log_dirty_ring (vui, txvq, idx);
	}
    }

stop:
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  txvq->last_used_idx = last_used_idx;
  txvq->last_avail_idx = last_avail_idx;

  /* Do the memory copies */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy, copy_len,
					    &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }

  /* give buffers back to driver */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, txvq, idx);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      txvq->n_since_last_int += n_rx_packets;

      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

done:
  return n_rx_packets;
}
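
/*
 * Packed-ring support starts here. Mark n_descs_processed descriptors
 * beginning at desc_head as used by flipping the AVAIL/USED flag bits
 * according to the current used wrap counter.
 */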
static_always_inline void
vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
			       vhost_user_vring_t * txvq, u16 desc_head,
			       u16 n_descs_processed)
{
  vnet_virtio_vring_packed_desc_t *desc_table = txvq->packed_desc;
  u16 desc_idx;
  u16 mask = txvq->qsz_mask;

  for (desc_idx = 0; desc_idx < n_descs_processed; desc_idx++)
    {
      if (txvq->used_wrap_counter)
	desc_table[(desc_head + desc_idx) & mask].flags |=
	  (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      else
	desc_table[(desc_head + desc_idx) & mask].flags &=
	  ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      vhost_user_advance_last_used_idx (txvq);
    }
}
static_always_inline void
vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vhost_user_vring_t * txvq,
			    u16 desc_current)
{
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_virtio_vring_packed_desc_t *hdr_desc;
  vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &txvq->packed_desc[desc_current];
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* The header is the first descriptor of the indirect table */
      hdr_desc = map_guest_mem (vui, txvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;

  if (!(txvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(txvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;

  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
    t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
  else
    {
      u32 len = vui->virtio_net_hdr_sz;
      clib_memcpy_fast (&t->hdr, hdr,
			len > hdr_desc->len ? hdr_desc->len : len);
    }
}
static_always_inline u32
vhost_user_rx_discard_packet_packed (vlib_main_t * vm,
				     vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 discard_max)
{
  u32 discarded_packets = 0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head;

  desc_head = desc_current = txvq->last_used_idx & mask;

  /*
   * On the RX side, each packet corresponds to one descriptor
   * (the same holds for simple, chained, and indirect descriptors).
   * Therefore, discarding a packet is like discarding a descriptor.
   */
  while ((discarded_packets != discard_max) &&
	 vhost_user_packed_desc_available (txvq, desc_current))
    {
      vhost_user_advance_last_avail_idx (txvq);
      discarded_packets++;
      desc_current = (desc_current + 1) & mask;
    }

  if (PREDICT_TRUE (discarded_packets))
    vhost_user_mark_desc_consumed (vui, txvq, desc_head, discarded_packets);
  return (discarded_packets);
}
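
/*
 * Packed-ring version of the batched copy routine: four sources are
 * mapped and prefetched ahead of the four being copied. On a failed
 * mapping it drops to the one-by-one loop, which reports MMAP_FAIL.
 */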
static_always_inline u32
vhost_user_input_copy_packed (vhost_user_intf_t * vui, vhost_copy_t * cpy,
			      u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3, *src4, *src5, *src6, *src7;
  u16 bad;
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;

  if (PREDICT_TRUE (copy_len >= 8))
    {
      src4 = map_guest_mem (vui, cpy[0].src, map_hint);
      src5 = map_guest_mem (vui, cpy[1].src, map_hint);
      src6 = map_guest_mem (vui, cpy[2].src, map_hint);
      src7 = map_guest_mem (vui, cpy[3].src, map_hint);
      bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
      if (PREDICT_FALSE (bad))
	goto one_by_one;
      clib_prefetch_load (src4);
      clib_prefetch_load (src5);
      clib_prefetch_load (src6);
      clib_prefetch_load (src7);

      while (PREDICT_TRUE (copy_len >= 8))
	{
	  src0 = src4;
	  src1 = src5;
	  src2 = src6;
	  src3 = src7;

	  src4 = map_guest_mem (vui, cpy[4].src, map_hint);
	  src5 = map_guest_mem (vui, cpy[5].src, map_hint);
	  src6 = map_guest_mem (vui, cpy[6].src, map_hint);
	  src7 = map_guest_mem (vui, cpy[7].src, map_hint);
	  bad = (src4 == 0) + (src5 == 0) + (src6 == 0) + (src7 == 0);
	  if (PREDICT_FALSE (bad))
	    break;

	  clib_prefetch_load (src4);
	  clib_prefetch_load (src5);
	  clib_prefetch_load (src6);
	  clib_prefetch_load (src7);

	  clib_memcpy_fast ((void *) cpy[0].dst, src0, cpy[0].len);
	  clib_memcpy_fast ((void *) cpy[1].dst, src1, cpy[1].len);
	  clib_memcpy_fast ((void *) cpy[2].dst, src2, cpy[2].len);
	  clib_memcpy_fast ((void *) cpy[3].dst, src3, cpy[3].len);
	  copy_len -= 4;
	  cpy += 4;
	}
    }

one_by_one:
  while (copy_len)
    {
      if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
	{
	  rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  break;
	}
      clib_memcpy_fast ((void *) cpy->dst, src0, cpy->len);
      copy_len -= 1;
      cpy += 1;
    }
  return rc;
}
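
/*
 * Locate the packet data that follows the virtio net header and, if the
 * guest requested checksum offload, apply it to the buffer metadata.
 * Handles the case where the header occupies a descriptor of its own.
 */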
static_always_inline u32
vhost_user_do_offload (vhost_user_intf_t *vui,
		       vnet_virtio_vring_packed_desc_t *desc_table,
		       u16 desc_current, u16 mask, vlib_buffer_t *b_head,
		       u32 *map_hint)
{
  u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
  vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
  u8 *b_data;
  u32 desc_data_offset = vui->virtio_net_hdr_sz;

  hdr = map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
  if (PREDICT_FALSE (hdr == 0))
    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
  else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      if (desc_data_offset == desc_table[desc_current].len)
	{
	  desc_current = (desc_current + 1) & mask;
	  b_data =
	    map_guest_mem (vui, desc_table[desc_current].addr, map_hint);
	  if (PREDICT_FALSE (b_data == 0))
	    rc = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
	  else
	    vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
      else
	{
	  b_data = (u8 *) hdr + desc_data_offset;
	  vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
	}
    }

  return rc;
}
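
/*
 * Number of vlib buffers needed to hold desc_len bytes. The common
 * 2048-byte buffer size is computed with shifts: e.g. desc_len = 5000
 * gives (5000 >> 11) = 2 full buffers plus one for the 904 remaining
 * bytes, i.e. 3 buffers. Other sizes fall back to div().
 */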
static_always_inline u32
vhost_user_compute_buffers_required (u32 desc_len, u32 buffer_data_size)
{
  div_t result;
  u32 buffers_required;

  if (PREDICT_TRUE (buffer_data_size == 2048))
    {
      buffers_required = desc_len >> 11;
      if ((desc_len & 2047) != 0)
	buffers_required++;
      return (buffers_required);
    }

  if (desc_len < buffer_data_size)
    return 1;

  result = div (desc_len, buffer_data_size);
  if (result.rem)
    buffers_required = result.quot + 1;
  else
    buffers_required = result.quot;

  return (buffers_required);
}
static_always_inline u32
vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
				      vhost_user_vring_t * txvq,
				      u32 buffer_data_size, u16 desc_current,
				      u32 * map_hint)
{
  vnet_virtio_vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 desc_data_offset = vui->virtio_net_hdr_sz;
  u16 desc_idx = desc_current;
  u32 n_descs;

  n_descs = desc_table[desc_idx].len >> 4;
  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr, map_hint);
  if (PREDICT_FALSE (desc_table == 0))
    return 0;

  for (desc_idx = 0; desc_idx < n_descs; desc_idx++)
    desc_len += desc_table[desc_idx].len;

  if (PREDICT_TRUE (desc_len > desc_data_offset))
    desc_len -= desc_data_offset;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}
static_always_inline u32
vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
				     vhost_user_vring_t * txvq,
				     u32 buffer_data_size, u16 * current,
				     u16 * n_left)
{
  vnet_virtio_vring_packed_desc_t *desc_table = txvq->packed_desc;
  u32 desc_len = 0;
  u16 mask = txvq->qsz_mask;

  while (desc_table[*current].flags & VRING_DESC_F_NEXT)
    {
      desc_len += desc_table[*current].len;
      (*n_left)++;
      *current = (*current + 1) & mask;
      vhost_user_advance_last_avail_idx (txvq);
    }
  desc_len += desc_table[*current].len;
  (*n_left)++;
  *current = (*current + 1) & mask;
  vhost_user_advance_last_avail_idx (txvq);

  if (PREDICT_TRUE (desc_len > vui->virtio_net_hdr_sz))
    desc_len -= vui->virtio_net_hdr_sz;

  return vhost_user_compute_buffers_required (desc_len, buffer_data_size);
}
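
/*
 * Copy-order builder shared by the indirect and chained paths of the
 * packed-ring input routine: consume one descriptor, chaining in fresh
 * vlib buffers as the current one fills up, then advance *desc_idx.
 */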
static_always_inline void
vhost_user_assemble_packet (vnet_virtio_vring_packed_desc_t *desc_table,
			    u16 *desc_idx, vlib_buffer_t *b_head,
			    vlib_buffer_t **b_current, u32 **next,
			    vlib_buffer_t ***b, u32 *bi_current,
			    vhost_cpu_t *cpu, u16 *copy_len, u32 *buffers_used,
			    u32 buffers_required, u32 *desc_data_offset,
			    u32 buffer_data_size, u16 mask)
{
  u32 desc_data_l;

  while (*desc_data_offset < desc_table[*desc_idx].len)
    {
      /* Get more output if necessary. Or end of packet. */
      if (PREDICT_FALSE ((*b_current)->current_length == buffer_data_size))
	{
	  /* Get next output */
	  u32 bi_next = **next;
	  (*next)++;
	  (*b_current)->next_buffer = bi_next;
	  (*b_current)->flags |= VLIB_BUFFER_NEXT_PRESENT;
	  *bi_current = bi_next;
	  *b_current = **b;
	  (*b)++;
	  (*buffers_used)++;
	  ASSERT (*buffers_used <= buffers_required);
	}

      /* Prepare a copy order executed later for the data */
      ASSERT (*copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[*copy_len];
      (*copy_len)++;
      desc_data_l = desc_table[*desc_idx].len - *desc_data_offset;
      cpy->len = buffer_data_size - (*b_current)->current_length;
      cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
      cpy->dst = (uword) (vlib_buffer_get_current (*b_current) +
			  (*b_current)->current_length);
      cpy->src = desc_table[*desc_idx].addr + *desc_data_offset;

      *desc_data_offset += cpy->len;

      (*b_current)->current_length += cpy->len;
      b_head->total_length_not_including_first_buffer += cpy->len;
    }
  *desc_idx = (*desc_idx + 1) & mask;
  *desc_data_offset = 0;
}
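
/*
 * Input routine for a packed virtio ring. Unlike the split-ring path,
 * the descriptor and buffer counts are computed up front so all buffers
 * can be allocated in a single call; any excess is freed at the end.
 */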
static_always_inline u32
vhost_user_if_input_packed (vlib_main_t *vm, vhost_user_main_t *vum,
			    vhost_user_intf_t *vui, u16 qid,
			    vlib_node_runtime_t *node, u8 enable_csum)
{
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vnet_feature_main_t *fm = &feature_main;
  u8 feature_arc_idx = fm->device_input_feature_arc_index;
  u16 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u16 n_left = 0;
  u32 buffers_required = 0;
  u32 n_left_to_next, *to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  u32 n_trace = vlib_get_trace_count (vm, node);
  u32 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 map_hint = 0;
  vhost_cpu_t *cpu = &vum->cpus[vm->thread_index];
  u16 copy_len = 0;
  u32 current_config_index = ~0;
  u16 mask = txvq->qsz_mask;
  u16 desc_current, desc_head, last_used_idx;
  vnet_virtio_vring_packed_desc_t *desc_table = 0;
  u32 n_descs_processed = 0;
  u32 rv;
  vlib_buffer_t **b;
  u32 *next;
  u32 buffers_used = 0;
  u16 current, n_descs_to_process;

  /* The descriptor table is not ready yet */
  if (PREDICT_FALSE (txvq->packed_desc == 0))
    goto done;

  /* do we have pending interrupts ? */
  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
  vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);

  /*
   * For adaptive mode, the node is optimized to reduce interrupts.
   * If the scheduler switches the input node to polling due
   * to a burst of traffic, we tell the driver no interrupt.
   * When the traffic subsides, the scheduler switches the node back to
   * interrupt mode. We must tell the driver we want interrupts.
   */
  if (PREDICT_FALSE (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if ((node->flags &
	   VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
	  !(node->flags &
	    VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
	/* Tell driver we want notification */
	txvq->used_event->flags = 0;
      else
	/* Tell driver we don't want notification */
	txvq->used_event->flags = VRING_EVENT_F_DISABLE;
    }

  last_used_idx = txvq->last_used_idx & mask;
  desc_head = desc_current = last_used_idx;

  if (vhost_user_packed_desc_available (txvq, desc_current) == 0)
    goto done;

  if (PREDICT_FALSE (!vui->admin_up || !vui->is_ready || !(txvq->enabled)))
    {
      /*
       * Discard input packet if interface is admin down or vring is not
       * enabled.
       * "For example, for a networking device, in the disabled state
       * client must not supply any new RX packets, but must process
       * and discard any TX packets."
       */
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq,
						VHOST_USER_DOWN_DISCARD_COUNT);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NOT_READY, rv);
      goto done;
    }

  vhost_user_input_setup_frame (vm, node, vui, &current_config_index,
				&next_index, &to_next, &n_left_to_next);
  /*
   * Compute n_left and total buffers needed
   */
  desc_table = txvq->packed_desc;
  current = desc_current;
  while (vhost_user_packed_desc_available (txvq, current) &&
	 (n_left < VLIB_FRAME_SIZE))
    {
      if (desc_table[current].flags & VRING_DESC_F_INDIRECT)
	{
	  buffers_required +=
	    vhost_user_compute_indirect_desc_len (vui, txvq, buffer_data_size,
						  current, &map_hint);
	  n_left++;
	  current = (current + 1) & mask;
	  vhost_user_advance_last_avail_idx (txvq);
	}
      else
	{
	  buffers_required +=
	    vhost_user_compute_chained_desc_len (vui, txvq, buffer_data_size,
						 &current, &n_left);
	}
    }

  /* Something is broken if we need more than 10000 buffers */
  if (PREDICT_FALSE ((buffers_required == 0) || (buffers_required > 10000)))
    {
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  vec_validate (cpu->to_next_list, buffers_required);
  rv = vlib_buffer_alloc (vm, cpu->to_next_list, buffers_required);
  if (PREDICT_FALSE (rv != buffers_required))
    {
      vlib_buffer_free (vm, cpu->to_next_list, rv);
      rv = vhost_user_rx_discard_packet_packed (vm, vui, txvq, n_left);
      vlib_error_count (vm, vhost_user_input_node.index,
			VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, rv);
      goto done;
    }

  next = cpu->to_next_list;
  vec_validate (cpu->rx_buffers_pdesc, buffers_required);
  vlib_get_buffers (vm, next, cpu->rx_buffers_pdesc, buffers_required);
  b = cpu->rx_buffers_pdesc;
  n_descs_processed = n_left;

  while (n_left)
    {
      vlib_buffer_t *b_head, *b_current;
      u32 bi_current;
      u32 desc_data_offset;
      u16 desc_idx = desc_current;
      u32 n_descs;

      desc_table = txvq->packed_desc;
      to_next[0] = bi_current = next[0];
      b_head = b_current = b[0];
      b++;
      buffers_used++;
      ASSERT (buffers_used <= buffers_required);
      to_next++;
      next++;
      n_left_to_next--;

      /* The buffer should already be initialized */
      b_head->total_length_not_including_first_buffer = 0;
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      desc_data_offset = vui->virtio_net_hdr_sz;
      n_descs_to_process = 1;

      if (desc_table[desc_idx].flags & VRING_DESC_F_INDIRECT)
	{
	  n_descs = desc_table[desc_idx].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_idx].addr,
				      &map_hint);
	  desc_idx = 0;
	  if (PREDICT_FALSE (desc_table == 0) ||
	      (enable_csum &&
	       (PREDICT_FALSE
		(vhost_user_do_offload
		 (vui, desc_table, desc_idx, mask, b_head,
		  &map_hint) != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	      to_next--;
	      next--;
	      n_left_to_next++;
	      buffers_used--;
	      b--;
	      goto out;
	    }
	  while (n_descs)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs--;
	    }
	}
      else
	{
	  if (enable_csum)
	    {
	      rv = vhost_user_do_offload (vui, desc_table, desc_idx, mask,
					  b_head, &map_hint);
	      if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
		{
		  vlib_error_count (vm, node->node_index, rv, 1);
		  to_next--;
		  next--;
		  n_left_to_next++;
		  buffers_used--;
		  b--;
		  goto out;
		}
	    }
	  /*
	   * For chained descriptors, we process the whole chain in this
	   * single while loop. So count how many descriptors are in the
	   * chain.
	   */
	  n_descs_to_process = 1;
	  while (desc_table[desc_idx].flags & VRING_DESC_F_NEXT)
	    {
	      vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
					  &b_current, &next, &b, &bi_current,
					  cpu, &copy_len, &buffers_used,
					  buffers_required, &desc_data_offset,
					  buffer_data_size, mask);
	      n_descs_to_process++;
	    }
	  vhost_user_assemble_packet (desc_table, &desc_idx, b_head,
				      &b_current, &next, &b, &bi_current,
				      cpu, &copy_len, &buffers_used,
				      buffers_required, &desc_data_offset,
				      buffer_data_size, mask);
	}

      n_rx_bytes += b_head->total_length_not_including_first_buffer;
      n_rx_packets++;

      b_head->total_length_not_including_first_buffer -=
	b_head->current_length;

      vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
      vnet_buffer (b_head)->sw_if_index[VLIB_TX] = ~0;
      b_head->error = 0;

      if (current_config_index != ~0)
	{
	  b_head->current_config_index = current_config_index;
	  vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
	}

    out:
      ASSERT (n_left >= n_descs_to_process);
      n_left -= n_descs_to_process;
      /* advance to next descriptor */
      desc_current = (desc_current + n_descs_to_process) & mask;

      /*
       * Although separating memory copies from virtio ring parsing
       * is beneficial, we can offer to perform the copies from time
       * to time in order to free some space in the ring.
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
	{
	  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len,
					     &map_hint);
	  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
	    vlib_error_count (vm, node->node_index, rv, 1);
	  copy_len = 0;
	}
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  /* Do the memory copies */
  rv = vhost_user_input_copy_packed (vui, cpu->copy, copy_len, &map_hint);
  if (PREDICT_FALSE (rv != VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
    vlib_error_count (vm, node->node_index, rv, 1);

  /* Must do the tracing before giving buffers back to driver */
  if (PREDICT_FALSE (n_trace))
    {
      u32 left = n_rx_packets;

      b = cpu->rx_buffers_pdesc;
      while (n_trace && left)
	{
	  vhost_trace_t *t0;
	  if (PREDICT_TRUE (vlib_trace_buffer
			    (vm, node, next_index, b[0],
			     /* follow_chain */ 0)))
	    {
	      t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
	      vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
	      last_used_idx = (last_used_idx + 1) & mask;
	      n_trace--;
	    }
	  left--;
	  b++;
	}
      vlib_set_trace_count (vm, node, n_trace);
    }

  /*
   * Give buffers back to driver.
   */
  vhost_user_mark_desc_consumed (vui, txvq, desc_head, n_descs_processed);

  /* interrupt (call) handling */
  if ((txvq->callfd_idx != ~0) &&
      (txvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      txvq->n_since_last_int += n_rx_packets;
      if (txvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, txvq);
    }

  /* increase rx counters */
  vlib_increment_combined_counter
    (vnet_main.interface_main.combined_sw_if_counters
     + VNET_INTERFACE_COUNTER_RX, vm->thread_index, vui->sw_if_index,
     n_rx_packets, n_rx_bytes);

  vnet_device_increment_rx_packets (vm->thread_index, n_rx_packets);

  if (PREDICT_FALSE (buffers_used < buffers_required))
    vlib_buffer_free (vm, next, buffers_required - buffers_used);

done:
  return n_rx_packets;
}
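
/*
 * Node dispatch function: for each queue polled on this thread, pick
 * the packed or split input routine, specialized on whether the guest
 * negotiated VIRTIO_NET_F_CSUM.
 */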
VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  vhost_user_main_t *vum = &vhost_user_main;
  uword n_rx_packets = 0;
  vhost_user_intf_t *vui;
  vnet_hw_if_rxq_poll_vector_t *pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  vnet_hw_if_rxq_poll_vector_t *pve;

  vec_foreach (pve, pv)
    {
      vui = pool_elt_at_index (vum->vhost_user_interfaces, pve->dev_instance);
      if (vhost_user_is_packed_ring_supported (vui))
	{
	  if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	    n_rx_packets += vhost_user_if_input_packed (
	      vm, vum, vui, pve->queue_id, node, 1);
	  else
	    n_rx_packets += vhost_user_if_input_packed (
	      vm, vum, vui, pve->queue_id, node, 0);
	}
      else
	{
	  if (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM))
	    n_rx_packets +=
	      vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 1);
	  else
	    n_rx_packets +=
	      vhost_user_if_input (vm, vum, vui, pve->queue_id, node, 0);
	}
    }

  return n_rx_packets;
}
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .type = VLIB_NODE_TYPE_INPUT,
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

  /* Will be enabled if/when hardware is detected. */
  .state = VLIB_NODE_STATE_DISABLED,

  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_vhost_trace,

  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */