+ rx_len = vec_len (vum->rx_buffers[cpu_index]); /* vec_len is null-safe: returns 0 for a null vector */
+ while (n_left > 0)
+ {
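+ /* Get a frame on the next node; to_next/n_left_to_next track its free slots */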
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b_head, *b_current;
+ u32 bi_head, bi_current;
+ u16 desc_chain_head, desc_current;
+ u8 error = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
+
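+ /* Prefetch the next descriptor's payload and the next rx buffer to hide memory latency */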
+ if (PREDICT_TRUE (n_left > 1))
+ {
+ u32 next_desc =
+ txvq->avail->ring[(txvq->last_avail_idx + 1) & qsz_mask];
+ void *buffer_addr =
+ map_guest_mem (vui, txvq->desc[next_desc].addr,
+ &map_guest_hint_desc);
+ if (PREDICT_TRUE (buffer_addr != 0))
+ CLIB_PREFETCH (buffer_addr, 64, STORE);
+
+ u32 bi = vum->rx_buffers[cpu_index][rx_len - 2];
+ vlib_prefetch_buffer_with_index (vm, bi, STORE);
+ CLIB_PREFETCH (vlib_get_buffer (vm, bi)->data, 128, STORE);
+ }
+
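+ /* Dequeue the next descriptor chain and pop a pre-allocated rx buffer for its head */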
+ desc_chain_head = desc_current =
+ txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
+ bi_head = bi_current = vum->rx_buffers[cpu_index][--rx_len];
+ b_head = b_current = vlib_get_buffer (vm, bi_head);
+ vlib_buffer_chain_init (b_head);
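+ /* Trace the first n_trace packets through this node */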
+ if (PREDICT_FALSE (n_trace))
+ {
+ vlib_trace_buffer (vm, node, next_index, b_head,
+ /* follow_chain */ 0);
+ vhost_trace_t *t0 =
+ vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
+ vhost_user_rx_trace (t0, vui, qid, b_head, txvq);
+ n_trace--;
+ vlib_set_trace_count (vm, node, n_trace);
+ }
+
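+ /* offset = number of bytes to skip in the first descriptor (the virtio-net header) */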
+ uword offset;
+ if (PREDICT_TRUE (vui->is_any_layout) ||
+ (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
+ !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)))
+ {
+ /* ANYLAYOUT or single buffer */
+ offset = vui->virtio_net_hdr_sz;
+ }
+ else
+ {
+ /* Chained/indirect case without ANYLAYOUT: the header fills the 1st descriptor, skip it */
+ offset = txvq->desc[desc_current].len;
+ }
+
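+ /* INDIRECT descriptors point to a separate descriptor table in guest memory; start at its first entry */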
+ vring_desc_t *desc_table = txvq->desc;
+ u32 desc_index = desc_current;
+ map_guest_hint_p = &map_guest_hint_desc;
+
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+ {
+ desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
+ &map_guest_hint_desc);
+ desc_index = 0;
+ map_guest_hint_p = &map_guest_hint_indirect;
+ if (PREDICT_FALSE (desc_table == 0))
+ {
+ error = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
+ goto out;
+ }
+ }
+
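+ /* Copy each descriptor segment of the chain into the vlib buffer chain */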
+ while (1)
+ {
+ void *buffer_addr =
+ map_guest_mem (vui, desc_table[desc_index].addr,
+ map_guest_hint_p);
+ if (PREDICT_FALSE (buffer_addr == 0))
+ {
+ error = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
+ goto out;
+ }
+
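+ /* Prefetch the next chained descriptor while this segment is copied */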
+ if (PREDICT_TRUE
+ (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT))
+ {
+ CLIB_PREFETCH (&desc_table[desc_table[desc_index].next],
+ sizeof (vring_desc_t), STORE);
+ }
+
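+ /* Copy everything past the header offset; chained buffers are allocated as needed */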
+ if (desc_table[desc_index].len > offset)
+ {
+ u16 len = desc_table[desc_index].len - offset;
+ u16 copied = vlib_buffer_chain_append_data_with_alloc
+ (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX, b_head,
+ &b_current, buffer_addr + offset, len);
+ if (copied != len)
+ {
+ error = VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER;
+ break;
+ }
+ }
+ offset = 0;
+
+ /* if next flag is set, take next desc in the chain */
+ if ((desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT))
+ desc_index = desc_table[desc_index].next;
+ else
+ goto out;
+ }
+ out:
+
+ /* consume the descriptor and return it as used */
+ txvq->last_avail_idx++;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].id =
+ desc_chain_head;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
+ vhost_user_log_dirty_ring (vui, txvq,
+ ring[txvq->last_used_idx & qsz_mask]);
+ txvq->last_used_idx++;
+
+ /* Publish used entries promptly so the guest's TX side
+ does not run out of descriptors and drop packets */
+ if ((txvq->last_used_idx & 0x3f) == 0) /* every 64 packets */
+ txvq->used->idx = txvq->last_used_idx;
+
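+ /* Anything shorter than an Ethernet header (14 bytes) is undersized */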
+ if (PREDICT_FALSE (b_head->current_length < 14 &&
+ error == VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
+ error = VHOST_USER_INPUT_FUNC_ERROR_UNDERSIZED_FRAME;
+
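+ /* Set rx interface metadata; no tx interface is assigned yet (~0) */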
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
+
+ vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
+ vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ b_head->error = node->errors[error];
+
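+ /* On error, drop; otherwise count rx bytes/packets and forward to ethernet-input */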
+ if (PREDICT_FALSE (error))
+ {
+ drops++;
+ next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+ }
+ else
+ {
+ n_rx_bytes +=
+ b_head->current_length +
+ b_head->total_length_not_including_first_buffer;
+ n_rx_packets++;
+ next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ }
+
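+ /* Enqueue the head buffer; the device-input feature arc may override next0 */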
+ to_next[0] = bi_head;
+ to_next++;
+ n_left_to_next--;
+
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (vui->sw_if_index, &next0,
+ b_head, 0);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_head, next0);
+ n_left--;
+ if (PREDICT_FALSE (!n_left))
+ {
+ /* Budget exhausted: take more descriptors, capped by frame space and rx buffers */
+ u32 remain = (u16) (txvq->avail->idx - txvq->last_avail_idx);
+ remain = (remain > VLIB_FRAME_SIZE - n_rx_packets) ?
+ VLIB_FRAME_SIZE - n_rx_packets : remain;
+ remain = (remain > rx_len) ? rx_len : remain;
+ n_left = remain;
+ }
+ }
+
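+ /* Release the frame, dispatching the enqueued buffers */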
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }