vhost: move tracing out of processing loop 27/43627/2
author: Konstantin Kogdenko <[email protected]>
Sat, 23 Aug 2025 09:09:04 +0000 (12:09 +0300)
committer: Florin Coras <[email protected]>
Wed, 27 Aug 2025 18:44:26 +0000 (18:44 +0000)
Before this fix, vlib_trace_buffer was being called on a not yet fully
initialized vlib_buffer_t (current_length == 0). This caused trace
filter functions to work incorrectly.

Type: fix

Change-Id: Ia8974af90ce91022ce1436ab54ab630bacce5bcf
Signed-off-by: Konstantin Kogdenko <[email protected]>
src/plugins/vhost/vhost_user_input.c

index 5dc1eed..328da72 100644 (file)
@@ -381,6 +381,33 @@ vhost_user_input_setup_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
     }
 }
 
+static_always_inline void
+vhost_user_if_input_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
+                          u32 n_trace, vhost_user_intf_t *vui, u16 qid,
+                          u32 next_index, u16 last_avail_idx, u32 n_left,
+                          const u32 *bi)
+{ /* Trace up to n_trace of the n_left buffers in bi[]; last_avail_idx is the ring index of bi[0]. */
+  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)]; /* ring the buffers came from */
+
+  while (n_trace && n_left) /* stop when the trace budget or the buffers run out */
+    {
+      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
+
+      if (PREDICT_FALSE (vlib_trace_buffer (vm, node, next_index, b, 0))) /* 0 = don't follow chain */
+       {
+         vhost_trace_t *t0 = vlib_add_trace (vm, node, b, sizeof (t0[0]));
+         vhost_user_rx_trace (t0, vui, qid, b, txvq, last_avail_idx);
+         n_trace--; /* only buffers accepted by the trace filter consume budget */
+       }
+
+      n_left--;
+      last_avail_idx++; /* advances with bi[]; u16, so it wraps naturally */
+      bi++;
+    }
+
+  vlib_set_trace_count (vm, node, n_trace); /* return the unused budget to the node */
+}
+
 static_always_inline u32
 vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
                     vhost_user_intf_t *vui, u16 qid,
@@ -549,17 +576,6 @@ vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
       b_head->total_length_not_including_first_buffer = 0;
       b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
 
-      if (PREDICT_FALSE
-         (n_trace > 0 && vlib_trace_buffer (vm, node, next_index, b_head,
-                                            /* follow_chain */ 0)))
-       {
-         vhost_trace_t *t0 =
-           vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
-         vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
-         n_trace--;
-         vlib_set_trace_count (vm, node, n_trace);
-       }
-
       /* This depends on the setup but is very consistent
        * So I think the CPU branch predictor will make a pretty good job
        * at optimizing the decision. */
@@ -737,6 +753,14 @@ stop:
                        VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
     }
 
+  /* packet trace if enabled */
+  if (PREDICT_FALSE (n_trace))
+    {
+      vhost_user_if_input_trace (vm, node, n_trace, vui, qid, next_index,
+                                last_avail_idx - n_rx_packets, n_rx_packets,
+                                to_next - n_rx_packets);
+    }
+
   /* give buffers back to driver */
   CLIB_MEMORY_STORE_BARRIER ();
   txvq->used->idx = txvq->last_used_idx;