vlib: fix trace number accounting
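The tracing hunks below stop charging the per-node trace budget for buffers that vlib_trace_buffer () declines to capture: a trace record is added, and n_trace decremented, only when vlib_trace_buffer () returns non-zero. Below is a minimal sketch of that calling pattern, assuming vlib_trace_buffer () reports through its return value whether the buffer was actually captured; the demo_trace_t record and demo_trace_one helper are illustrative names only, not part of this patch.

#include <vlib/vlib.h>

/* Hypothetical trace record, for illustration only. */
typedef struct
{
  u32 buffer_len;
} demo_trace_t;

/*
 * Trace one buffer, charging the trace budget only when
 * vlib_trace_buffer () reports that the buffer was actually captured.
 */
static_always_inline void
demo_trace_one (vlib_main_t * vm, vlib_node_runtime_t * node,
                u32 next_index, vlib_buffer_t * b, u32 * n_trace)
{
  if (PREDICT_FALSE (*n_trace > 0
                     && vlib_trace_buffer (vm, node, next_index, b,
                                           /* follow_chain */ 0)))
    {
      demo_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
      t->buffer_len = b->current_length;
      *n_trace -= 1;
      vlib_set_trace_count (vm, node, *n_trace);
    }
}

Decrementing the budget only for captured buffers keeps the count handed back through vlib_set_trace_count () consistent with the number of trace records actually added.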
[vpp.git] src/vnet/devices/virtio/vhost_user_input.c
index ea8e7d6..53230a6 100644
 #include <vlib/vlib.h>
 #include <vlib/unix/unix.h>
 
-#include <vnet/ip/ip.h>
-
 #include <vnet/ethernet/ethernet.h>
 #include <vnet/devices/devices.h>
 #include <vnet/feature/feature.h>
+#include <vnet/udp/udp_packet.h>
 
 #include <vnet/devices/virtio/vhost_user.h>
 #include <vnet/devices/virtio/vhost_user_inline.h>
@@ -292,15 +291,11 @@ vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
       tcp_header_t *tcp = (tcp_header_t *)
        (b0_data + vnet_buffer (b0)->l4_hdr_offset);
       l4_hdr_sz = tcp_header_bytes (tcp);
-      tcp->checksum = 0;
       b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
     }
   else if (l4_proto == IP_PROTOCOL_UDP)
     {
-      udp_header_t *udp =
-       (udp_header_t *) (b0_data + vnet_buffer (b0)->l4_hdr_offset);
-      l4_hdr_sz = sizeof (*udp);
-      udp->checksum = 0;
+      l4_hdr_sz = sizeof (udp_header_t);
       b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
     }
 
@@ -380,7 +375,7 @@ vhost_user_if_input (vlib_main_t * vm,
                     vhost_user_main_t * vum,
                     vhost_user_intf_t * vui,
                     u16 qid, vlib_node_runtime_t * node,
-                    vnet_hw_interface_rx_mode mode, u8 enable_csum)
+                    vnet_hw_if_rx_mode mode, u8 enable_csum)
 {
   vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
   vnet_feature_main_t *fm = &feature_main;
@@ -415,7 +410,7 @@ vhost_user_if_input (vlib_main_t * vm,
    * When the traffic subsides, the scheduler switches the node back to
    * interrupt mode. We must tell the driver we want interrupt.
    */
-  if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
+  if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
     {
       if ((node->flags &
           VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
@@ -545,10 +540,10 @@ vhost_user_if_input (vlib_main_t * vm,
       b_head->total_length_not_including_first_buffer = 0;
       b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
 
-      if (PREDICT_FALSE (n_trace))
+      if (PREDICT_FALSE
+         (n_trace > 0 && vlib_trace_buffer (vm, node, next_index, b_head,
+                                            /* follow_chain */ 0)))
        {
-         vlib_trace_buffer (vm, node, next_index, b_head,
-                            /* follow_chain */ 0);
          vhost_trace_t *t0 =
            vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
          vhost_user_rx_trace (t0, vui, qid, b_head, txvq, last_avail_idx);
@@ -1088,7 +1083,7 @@ static_always_inline u32
 vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
                            vhost_user_intf_t * vui, u16 qid,
                            vlib_node_runtime_t * node,
-                           vnet_hw_interface_rx_mode mode, u8 enable_csum)
+                           vnet_hw_if_rx_mode mode, u8 enable_csum)
 {
   vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
   vnet_feature_main_t *fm = &feature_main;
@@ -1130,7 +1125,7 @@ vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
    * When the traffic subsides, the scheduler switches the node back to
    * interrupt mode. We must tell the driver we want interrupt.
    */
-  if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
+  if (PREDICT_FALSE (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
     {
       if ((node->flags &
           VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
@@ -1367,17 +1362,19 @@ vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
       b = cpu->rx_buffers_pdesc;
       while (n_trace && left)
        {
-         vhost_trace_t *t0;
-
-         vlib_trace_buffer (vm, node, next_index, b[0],
-                            /* follow_chain */ 0);
-         t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
-         b++;
-         vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
-         last_used_idx = (last_used_idx + 1) & mask;
-         n_trace--;
+         if (PREDICT_TRUE
+             (vlib_trace_buffer
+              (vm, node, next_index, b[0], /* follow_chain */ 0)))
+           {
+             vhost_trace_t *t0;
+             t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
+             vhost_user_rx_trace_packed (t0, vui, qid, txvq, last_used_idx);
+             last_used_idx = (last_used_idx + 1) & mask;
+             n_trace--;
+             vlib_set_trace_count (vm, node, n_trace);
+           }
          left--;
-         vlib_set_trace_count (vm, node, n_trace);
+         b++;
        }
     }
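Usage sketch for the helper from the commit message above (hypothetical frame-loop names n_rx, bufs, next_index; in the packed-ring hunk the budget n_trace is presumably obtained from vlib_get_trace_count () outside the lines shown):

u32 n_trace = vlib_get_trace_count (vm, node);
for (u32 i = 0; i < n_rx && n_trace; i++)
  demo_trace_one (vm, node, next_index, bufs[i], &n_trace);

Buffers that are not captured no longer consume the budget, while the loop still advances over them (left-- and b++ above).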