Add Rx and Tx statistics within nsh-vxlan-gpe node 03/703/4
author    Hongjun Ni <hongjun.ni@intel.com>
          Wed, 6 Apr 2016 23:20:22 +0000 (16:20 -0700)
committer Gerrit Code Review <gerrit@fd.io>
          Fri, 8 Apr 2016 14:38:43 +0000 (14:38 +0000)
PatchSet2: Modify the code according to review comments.

PatchSet3: Modify sw_if_index1 in encap.c.

Change-Id: Ic4d3ee19a0ba0fa10568e570a79a3cb85cfbc9ab
Signed-off-by: Hongjun Ni <hongjun.ni@intel.com>
vnet/vnet/nsh-vxlan-gpe/decap.c
vnet/vnet/nsh-vxlan-gpe/encap.c
vnet/vnet/vxlan/encap.c
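
The pattern throughout is the one vxlan's encap path already uses (see the
third file below): per-thread (cpu_index) combined counters, batched per
sw_if_index so the counter is touched once per run of packets from the same
tunnel rather than once per packet. A minimal sketch of that batching step,
with names taken from the diff below; the helper name and the counter_index
parameter are mine, and this is illustrative, not the verbatim upstream code:

    /* Accumulate one packet into the running batch; flush the batch to the
     * per-thread combined counter only when the tunnel (sw_if_index)
     * changes, which is the rare case. */
    static_always_inline void
    batch_sw_if_stats (vlib_main_t * vm, vnet_interface_main_t * im,
                       u32 counter_index /* RX or TX */, u32 cpu_index,
                       vlib_buffer_t * b, u32 sw_if_index,
                       u32 * stats_sw_if_index, u32 * stats_n_packets,
                       u32 * stats_n_bytes)
    {
      u32 len = vlib_buffer_length_in_chain (vm, b);

      *stats_n_packets += 1;
      *stats_n_bytes += len;

      if (PREDICT_FALSE (sw_if_index != *stats_sw_if_index))
        {
          /* Back the current packet out, flush the old batch, and start
           * a new batch on the new tunnel. */
          *stats_n_packets -= 1;
          *stats_n_bytes -= len;
          if (*stats_n_packets)
            vlib_increment_combined_counter
              (im->combined_sw_if_counters + counter_index,
               cpu_index, *stats_sw_if_index,
               *stats_n_packets, *stats_n_bytes);
          *stats_n_packets = 1;
          *stats_n_bytes = len;
          *stats_sw_if_index = sw_if_index;
        }
    }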

diff --git a/vnet/vnet/nsh-vxlan-gpe/decap.c b/vnet/vnet/nsh-vxlan-gpe/decap.c
index a8de9bc..a5a85c6 100644
--- a/vnet/vnet/nsh-vxlan-gpe/decap.c
+++ b/vnet/vnet/nsh-vxlan-gpe/decap.c
@@ -59,9 +59,13 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
 {
   u32 n_left_from, next_index, * from, * to_next;
   nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
+  vnet_main_t * vnm = ngm->vnet_main;
+  vnet_interface_main_t * im = &vnm->interface_main;
   u32 last_tunnel_index = ~0;
   nsh_vxlan_gpe_tunnel_key_t last_key;
   u32 pkts_decapsulated = 0;
+  u32 cpu_index = os_get_cpu_number();
+  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
 
   memset (&last_key, 0xff, sizeof (last_key));
 
@@ -69,6 +73,8 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
   n_left_from = from_frame->n_vectors;
 
   next_index = node->cached_next_index;
+  stats_sw_if_index = node->runtime_data[0];
+  stats_n_packets = stats_n_bytes = 0;
 
   while (n_left_from > 0)
     {
@@ -88,6 +94,7 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
           nsh_vxlan_gpe_tunnel_t * t0, * t1;
           nsh_vxlan_gpe_tunnel_key_t key0, key1;
           u32 error0, error1;
+          u32 sw_if_index0, sw_if_index1, len0, len1;
 
          /* Prefetch next iteration. */
          {
@@ -162,6 +169,8 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
           t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
 
           next0 = t0->decap_next_index;
+          sw_if_index0 = t0->sw_if_index;
+          len0 = vlib_buffer_length_in_chain(vm, b0);
 
           /* Required to make the l2 tag push / pop code work on l2 subifs */
           vnet_update_l2_len (b0);
@@ -204,13 +213,29 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
               vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->decap_fib_index;
             }
 
+          pkts_decapsulated++;
+          stats_n_packets += 1;
+          stats_n_bytes += len0;
+
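+          /* Batch stats increment on the same nsh-vxlan-gpe tunnel so the
+             counter is not incremented per packet */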
+          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+          {
+            stats_n_packets -= 1;
+            stats_n_bytes -= len0;
+            if (stats_n_packets)
+              vlib_increment_combined_counter(
+                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+                  cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+            stats_n_packets = 1;
+            stats_n_bytes = len0;
+            stats_sw_if_index = sw_if_index0;
+          }
 
         trace0:
           b0->error = error0 ? node->errors[error0] : 0;
 
-          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) 
+          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
-              nsh_vxlan_gpe_rx_trace_t *tr 
+              nsh_vxlan_gpe_rx_trace_t *tr
                 = vlib_add_trace (vm, node, b0, sizeof (*tr));
               tr->next_index = next0;
               tr->error = error0;
@@ -244,6 +269,8 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
           t1 = pool_elt_at_index (ngm->tunnels, tunnel_index1);
 
           next1 = t1->decap_next_index;
+          sw_if_index1 = t1->sw_if_index;
+          len1 = vlib_buffer_length_in_chain(vm, b1);
 
           /* Required to make the l2 tag push / pop code work on l2 subifs */
           vnet_update_l2_len (b1);
@@ -286,15 +313,31 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
               vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
             }
 
+          pkts_decapsulated++;
+          stats_n_packets += 1;
+          stats_n_bytes += len1;
+          /* Batch stats increment on the same nsh-vxlan-gpe tunnel so the
+             counter is not incremented per packet */
+          if (PREDICT_FALSE(sw_if_index1 != stats_sw_if_index))
+          {
+            stats_n_packets -= 1;
+            stats_n_bytes -= len1;
+            if (stats_n_packets)
+              vlib_increment_combined_counter(
+                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+                  cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+            stats_n_packets = 1;
+            stats_n_bytes = len1;
+            stats_sw_if_index = sw_if_index1;
+          }
           vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->decap_fib_index;
-          pkts_decapsulated += 2;
 
         trace1:
           b1->error = error1 ? node->errors[error1] : 0;
 
-          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) 
+          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
             {
-              nsh_vxlan_gpe_rx_trace_t *tr 
+              nsh_vxlan_gpe_rx_trace_t *tr
                 = vlib_add_trace (vm, node, b1, sizeof (*tr));
               tr->next_index = next1;
               tr->error = error1;
@@ -318,6 +361,7 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
           nsh_vxlan_gpe_tunnel_t * t0;
           nsh_vxlan_gpe_tunnel_key_t key0;
           u32 error0;
+          u32 sw_if_index0, len0;
 
          bi0 = from[0];
          to_next[0] = bi0;
@@ -367,13 +411,15 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
           t0 = pool_elt_at_index (ngm->tunnels, tunnel_index0);
 
           next0 = t0->decap_next_index;
+          sw_if_index0 = t0->sw_if_index;
+          len0 = vlib_buffer_length_in_chain(vm, b0);
 
           /* Required to make the l2 tag push / pop code work on l2 subifs */
           vnet_update_l2_len (b0);
 
           if (next0 == NSH_VXLAN_GPE_INPUT_NEXT_NSH_VXLAN_GPE_ENCAP)
             {
-              /* 
+              /*
                * Functioning as SFF (ie "half NSH tunnel mode")
                * If ingress (we are in decap.c) with NSH header, and 'decap next nsh-vxlan-gpe' then "NSH switch"
                * 1. Take DST, remap to SRC, remap other keys in place
@@ -402,7 +448,7 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
             } 
           else 
             {
-              /* 
+              /*
                * ip[46] lookup in the configured FIB
                * nsh-vxlan-gpe-encap, here's the encap tunnel sw_if_index
                */
@@ -411,12 +457,30 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
 
           pkts_decapsulated ++;
 
+          stats_n_packets += 1;
+          stats_n_bytes += len0;
+
+          /* Batch stats increment on the same nsh-vxlan-gpe tunnel so the
+             counter is not incremented per packet */
+          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
+          {
+            stats_n_packets -= 1;
+            stats_n_bytes -= len0;
+            if (stats_n_packets)
+              vlib_increment_combined_counter(
+                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+                  cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+            stats_n_packets = 1;
+            stats_n_bytes = len0;
+            stats_sw_if_index = sw_if_index0;
+          }
+
         trace00:
           b0->error = error0 ? node->errors[error0] : 0;
 
-          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) 
+          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
-              nsh_vxlan_gpe_rx_trace_t *tr 
+              nsh_vxlan_gpe_rx_trace_t *tr
                 = vlib_add_trace (vm, node, b0, sizeof (*tr));
               tr->next_index = next0;
               tr->error = error0;
@@ -431,8 +495,16 @@ nsh_vxlan_gpe_input (vlib_main_t * vm,
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
   vlib_node_increment_counter (vm, nsh_vxlan_gpe_input_node.index,
-                               NSH_VXLAN_GPE_ERROR_DECAPSULATED, 
+                               NSH_VXLAN_GPE_ERROR_DECAPSULATED,
                                pkts_decapsulated);
+  /* Increment any remaining batch stats */
+  if (stats_n_packets)
+  {
+    vlib_increment_combined_counter(
+        im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index,
+        stats_sw_if_index, stats_n_packets, stats_n_bytes);
+    node->runtime_data[0] = stats_sw_if_index;
+  }
   return from_frame->n_vectors;
 }
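
At frame end any residual batch is flushed and the last sw_if_index is
stashed in node->runtime_data[0], so batching can continue across frames
when traffic stays on one tunnel. These are the ordinary per-interface
combined counters, so they should surface in "show interface" for the
tunnel interface; a hedged sketch of reading them back programmatically
(helper name is mine; vlib_get_combined_counter as in vlib/counter.h of
this era, verify the signature against your tree):

    #include <vnet/vnet.h>
    #include <vnet/interface.h>

    /* Sum the per-thread RX mini-counters for one tunnel interface. */
    static void
    tunnel_rx_stats (vnet_main_t * vnm, u32 sw_if_index)
    {
      vnet_interface_main_t * im = &vnm->interface_main;
      vlib_counter_t v;

      vlib_get_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
         sw_if_index, &v);

      clib_warning ("sw_if_index %u: %Lu packets, %Lu bytes",
                    sw_if_index, v.packets, v.bytes);
    }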
 
diff --git a/vnet/vnet/nsh-vxlan-gpe/encap.c b/vnet/vnet/nsh-vxlan-gpe/encap.c
index 0ccdf60..b5ff585 100644
--- a/vnet/vnet/nsh-vxlan-gpe/encap.c
+++ b/vnet/vnet/nsh-vxlan-gpe/encap.c
@@ -58,7 +58,7 @@ u8 * format_nsh_vxlan_gpe_encap_trace (u8 * s, va_list * args)
 }
 
 #define foreach_fixed_header_offset             \
-_(0) _(1) _(2) _(3) _(4) _(5) _(6) 
+_(0) _(1) _(2) _(3) _(4) _(5) _(6)
 
 static uword
 nsh_vxlan_gpe_encap (vlib_main_t * vm,
@@ -68,13 +68,18 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
   u32 n_left_from, next_index, * from, * to_next;
   nsh_vxlan_gpe_main_t * ngm = &nsh_vxlan_gpe_main;
   vnet_main_t * vnm = ngm->vnet_main;
+  vnet_interface_main_t * im = &vnm->interface_main;
   u32 pkts_encapsulated = 0;
   u16 old_l0 = 0, old_l1 = 0;
+  u32 cpu_index = os_get_cpu_number();
+  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
 
   next_index = node->cached_next_index;
+  stats_sw_if_index = node->runtime_data[0];
+  stats_n_packets = stats_n_bytes = 0;
 
   while (n_left_from > 0)
     {
@@ -89,6 +94,7 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
          vlib_buffer_t * b0, * b1;
          u32 next0 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
           u32 next1 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+          u32 sw_if_index0, sw_if_index1, len0, len1;
           vnet_hw_interface_t * hi0, * hi1;
           ip4_header_t * ip0, * ip1;
           udp_header_t * udp0, * udp1;
@@ -127,9 +133,11 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
          b1 = vlib_get_buffer (vm, bi1);
 
           /* 1-wide cache? */
-          hi0 = vnet_get_sup_hw_interface 
+          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
+          hi0 = vnet_get_sup_hw_interface
             (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
-          hi1 = vnet_get_sup_hw_interface 
+          hi1 = vnet_get_sup_hw_interface
             (vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
 
           t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
@@ -206,24 +214,59 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
           udp1 = (udp_header_t *)(ip1+1);
           new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                          - sizeof (*ip1));
-          
+
           udp0->length = new_l0;
           udp1->length = new_l1;
 
           /* Reset to look up tunnel partner in the configured FIB */
           vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
           vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
+          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
+          pkts_encapsulated += 2;
+
+          len0 = vlib_buffer_length_in_chain(vm, b0);
+          len1 = vlib_buffer_length_in_chain(vm, b1);
+          stats_n_packets += 2;
+          stats_n_bytes += len0 + len1;
+
+          /* Batch stats increment on the same nsh-vxlan-gpe tunnel so the
+             counter is not incremented per packet. Note stats are still
+             incremented for deleted and admin-down tunnels where packets
+             are dropped. It is not worthwhile to check for this rare case
+             and affect normal path performance. */
+          if (PREDICT_FALSE(
+              (sw_if_index0 != stats_sw_if_index)
+                  || (sw_if_index1 != stats_sw_if_index))) {
+            stats_n_packets -= 2;
+            stats_n_bytes -= len0 + len1;
+            if (sw_if_index0 == sw_if_index1) {
+              if (stats_n_packets)
+                vlib_increment_combined_counter(
+                    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+                    cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+              stats_sw_if_index = sw_if_index0;
+              stats_n_packets = 2;
+              stats_n_bytes = len0 + len1;
+            } else {
+              vlib_increment_combined_counter(
+                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+                  cpu_index, sw_if_index0, 1, len0);
+              vlib_increment_combined_counter(
+                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+                  cpu_index, sw_if_index1, 1, len1);
+            }
+          }
 
-          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) 
+          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
-              nsh_vxlan_gpe_encap_trace_t *tr = 
+              nsh_vxlan_gpe_encap_trace_t *tr =
                 vlib_add_trace (vm, node, b0, sizeof (*tr));
               tr->tunnel_index = t0 - ngm->tunnels;
             }
 
-          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) 
+          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
             {
-              nsh_vxlan_gpe_encap_trace_t *tr = 
+              nsh_vxlan_gpe_encap_trace_t *tr =
                 vlib_add_trace (vm, node, b1, sizeof (*tr));
               tr->tunnel_index = t1 - ngm->tunnels;
             }
@@ -238,6 +281,7 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
          u32 bi0;
          vlib_buffer_t * b0;
          u32 next0 = NSH_VXLAN_GPE_ENCAP_NEXT_IP4_LOOKUP;
+          u32 sw_if_index0, len0;
           vnet_hw_interface_t * hi0;
           ip4_header_t * ip0;
           udp_header_t * udp0;
@@ -257,7 +301,8 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
          b0 = vlib_get_buffer (vm, bi0);
 
           /* 1-wide cache? */
-          hi0 = vnet_get_sup_hw_interface 
+          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+          hi0 = vnet_get_sup_hw_interface
             (vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
 
           t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
@@ -299,21 +344,41 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
                                  length /* changed member */);
           ip0->checksum = ip_csum_fold (sum0);
           ip0->length = new_l0;
-          
+
           /* Fix UDP length */
           udp0 = (udp_header_t *)(ip0+1);
           new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                          - sizeof (*ip0));
-          
+
           udp0->length = new_l0;
 
           /* Reset to look up tunnel partner in the configured FIB */
           vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
           pkts_encapsulated ++;
 
-          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) 
+          len0 = vlib_buffer_length_in_chain(vm, b0);
+          stats_n_packets += 1;
+          stats_n_bytes += len0;
+
+          /* Batch stats increment on the same nsh-vxlan-gpe tunnel so the
+             counter is not incremented per packet. Note stats are still
+             incremented for deleted and admin-down tunnels where packets
+             are dropped. It is not worthwhile to check for this rare case
+             and affect normal path performance. */
+          if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) {
+            stats_n_packets -= 1;
+            stats_n_bytes -= len0;
+            if (stats_n_packets)
+              vlib_increment_combined_counter(
+                  im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+                  cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+            stats_n_packets = 1;
+            stats_n_bytes = len0;
+            stats_sw_if_index = sw_if_index0;
+          }
+          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
             {
-              nsh_vxlan_gpe_encap_trace_t *tr = 
+              nsh_vxlan_gpe_encap_trace_t *tr =
                 vlib_add_trace (vm, node, b0, sizeof (*tr));
               tr->tunnel_index = t0 - ngm->tunnels;
             }
@@ -324,9 +389,17 @@ nsh_vxlan_gpe_encap (vlib_main_t * vm,
 
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-  vlib_node_increment_counter (vm, node->node_index, 
-                               NSH_VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED, 
+  vlib_node_increment_counter (vm, node->node_index,
+                               NSH_VXLAN_GPE_ENCAP_ERROR_ENCAPSULATED,
                                pkts_encapsulated);
+  /* Increment any remaining batch stats */
+  if (stats_n_packets) {
+    vlib_increment_combined_counter(
+        im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
+        stats_sw_if_index, stats_n_packets, stats_n_bytes);
+    node->runtime_data[0] = stats_sw_if_index;
+  }
+
   return from_frame->n_vectors;
 }
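
The dual-packet loop needs slightly different flush semantics: when either
packet leaves the running batch, back both out; if the pair shares one new
tunnel, restart the batch there, otherwise count each packet individually
and leave the batch alone. A sketch of that step, mirroring the hunk above
(helper name is mine, illustrative only):

    static_always_inline void
    batch_tx_stats_x2 (vnet_interface_main_t * im, u32 cpu_index,
                       u32 sw_if_index0, u32 sw_if_index1,
                       u32 len0, u32 len1,
                       u32 * stats_sw_if_index, u32 * stats_n_packets,
                       u32 * stats_n_bytes)
    {
      *stats_n_packets += 2;
      *stats_n_bytes += len0 + len1;

      if (PREDICT_FALSE ((sw_if_index0 != *stats_sw_if_index) ||
                         (sw_if_index1 != *stats_sw_if_index)))
        {
          *stats_n_packets -= 2;
          *stats_n_bytes -= len0 + len1;
          if (sw_if_index0 == sw_if_index1)
            {
              /* Both packets belong to one (new) tunnel: flush the old
               * batch and restart it with this pair. */
              if (*stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   cpu_index, *stats_sw_if_index,
                   *stats_n_packets, *stats_n_bytes);
              *stats_sw_if_index = sw_if_index0;
              *stats_n_packets = 2;
              *stats_n_bytes = len0 + len1;
            }
          else
            {
              /* Mixed pair: count each packet directly; the running batch
               * (possibly for a third tunnel) stays as-is. */
              vlib_increment_combined_counter
                (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                 cpu_index, sw_if_index0, 1, len0);
              vlib_increment_combined_counter
                (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                 cpu_index, sw_if_index1, 1, len1);
            }
        }
    }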
 
diff --git a/vnet/vnet/vxlan/encap.c b/vnet/vnet/vxlan/encap.c
index dfa3bf7..90854ad 100644
--- a/vnet/vnet/vxlan/encap.c
+++ b/vnet/vnet/vxlan/encap.c
@@ -246,7 +246,7 @@ vxlan_encap (vlib_main_t * vm,
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
-                            (sw_if_index0 != stats_sw_if_index))) 
+                            (sw_if_index1 != stats_sw_if_index))) 
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
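
(The vxlan/encap.c hunk fixes a copy-paste bug: the predicate tested
sw_if_index0 twice, so a pair whose second packet alone belonged to a
different tunnel never took the flush path, and b1's packet and bytes were
credited to the wrong interface.)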