session: measure dispatch period only if under load (Gerrit change: 68/15668/7)
author: Florin Coras <fcoras@cisco.com>
Thu, 1 Nov 2018 23:30:54 +0000 (16:30 -0700)
committer: Marco Varlese <marco.varlese@suse.de>
Fri, 2 Nov 2018 08:06:46 +0000 (08:06 +0000)
Also reset pacer on tcp retransmit timeout

Change-Id: I5a9edee4c00d1d169248d79587a9b10437c2bd87
Signed-off-by: Florin Coras <fcoras@cisco.com>
src/vnet/session/session.c
src/vnet/session/session.h
src/vnet/session/session_node.c
src/vnet/session/transport.c
src/vnet/session/transport_interface.h
src/vnet/tcp/tcp.c
src/vnet/tcp/tcp.h
src/vnet/tcp/tcp_input.c
src/vnet/tcp/tcp_output.c

index 3dd80ad..f689486 100644 (file)
@@ -1366,6 +1366,7 @@ session_manager_main_enable (vlib_main_t * vm)
       _vec_len (wrk->postponed_event_vector) = 0;
 
       wrk->last_vlib_time = vlib_time_now (vlib_mains[i]);
+      wrk->dispatch_period = 500e-6;
 
       if (num_threads > 1)
        clib_rwlock_init (&smm->wrk[i].peekers_rw_locks);
index 131652a..3bdf6c9 100644 (file)
@@ -229,6 +229,8 @@ typedef struct session_manager_worker_
   /** Peekers rw lock */
   clib_rwlock_t peekers_rw_locks;
 
+  u32 last_tx_packets;
+
 } session_manager_worker_t;
 
 struct _session_manager_main
index 3413172..5ed681d 100644 (file)
@@ -751,10 +751,12 @@ static void
 session_update_dispatch_period (session_manager_worker_t * wrk, f64 now,
                                u32 thread_index)
 {
-  f64 sample, prev_period = wrk->dispatch_period, a = 0.8;
-
-  sample = now - wrk->last_vlib_time;
-  wrk->dispatch_period = a * sample + (1 - a) * prev_period;
+  if (wrk->last_tx_packets > 8)
+    {
+      f64 sample = now - wrk->last_vlib_time;
+      sample = (sample * wrk->last_tx_packets) / VLIB_FRAME_SIZE;
+      wrk->dispatch_period = (wrk->dispatch_period + sample) * 0.5;
+    }
   wrk->last_vlib_time = now;
 }
 
@@ -917,6 +919,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   _vec_len (fifo_events) = 0;
   wrk->free_event_vector = fifo_events;
+  wrk->last_tx_packets = n_tx_packets;
 
   vlib_node_increment_counter (vm, session_queue_node.index,
                               SESSION_QUEUE_ERROR_TX, n_tx_packets);
index 0dd9ccd..42149eb 100644 (file)
@@ -529,23 +529,32 @@ spacer_set_pace_rate (spacer_t * pacer, u64 rate_bytes_per_sec)
 }
 
 void
-transport_connection_tx_pacer_init (transport_connection_t * tc,
-                                   u32 rate_bytes_per_sec,
-                                   u32 initial_bucket)
+transport_connection_tx_pacer_reset (transport_connection_t * tc,
+                                    u32 rate_bytes_per_sec,
+                                    u32 start_bucket, u64 time_now)
 {
-  vlib_main_t *vm = vlib_get_main ();
-  u64 time_now = vm->clib_time.last_cpu_time;
   spacer_t *pacer = &tc->pacer;
   f64 dispatch_period;
   u32 burst_size;
 
-  tc->flags |= TRANSPORT_CONNECTION_F_IS_TX_PACED;
   dispatch_period = transport_dispatch_period (tc->thread_index);
   burst_size = rate_bytes_per_sec * dispatch_period;
   spacer_update_max_burst_size (&tc->pacer, burst_size);
   spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec);
   pacer->last_update = time_now >> SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
-  pacer->bucket = initial_bucket;
+  pacer->bucket = start_bucket;
+}
+
+void
+transport_connection_tx_pacer_init (transport_connection_t * tc,
+                                   u32 rate_bytes_per_sec,
+                                   u32 initial_bucket)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  tc->flags |= TRANSPORT_CONNECTION_F_IS_TX_PACED;
+  transport_connection_tx_pacer_reset (tc, rate_bytes_per_sec,
+                                      initial_bucket,
+                                      vm->clib_time.last_cpu_time);
 }
 
 void
index b7aa4b7..3bfed41 100644 (file)
@@ -102,6 +102,9 @@ transport_tx_fn_type_t transport_protocol_tx_fn_type (transport_proto_t tp);
 void transport_update_time (f64 time_now, u8 thread_index);
 void transport_enable_disable (vlib_main_t * vm, u8 is_en);
 
+void transport_connection_tx_pacer_reset (transport_connection_t * tc,
+                                         u32 rate_bytes_per_sec,
+                                         u32 initial_bucket, u64 time_now);
 /**
  * Initialize tx pacer for connection
  *
index 1726355..ba427b7 100644 (file)
@@ -1153,7 +1153,7 @@ const static transport_proto_vft_t tcp_proto = {
 /* *INDENT-ON* */
 
 void
-tcp_update_pacer (tcp_connection_t * tc)
+tcp_connection_tx_pacer_update (tcp_connection_t * tc)
 {
   f64 srtt;
 
@@ -1165,6 +1165,17 @@ tcp_update_pacer (tcp_connection_t * tc)
                                        ((f64) tc->cwnd) / srtt);
 }
 
+void
+tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
+                              u32 start_bucket)
+{
+  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+  u32 byte_rate = window / ((f64) TCP_TICK * tc->srtt);
+  u64 last_time = wrk->vm->clib_time.last_cpu_time;
+  transport_connection_tx_pacer_reset (&tc->connection, byte_rate,
+                                      start_bucket, last_time);
+}
+
 static void
 tcp_timer_keep_handler (u32 conn_index)
 {
index 71af461..bd5e4f7 100644 (file)
@@ -735,14 +735,16 @@ void tcp_connection_timers_init (tcp_connection_t * tc);
 void tcp_connection_timers_reset (tcp_connection_t * tc);
 void tcp_init_snd_vars (tcp_connection_t * tc);
 void tcp_connection_init_vars (tcp_connection_t * tc);
-void tcp_update_pacer (tcp_connection_t * tc);
+void tcp_connection_tx_pacer_update (tcp_connection_t * tc);
+void tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
+                                   u32 start_bucket);
 
 always_inline void
 tcp_cc_rcv_ack (tcp_connection_t * tc)
 {
   tc->cc_algo->rcv_ack (tc);
   tc->tsecr_last_ack = tc->rcv_opts.tsecr;
-  tcp_update_pacer (tc);
+  tcp_connection_tx_pacer_update (tc);
 }
 
 always_inline void
index af13c5f..fb2f1dd 100644 (file)
@@ -1085,10 +1085,6 @@ tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
   tcp_fastrecovery_1_smss_off (tc);
   tcp_fastrecovery_first_off (tc);
 
-  /* Update pacer because our cwnd changed. Also makes sure
-   * that we recompute the max burst size */
-  tcp_update_pacer (tc);
-
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
 }
 
@@ -1290,7 +1286,8 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
        }
       else if (tcp_should_fastrecover (tc))
        {
-         u32 byte_rate;
+         u32 pacer_wnd;
+
          ASSERT (!tcp_in_fastrecovery (tc));
 
          /* Heuristic to catch potential late dupacks
@@ -1319,8 +1316,9 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
              tc->cwnd = tc->ssthresh + 3 * tc->snd_mss;
            }
 
-         byte_rate = (0.3 * tc->cwnd) / ((f64) TCP_TICK * tc->srtt);
-         transport_connection_tx_pacer_init (&tc->connection, byte_rate, 0);
+         pacer_wnd = clib_max (0.1 * tc->cwnd, 2 * tc->snd_mss);
+         tcp_connection_tx_pacer_reset (tc, pacer_wnd,
+                                        0 /* start bucket */ );
          tcp_program_fastretransmit (tcp_get_worker (tc->c_thread_index),
                                      tc);
          return;
index c315c03..c135a31 100644 (file)
@@ -1406,7 +1406,7 @@ tcp_rxt_timeout_cc (tcp_connection_t * tc)
   tc->snd_congestion = tc->snd_una_max;
   tc->rtt_ts = 0;
   tc->cwnd_acc_bytes = 0;
-
+  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 2 * tc->snd_mss);
   tcp_recovery_on (tc);
 }