+ return pacer->bucket > 0 ? pacer->max_burst : 0;
+}
+
+/* Consume @bytes worth of tokens from the pacer's bucket after data is
+ * sent. No refill is done here; presumably the bucket is replenished by
+ * spacer_max_burst (not fully visible in this chunk) -- TODO confirm.
+ * NOTE(review): bucket appears to be allowed to go negative -- verify
+ * its declared type is signed. */
+static inline void
+spacer_update_bucket (spacer_t * pacer, u32 bytes)
+{
+ pacer->bucket -= bytes;
+}
+
+/* Configure the pacer's rate and the maximum burst handed out per dispatch.
+ *
+ * @param pacer               pacer to update
+ * @param rate_bytes_per_sec  pacing rate; must be non-zero (asserted below)
+ * @param rtt                 round-trip time in us, used to size bursts
+ * @param sec_per_loop        duration of one dispatch loop, in seconds
+ */
+static inline void
+spacer_set_pace_rate (spacer_t * pacer, u64 rate_bytes_per_sec,
+ clib_us_time_t rtt, clib_time_type_t sec_per_loop)
+{
+ clib_us_time_t max_time;
+
+ ASSERT (rate_bytes_per_sec != 0);
+ pacer->bytes_per_sec = rate_bytes_per_sec;
+ /* Tokens credited per pacer time period; CLIB_US_TIME_PERIOD presumably
+  * scales bytes/sec down to bytes per microsecond tick -- confirm against
+  * the vppinfra time definitions. */
+ pacer->tokens_per_period = rate_bytes_per_sec * CLIB_US_TIME_PERIOD;
+
+ /* Allow a min number of bursts per rtt, if their size is acceptable. Goal
+ * is to spread the sending of data over the rtt but to also allow for some
+ * coalescing that can potentially
+ * 1) reduce load on session layer by reducing scheduling frequency for a
+ * session and
+ * 2) optimize sending when tso if available
+ *
+ * Max "time-length" of a burst cannot be less than 1us or more than 1ms.
+ */
+ max_time = clib_max (rtt / TRANSPORT_PACER_BURSTS_PER_RTT,
+ (clib_us_time_t) (sec_per_loop * CLIB_US_TIME_FREQ));
+ max_time = clib_clamp (max_time, 1 /* 1us */ , 1000 /* 1ms */ );
+ /* Burst = rate * max_time (us) scaled by the us time period, then clamped
+  * to [TRANSPORT_PACER_MIN_BURST, TRANSPORT_PACER_MAX_BURST]. */
+ pacer->max_burst = (rate_bytes_per_sec * max_time) * CLIB_US_TIME_PERIOD;
+ pacer->max_burst = clib_clamp (pacer->max_burst, TRANSPORT_PACER_MIN_BURST,
+ TRANSPORT_PACER_MAX_BURST);
+}
+
+/* Return the pacer's currently configured rate in bytes per second. */
+static inline u64
+spacer_pace_rate (spacer_t * pacer)
+{
+ return pacer->bytes_per_sec;
+}
+
+/* Restart pacer accounting: record @time_now as the last update time and
+ * load the token bucket with @bucket bytes. Does not touch the rate. */
+static inline void
+spacer_reset (spacer_t * pacer, clib_us_time_t time_now, u64 bucket)
+{
+ pacer->last_update = time_now;
+ pacer->bucket = bucket;
+}
+
+/* Reprogram @tc's tx pacer with a new rate and restart its bucket.
+ *
+ * @param tc                 transport connection owning the pacer
+ * @param rate_bytes_per_sec new pacing rate (must be non-zero)
+ * @param start_bucket       initial token-bucket fill, in bytes
+ * @param rtt                round-trip time in us, used to size bursts
+ */
+void
+transport_connection_tx_pacer_reset (transport_connection_t * tc,
+ u64 rate_bytes_per_sec, u32 start_bucket,
+ clib_us_time_t rtt)
+{
+ spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec, rtt,
+ transport_seconds_per_loop (tc->thread_index));
+ spacer_reset (&tc->pacer, transport_us_time_now (tc->thread_index),
+ start_bucket);
+}
+
+/* Reset only the pacer's token bucket to @bucket bytes, stamping the
+ * current thread time as the last update. Rate is left unchanged. */
+void
+transport_connection_tx_pacer_reset_bucket (transport_connection_t * tc,
+ u32 bucket)
+{
+ spacer_reset (&tc->pacer, transport_us_time_now (tc->thread_index), bucket);
+}
+
+/* Enable tx pacing on @tc and initialize the pacer.
+ *
+ * Marks the connection as paced, then resets the pacer with the given rate
+ * and bucket. The 1e6 passed as rtt is a 1-second placeholder estimate used
+ * until a real rtt measurement updates the pacer. */
+void
+transport_connection_tx_pacer_init (transport_connection_t * tc,
+ u64 rate_bytes_per_sec,
+ u32 initial_bucket)
+{
+ tc->flags |= TRANSPORT_CONNECTION_F_IS_TX_PACED;
+ transport_connection_tx_pacer_reset (tc, rate_bytes_per_sec,
+ initial_bucket, 1e6);
+}
+
+/* Update @tc's pacing rate and burst sizing for a new rate/rtt estimate.
+ * Unlike the reset variant, the token bucket is left as-is. */
+void
+transport_connection_tx_pacer_update (transport_connection_t * tc,
+ u64 bytes_per_sec, clib_us_time_t rtt)
+{
+ spacer_set_pace_rate (&tc->pacer, bytes_per_sec, rtt,
+ transport_seconds_per_loop (tc->thread_index));
+}
+
+/* Return the number of bytes @tc may send now, as computed by
+ * spacer_max_burst at the current thread time. */
+u32
+transport_connection_tx_pacer_burst (transport_connection_t * tc)
+{
+ return spacer_max_burst (&tc->pacer,
+ transport_us_time_now (tc->thread_index));
+}
+
+/* Return @tc's configured pacing rate in bytes per second. */
+u64
+transport_connection_tx_pacer_rate (transport_connection_t * tc)
+{
+ return spacer_pace_rate (&tc->pacer);
+}
+
+/* Account @bytes of transmitted data against @tc's pacer, but only if the
+ * connection is actually paced. Callers that know pacing is enabled can use
+ * transport_connection_tx_pacer_update_bytes, which skips the check. */
+void
+transport_connection_update_tx_bytes (transport_connection_t * tc, u32 bytes)
+{
+ if (transport_connection_is_tx_paced (tc))
+ spacer_update_bucket (&tc->pacer, bytes);
+}
+
+/* Unconditionally debit @bytes from @tc's pacer bucket. Caller is expected
+ * to know the connection is paced (no flag check, unlike
+ * transport_connection_update_tx_bytes). */
+void
+transport_connection_tx_pacer_update_bytes (transport_connection_t * tc,
+ u32 bytes)
+{
+ spacer_update_bucket (&tc->pacer, bytes);
+}
+
+/* Propagate the dispatch-loop timestamp @now to the session worker for
+ * @thread_index; thin forwarder to session_wrk_update_time. */
+void
+transport_update_pacer_time (u32 thread_index, clib_time_type_t now)
+{
+ session_wrk_update_time (session_main_get_worker (thread_index), now);
+}
+
+void
+transport_connection_reschedule (transport_connection_t * tc)
+{
+ tc->flags &= ~TRANSPORT_CONNECTION_F_DESCHED;
+ transport_connection_tx_pacer_reset_bucket (tc, TRANSPORT_PACER_MIN_BURST);
+ if (transport_max_tx_dequeue (tc))
+ sesssion_reschedule_tx (tc);
+ else