X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fsession%2Ftransport.c;h=2c88a4c4931d708a92f77363295d64e391d742fd;hb=534468e9f768ae7465ef722520dadfd916cdc9fb;hp=967ff5d9cba335ebac6664f12156a735782db4c8;hpb=8c4fa01d1360cd5315e671de96dfeff7dae246f5;p=vpp.git

diff --git a/src/vnet/session/transport.c b/src/vnet/session/transport.c
index 967ff5d9cba..2c88a4c4931 100644
--- a/src/vnet/session/transport.c
+++ b/src/vnet/session/transport.c
@@ -124,14 +124,14 @@ u8 *
 format_transport_half_open_connection (u8 * s, va_list * args)
 {
   u32 transport_proto = va_arg (*args, u32);
-  u32 listen_index = va_arg (*args, u32);
+  u32 ho_index = va_arg (*args, u32);
   transport_proto_vft_t *tp_vft;
 
   tp_vft = transport_protocol_get_vft (transport_proto);
   if (!tp_vft)
     return s;
 
-  s = format (s, "%U", tp_vft->format_half_open, listen_index);
+  s = format (s, "%U", tp_vft->format_half_open, ho_index);
 
   return s;
 }
@@ -317,6 +317,13 @@ transport_connect (transport_proto_t tp, transport_endpoint_cfg_t * tep)
   return tp_vfts[tp].connect (tep);
 }
 
+void
+transport_half_close (transport_proto_t tp, u32 conn_index, u8 thread_index)
+{
+  if (tp_vfts[tp].half_close)
+    tp_vfts[tp].half_close (conn_index, thread_index);
+}
+
 void
 transport_close (transport_proto_t tp, u32 conn_index, u8 thread_index)
 {
@@ -399,6 +406,17 @@ transport_get_listener_endpoint (transport_proto_t tp, u32 conn_index,
     }
 }
 
+int
+transport_connection_attribute (transport_proto_t tp, u32 conn_index,
+				u8 thread_index, u8 is_get,
+				transport_endpt_attr_t *attr)
+{
+  if (!tp_vfts[tp].attribute)
+    return -1;
+
+  return tp_vfts[tp].attribute (conn_index, thread_index, is_get, attr);
+}
+
 #define PORT_MASK ((1 << 16)- 1)
 
 void
@@ -632,9 +650,9 @@ format_transport_pacer (u8 * s, va_list * args)
 
   now = transport_us_time_now (thread_index);
   diff = now - pacer->last_update;
-  s = format (s, "rate %lu bucket %lu t/p %.3f last_update %U idle %u",
+  s = format (s, "rate %lu bucket %ld t/p %.3f last_update %U burst %u",
 	      pacer->bytes_per_sec, pacer->bucket, pacer->tokens_per_period,
-	      format_clib_us_time, diff, pacer->idle_timeout_us);
+	      format_clib_us_time, diff, pacer->max_burst);
   return s;
 }
 
@@ -644,38 +662,46 @@ spacer_max_burst (spacer_t * pacer, clib_us_time_t time_now)
   u64 n_periods = (time_now - pacer->last_update);
   u64 inc;
 
-  if (PREDICT_FALSE (n_periods > pacer->idle_timeout_us))
-    {
-      pacer->last_update = time_now;
-      pacer->bucket = TRANSPORT_PACER_MIN_BURST;
-      return TRANSPORT_PACER_MIN_BURST;
-    }
-
   if ((inc = (f32) n_periods * pacer->tokens_per_period) > 10)
     {
       pacer->last_update = time_now;
-      pacer->bucket = clib_min (pacer->bucket + inc, pacer->bytes_per_sec);
+      pacer->bucket = clib_min (pacer->bucket + inc, pacer->max_burst);
     }
 
-  return clib_min (pacer->bucket, TRANSPORT_PACER_MAX_BURST);
+  return pacer->bucket > 0 ? pacer->max_burst : 0;
 }
 
 static inline void
 spacer_update_bucket (spacer_t * pacer, u32 bytes)
 {
-  ASSERT (pacer->bucket >= bytes);
   pacer->bucket -= bytes;
 }
 
 static inline void
 spacer_set_pace_rate (spacer_t * pacer, u64 rate_bytes_per_sec,
-		      clib_us_time_t rtt)
+		      clib_us_time_t rtt, clib_time_type_t sec_per_loop)
 {
+  clib_us_time_t max_time;
+
   ASSERT (rate_bytes_per_sec != 0);
   pacer->bytes_per_sec = rate_bytes_per_sec;
   pacer->tokens_per_period = rate_bytes_per_sec * CLIB_US_TIME_PERIOD;
-  pacer->idle_timeout_us = clib_max (rtt * TRANSPORT_PACER_IDLE_FACTOR,
-				     TRANSPORT_PACER_MIN_IDLE);
+
+  /* Allow a min number of bursts per rtt, if their size is acceptable. Goal
+   * is to spread the sending of data over the rtt but to also allow for some
+   * coalescing that can potentially
+   * 1) reduce load on session layer by reducing scheduling frequency for a
+   * session and
+   * 2) optimize sending when tso is available
+   *
+   * Max "time-length" of a burst cannot be less than 1us or more than 1ms.
+   */
+  max_time = clib_max (rtt / TRANSPORT_PACER_BURSTS_PER_RTT,
+		       (clib_us_time_t) (sec_per_loop * CLIB_US_TIME_FREQ));
+  max_time = clib_clamp (max_time, 1 /* 1us */ , 1000 /* 1ms */ );
+  pacer->max_burst = (rate_bytes_per_sec * max_time) * CLIB_US_TIME_PERIOD;
+  pacer->max_burst = clib_clamp (pacer->max_burst, TRANSPORT_PACER_MIN_BURST,
+				 TRANSPORT_PACER_MAX_BURST);
 }
 
 static inline u64
@@ -696,7 +722,8 @@ transport_connection_tx_pacer_reset (transport_connection_t * tc,
 			   u64 rate_bytes_per_sec, u32 start_bucket,
 			   clib_us_time_t rtt)
 {
-  spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec, rtt);
+  spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec, rtt,
+			transport_seconds_per_loop (tc->thread_index));
   spacer_reset (&tc->pacer, transport_us_time_now (tc->thread_index),
 		start_bucket);
 }
@@ -722,7 +749,8 @@ void
 transport_connection_tx_pacer_update (transport_connection_t * tc,
 				      u64 bytes_per_sec, clib_us_time_t rtt)
 {
-  spacer_set_pace_rate (&tc->pacer, bytes_per_sec, rtt);
+  spacer_set_pace_rate (&tc->pacer, bytes_per_sec, rtt,
+			transport_seconds_per_loop (tc->thread_index));
 }
 
 u32
@@ -752,6 +780,12 @@ transport_connection_tx_pacer_update_bytes (transport_connection_t * tc,
   spacer_update_bucket (&tc->pacer, bytes);
 }
 
+void
+transport_update_pacer_time (u32 thread_index, clib_time_type_t now)
+{
+  session_wrk_update_time (session_main_get_worker (thread_index), now);
+}
+
 void
 transport_connection_reschedule (transport_connection_t * tc)
 {
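
For reference, the burst sizing that spacer_set_pace_rate now applies can be read as: pick a burst "time-length" as the larger of rtt / TRANSPORT_PACER_BURSTS_PER_RTT and one dispatch loop (sec_per_loop, supplied by transport_seconds_per_loop in the callers above), clamp it to [1us, 1ms], convert it to bytes at the pace rate, and clamp the result to [TRANSPORT_PACER_MIN_BURST, TRANSPORT_PACER_MAX_BURST]. The standalone C sketch below mirrors that arithmetic; the constant values and helper names it uses are illustrative assumptions, not the definitions from the vpp headers.

/* Standalone sketch of the burst sizing used by spacer_set_pace_rate above.
 * Constant values are illustrative assumptions; the real definitions live in
 * the vpp session/transport headers. */
#include <stdint.h>
#include <stdio.h>

#define US_PER_SEC      1000000.0          /* stand-in for CLIB_US_TIME_FREQ */
#define US_TIME_PERIOD  (1.0 / US_PER_SEC) /* stand-in for CLIB_US_TIME_PERIOD */
#define BURSTS_PER_RTT  20                 /* assumed TRANSPORT_PACER_BURSTS_PER_RTT */
#define MIN_BURST       1460               /* assumed TRANSPORT_PACER_MIN_BURST */
#define MAX_BURST       (43 * 1460)        /* assumed TRANSPORT_PACER_MAX_BURST */

static uint64_t
clamp_u64 (uint64_t v, uint64_t lo, uint64_t hi)
{
  return v < lo ? lo : (v > hi ? hi : v);
}

/* Burst "time-length" is the larger of rtt/BURSTS_PER_RTT and one dispatch
 * loop, clamped to [1us, 1ms], then converted to bytes at the pace rate and
 * clamped to [MIN_BURST, MAX_BURST]. */
static uint64_t
max_burst_bytes (uint64_t rate_bytes_per_sec, uint64_t rtt_us,
		 double sec_per_loop)
{
  uint64_t loop_us = (uint64_t) (sec_per_loop * US_PER_SEC);
  uint64_t max_time = rtt_us / BURSTS_PER_RTT;

  if (loop_us > max_time)
    max_time = loop_us;
  max_time = clamp_u64 (max_time, 1 /* 1us */, 1000 /* 1ms */);

  uint64_t burst =
    (uint64_t) ((double) (rate_bytes_per_sec * max_time) * US_TIME_PERIOD);
  return clamp_u64 (burst, MIN_BURST, MAX_BURST);
}

int
main (void)
{
  /* e.g. 125 MB/s (~1 Gbps) pace rate, 10 ms rtt, 50 us dispatch loop:
   * burst time is 500 us, i.e. 62500 bytes per burst with these assumed
   * clamps. */
  printf ("max burst: %llu bytes\n",
	  (unsigned long long) max_burst_bytes (125000000ULL, 10000, 50e-6));
  return 0;
}

With these assumptions, a slower connection (say 1 MB/s over the same 10 ms rtt) would compute 500 bytes and be lifted to the 1460-byte minimum burst, while a very fast or long-rtt connection is capped by the 1 ms burst time and the maximum burst size, which is the coalescing/tso trade-off described in the comment added by the patch.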