X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fsession%2Ftransport.c;h=c2d21bf28f915cd9efa433e5f0f6c1c582fb1c04;hb=5f45e01f5c3ce239eccd9546e2d04fa3141ca5cb;hp=fefedcae8f27c4393fd67a6ee23f3fb18577e815;hpb=5665cedf57165c05d00f28de06b627047902ffce;p=vpp.git

diff --git a/src/vnet/session/transport.c b/src/vnet/session/transport.c
index fefedcae8f2..c2d21bf28f9 100644
--- a/src/vnet/session/transport.c
+++ b/src/vnet/session/transport.c
@@ -47,7 +47,8 @@ static clib_spinlock_t local_endpoints_lock;
  */
 static double transport_pacer_period;
 
-#define TRANSPORT_PACER_MIN_MSS 1460
+#define TRANSPORT_PACER_MIN_MSS 1460
+#define TRANSPORT_PACER_MIN_BURST TRANSPORT_PACER_MIN_MSS
 
 u8 *
 format_transport_proto (u8 * s, va_list * args)
@@ -318,7 +319,7 @@ transport_endpoint_mark_used (u8 proto, ip46_address_t * ip, u16 port)
   transport_endpoint_t *tep;
   clib_spinlock_lock_if_init (&local_endpoints_lock);
   tep = transport_endpoint_new ();
-  clib_memcpy (&tep->ip, ip, sizeof (*ip));
+  clib_memcpy_fast (&tep->ip, ip, sizeof (*ip));
   tep->port = port;
   transport_endpoint_table_add (&local_endpoints_table, proto, tep,
                                 tep - local_endpoints);
@@ -387,7 +388,7 @@ transport_get_interface_ip (u32 sw_if_index, u8 is_ip4, ip46_address_t * addr)
         return clib_error_return (0, "no routable ip6 addresses on %U",
                                   format_vnet_sw_if_index_name,
                                   vnet_get_main (), sw_if_index);
-      clib_memcpy (&addr->ip6, ip6, sizeof (*ip6));
+      clib_memcpy_fast (&addr->ip6, ip6, sizeof (*ip6));
     }
   return 0;
 }
@@ -403,7 +404,7 @@ transport_find_local_ip_for_remote (u32 sw_if_index,
   if (sw_if_index == ENDPOINT_INVALID_INDEX)
     {
       /* Find a FIB path to the destination */
-      clib_memcpy (&prefix.fp_addr, &rmt->ip, sizeof (rmt->ip));
+      clib_memcpy_fast (&prefix.fp_addr, &rmt->ip, sizeof (rmt->ip));
       prefix.fp_proto = rmt->is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
       prefix.fp_len = rmt->is_ip4 ? 32 : 128;
 
@@ -443,12 +444,16 @@ transport_alloc_local_endpoint (u8 proto, transport_endpoint_cfg_t * rmt_cfg,
       error = transport_find_local_ip_for_remote (rmt_cfg->peer.sw_if_index,
                                                    rmt, lcl_addr);
       if (error)
-        return -1;
+        {
+          clib_error_report (error);
+          return -1;
+        }
     }
   else
     {
       /* Assume session layer vetted this address */
-      clib_memcpy (lcl_addr, &rmt_cfg->peer.ip, sizeof (rmt_cfg->peer.ip));
+      clib_memcpy_fast (lcl_addr, &rmt_cfg->peer.ip,
+                        sizeof (rmt_cfg->peer.ip));
     }
 
   /*
@@ -497,9 +502,14 @@ static inline u32
 spacer_max_burst (spacer_t * pacer, u64 norm_time_now)
 {
   u64 n_periods = norm_time_now - pacer->last_update;
+  u64 inc;
+
+  if (n_periods > 0 && (inc = n_periods * pacer->tokens_per_period) > 10)
+    {
+      pacer->last_update = norm_time_now;
+      pacer->bucket += inc;
+    }
 
-  pacer->last_update = norm_time_now;
-  pacer->bucket += n_periods * pacer->tokens_per_period;
   return clib_min (pacer->bucket, pacer->max_burst_size);
 }
 
@@ -513,7 +523,8 @@ spacer_update_bucket (spacer_t * pacer, u32 bytes)
 static inline void
 spacer_update_max_burst_size (spacer_t * pacer, u32 max_burst_bytes)
 {
-  pacer->max_burst_size = clib_max (max_burst_bytes, TRANSPORT_PACER_MIN_MSS);
+  pacer->max_burst_size = clib_max (max_burst_bytes,
+                                    TRANSPORT_PACER_MIN_BURST);
 }
 
 static inline void
@@ -524,43 +535,63 @@ spacer_set_pace_rate (spacer_t * pacer, u64 rate_bytes_per_sec)
 }
 
 void
-transport_connection_tx_pacer_init (transport_connection_t * tc,
-                                    u32 rate_bytes_per_sec, u32 burst_bytes)
+transport_connection_tx_pacer_reset (transport_connection_t * tc,
+                                     u32 rate_bytes_per_sec,
+                                     u32 start_bucket, u64 time_now)
 {
-  vlib_main_t *vm = vlib_get_main ();
-  u64 time_now = vm->clib_time.last_cpu_time;
   spacer_t *pacer = &tc->pacer;
+  f64 dispatch_period;
+  u32 burst_size;
 
-  tc->flags |= TRANSPORT_CONNECTION_F_IS_TX_PACED;
-  spacer_update_max_burst_size (&tc->pacer, burst_bytes);
+  dispatch_period = transport_dispatch_period (tc->thread_index);
+  burst_size = rate_bytes_per_sec * dispatch_period;
+  spacer_update_max_burst_size (&tc->pacer, burst_size);
   spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec);
   pacer->last_update = time_now >> SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
-  pacer->bucket = burst_bytes;
+  pacer->bucket = start_bucket;
+}
+
+void
+transport_connection_tx_pacer_init (transport_connection_t * tc,
+                                    u32 rate_bytes_per_sec,
+                                    u32 initial_bucket)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  tc->flags |= TRANSPORT_CONNECTION_F_IS_TX_PACED;
+  transport_connection_tx_pacer_reset (tc, rate_bytes_per_sec,
+                                       initial_bucket,
+                                       vm->clib_time.last_cpu_time);
 }
 
 void
 transport_connection_tx_pacer_update (transport_connection_t * tc,
                                       u64 bytes_per_sec)
 {
-  u32 burst_size;
-
-  burst_size = bytes_per_sec * transport_dispatch_period (tc->thread_index);
+  f64 dispatch_period = transport_dispatch_period (tc->thread_index);
+  u32 burst_size = 1.1 * bytes_per_sec * dispatch_period;
   spacer_set_pace_rate (&tc->pacer, bytes_per_sec);
   spacer_update_max_burst_size (&tc->pacer, burst_size);
 }
 
 u32
-transport_connection_max_tx_burst (transport_connection_t * tc, u64 time_now)
+transport_connection_tx_pacer_burst (transport_connection_t * tc,
+                                     u64 time_now)
+{
+  time_now >>= SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
+  return spacer_max_burst (&tc->pacer, time_now);
+}
+
+u32
+transport_connection_snd_space (transport_connection_t * tc, u64 time_now,
+                                u16 mss)
 {
   u32 snd_space, max_paced_burst;
-  u32 mss;
 
   snd_space = tp_vfts[tc->proto].send_space (tc);
   if (transport_connection_is_tx_paced (tc))
     {
       time_now >>= SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
       max_paced_burst = spacer_max_burst (&tc->pacer, time_now);
-      mss = tp_vfts[tc->proto].send_mss (tc);
       max_paced_burst = (max_paced_burst < mss) ? 0 : max_paced_burst;
       snd_space = clib_min (snd_space, max_paced_burst);
       snd_space = snd_space - snd_space % mss;
@@ -576,6 +607,13 @@ transport_connection_update_tx_stats (transport_connection_t * tc, u32 bytes)
   spacer_update_bucket (&tc->pacer, bytes);
 }
 
+void
+transport_connection_tx_pacer_update_bytes (transport_connection_t * tc,
+                                            u32 bytes)
+{
+  spacer_update_bucket (&tc->pacer, bytes);
+}
+
 void
 transport_init_tx_pacers_period (void)
 {
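For orientation, below is a minimal, self-contained model of the token-bucket behaviour the patch gives spacer_max_burst(): the bucket is refilled only once at least one pacer period has elapsed and the accumulated credit is worth taking (the "> 10" check above), the burst handed back to the transport is clamped to max_burst_size, and bytes actually transmitted are deducted again. This is an illustrative sketch under stated assumptions, not VPP code: the toy_pacer_t struct, the main() driver, the fake period clock and the 12500-bytes-per-period figure are invented for the example, and the deduction helper only assumes that spacer_update_bucket() subtracts sent bytes from the bucket.

/*
 * Standalone sketch (not part of the patch): simplified model of the
 * reworked transport tx pacer token bucket.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct
{
  uint64_t last_update;		/* last refill time, in pacer periods */
  uint64_t bucket;		/* bytes currently available to send */
  uint64_t tokens_per_period;	/* bytes credited per elapsed period */
  uint32_t max_burst_size;	/* cap on the burst handed to the transport */
} toy_pacer_t;

/* Refill only when enough credit has accumulated, then clamp to the
 * maximum burst, mirroring the reworked spacer_max_burst () above. */
static uint32_t
toy_pacer_max_burst (toy_pacer_t * p, uint64_t now_periods)
{
  uint64_t n_periods = now_periods - p->last_update;
  uint64_t inc;

  if (n_periods > 0 && (inc = n_periods * p->tokens_per_period) > 10)
    {
      p->last_update = now_periods;
      p->bucket += inc;
    }
  return p->bucket < p->max_burst_size ? (uint32_t) p->bucket
					: p->max_burst_size;
}

/* Assumed behaviour of spacer_update_bucket (): deduct transmitted bytes. */
static void
toy_pacer_sent (toy_pacer_t * p, uint32_t bytes)
{
  p->bucket -= bytes;
}

int
main (void)
{
  /* Hypothetical pacing rate: ~12500 bytes of credit per pacer period. */
  toy_pacer_t p = { .last_update = 0, .bucket = 0,
		    .tokens_per_period = 12500, .max_burst_size = 65535 };
  uint64_t now;

  for (now = 1; now <= 5; now++)
    {
      uint32_t burst = toy_pacer_max_burst (&p, now);
      printf ("period %llu: may send %u bytes\n",
	      (unsigned long long) now, burst);
      toy_pacer_sent (&p, burst);	/* pretend the whole burst was sent */
    }
  return 0;
}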