s = format (s, "%U", tp_vft->format_connection, conn_index, thread_index,
verbose);
tc = tp_vft->get_connection (conn_index, thread_index);
- if (tc && transport_connection_is_tx_paced (tc) && verbose > 1)
+ if (tc && verbose > 1)
{
indent = format_get_indent (s) + 1;
- s = format (s, "%Upacer: %U\n", format_white_space, indent,
- format_transport_pacer, &tc->pacer, tc->thread_index);
+ if (transport_connection_is_tx_paced (tc))
+ s = format (s, "%Upacer: %U\n", format_white_space, indent,
+ format_transport_pacer, &tc->pacer, tc->thread_index);
s = format (s, "%Utransport: flags 0x%x\n", format_white_space, indent,
tc->flags);
}
format_transport_half_open_connection (u8 * s, va_list * args)
{
u32 transport_proto = va_arg (*args, u32);
- u32 listen_index = va_arg (*args, u32);
+ u32 ho_index = va_arg (*args, u32);
transport_proto_vft_t *tp_vft;
tp_vft = transport_protocol_get_vft (transport_proto);
if (!tp_vft)
return s;
- s = format (s, "%U", tp_vft->format_half_open, listen_index);
+ s = format (s, "%U", tp_vft->format_half_open, ho_index);
return s;
}
tp_vfts[tp].cleanup (conn_index, thread_index);
}
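+/* Optional half-open cleanup: forwarded only when the transport provides a
+ * cleanup_ho hook, e.g., for connects that never completed */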
+void
+transport_cleanup_half_open (transport_proto_t tp, u32 conn_index)
+{
+ if (tp_vfts[tp].cleanup_ho)
+ tp_vfts[tp].cleanup_ho (conn_index);
+}
+
int
transport_connect (transport_proto_t tp, transport_endpoint_cfg_t * tep)
{
return tp_vfts[tp].connect (tep);
}
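+/* Half-close is optional; transports that do not implement the hook simply
+ * ignore the request */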
+void
+transport_half_close (transport_proto_t tp, u32 conn_index, u8 thread_index)
+{
+ if (tp_vfts[tp].half_close)
+ tp_vfts[tp].half_close (conn_index, thread_index);
+}
+
void
transport_close (transport_proto_t tp, u32 conn_index, u8 thread_index)
{
u32
transport_start_listen (transport_proto_t tp, u32 session_index,
- transport_endpoint_t * tep)
+ transport_endpoint_cfg_t *tep)
{
return tp_vfts[tp].start_listen (session_index, tep);
}
}
}
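+/* Generic attribute get/set forwarded to the transport; returns -1 when the
+ * transport does not implement the attribute hook */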
+int
+transport_connection_attribute (transport_proto_t tp, u32 conn_index,
+ u8 thread_index, u8 is_get,
+ transport_endpt_attr_t *attr)
+{
+ if (!tp_vfts[tp].attribute)
+ return -1;
+
+ return tp_vfts[tp].attribute (conn_index, thread_index, is_get, attr);
+}
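+
+/* Hypothetical usage sketch (attribute type and member names assumed here;
+ * see the session layer's transport_endpt_attr_t for the real ones):
+ *
+ *   transport_endpt_attr_t attr = { .type = TRANSPORT_ENDPT_ATTR_MSS };
+ *   if (!transport_connection_attribute (TRANSPORT_PROTO_TCP, conn_index,
+ *                                        thread_index, 1, &attr))
+ *     clib_warning ("mss %u", attr.mss);
+ */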
+
#define PORT_MASK ((1 << 16) - 1)
void
}
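+/* Note: sw_if_index is now in/out; when unset it is resolved from the FIB
+ * and returned to the caller along with the local ip */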
static session_error_t
-transport_find_local_ip_for_remote (u32 sw_if_index,
- transport_endpoint_t * rmt,
- ip46_address_t * lcl_addr)
+transport_find_local_ip_for_remote (u32 *sw_if_index,
+ transport_endpoint_t *rmt,
+ ip46_address_t *lcl_addr)
{
fib_node_index_t fei;
fib_prefix_t prefix;
- if (sw_if_index == ENDPOINT_INVALID_INDEX)
+ if (*sw_if_index == ENDPOINT_INVALID_INDEX)
{
/* Find a FIB path to the destination */
clib_memcpy_fast (&prefix.fp_addr, &rmt->ip, sizeof (rmt->ip));
if (fei == FIB_NODE_INDEX_INVALID)
return SESSION_E_NOROUTE;
- sw_if_index = fib_entry_get_resolving_interface (fei);
- if (sw_if_index == ENDPOINT_INVALID_INDEX)
+ *sw_if_index = fib_entry_get_resolving_interface (fei);
+ if (*sw_if_index == ENDPOINT_INVALID_INDEX)
return SESSION_E_NOINTF;
}
clib_memset (lcl_addr, 0, sizeof (*lcl_addr));
- return transport_get_interface_ip (sw_if_index, rmt->is_ip4, lcl_addr);
+ return transport_get_interface_ip (*sw_if_index, rmt->is_ip4, lcl_addr);
}
int
*/
if (ip_is_zero (&rmt_cfg->peer.ip, rmt_cfg->peer.is_ip4))
{
- error = transport_find_local_ip_for_remote (rmt_cfg->peer.sw_if_index,
+ error = transport_find_local_ip_for_remote (&rmt_cfg->peer.sw_if_index,
rmt, lcl_addr);
if (error)
return error;
now = transport_us_time_now (thread_index);
diff = now - pacer->last_update;
- s = format (s, "rate %lu bucket %lu t/p %.3f last_update %U idle %u",
+ s = format (s, "rate %lu bucket %ld t/p %.3f last_update %U burst %u",
pacer->bytes_per_sec, pacer->bucket, pacer->tokens_per_period,
- format_clib_us_time, diff, pacer->idle_timeout_us);
+ format_clib_us_time, diff, pacer->max_burst);
return s;
}
spacer_max_burst (spacer_t * pacer, clib_us_time_t time_now)
{
u64 n_periods = (time_now - pacer->last_update);
- u64 inc;
-
- if (PREDICT_FALSE (n_periods > pacer->idle_timeout_us))
- {
- pacer->last_update = time_now;
- pacer->bucket = TRANSPORT_PACER_MIN_BURST;
- return TRANSPORT_PACER_MIN_BURST;
- }
+ i64 inc;
if ((inc = (f32) n_periods * pacer->tokens_per_period) > 10)
{
pacer->last_update = time_now;
- pacer->bucket = clib_min (pacer->bucket + inc, pacer->bytes_per_sec);
+ pacer->bucket = clib_min (pacer->bucket + inc, (i64) pacer->max_burst);
}
- return clib_min (pacer->bucket, TRANSPORT_PACER_MAX_BURST);
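+  /* Bucket is signed and may go into deficit: a full max_burst quantum is
+   * granted whenever the bucket is non-negative, nothing otherwise */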
+ return pacer->bucket >= 0 ? pacer->max_burst : 0;
}
static inline void
spacer_update_bucket (spacer_t * pacer, u32 bytes)
{
- ASSERT (pacer->bucket >= bytes);
pacer->bucket -= bytes;
}
static inline void
spacer_set_pace_rate (spacer_t * pacer, u64 rate_bytes_per_sec,
- clib_us_time_t rtt)
+ clib_us_time_t rtt, clib_time_type_t sec_per_loop)
{
+ clib_us_time_t max_time;
+
ASSERT (rate_bytes_per_sec != 0);
pacer->bytes_per_sec = rate_bytes_per_sec;
pacer->tokens_per_period = rate_bytes_per_sec * CLIB_US_TIME_PERIOD;
- pacer->idle_timeout_us = clib_max (rtt * TRANSPORT_PACER_IDLE_FACTOR,
- TRANSPORT_PACER_MIN_IDLE);
+
+ /* Allow a minimum number of bursts per rtt, provided their size is
+ * acceptable. The goal is to spread the sending of data over the rtt
+ * while still allowing some coalescing, which can potentially
+ * 1) reduce load on the session layer by reducing the scheduling
+ * frequency for a session and
+ * 2) optimize sending when tso is available
+ *
+ * Max "time-length" of a burst cannot be less than 1us or more than 1ms.
+ */
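+ /* Worked example (illustrative numbers, assuming 20 bursts per rtt): at
+ * 125e6 bytes/sec with an rtt of 40ms, max_time is 40000us / 20 = 2000us,
+ * clamped to 1000us, i.e., a 125kB burst before the final clamp below */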
+ max_time = clib_max (rtt / TRANSPORT_PACER_BURSTS_PER_RTT,
+ (clib_us_time_t) (sec_per_loop * CLIB_US_TIME_FREQ));
+ max_time = clib_clamp (max_time, 1 /* 1us */ , 1000 /* 1ms */ );
+ pacer->max_burst = (rate_bytes_per_sec * max_time) * CLIB_US_TIME_PERIOD;
+ pacer->max_burst = clib_clamp (pacer->max_burst, TRANSPORT_PACER_MIN_BURST,
+ TRANSPORT_PACER_MAX_BURST);
}
static inline u64
u64 rate_bytes_per_sec, u32 start_bucket,
clib_us_time_t rtt)
{
- spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec, rtt);
+ spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec, rtt,
+ transport_seconds_per_loop (tc->thread_index));
spacer_reset (&tc->pacer, transport_us_time_now (tc->thread_index),
start_bucket);
}
transport_connection_tx_pacer_update (transport_connection_t * tc,
u64 bytes_per_sec, clib_us_time_t rtt)
{
- spacer_set_pace_rate (&tc->pacer, bytes_per_sec, rtt);
+ spacer_set_pace_rate (&tc->pacer, bytes_per_sec, rtt,
+ transport_seconds_per_loop (tc->thread_index));
}
u32
spacer_update_bucket (&tc->pacer, bytes);
}
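+/* Refresh the session worker's notion of time so pacers on this thread
+ * read an up-to-date timestamp */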
+void
+transport_update_pacer_time (u32 thread_index, clib_time_type_t now)
+{
+ session_wrk_update_time (session_main_get_worker (thread_index), now);
+}
+
void
transport_connection_reschedule (transport_connection_t * tc)
{
tc->flags &= ~TRANSPORT_CONNECTION_F_DESCHED;
- transport_connection_tx_pacer_reset_bucket (tc, TRANSPORT_PACER_MIN_BURST);
+ transport_connection_tx_pacer_reset_bucket (tc, 0 /* bucket */);
if (transport_max_tx_dequeue (tc))
sesssion_reschedule_tx (tc);
else
}
}
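+/* Set up out-of-order lookup on the session's fifos: ooo enqueue on rx
+ * (peer data may arrive out of order) and ooo dequeue on tx (transport may
+ * re-read previously sent data, e.g., on retransmit) */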
+void
+transport_fifos_init_ooo (transport_connection_t * tc)
+{
+ session_t *s = session_get (tc->s_index, tc->thread_index);
+ svm_fifo_init_ooo_lookup (s->rx_fifo, 0 /* ooo enq */ );
+ svm_fifo_init_ooo_lookup (s->tx_fifo, 1 /* ooo deq */ );
+}
+
void
transport_update_time (clib_time_type_t time_now, u8 thread_index)
{
{
if (vft->enable)
(vft->enable) (vm, is_en);
+
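+  /* Transports may provide an update_time hook; its (de)registration with
+   * the session layer follows the enable/disable state */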
+ if (vft->update_time)
+ session_register_update_time_fn (vft->update_time, is_en);
}
}
smm->local_endpoints_table_memory);
num_threads = 1 /* main thread */ + vtm->n_threads;
if (num_threads > 1)
- clib_spinlock_init (&local_endpoints_lock);
+ {
+ clib_spinlock_init (&local_endpoints_lock);
+ /* Main thread is not polled when there are workers, so run
+ * connectionless transports on the first worker */
+ smm->transport_cl_thread = 1;
+ }
}
/*