*/
static clib_spinlock_t local_endpoints_lock;
+/*
+ * Period used by transport pacers. Initialized by session layer
+ */
+static double transport_pacer_period;
+
+#define TRANSPORT_PACER_MIN_MSS 1460
+
u8 *
format_transport_proto (u8 * s, va_list * args)
{
case TRANSPORT_PROTO_UDP:
s = format (s, "UDP");
break;
+ case TRANSPORT_PROTO_SCTP:
+ s = format (s, "SCTP");
+ break;
+ case TRANSPORT_PROTO_UDPC:
+ s = format (s, "UDPC");
+ break;
+ }
+ return s;
+}
+
+u8 *
+format_transport_proto_short (u8 * s, va_list * args)
+{
+ u32 transport_proto = va_arg (*args, u32);
+ switch (transport_proto)
+ {
+ case TRANSPORT_PROTO_TCP:
+ s = format (s, "T");
+ break;
+ case TRANSPORT_PROTO_UDP:
+ s = format (s, "U");
+ break;
+ case TRANSPORT_PROTO_SCTP:
+ s = format (s, "S");
+ break;
+ case TRANSPORT_PROTO_UDPC:
+ s = format (s, "U");
+ break;
}
return s;
}
*proto = TRANSPORT_PROTO_UDP;
else if (unformat (input, "UDP"))
*proto = TRANSPORT_PROTO_UDP;
+ else if (unformat (input, "sctp"))
+ *proto = TRANSPORT_PROTO_SCTP;
+ else if (unformat (input, "SCTP"))
+ *proto = TRANSPORT_PROTO_SCTP;
+ else if (unformat (input, "tls"))
+ *proto = TRANSPORT_PROTO_TLS;
+ else if (unformat (input, "TLS"))
+ *proto = TRANSPORT_PROTO_TLS;
+ else if (unformat (input, "udpc"))
+ *proto = TRANSPORT_PROTO_UDPC;
+ else if (unformat (input, "UDPC"))
+ *proto = TRANSPORT_PROTO_UDPC;
else
return 0;
return 1;
/**
* Register transport virtual function table.
*
- * @param type - session type (not protocol type)
- * @param vft - virtual function table
+ * @param transport_proto - transport protocol type (i.e., TCP, UDP ..)
+ * @param vft - virtual function table for transport proto
+ * @param fib_proto - network layer protocol
+ * @param output_node - output node index that session layer will hand off
+ * buffers to, for requested fib proto
*/
void
-transport_register_protocol (transport_proto_t transport_proto, u8 is_ip4,
- const transport_proto_vft_t * vft)
+transport_register_protocol (transport_proto_t transport_proto,
+ const transport_proto_vft_t * vft,
+ fib_protocol_t fib_proto, u32 output_node)
{
- u8 session_type;
- session_type = session_type_from_proto_and_ip (transport_proto, is_ip4);
+ u8 is_ip4 = fib_proto == FIB_PROTOCOL_IP4;
- vec_validate (tp_vfts, session_type);
- tp_vfts[session_type] = *vft;
+ vec_validate (tp_vfts, transport_proto);
+ tp_vfts[transport_proto] = *vft;
- /* If an offset function is provided, then peek instead of dequeue */
- session_manager_set_transport_rx_fn (session_type,
- vft->tx_fifo_offset != 0);
+ session_register_transport (transport_proto, vft, is_ip4, output_node);
}
/**
 * Get transport virtual function table.
 *
 * @param transport_proto - transport protocol type (i.e., TCP, UDP ..)
 */
transport_proto_vft_t *
-transport_protocol_get_vft (u8 session_type)
+transport_protocol_get_vft (transport_proto_t transport_proto)
{
- if (session_type >= vec_len (tp_vfts))
+ if (transport_proto >= vec_len (tp_vfts))
return 0;
- return &tp_vfts[session_type];
+ return &tp_vfts[transport_proto];
+}
+
+transport_service_type_t
+transport_protocol_service_type (transport_proto_t tp)
+{
+ return tp_vfts[tp].service_type;
+}
+
+transport_tx_fn_type_t
+transport_protocol_tx_fn_type (transport_proto_t tp)
+{
+ return tp_vfts[tp].tx_type;
+}
+
+u8
+transport_protocol_is_cl (transport_proto_t tp)
+{
+ return (tp_vfts[tp].service_type == TRANSPORT_SERVICE_CL);
}
#define PORT_MASK ((1 << 16)- 1)
return -1;
}
- memset (lcl_addr, 0, sizeof (*lcl_addr));
+ clib_memset (lcl_addr, 0, sizeof (*lcl_addr));
if (rmt->is_ip4)
{
ip4_address_t *ip4;
ip4 = ip_interface_get_first_ip (sw_if_index, 1);
+ if (!ip4)
+ {
+ clib_warning ("no routable ip4 address on %U",
+ format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+ return -1;
+ }
lcl_addr->ip4.as_u32 = ip4->as_u32;
}
else
return 0;
}
+#define SPACER_CPU_TICKS_PER_PERIOD_SHIFT 10
+#define SPACER_CPU_TICKS_PER_PERIOD (1 << SPACER_CPU_TICKS_PER_PERIOD_SHIFT)
+
+u8 *
+format_transport_pacer (u8 * s, va_list * args)
+{
+ spacer_t *pacer = va_arg (*args, spacer_t *);
+
+ s = format (s, "bucket %u max_burst %u tokens/period %.3f last_update %x",
+ pacer->bucket, pacer->max_burst_size, pacer->tokens_per_period,
+ pacer->last_update);
+ return s;
+}
+
+static inline u32
+spacer_max_burst (spacer_t * pacer, u64 norm_time_now)
+{
+ u64 n_periods = norm_time_now - pacer->last_update;
+
+ pacer->last_update = norm_time_now;
+ pacer->bucket += n_periods * pacer->tokens_per_period;
+ return clib_min (pacer->bucket, pacer->max_burst_size);
+}
+
+static inline void
+spacer_update_bucket (spacer_t * pacer, u32 bytes)
+{
+ ASSERT (pacer->bucket >= bytes);
+ pacer->bucket -= bytes;
+}
+
+static inline void
+spacer_update_max_burst_size (spacer_t * pacer, u32 max_burst_bytes)
+{
+ pacer->max_burst_size = clib_max (max_burst_bytes, TRANSPORT_PACER_MIN_MSS);
+}
+
+static inline void
+spacer_set_pace_rate (spacer_t * pacer, u64 rate_bytes_per_sec)
+{
+ ASSERT (rate_bytes_per_sec != 0);
+ pacer->tokens_per_period = rate_bytes_per_sec / transport_pacer_period;
+}
+
+void
+transport_connection_tx_pacer_init (transport_connection_t * tc,
+ u32 rate_bytes_per_sec, u32 burst_bytes)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ u64 time_now = vm->clib_time.last_cpu_time;
+ spacer_t *pacer = &tc->pacer;
+
+ tc->flags |= TRANSPORT_CONNECTION_F_IS_TX_PACED;
+ spacer_update_max_burst_size (&tc->pacer, burst_bytes);
+ spacer_set_pace_rate (&tc->pacer, rate_bytes_per_sec);
+ pacer->last_update = time_now >> SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
+ pacer->bucket = burst_bytes;
+}
+
+void
+transport_connection_tx_pacer_update (transport_connection_t * tc,
+ u64 bytes_per_sec)
+{
+ u32 burst_size;
+
+ burst_size = bytes_per_sec * transport_dispatch_period (tc->thread_index);
+ spacer_set_pace_rate (&tc->pacer, bytes_per_sec);
+ spacer_update_max_burst_size (&tc->pacer, burst_size);
+}
+
+u32
+transport_connection_max_tx_burst (transport_connection_t * tc, u64 time_now)
+{
+ u32 snd_space, max_paced_burst;
+ u32 mss;
+
+ snd_space = tp_vfts[tc->proto].send_space (tc);
+ if (transport_connection_is_tx_paced (tc))
+ {
+ time_now >>= SPACER_CPU_TICKS_PER_PERIOD_SHIFT;
+ max_paced_burst = spacer_max_burst (&tc->pacer, time_now);
+ mss = tp_vfts[tc->proto].send_mss (tc);
+ max_paced_burst = (max_paced_burst < mss) ? 0 : max_paced_burst;
+ snd_space = clib_min (snd_space, max_paced_burst);
+ snd_space = snd_space - snd_space % mss;
+ }
+ return snd_space;
+}
+
+void
+transport_connection_update_tx_stats (transport_connection_t * tc, u32 bytes)
+{
+ tc->stats.tx_bytes += bytes;
+ if (transport_connection_is_tx_paced (tc))
+ spacer_update_bucket (&tc->pacer, bytes);
+}
+
+void
+transport_init_tx_pacers_period (void)
+{
+ f64 cpu_freq = os_cpu_clock_frequency ();
+ transport_pacer_period = cpu_freq / SPACER_CPU_TICKS_PER_PERIOD;
+}
+
+void
+transport_update_time (f64 time_now, u8 thread_index)
+{
+ transport_proto_vft_t *vft;
+ vec_foreach (vft, tp_vfts)
+ {
+ if (vft->update_time)
+ (vft->update_time) (time_now, thread_index);
+ }
+}
+
+void
+transport_enable_disable (vlib_main_t * vm, u8 is_en)
+{
+ transport_proto_vft_t *vft;
+ vec_foreach (vft, tp_vfts)
+ {
+ if (vft->enable)
+ (vft->enable) (vm, is_en);
+ }
+}
+
void
transport_init (void)
{
vlib_thread_main_t *vtm = vlib_get_thread_main ();
- u32 local_endpoints_table_buckets = 250000;
- u32 local_endpoints_table_memory = 512 << 20;
+ session_manager_main_t *smm = vnet_get_session_manager_main ();
u32 num_threads;
+ if (smm->local_endpoints_table_buckets == 0)
+ smm->local_endpoints_table_buckets = 250000;
+ if (smm->local_endpoints_table_memory == 0)
+ smm->local_endpoints_table_memory = 512 << 20;
+
/* Initialize [port-allocator] random number seed */
port_allocator_seed = (u32) clib_cpu_time_now ();
clib_bihash_init_24_8 (&local_endpoints_table, "local endpoints table",
- local_endpoints_table_buckets,
- local_endpoints_table_memory);
+ smm->local_endpoints_table_buckets,
+ smm->local_endpoints_table_memory);
num_threads = 1 /* main thread */ + vtm->n_threads;
if (num_threads > 1)
clib_spinlock_init (&local_endpoints_lock);