#include <vnet/fib/fib.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/receive_dpo.h>
-#include <vnet/ip/ip6_neighbor.h>
+#include <vnet/ip-neighbor/ip_neighbor.h>
#include <math.h>
-#include <vnet/ethernet/arp.h>
tcp_main_t tcp_main;
static void
tcp_cc_init (tcp_connection_t * tc)
{
- tc->cc_algo = tcp_cc_algo_get (tcp_cfg.cc_algo);
tc->cc_algo->init (tc);
}
listener->c_s_index = session_index;
listener->c_fib_index = lcl->fib_index;
listener->state = TCP_STATE_LISTEN;
+ listener->cc_algo = tcp_cc_algo_get (tcp_cfg.cc_algo);
tcp_connection_timers_init (listener);
- TCP_EVT_DBG (TCP_EVT_BIND, listener);
+ TCP_EVT (TCP_EVT_BIND, listener);
return listener->c_c_index;
}
tc = pool_elt_at_index (tm->listener_pool, listener_index);
- TCP_EVT_DBG (TCP_EVT_UNBIND, tc);
+ TCP_EVT (TCP_EVT_UNBIND, tc);
/* Poison the entry */
if (CLIB_DEBUG > 0)
{
tcp_main_t *tm = vnet_get_tcp_main ();
clib_spinlock_lock_if_init (&tm->half_open_lock);
- pool_put_index (tm->half_open_connections, tc->c_c_index);
if (CLIB_DEBUG)
clib_memset (tc, 0xFA, sizeof (*tc));
+ pool_put (tm->half_open_connections, tc);
clib_spinlock_unlock_if_init (&tm->half_open_lock);
}
void
tcp_connection_cleanup (tcp_connection_t * tc)
{
- tcp_main_t *tm = &tcp_main;
-
- TCP_EVT_DBG (TCP_EVT_DELETE, tc);
+ TCP_EVT (TCP_EVT_DELETE, tc);
/* Cleanup local endpoint if this was an active connect */
- transport_endpoint_cleanup (TRANSPORT_PROTO_TCP, &tc->c_lcl_ip,
- tc->c_lcl_port);
+ if (!(tc->cfg_flags & TCP_CFG_F_NO_ENDPOINT))
+ transport_endpoint_cleanup (TRANSPORT_PROTO_TCP, &tc->c_lcl_ip,
+ tc->c_lcl_port);
/* Check if connection is not yet fully established */
if (tc->state == TCP_STATE_SYN_SENT)
}
else
{
- int thread_index = tc->c_thread_index;
-
/* Make sure all timers are cleared */
tcp_connection_timers_reset (tc);
tcp_cc_cleanup (tc);
vec_free (tc->snd_sacks);
vec_free (tc->snd_sacks_fl);
+ vec_free (tc->rcv_opts.sacks);
+ pool_free (tc->sack_sb.holes);
- if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
tcp_bt_cleanup (tc);
- /* Poison the entry */
- if (CLIB_DEBUG > 0)
- clib_memset (tc, 0xFA, sizeof (*tc));
- pool_put (tm->connections[thread_index], tc);
+ tcp_connection_free (tc);
}
}
tcp_connection_t *
tcp_connection_alloc (u8 thread_index)
{
- tcp_main_t *tm = vnet_get_tcp_main ();
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
tcp_connection_t *tc;
- pool_get (tm->connections[thread_index], tc);
+ pool_get (wrk->connections, tc);
clib_memset (tc, 0, sizeof (*tc));
- tc->c_c_index = tc - tm->connections[thread_index];
+ tc->c_c_index = tc - wrk->connections;
+ tc->c_thread_index = thread_index;
+ return tc;
+}
+
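+/**
+ * Allocate a connection on the given worker and initialize it as a copy
+ * of an existing connection, fixing up the pool index and thread.
+ */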
+tcp_connection_t *
+tcp_connection_alloc_w_base (u8 thread_index, tcp_connection_t * base)
+{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+ tcp_connection_t *tc;
+
+ pool_get (wrk->connections, tc);
+ clib_memcpy_fast (tc, base, sizeof (*tc));
+ tc->c_c_index = tc - wrk->connections;
tc->c_thread_index = thread_index;
return tc;
}
void
tcp_connection_free (tcp_connection_t * tc)
{
- tcp_main_t *tm = &tcp_main;
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
if (CLIB_DEBUG)
{
- u8 thread_index = tc->c_thread_index;
clib_memset (tc, 0xFA, sizeof (*tc));
- pool_put (tm->connections[thread_index], tc);
+ pool_put (wrk->connections, tc);
return;
}
- pool_put (tm->connections[tc->c_thread_index], tc);
+ pool_put (wrk->connections, tc);
}
-/** Notify session that connection has been reset.
- *
- * Switch state to closed and wait for session to call cleanup.
- */
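+/**
+ * Program connection cleanup on the owning worker.
+ *
+ * The connection is only freed after tcp_cfg.cleanup_time elapses, giving
+ * the session layer a chance to clear unhandled events first.
+ */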
void
-tcp_connection_reset (tcp_connection_t * tc)
+tcp_program_cleanup (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
- TCP_EVT_DBG (TCP_EVT_RST_RCVD, tc);
- switch (tc->state)
- {
- case TCP_STATE_SYN_RCVD:
- /* Cleanup everything. App wasn't notified yet */
- session_transport_delete_notify (&tc->connection);
- tcp_connection_cleanup (tc);
- break;
- case TCP_STATE_SYN_SENT:
- session_stream_connect_notify (&tc->connection, 1 /* fail */ );
- tcp_connection_cleanup (tc);
- break;
- case TCP_STATE_ESTABLISHED:
- tcp_connection_timers_reset (tc);
- /* Set the cleanup timer, in case the session layer/app don't
- * cleanly close the connection */
- tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
- session_transport_reset_notify (&tc->connection);
- tcp_connection_set_state (tc, TCP_STATE_CLOSED);
- session_transport_closed_notify (&tc->connection);
- break;
- case TCP_STATE_CLOSE_WAIT:
- case TCP_STATE_FIN_WAIT_1:
- case TCP_STATE_FIN_WAIT_2:
- case TCP_STATE_CLOSING:
- case TCP_STATE_LAST_ACK:
- tcp_connection_timers_reset (tc);
- tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
- /* Make sure we mark the session as closed. In some states we may
- * be still trying to send data */
- tcp_connection_set_state (tc, TCP_STATE_CLOSED);
- session_transport_closed_notify (&tc->connection);
- break;
- case TCP_STATE_CLOSED:
- case TCP_STATE_TIME_WAIT:
- break;
- default:
- TCP_DBG ("reset state: %u", tc->state);
- }
+ tcp_cleanup_req_t *req;
+ clib_time_type_t now;
+
+ now = transport_time_now (tc->c_thread_index);
+ clib_fifo_add2 (wrk->pending_cleanups, req);
+ req->connection_index = tc->c_c_index;
+ req->free_time = now + tcp_cfg.cleanup_time;
}
/**
void
tcp_connection_close (tcp_connection_t * tc)
{
- TCP_EVT_DBG (TCP_EVT_CLOSE, tc);
+ TCP_EVT (TCP_EVT_CLOSE, tc);
/* Send/Program FIN if needed and switch state */
switch (tc->state)
tcp_send_reset (tc);
tcp_connection_timers_reset (tc);
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
- tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
session_transport_closed_notify (&tc->connection);
+ tcp_program_cleanup (tcp_get_worker (tc->c_thread_index), tc);
+ tcp_worker_stats_inc (tc->c_thread_index, rst_unread, 1);
break;
}
if (!transport_max_tx_dequeue (&tc->connection))
tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.finwait1_time);
break;
case TCP_STATE_CLOSED:
- tcp_connection_timers_reset (tc);
- /* Delete connection but instead of doing it now wait until next
- * dispatch cycle to give the session layer a chance to clear
- * unhandled events */
- tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
+ /* Cleanup should've been programmed already */
break;
default:
TCP_DBG ("state: %u", tc->state);
{
tcp_connection_t *tc;
tc = tcp_connection_get (conn_index, thread_index);
+ if (!tc)
+ return;
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
tcp_connection_cleanup (tc);
}
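+/* Reset connection on session layer's request: send RST, mark the
+ * connection closed and program its cleanup */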
+static void
+tcp_session_reset (u32 conn_index, u32 thread_index)
+{
+ tcp_connection_t *tc;
+ tc = tcp_connection_get (conn_index, thread_index);
+ tcp_send_reset (tc);
+ tcp_connection_timers_reset (tc);
+ tcp_cong_recovery_off (tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ session_transport_closed_notify (&tc->connection);
+ tcp_program_cleanup (tcp_get_worker (thread_index), tc);
+}
+
/**
* Initialize all connection timers as invalid
*/
return ((tmp >> 32) ^ (tmp & 0xffffffff));
}
+/**
+ * Initialize max segment size we're able to process.
+ *
+ * The value is constrained by the configured MTU (tcp_cfg.default_mtu or
+ * the mss provided at connection init) and by the size of the IP and TCP
+ * headers (see RFC6691). It is also what we advertise to our peer.
+ */
+static void
+tcp_init_rcv_mss (tcp_connection_t * tc)
+{
+ u8 ip_hdr_len;
+
+ /* Already provided at connection init time */
+ if (tc->mss)
+ return;
+
+ ip_hdr_len = tc->c_is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
+ tc->mss = tcp_cfg.default_mtu - sizeof (tcp_header_t) - ip_hdr_len;
+}
+
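+/**
+ * Initialize mss used for sending: the minimum of our own mss and the
+ * peer's advertised mss, less timestamp option overhead if in use.
+ */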
+static void
+tcp_init_mss (tcp_connection_t * tc)
+{
+ u16 default_min_mss = 536;
+
+ tcp_init_rcv_mss (tc);
+
+ /* TODO consider PMTU discovery */
+ tc->snd_mss = clib_min (tc->rcv_opts.mss, tc->mss);
+
+ if (tc->snd_mss < 45)
+ {
+ /* Assume that at least the min default mss works */
+ tc->snd_mss = default_min_mss;
+ tc->rcv_opts.mss = default_min_mss;
+ }
+
+ /* We should have enough space for 40 bytes of options */
+ ASSERT (tc->snd_mss > 45);
+
+ /* If we use timestamp option, account for it */
+ if (tcp_opts_tstamp (&tc->rcv_opts))
+ tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
+}
+
/**
* Initialize connection send variables.
*/
*/
tcp_set_time_now (tcp_get_worker (vlib_get_thread_index ()));
+ tcp_init_rcv_mss (tc);
tc->iss = tcp_generate_random_iss (tc);
tc->snd_una = tc->iss;
tc->snd_nxt = tc->iss + 1;
tc->snd_una_max = tc->snd_nxt;
- tc->srtt = 0;
+ tc->srtt = 100; /* 100 ms */
+
+ if (!tcp_cfg.csum_offload)
+ tc->cfg_flags |= TCP_CFG_F_NO_CSUM_OFFLOAD;
}
void
tcp_enable_pacing (tcp_connection_t * tc)
{
- u32 initial_bucket, byte_rate;
- initial_bucket = 16 * tc->snd_mss;
- byte_rate = 2 << 16;
- transport_connection_tx_pacer_init (&tc->connection, byte_rate,
- initial_bucket);
+ u32 byte_rate;
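+ /* Pace at roughly one cwnd of data per smoothed rtt (srtt is in tcp
+ * ticks, TCP_TICK converts it to seconds) */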
+ byte_rate = tc->cwnd / (tc->srtt * TCP_TICK);
+ transport_connection_tx_pacer_init (&tc->connection, byte_rate, tc->cwnd);
tc->mrtt_us = (u32) ~ 0;
}
tcp_connection_timers_init (tc);
tcp_init_mss (tc);
scoreboard_init (&tc->sack_sb);
- tcp_cc_init (tc);
if (tc->state == TCP_STATE_SYN_RCVD)
tcp_init_snd_vars (tc);
+ tcp_cc_init (tc);
+
if (!tc->c_is_ip4 && ip6_address_is_link_local_unicast (&tc->c_rmt_ip6))
tcp_add_del_adjacency (tc, 1);
|| tcp_cfg.enable_tx_pacing)
tcp_enable_pacing (tc);
- if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
tcp_bt_init (tc);
+ if (!tcp_cfg.allow_tso)
+ tc->cfg_flags |= TCP_CFG_F_NO_TSO;
+
tc->start_ts = tcp_time_now_us (tc->c_thread_index);
}
tc->c_is_ip4 = rmt->is_ip4;
tc->c_proto = TRANSPORT_PROTO_TCP;
tc->c_fib_index = rmt->fib_index;
+ tc->cc_algo = tcp_cc_algo_get (tcp_cfg.cc_algo);
/* The other connection vars will be initialized after SYN ACK */
tcp_connection_timers_init (tc);
+ tc->mss = rmt->mss;
- TCP_EVT_DBG (TCP_EVT_OPEN, tc);
+ TCP_EVT (TCP_EVT_OPEN, tc);
tc->state = TCP_STATE_SYN_SENT;
tcp_init_snd_vars (tc);
tcp_send_syn (tc);
return tc->c_c_index;
}
-const char *tcp_dbg_evt_str[] = {
-#define _(sym, str) str,
- foreach_tcp_dbg_evt
-#undef _
-};
-
const char *tcp_fsm_states[] = {
#define _(sym, str) str,
foreach_tcp_fsm_state
return s;
}
+const char *tcp_cfg_flags_str[] = {
+#define _(sym, str) str,
+ foreach_tcp_cfg_flag
+#undef _
+};
+
+static u8 *
+format_tcp_cfg_flags (u8 * s, va_list * args)
+{
+ tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
+ int i, last = -1;
+
+ for (i = 0; i < TCP_CFG_N_FLAG_BITS; i++)
+ if (tc->cfg_flags & (1 << i))
+ last = i;
+ for (i = 0; i < last; i++)
+ {
+ if (tc->cfg_flags & (1 << i))
+ s = format (s, "%s, ", tcp_cfg_flags_str[i]);
+ }
+ if (last >= 0)
+ s = format (s, "%s", tcp_cfg_flags_str[last]);
+ return s;
+}
+
const char *tcp_connection_flags_str[] = {
#define _(sym, str) str,
foreach_tcp_connection_flag
format_tcp_congestion (u8 * s, va_list * args)
{
tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
- u32 indent = format_get_indent (s);
+ u32 indent = format_get_indent (s), prr_space = 0;
s = format (s, "%U ", format_tcp_congestion_status, tc);
s = format (s, "algo %s cwnd %u ssthresh %u bytes_acked %u\n",
tc->cc_algo->name, tc->cwnd, tc->ssthresh, tc->bytes_acked);
- s = format (s, "%Ucc space %u prev_cwnd %u prev_ssthresh %u rtx_bytes %u\n",
+ s = format (s, "%Ucc space %u prev_cwnd %u prev_ssthresh %u\n",
format_white_space, indent, tcp_available_cc_snd_space (tc),
- tc->prev_cwnd, tc->prev_ssthresh, tc->snd_rxt_bytes);
- s = format (s, "%Usnd_congestion %u dupack %u limited_transmit %u\n",
+ tc->prev_cwnd, tc->prev_ssthresh);
+ s = format (s, "%Usnd_cong %u dupack %u limited_tx %u\n",
format_white_space, indent, tc->snd_congestion - tc->iss,
tc->rcv_dupacks, tc->limited_transmit - tc->iss);
+ s = format (s, "%Urxt_bytes %u rxt_delivered %u rxt_head %u rxt_ts %u\n",
+ format_white_space, indent, tc->snd_rxt_bytes,
+ tc->rxt_delivered, tc->rxt_head - tc->iss,
+ tcp_time_now_w_thread (tc->c_thread_index) - tc->snd_rxt_ts);
+ if (tcp_in_fastrecovery (tc))
+ prr_space = tcp_fastrecovery_prr_snd_space (tc);
+ s = format (s, "%Uprr_start %u prr_delivered %u prr space %u\n",
+ format_white_space, indent, tc->prr_start - tc->iss,
+ tc->prr_delivered, prr_space);
return s;
}
format_tcp_vars (u8 * s, va_list * args)
{
tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
- s = format (s, " index: %u flags: %U timers: %U\n", tc->c_c_index,
- format_tcp_connection_flags, tc, format_tcp_timers, tc);
+ s = format (s, " index: %u cfg: %U flags: %U timers: %U\n", tc->c_c_index,
+ format_tcp_cfg_flags, tc, format_tcp_connection_flags, tc,
+ format_tcp_timers, tc);
s = format (s, " snd_una %u snd_nxt %u snd_una_max %u",
tc->snd_una - tc->iss, tc->snd_nxt - tc->iss,
tc->snd_una_max - tc->iss);
tc->rto, tc->rto_boff, tc->srtt, tc->mrtt_us * 1000, tc->rttvar,
tc->rtt_ts);
s = format (s, " rtt_seq %u\n", tc->rtt_seq - tc->iss);
+ s = format (s, " next_node %u opaque 0x%x fib_index %u\n",
+ tc->next_node_index, tc->next_node_opaque, tc->c_fib_index);
s = format (s, " cong: %U", format_tcp_congestion, tc);
if (tc->state >= TCP_STATE_ESTABLISHED)
return s;
}
-static u8 *
+u8 *
format_tcp_connection_id (u8 * s, va_list * args)
{
tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
sack_scoreboard_hole_t *hole;
u32 indent = format_get_indent (s);
- s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n",
- sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes);
- s = format (s, "%Ulast_bytes_delivered %u high_sacked %u snd_una_adv %u\n",
+ s = format (s, "sacked %u last_sacked %u lost %u last_lost %u"
+ " rxt_sacked %u\n",
+ sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes,
+ sb->last_lost_bytes, sb->rxt_sacked);
+ s = format (s, "%Ulast_delivered %u high_sacked %u is_reneging %u\n",
format_white_space, indent, sb->last_bytes_delivered,
- sb->high_sacked - tc->iss, sb->snd_una_adv);
+ sb->high_sacked - tc->iss, sb->is_reneging);
s = format (s, "%Ucur_rxt_hole %u high_rxt %u rescue_rxt %u",
format_white_space, indent, sb->cur_rxt_hole,
sb->high_rxt - tc->iss, sb->rescue_rxt - tc->iss);
tcp_session_get_transport (u32 conn_index, u32 thread_index)
{
tcp_connection_t *tc = tcp_connection_get (conn_index, thread_index);
+ if (PREDICT_FALSE (!tc))
+ return 0;
return &tc->connection;
}
return &tc->connection;
}
-/**
- * Compute maximum segment size for session layer.
- *
- * Since the result needs to be the actual data length, it first computes
- * the tcp options to be used in the next burst and subtracts their
- * length from the connection's snd_mss.
- */
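+/**
+ * Compute the data size to use per dispatch when TSO is enabled.
+ *
+ * The goal is bounded by TCP_MAX_GSO_SZ and half the send window and is
+ * never smaller than snd_mss.
+ */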
static u16
-tcp_session_send_mss (transport_connection_t * trans_conn)
+tcp_session_cal_goal_size (tcp_connection_t * tc)
{
- tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
+ u16 goal_size = tc->snd_mss;
- /* Ensure snd_mss does accurately reflect the amount of data we can push
- * in a segment. This also makes sure that options are updated according to
- * the current state of the connection. */
- tcp_update_burst_snd_vars (tc);
+ goal_size = TCP_MAX_GSO_SZ - tc->snd_mss % TCP_MAX_GSO_SZ;
+ goal_size = clib_min (goal_size, tc->snd_wnd / 2);
- return tc->snd_mss;
+ return goal_size > tc->snd_mss ? goal_size : tc->snd_mss;
}
always_inline u32
static inline u32
tcp_snd_space_inline (tcp_connection_t * tc)
{
- int snd_space, snt_limited;
+ int snd_space;
if (PREDICT_FALSE (tcp_in_fastrecovery (tc)
|| tc->state == TCP_STATE_CLOSED))
snd_space = tcp_available_output_snd_space (tc);
- /* If we haven't gotten dupacks or if we did and have gotten sacked
- * bytes then we can still send as per Limited Transmit (RFC3042) */
- if (PREDICT_FALSE (tc->rcv_dupacks != 0
- && (tcp_opts_sack_permitted (tc)
- && tc->sack_sb.last_sacked_bytes == 0)))
+ /* If we got dupacks or sacked bytes but we're not yet in recovery, try
+ * to force the peer to send enough dupacks to start retransmitting as
+ * per Limited Transmit (RFC3042)
+ */
+ if (PREDICT_FALSE (tc->rcv_dupacks != 0 || tc->sack_sb.sacked_bytes))
{
- if (tc->rcv_dupacks == 1 && tc->limited_transmit != tc->snd_nxt)
+ if (tc->limited_transmit != tc->snd_nxt
+ && (seq_lt (tc->limited_transmit, tc->snd_nxt - 2 * tc->snd_mss)
+ || seq_gt (tc->limited_transmit, tc->snd_nxt)))
tc->limited_transmit = tc->snd_nxt;
+
ASSERT (seq_leq (tc->limited_transmit, tc->snd_nxt));
- snt_limited = tc->snd_nxt - tc->limited_transmit;
- snd_space = clib_max (2 * tc->snd_mss - snt_limited, 0);
+ int snt_limited = tc->snd_nxt - tc->limited_transmit;
+ snd_space = clib_max ((int) 2 * tc->snd_mss - snt_limited, 0);
}
return tcp_round_snd_space (tc, snd_space);
}
return tcp_snd_space_inline (tc);
}
-static u32
-tcp_session_send_space (transport_connection_t * trans_conn)
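+/**
+ * Fill in the send parameters (mss, available send space, tx offset and
+ * flags) used by the session layer to build the next tx burst.
+ */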
+static int
+tcp_session_send_params (transport_connection_t * trans_conn,
+ transport_send_params_t * sp)
{
tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
- return clib_min (tcp_snd_space_inline (tc),
- tc->snd_wnd - (tc->snd_nxt - tc->snd_una));
-}
-static u32
-tcp_session_tx_fifo_offset (transport_connection_t * trans_conn)
-{
- tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
+ /* Ensure snd_mss does accurately reflect the amount of data we can push
+ * in a segment. This also makes sure that options are updated according to
+ * the current state of the connection. */
+ tcp_update_burst_snd_vars (tc);
- ASSERT (seq_geq (tc->snd_nxt, tc->snd_una));
+ if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_TSO))
+ sp->snd_mss = tcp_session_cal_goal_size (tc);
+ else
+ sp->snd_mss = tc->snd_mss;
+
+ sp->snd_space = clib_min (tcp_snd_space_inline (tc),
+ tc->snd_wnd - (tc->snd_nxt - tc->snd_una));
+ ASSERT (seq_geq (tc->snd_nxt, tc->snd_una));
/* This still works if fast retransmit is on */
- return (tc->snd_nxt - tc->snd_una);
-}
+ sp->tx_offset = tc->snd_nxt - tc->snd_una;
-static void
-tcp_update_time (f64 now, u8 thread_index)
-{
- tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+ sp->flags = 0;
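+ /* Zero send window: if the persist timer is armed it will reschedule
+ * us once the window opens, so deschedule; otherwise just postpone */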
+ if (!tc->snd_wnd)
+ {
+ if (tcp_timer_is_active (tc, TCP_TIMER_PERSIST))
+ sp->flags = TRANSPORT_SND_F_DESCHED;
+ else
+ sp->flags = TRANSPORT_SND_F_POSTPONE;
+ }
- tcp_set_time_now (wrk);
- tw_timer_expire_timers_16t_2w_512sl (&wrk->timer_wheel, now);
- tcp_flush_frames_to_output (wrk);
+ return 0;
}
static void
-tcp_session_flush_data (transport_connection_t * tconn)
-{
- tcp_connection_t *tc = (tcp_connection_t *) tconn;
- if (tc->flags & TCP_CONN_PSH_PENDING)
- return;
- tc->flags |= TCP_CONN_PSH_PENDING;
- tc->psh_seq = tc->snd_una + transport_max_tx_dequeue (tconn) - 1;
-}
-
-/* *INDENT-OFF* */
-const static transport_proto_vft_t tcp_proto = {
- .enable = vnet_tcp_enable_disable,
- .start_listen = tcp_session_bind,
- .stop_listen = tcp_session_unbind,
- .push_header = tcp_session_push_header,
- .get_connection = tcp_session_get_transport,
- .get_listener = tcp_session_get_listener,
- .get_half_open = tcp_half_open_session_get_transport,
- .connect = tcp_session_open,
- .close = tcp_session_close,
- .cleanup = tcp_session_cleanup,
- .send_mss = tcp_session_send_mss,
- .send_space = tcp_session_send_space,
- .update_time = tcp_update_time,
- .tx_fifo_offset = tcp_session_tx_fifo_offset,
- .flush_data = tcp_session_flush_data,
- .custom_tx = tcp_session_custom_tx,
- .format_connection = format_tcp_session,
- .format_listener = format_tcp_listener_session,
- .format_half_open = format_tcp_half_open_session,
- .transport_options = {
- .tx_type = TRANSPORT_TX_PEEK,
- .service_type = TRANSPORT_SERVICE_VC,
- },
-};
-/* *INDENT-ON* */
-
-void
-tcp_connection_tx_pacer_update (tcp_connection_t * tc)
-{
- f64 srtt;
- u64 rate;
-
- if (!transport_connection_is_tx_paced (&tc->connection))
- return;
-
- srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
- /* TODO should constrain to interface's max throughput but
- * we don't have link speeds for sw ifs ..*/
- rate = tc->cwnd / srtt;
- transport_connection_tx_pacer_update (&tc->connection, rate);
-}
-
-void
-tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
- u32 start_bucket)
+tcp_timer_waitclose_handler (tcp_connection_t * tc)
{
tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
- u32 byte_rate = window / ((f64) TCP_TICK * tc->srtt);
- u64 last_time = wrk->vm->clib_time.last_cpu_time;
- transport_connection_tx_pacer_reset (&tc->connection, byte_rate,
- start_bucket, last_time);
-}
-
-static void
-tcp_timer_waitclose_handler (u32 conn_index)
-{
- u32 thread_index = vlib_get_thread_index ();
- tcp_connection_t *tc;
-
- tc = tcp_connection_get (conn_index, thread_index);
- if (!tc)
- return;
-
- tc->timers[TCP_TIMER_WAITCLOSE] = TCP_TIMER_HANDLE_INVALID;
switch (tc->state)
{
case TCP_STATE_CLOSE_WAIT:
tcp_connection_timers_reset (tc);
- session_transport_closed_notify (&tc->connection);
-
+ /* App never returned with a close */
if (!(tc->flags & TCP_CONN_FINPNDG))
{
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
- tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
+ session_transport_closed_notify (&tc->connection);
+ tcp_program_cleanup (wrk, tc);
+ tcp_workerp_stats_inc (wrk, to_closewait, 1);
break;
}
- /* Session didn't come back with a close. Send FIN either way
- * and switch to LAST_ACK. */
+ /* Send FIN either way and switch to LAST_ACK. */
tcp_cong_recovery_off (tc);
/* Make sure we don't try to send unsent data */
tc->snd_nxt = tc->snd_una;
tcp_send_fin (tc);
tcp_connection_set_state (tc, TCP_STATE_LAST_ACK);
+ session_transport_closed_notify (&tc->connection);
/* Make sure we don't wait in LAST ACK forever */
tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
+ tcp_workerp_stats_inc (wrk, to_closewait2, 1);
/* Don't delete the connection yet */
break;
case TCP_STATE_FIN_WAIT_1:
tcp_connection_timers_reset (tc);
- session_transport_closed_notify (&tc->connection);
if (tc->flags & TCP_CONN_FINPNDG)
{
/* If FIN pending, we haven't sent everything, but we did try.
* Notify session layer that transport is closed. */
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
tcp_send_reset (tc);
- tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
+ tcp_program_cleanup (wrk, tc);
}
else
{
/* We've sent the fin but made no progress. Close the connection and
 * program a cleanup to make sure everything is flushed */
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
- tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
+ tcp_program_cleanup (wrk, tc);
}
+ session_transport_closed_notify (&tc->connection);
+ tcp_workerp_stats_inc (wrk, to_finwait1, 1);
break;
case TCP_STATE_LAST_ACK:
+ tcp_connection_timers_reset (tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ session_transport_closed_notify (&tc->connection);
+ tcp_program_cleanup (wrk, tc);
+ tcp_workerp_stats_inc (wrk, to_lastack, 1);
+ break;
case TCP_STATE_CLOSING:
tcp_connection_timers_reset (tc);
tcp_connection_set_state (tc, TCP_STATE_CLOSED);
- tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
session_transport_closed_notify (&tc->connection);
+ tcp_program_cleanup (wrk, tc);
+ tcp_workerp_stats_inc (wrk, to_closing, 1);
+ break;
+ case TCP_STATE_FIN_WAIT_2:
+ tcp_send_reset (tc);
+ tcp_connection_timers_reset (tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ session_transport_closed_notify (&tc->connection);
+ tcp_program_cleanup (wrk, tc);
+ tcp_workerp_stats_inc (wrk, to_finwait2, 1);
+ break;
+ case TCP_STATE_TIME_WAIT:
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ tcp_program_cleanup (wrk, tc);
break;
default:
- tcp_connection_del (tc);
+ clib_warning ("waitclose in state: %U", format_tcp_state, tc->state);
break;
}
}
};
/* *INDENT-ON* */
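+/**
+ * Dispatch timer expirations queued by tcp_expired_timers_dispatch.
+ *
+ * At most max_timers_per_loop handlers run per call and timers that were
+ * rearmed while pending dispatch are skipped.
+ */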
+static void
+tcp_dispatch_pending_timers (tcp_worker_ctx_t * wrk)
+{
+ u32 n_timers, connection_index, timer_id, thread_index, timer_handle;
+ tcp_connection_t *tc;
+ int i;
+
+ if (!(n_timers = clib_fifo_elts (wrk->pending_timers)))
+ return;
+
+ thread_index = wrk->vm->thread_index;
+ for (i = 0; i < clib_min (n_timers, wrk->max_timers_per_loop); i++)
+ {
+ clib_fifo_sub1 (wrk->pending_timers, timer_handle);
+ connection_index = timer_handle & 0x0FFFFFFF;
+ timer_id = timer_handle >> 28;
+
+ if (PREDICT_TRUE (timer_id != TCP_TIMER_RETRANSMIT_SYN))
+ tc = tcp_connection_get (connection_index, thread_index);
+ else
+ tc = tcp_half_open_connection_get (connection_index);
+
+ if (PREDICT_FALSE (!tc))
+ continue;
+
+ /* Skip timer if it was rearmed while pending dispatch */
+ if (PREDICT_FALSE (tc->timers[timer_id] != TCP_TIMER_HANDLE_INVALID))
+ continue;
+
+ (*timer_expiration_handlers[timer_id]) (tc);
+ }
+
+ if (thread_index == 0 && clib_fifo_elts (wrk->pending_timers))
+ vlib_process_signal_event_mt (wrk->vm, session_queue_process_node.index,
+ SESSION_Q_PROCESS_FLUSH_FRAMES, 0);
+}
+
+/**
+ * Flush ip lookup tx frames populated by timer pops
+ */
+static void
+tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk)
+{
+ if (wrk->ip_lookup_tx_frames[0])
+ {
+ vlib_put_frame_to_node (wrk->vm, ip4_lookup_node.index,
+ wrk->ip_lookup_tx_frames[0]);
+ wrk->ip_lookup_tx_frames[0] = 0;
+ }
+ if (wrk->ip_lookup_tx_frames[1])
+ {
+ vlib_put_frame_to_node (wrk->vm, ip6_lookup_node.index,
+ wrk->ip_lookup_tx_frames[1]);
+ wrk->ip_lookup_tx_frames[1] = 0;
+ }
+}
+
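+/**
+ * Handle connection cleanups programmed with tcp_program_cleanup, deleting
+ * connections whose free_time has passed.
+ */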
+static void
+tcp_handle_cleanups (tcp_worker_ctx_t * wrk, clib_time_type_t now)
+{
+ u32 thread_index = wrk->vm->thread_index;
+ tcp_cleanup_req_t *req;
+ tcp_connection_t *tc;
+
+ while (clib_fifo_elts (wrk->pending_cleanups))
+ {
+ req = clib_fifo_head (wrk->pending_cleanups);
+ if (req->free_time > now)
+ break;
+ clib_fifo_sub2 (wrk->pending_cleanups, req);
+ tc = tcp_connection_get (req->connection_index, thread_index);
+ if (PREDICT_FALSE (!tc))
+ continue;
+ session_transport_delete_notify (&tc->connection);
+ tcp_connection_cleanup (tc);
+ }
+}
+
+static void
+tcp_update_time (f64 now, u8 thread_index)
+{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+
+ tcp_set_time_now (wrk);
+ tcp_handle_cleanups (wrk, now);
+ tw_timer_expire_timers_16t_2w_512sl (&wrk->timer_wheel, now);
+ tcp_dispatch_pending_timers (wrk);
+ tcp_flush_frames_to_output (wrk);
+}
+
+static void
+tcp_session_flush_data (transport_connection_t * tconn)
+{
+ tcp_connection_t *tc = (tcp_connection_t *) tconn;
+ if (tc->flags & TCP_CONN_PSH_PENDING)
+ return;
+ tc->flags |= TCP_CONN_PSH_PENDING;
+ tc->psh_seq = tc->snd_una + transport_max_tx_dequeue (tconn) - 1;
+}
+
+/* *INDENT-OFF* */
+const static transport_proto_vft_t tcp_proto = {
+ .enable = vnet_tcp_enable_disable,
+ .start_listen = tcp_session_bind,
+ .stop_listen = tcp_session_unbind,
+ .push_header = tcp_session_push_header,
+ .get_connection = tcp_session_get_transport,
+ .get_listener = tcp_session_get_listener,
+ .get_half_open = tcp_half_open_session_get_transport,
+ .connect = tcp_session_open,
+ .close = tcp_session_close,
+ .cleanup = tcp_session_cleanup,
+ .reset = tcp_session_reset,
+ .send_params = tcp_session_send_params,
+ .update_time = tcp_update_time,
+ .flush_data = tcp_session_flush_data,
+ .custom_tx = tcp_session_custom_tx,
+ .format_connection = format_tcp_session,
+ .format_listener = format_tcp_listener_session,
+ .format_half_open = format_tcp_half_open_session,
+ .transport_options = {
+ .tx_type = TRANSPORT_TX_PEEK,
+ .service_type = TRANSPORT_SERVICE_VC,
+ },
+};
+/* *INDENT-ON* */
+
+void
+tcp_connection_tx_pacer_update (tcp_connection_t * tc)
+{
+ if (!transport_connection_is_tx_paced (&tc->connection))
+ return;
+
+ f64 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
+
+ transport_connection_tx_pacer_update (&tc->connection,
+ tcp_cc_get_pacing_rate (tc),
+ srtt * CLIB_US_TIME_FREQ);
+}
+
+void
+tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
+ u32 start_bucket)
+{
+ f64 srtt = clib_min ((f64) tc->srtt * TCP_TICK, tc->mrtt_us);
+ transport_connection_tx_pacer_reset (&tc->connection,
+ tcp_cc_get_pacing_rate (tc),
+ start_bucket,
+ srtt * CLIB_US_TIME_FREQ);
+}
+
static void
tcp_expired_timers_dispatch (u32 * expired_timers)
{
+ u32 thread_index = vlib_get_thread_index (), n_left, max_per_loop;
+ u32 connection_index, timer_id, n_expired, max_loops;
+ tcp_worker_ctx_t *wrk;
+ tcp_connection_t *tc;
int i;
- u32 connection_index, timer_id;
- for (i = 0; i < vec_len (expired_timers); i++)
+ wrk = tcp_get_worker (thread_index);
+ n_expired = vec_len (expired_timers);
+ tcp_workerp_stats_inc (wrk, timer_expirations, n_expired);
+ n_left = clib_fifo_elts (wrk->pending_timers);
+
+ /*
+ * Invalidate all timer handles before dispatching. This avoids dangling
+ * index references to timer wheel pool entries that have been freed.
+ */
+ for (i = 0; i < n_expired; i++)
{
- /* Get session index and timer id */
connection_index = expired_timers[i] & 0x0FFFFFFF;
timer_id = expired_timers[i] >> 28;
- TCP_EVT_DBG (TCP_EVT_TIMER_POP, connection_index, timer_id);
+ if (timer_id != TCP_TIMER_RETRANSMIT_SYN)
+ tc = tcp_connection_get (connection_index, thread_index);
+ else
+ tc = tcp_half_open_connection_get (connection_index);
+
+ TCP_EVT (TCP_EVT_TIMER_POP, connection_index, timer_id);
- /* Handle expiration */
- (*timer_expiration_handlers[timer_id]) (connection_index);
+ tc->timers[timer_id] = TCP_TIMER_HANDLE_INVALID;
}
+
+ clib_fifo_add (wrk->pending_timers, expired_timers, n_expired);
+
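+ /* Spread dispatch of the timer backlog over roughly half a timer tick
+ * worth of main loop iterations, within [10, VLIB_FRAME_SIZE] per loop */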
+ max_loops = clib_max (1, 0.5 * TCP_TIMER_TICK * wrk->vm->loops_per_second);
+ max_per_loop = clib_max ((n_left + n_expired) / max_loops, 10);
+ max_per_loop = clib_min (max_per_loop, VLIB_FRAME_SIZE);
+ wrk->max_timers_per_loop = clib_max (n_left ? wrk->max_timers_per_loop : 0,
+ max_per_loop);
+
+ if (thread_index == 0)
+ vlib_process_signal_event_mt (wrk->vm, session_queue_process_node.index,
+ SESSION_Q_PROCESS_FLUSH_FRAMES, 0);
}
static void
u32 num_threads, n_workers, prealloc_conn_per_wrk;
tcp_connection_t *tc __attribute__ ((unused));
tcp_main_t *tm = vnet_get_tcp_main ();
+ tcp_worker_ctx_t *wrk;
clib_error_t *error = 0;
int thread;
*/
num_threads = 1 /* main thread */ + vtm->n_threads;
- vec_validate (tm->connections, num_threads - 1);
vec_validate (tm->wrk_ctx, num_threads - 1);
n_workers = num_threads == 1 ? 1 : vtm->n_threads;
prealloc_conn_per_wrk = tcp_cfg.preallocated_connections / n_workers;
for (thread = 0; thread < num_threads; thread++)
{
- vec_validate (tm->wrk_ctx[thread].pending_deq_acked, 255);
- vec_validate (tm->wrk_ctx[thread].pending_disconnects, 255);
- vec_reset_length (tm->wrk_ctx[thread].pending_deq_acked);
- vec_reset_length (tm->wrk_ctx[thread].pending_disconnects);
- tm->wrk_ctx[thread].vm = vlib_mains[thread];
+ wrk = &tm->wrk_ctx[thread];
+
+ vec_validate (wrk->pending_deq_acked, 255);
+ vec_validate (wrk->pending_disconnects, 255);
+ vec_validate (wrk->pending_resets, 255);
+ vec_reset_length (wrk->pending_deq_acked);
+ vec_reset_length (wrk->pending_disconnects);
+ vec_reset_length (wrk->pending_resets);
+ wrk->vm = vlib_mains[thread];
+ wrk->max_timers_per_loop = 10;
/*
* Preallocate connections. Assume that thread 0 won't
* use preallocated connections when running multi-core
*/
if ((thread > 0 || num_threads == 1) && prealloc_conn_per_wrk)
- pool_init_fixed (tm->connections[thread], prealloc_conn_per_wrk);
+ pool_init_fixed (wrk->connections, prealloc_conn_per_wrk);
}
/*
tcp_cfg.max_rx_fifo = 32 << 20;
tcp_cfg.min_rx_fifo = 4 << 10;
- tcp_cfg.default_mtu = 1460;
+ tcp_cfg.default_mtu = 1500;
tcp_cfg.initial_cwnd_multiplier = 0;
tcp_cfg.enable_tx_pacing = 1;
+ tcp_cfg.allow_tso = 0;
+ tcp_cfg.csum_offload = 1;
tcp_cfg.cc_algo = TCP_CC_NEWRENO;
+ tcp_cfg.rwnd_min_update_ack = 1;
/* Time constants defined as timer tick (100ms) multiples */
tcp_cfg.delack_time = 1; /* 0.1s */
tcp_cfg.lastack_time = 300; /* 30s */
tcp_cfg.finwait2_time = 300; /* 30s */
tcp_cfg.closing_time = 300; /* 30s */
- tcp_cfg.cleanup_time = 1; /* 0.1s */
+ tcp_cfg.cleanup_time = 0.1; /* 100ms */
}
static clib_error_t *
uword
unformat_tcp_cc_algo (unformat_input_t * input, va_list * va)
{
- uword *result = va_arg (*va, uword *);
+ tcp_cc_algorithm_type_e *result = va_arg (*va, tcp_cc_algorithm_type_e *);
tcp_main_t *tm = &tcp_main;
char *cc_algo_name;
u8 found = 0;
static clib_error_t *
tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
{
+ u32 cwnd_multiplier, tmp_time;
+ uword memory_size;
+
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "preallocated-connections %d",
&tcp_cfg.buffer_fail_fraction))
;
else if (unformat (input, "max-rx-fifo %U", unformat_memory_size,
- &tcp_cfg.max_rx_fifo))
- ;
+ &memory_size))
+ {
+ if (memory_size >= 0x100000000)
+ {
+ return clib_error_return
+ (0, "max-rx-fifo %llu (0x%llx) too large", memory_size,
+ memory_size);
+ }
+ tcp_cfg.max_rx_fifo = memory_size;
+ }
else if (unformat (input, "min-rx-fifo %U", unformat_memory_size,
- &tcp_cfg.min_rx_fifo))
- ;
- else if (unformat (input, "mtu %d", &tcp_cfg.default_mtu))
+ &memory_size))
+ {
+ if (memory_size >= 0x100000000)
+ {
+ return clib_error_return
+ (0, "min-rx-fifo %llu (0x%llx) too large", memory_size,
+ memory_size);
+ }
+ tcp_cfg.min_rx_fifo = memory_size;
+ }
+ else if (unformat (input, "mtu %u", &tcp_cfg.default_mtu))
;
- else if (unformat (input, "initial-cwnd-multiplier %d",
- &tcp_cfg.initial_cwnd_multiplier))
+ else if (unformat (input, "rwnd-min-update-ack %d",
+ &tcp_cfg.rwnd_min_update_ack))
;
+ else if (unformat (input, "initial-cwnd-multiplier %u",
+ &cwnd_multiplier))
+ tcp_cfg.initial_cwnd_multiplier = cwnd_multiplier;
else if (unformat (input, "no-tx-pacing"))
tcp_cfg.enable_tx_pacing = 0;
+ else if (unformat (input, "tso"))
+ tcp_cfg.allow_tso = 1;
+ else if (unformat (input, "no-csum-offload"))
+ tcp_cfg.csum_offload = 0;
else if (unformat (input, "cc-algo %U", unformat_tcp_cc_algo,
&tcp_cfg.cc_algo))
;
else if (unformat (input, "%U", unformat_tcp_cc_algo_cfg))
;
- else if (unformat (input, "closewait-time %d", &tcp_cfg.closewait_time))
- tcp_cfg.closewait_time /= TCP_TIMER_TICK;
- else if (unformat (input, "timewait-time %d", &tcp_cfg.timewait_time))
- tcp_cfg.timewait_time /= TCP_TIMER_TICK;
- else if (unformat (input, "finwait1-time %d", &tcp_cfg.finwait1_time))
- tcp_cfg.finwait1_time /= TCP_TIMER_TICK;
- else if (unformat (input, "finwait2-time %d", &tcp_cfg.finwait2_time))
- tcp_cfg.finwait2_time /= TCP_TIMER_TICK;
- else if (unformat (input, "lastack-time %d", &tcp_cfg.lastack_time))
- tcp_cfg.lastack_time /= TCP_TIMER_TICK;
- else if (unformat (input, "closing-time %d", &tcp_cfg.closing_time))
- tcp_cfg.closing_time /= TCP_TIMER_TICK;
- else if (unformat (input, "cleanup-time %d", &tcp_cfg.cleanup_time))
- tcp_cfg.cleanup_time /= TCP_TIMER_TICK;
+ else if (unformat (input, "closewait-time %u", &tmp_time))
+ tcp_cfg.closewait_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "timewait-time %u", &tmp_time))
+ tcp_cfg.timewait_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "finwait1-time %u", &tmp_time))
+ tcp_cfg.finwait1_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "finwait2-time %u", &tmp_time))
+ tcp_cfg.finwait2_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "lastack-time %u", &tmp_time))
+ tcp_cfg.lastack_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "closing-time %u", &tmp_time))
+ tcp_cfg.closing_time = tmp_time / TCP_TIMER_TICK;
+ else if (unformat (input, "cleanup-time %u", &tmp_time))
+ tcp_cfg.cleanup_time = tmp_time / 1000.0;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
ip4_address_t * start,
ip4_address_t * end, u32 table_id)
{
- vnet_main_t *vnm = vnet_get_main ();
u32 start_host_byte_order, end_host_byte_order;
fib_prefix_t prefix;
fib_node_index_t fei;
sw_if_index = fib_entry_get_resolving_interface (fei);
/* Configure proxy arp across the range */
- rv = vnet_proxy_arp_add_del (start, end, fib_index, 0 /* is_del */ );
+ rv = ip4_neighbor_proxy_add (fib_index, start, end);
if (rv)
return rv;
- rv = vnet_proxy_arp_enable_disable (vnm, sw_if_index, 1);
+ rv = ip4_neighbor_proxy_enable (sw_if_index);
if (rv)
return rv;
return VNET_API_ERROR_NO_MATCHING_INTERFACE;
/* Add a proxy neighbor discovery entry for this address */
- ip6_neighbor_proxy_add_del (sw_if_index, start, 0 /* is_del */ );
+ ip6_neighbor_proxy_add (sw_if_index, start);
/* Add a receive adjacency for this address */
receive_dpo_add_or_lock (DPO_PROTO_IP6, ~0 /* sw_if_index */ ,
}
static clib_error_t *
-tcp_src_address (vlib_main_t * vm,
- unformat_input_t * input, vlib_cli_command_t * cmd_arg)
+tcp_src_address_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd_arg)
{
ip4_address_t v4start, v4end;
ip6_address_t v6start, v6end;
{
.path = "tcp src-address",
.short_help = "tcp src-address <ip-addr> [- <ip-addr>] add src address range",
- .function = tcp_src_address,
+ .function = tcp_src_address_fn,
};
/* *INDENT-ON* */
/* Push segments */
tcp_rcv_sacks (dummy_tc, next_ack);
if (has_new_ack)
- dummy_tc->snd_una = next_ack + dummy_tc->sack_sb.snd_una_adv;
+ dummy_tc->snd_una = next_ack;
if (verbose)
s = format (s, "result: %U", format_tcp_scoreboard,
};
/* *INDENT-ON* */
+static clib_error_t *
+show_tcp_stats_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ tcp_main_t *tm = vnet_get_tcp_main ();
+ tcp_worker_ctx_t *wrk;
+ u32 thread;
+
+ if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ return clib_error_return (0, "unknown input `%U'", format_unformat_error,
+ input);
+ for (thread = 0; thread < vec_len (tm->wrk_ctx); thread++)
+ {
+ wrk = tcp_get_worker (thread);
+ vlib_cli_output (vm, "Thread %u:\n", thread);
+
+ if (clib_fifo_elts (wrk->pending_timers))
+ vlib_cli_output (vm, " %lu pending timers",
+ clib_fifo_elts (wrk->pending_timers));
+
+#define _(name,type,str) \
+ if (wrk->stats.name) \
+ vlib_cli_output (vm, " %lu %s", wrk->stats.name, str);
+ foreach_tcp_wrk_stat
+#undef _
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_tcp_stats_command, static) =
+{
+ .path = "show tcp stats",
+ .short_help = "show tcp stats",
+ .function = show_tcp_stats_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+clear_tcp_stats_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ tcp_main_t *tm = vnet_get_tcp_main ();
+ tcp_worker_ctx_t *wrk;
+ u32 thread;
+
+ if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ return clib_error_return (0, "unknown input `%U'", format_unformat_error,
+ input);
+
+ for (thread = 0; thread < vec_len (tm->wrk_ctx); thread++)
+ {
+ wrk = tcp_get_worker (thread);
+ clib_memset (&wrk->stats, 0, sizeof (wrk->stats));
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (clear_tcp_stats_command, static) =
+{
+ .path = "clear tcp stats",
+ .short_help = "clear tcp stats",
+ .function = clear_tcp_stats_fn,
+};
+/* *INDENT-ON* */
+
/*
* fd.io coding-style-patch-verification: ON
*