{
tcp_main_t *tm = vnet_get_tcp_main ();
tcp_connection_t *tc = 0;
+ ASSERT (vlib_get_thread_index () == 0);
pool_get (tm->half_open_connections, tc);
memset (tc, 0, sizeof (*tc));
tc->c_c_index = tc - tm->half_open_connections;
tcp_send_fin (tc);
tc->state = TCP_STATE_LAST_ACK;
break;
+ case TCP_STATE_FIN_WAIT_1:
+ break;
default:
- clib_warning ("shouldn't be here");
+ clib_warning ("state: %u", tc->state);
}
TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc);
}
#endif /* 0 */
+/**
+ * Initialize connection send variables.
+ *
+ * Seeds the initial send sequence number (iss) with a random value and
+ * resets all send-side sequence trackers to it. snd_nxt is set one past
+ * iss — the SYN itself consumes a sequence number — so snd_una_max also
+ * ends up at iss + 1.
+ *
+ * @param tc connection whose send variables are (re)initialized
+ */
+void
+tcp_init_snd_vars (tcp_connection_t * tc)
+{
+  u32 time_now;
+
+  /* Set random initial sequence: the current tcp timestamp seeds the
+   * PRNG so consecutive connections presumably get distinct ISNs —
+   * note this is not the RFC 6528 hashed-ISN scheme. */
+  time_now = tcp_time_now ();
+  tc->iss = random_u32 (&time_now);
+  tc->snd_una = tc->iss;
+  tc->snd_nxt = tc->iss + 1;
+  tc->snd_una_max = tc->snd_nxt;
+}
+
/** Initialize tcp connection variables
*
* Should be called after having received a msg from the peer, i.e., a SYN or
tcp_init_mss (tc);
scoreboard_init (&tc->sack_sb);
tcp_cc_init (tc);
+ if (tc->state == TCP_STATE_SYN_RCVD)
+ tcp_init_snd_vars (tc);
+
// tcp_connection_fib_attach (tc);
}
prefix.fp_len = rmt->is_ip4 ? 32 : 128;
fib_index = fib_table_find (prefix.fp_proto, rmt->vrf);
+ if (fib_index == (u32) ~ 0)
+ {
+ clib_warning ("no fib table");
+ return -1;
+ }
+
fei = fib_table_lookup (fib_index, &prefix);
/* Couldn't find route to destination. Bail out. */
TCP_EVT_DBG (TCP_EVT_OPEN, tc);
tc->state = TCP_STATE_SYN_SENT;
+ tcp_init_snd_vars (tc);
tcp_send_syn (tc);
clib_spinlock_unlock_if_init (&tm->half_open_lock);
tc->snd_wnd, tc->rcv_wnd, tc->snd_wl1 - tc->irs,
tc->snd_wl2 - tc->iss);
s = format (s, " flight size %u send space %u rcv_wnd_av %d\n",
- tcp_flight_size (tc), tcp_available_snd_space (tc),
+ tcp_flight_size (tc), tcp_available_output_snd_space (tc),
tcp_rcv_wnd_available (tc));
s = format (s, " cong %U ", format_tcp_congestion_status, tc);
s = format (s, "cwnd %u ssthresh %u rtx_bytes %u bytes_acked %u\n",
s = format (s, "rtt_seq %u\n", tc->rtt_seq);
s = format (s, " tsval_recent %u tsval_recent_age %u\n", tc->tsval_recent,
tcp_time_now () - tc->tsval_recent_age);
- s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb);
+ s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb,
+ tc);
if (vec_len (tc->snd_sacks))
s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc);
tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
u32 verbose = va_arg (*args, u32);
+ if (!tc)
+ return s;
s = format (s, "%-50U", format_tcp_connection_id, tc);
if (verbose)
{
if (tc)
s = format (s, "%U", format_tcp_connection, tc, verbose);
else
- s = format (s, "empty");
+ s = format (s, "empty\n");
return s;
}
format_tcp_sack_hole (u8 * s, va_list * args)
{
sack_scoreboard_hole_t *hole = va_arg (*args, sack_scoreboard_hole_t *);
- s = format (s, "[%u, %u]", hole->start, hole->end);
+ tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
+ if (tc)
+ s = format (s, " [%u, %u]", hole->start - tc->iss, hole->end - tc->iss);
+ else
+ s = format (s, " [%u, %u]", hole->start, hole->end);
return s;
}
format_tcp_scoreboard (u8 * s, va_list * args)
{
sack_scoreboard_t *sb = va_arg (*args, sack_scoreboard_t *);
+ tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
sack_scoreboard_hole_t *hole;
s = format (s, "sacked_bytes %u last_sacked_bytes %u lost_bytes %u\n",
sb->sacked_bytes, sb->last_sacked_bytes, sb->lost_bytes);
while (hole)
{
- s = format (s, "%U", format_tcp_sack_hole, hole);
+ s = format (s, "%U", format_tcp_sack_hole, hole, tc);
hole = scoreboard_next_hole (sb, hole);
}
return tc->snd_wnd <= snd_space ? tc->snd_wnd : 0;
}
- /* If we can't write at least a segment, don't try at all */
+ /* If not snd_wnd constrained and we can't write at least a segment,
+ * don't try at all */
if (PREDICT_FALSE (snd_space < tc->snd_mss))
- {
- if (snd_space > clib_min (tc->mss, tc->rcv_opts.mss) - TCP_HDR_LEN_MAX)
- return snd_space;
- return 0;
- }
+ return snd_space < tc->cwnd ? 0 : snd_space;
/* round down to mss multiple */
return snd_space - (snd_space % tc->snd_mss);
if (PREDICT_TRUE (tcp_in_cong_recovery (tc) == 0))
{
- snd_space = tcp_available_snd_space (tc);
+ snd_space = tcp_available_output_snd_space (tc);
/* If we haven't gotten dupacks or if we did and have gotten sacked
* bytes then we can still send as per Limited Transmit (RFC3042) */
if (tcp_in_recovery (tc))
{
tc->snd_nxt = tc->snd_una_max;
- snd_space = tcp_available_wnd (tc) - tc->snd_rxt_bytes
+ snd_space = tcp_available_snd_wnd (tc) - tc->snd_rxt_bytes
- (tc->snd_una_max - tc->snd_congestion);
if (snd_space <= 0 || (tc->snd_una_max - tc->snd_una) >= tc->snd_wnd)
return 0;
return tcp_round_snd_space (tc, snd_space);
}
- /* If in fast recovery, send 1 SMSS if wnd allows */
- if (tcp_in_fastrecovery (tc)
- && tcp_available_snd_space (tc) && !tcp_fastrecovery_sent_1_smss (tc))
+ /* RFC 5681: When previously unsent data is available and the new value of
+ * cwnd and the receiver's advertised window allow, a TCP SHOULD send 1*SMSS
+ * bytes of previously unsent data. */
+ if (tcp_in_fastrecovery (tc) && !tcp_fastrecovery_sent_1_smss (tc))
{
+ if (tcp_available_output_snd_space (tc) < tc->snd_mss)
+ return 0;
tcp_fastrecovery_1_smss_on (tc);
return tc->snd_mss;
}
tcp_session_send_space (transport_connection_t * trans_conn)
{
tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
- return tcp_snd_space (tc);
+ return clib_min (tcp_snd_space (tc),
+ tc->snd_wnd - (tc->snd_nxt - tc->snd_una));
}
i32
{
ASSERT (tc->state == TCP_STATE_SYN_SENT);
stream_session_connect_notify (&tc->connection, 1 /* fail */ );
+ TCP_DBG ("establish pop: %U", format_tcp_connection, tc, 2);
}
else
{
tc = tcp_connection_get (conn_index, vlib_get_thread_index ());
+ /* note: the connection may have already disappeared */
+ if (PREDICT_FALSE (tc == 0))
+ return;
+ TCP_DBG ("establish pop: %U", format_tcp_connection, tc, 2);
ASSERT (tc->state == TCP_STATE_SYN_RCVD);
+ /* Start cleanup. App wasn't notified yet so use delete notify as
+ * opposed to delete to cleanup session layer state. */
+ stream_session_delete_notify (&tc->connection);
}
tc->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID;
tcp_connection_cleanup (tc);
vlib_thread_main_t *vtm = vlib_get_thread_main ();
clib_error_t *error = 0;
u32 num_threads;
- int i, thread;
+ int thread;
tcp_connection_t *tc __attribute__ ((unused));
u32 preallocated_connections_per_thread;
}
for (; thread < num_threads; thread++)
{
- for (i = 0; i < preallocated_connections_per_thread; i++)
- pool_get (tm->connections[thread], tc);
-
- for (i = 0; i < preallocated_connections_per_thread; i++)
- pool_put_index (tm->connections[thread], i);
+ if (preallocated_connections_per_thread)
+ pool_init_fixed (tm->connections[thread],
+ preallocated_connections_per_thread);
}
/*
- * Preallocate half-open connections
+ * Use a preallocated half-open connection pool?
*/
- for (i = 0; i < tm->preallocated_half_open_connections; i++)
- pool_get (tm->half_open_connections, tc);
-
- for (i = 0; i < tm->preallocated_half_open_connections; i++)
- pool_put_index (tm->half_open_connections, i);
+ if (tm->preallocated_half_open_connections)
+ pool_init_fixed (tm->half_open_connections,
+ tm->preallocated_half_open_connections);
/* Initialize per worker thread tx buffers (used for control messages) */
vec_validate (tm->tx_buffers, num_threads - 1);
tm->tstamp_ticks_per_clock = vm->clib_time.seconds_per_clock
/ TCP_TSTAMP_RESOLUTION;
+ if (tm->local_endpoints_table_buckets == 0)
+ tm->local_endpoints_table_buckets = 250000;
+ if (tm->local_endpoints_table_memory == 0)
+ tm->local_endpoints_table_memory = 512 << 20;
+
clib_bihash_init_24_8 (&tm->local_endpoints_table, "local endpoint table",
- 1000000 /* $$$$ config parameter nbuckets */ ,
- (512 << 20) /*$$$ config parameter table size */ );
+ tm->local_endpoints_table_buckets,
+ tm->local_endpoints_table_memory);
/* Initialize [port-allocator] random number seed */
tm->port_allocator_seed = (u32) clib_cpu_time_now ();
vec_validate (tm->tx_frames[0], num_threads - 1);
vec_validate (tm->tx_frames[1], num_threads - 1);
+ vec_validate (tm->ip_lookup_tx_frames[0], num_threads - 1);
+ vec_validate (tm->ip_lookup_tx_frames[1], num_threads - 1);
tm->bytes_per_buffer = vlib_buffer_free_list_buffer_size
(vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
return 0;
}
+/**
+ * Enable or disable punting of TCP packets that match no session.
+ *
+ * Only flips the per-address-family flag on the global tcp main struct;
+ * the flag is presumably consulted on the tcp input path (not visible
+ * in this chunk — confirm against the input node).
+ *
+ * @param vm     vlib main (unused here)
+ * @param is_ip4 non-zero to set the IPv4 flag, zero for the IPv6 flag
+ * @param is_add new flag value: non-zero enables punting, 0 disables
+ */
+void
+tcp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add)
+{
+  tcp_main_t *tm = &tcp_main;
+  if (is_ip4)
+    tm->punt_unknown4 = is_add;
+  else
+    tm->punt_unknown6 = is_add;
+}
+
clib_error_t *
tcp_init (vlib_main_t * vm)
{
tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
{
tcp_main_t *tm = vnet_get_tcp_main ();
+ u64 tmp;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
else if (unformat (input, "preallocated-half-open-connections %d",
&tm->preallocated_half_open_connections))
;
+ else if (unformat (input, "local-endpoints-table-memory %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000)
+ return clib_error_return (0, "memory size %llx (%lld) too large",
+ tmp, tmp);
+ tm->local_endpoints_table_memory = tmp;
+ }
+ else if (unformat (input, "local-endpoints-table-buckets %d",
+ &tm->local_endpoints_table_buckets))
+ ;
+
+
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
v6set = 1;
else if (unformat (input, "%U", unformat_ip6_address, &v6start))
{
- memcpy (&v6end, &v6start, sizeof (v4start));
+ memcpy (&v6end, &v6start, sizeof (v6start));
v6set = 1;
}
else if (unformat (input, "fib-table %d", &table_id))
};
/* *INDENT-ON* */
+/**
+ * CLI handler for "show tcp punt".
+ *
+ * Prints whether punting of unknown TCP packets is currently enabled
+ * for IPv4 and for IPv6. The command takes no arguments; any trailing
+ * input is rejected with an error.
+ *
+ * @return 0 on success, a clib error on unexpected input
+ */
+static clib_error_t *
+show_tcp_punt_fn (vlib_main_t * vm, unformat_input_t * input,
+		  vlib_cli_command_t * cmd_arg)
+{
+  tcp_main_t *tm = vnet_get_tcp_main ();
+  /* No arguments accepted — anything left in the input is an error */
+  if (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    return clib_error_return (0, "unknown input `%U'", format_unformat_error,
+			      input);
+  vlib_cli_output (vm, "IPv4 TCP punt: %s",
+		   tm->punt_unknown4 ? "enabled" : "disabled");
+  vlib_cli_output (vm, "IPv6 TCP punt: %s",
+		   tm->punt_unknown6 ? "enabled" : "disabled");
+  return 0;
+}
+/* *INDENT-OFF* */
+/* Register the "show tcp punt" debug CLI command */
+VLIB_CLI_COMMAND (show_tcp_punt_command, static) =
+{
+  .path = "show tcp punt",
+  .short_help = "show tcp punt",
+  .function = show_tcp_punt_fn,
+};
+/* *INDENT-ON* */
+
/*
* fd.io coding-style-patch-verification: ON
*