tcp_connection_t tcp_connection;
} tcp_tx_trace_t;
-u16 dummy_mtu = 400;
+u16 dummy_mtu = 1460;
u8 *
format_tcp_tx_trace (u8 * s, va_list * args)
return s;
}
-void
-tcp_set_snd_mss (tcp_connection_t * tc)
-{
- u16 snd_mss;
-
- /* TODO find our iface MTU */
- snd_mss = dummy_mtu;
-
- /* TODO cache mss and consider PMTU discovery */
- snd_mss = tc->opt.mss < snd_mss ? tc->opt.mss : snd_mss;
-
- tc->snd_mss = snd_mss;
-
- if (tc->snd_mss == 0)
- {
- clib_warning ("snd mss is 0");
- tc->snd_mss = dummy_mtu;
- }
-}
-
static u8
tcp_window_compute_scale (u32 available_space)
{
always_inline u32
tcp_initial_wnd_unscaled (tcp_connection_t * tc)
{
- return TCP_IW_N_SEGMENTS * dummy_mtu;
+ return TCP_IW_N_SEGMENTS * tc->mss;
}
/**
u8 len = 0;
opts->flags |= TCP_OPTS_FLAG_MSS;
- opts->mss = dummy_mtu; /*XXX discover that */
+ opts->mss = tc->mss;
len += TCP_OPTION_LEN_MSS;
if (tcp_opts_wscale (&tc->opt))
{
opts->flags |= TCP_OPTS_FLAG_SACK;
opts->sacks = tc->snd_sacks;
- opts->n_sack_blocks = vec_len (tc->snd_sacks);
+ opts->n_sack_blocks = clib_min (vec_len (tc->snd_sacks),
+ TCP_OPTS_MAX_SACK_BLOCKS);
len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
}
}
}
}
+/**
+ * Update max segment size we're able to process.
+ *
+ * The value is constrained by our interface's MTU and IP options. It is
+ * also what we advertise to our peer.
+ */
+void
+tcp_update_rcv_mss (tcp_connection_t * tc)
+{
+ /* TODO find our iface MTU */
+ /* NOTE(review): dummy_mtu (1460) is a file-scope placeholder until real
+  * interface-MTU lookup is implemented -- confirm before shipping */
+ tc->mss = dummy_mtu;
+}
+
+/**
+ * Update snd_mss to reflect the effective segment size that we can send
+ * by taking into account all TCP options, including SACKs
+ */
+void
+tcp_update_snd_mss (tcp_connection_t * tc)
+{
+ /* Compute options to be used for connection. These may be reused when
+ * sending data or to compute the effective mss (snd_mss) */
+ tc->snd_opts_len =
+ tcp_make_options (tc, &tc->snd_opts, TCP_STATE_ESTABLISHED);
+
+ /* XXX check if MTU has been updated */
+ /* Effective payload per segment: the smaller of our mss and the peer's
+  * advertised mss, minus the bytes consumed by the TCP options above */
+ tc->snd_mss = clib_min (tc->mss, tc->opt.mss) - tc->snd_opts_len;
+}
+
+/**
+ * Initialize connection mss values at establishment time.
+ *
+ * Sets tc->mss from the (placeholder) receive-side computation and
+ * tc->snd_mss from the smaller of our mss and the peer's advertised mss,
+ * then deducts the timestamp option length when timestamps are in use.
+ */
+void
+tcp_init_mss (tcp_connection_t * tc)
+{
+ tcp_update_rcv_mss (tc);
+
+ /* TODO cache mss and consider PMTU discovery */
+ tc->snd_mss = clib_min (tc->opt.mss, tc->mss);
+
+ if (tc->snd_mss == 0)
+ {
+ clib_warning ("snd mss is 0");
+ tc->snd_mss = tc->mss;
+ }
+
+ /* We should have enough space for 40 bytes of options */
+ /* NOTE(review): comment says 40 bytes but the assert threshold is 45 --
+  * confirm the intended margin */
+ ASSERT (tc->snd_mss > 45);
+
+ /* If we use timestamp option, account for it */
+ if (tcp_opts_tstamp (&tc->opt))
+ tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
+}
+
#define tcp_get_free_buffer_index(tm, bidx) \
do { \
u32 *my_tx_buffers, n_free_buffers; \
*/
static void
tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
- tcp_state_t next_state)
+ tcp_state_t next_state, u8 compute_opts)
{
u32 advertise_wnd, data_len;
- u8 tcp_opts_len, tcp_hdr_opts_len, opts_write_len, flags;
- tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
+ u8 tcp_hdr_opts_len, opts_write_len, flags;
tcp_header_t *th;
- data_len = b->current_length;
+ data_len = b->current_length + b->total_length_not_including_first_buffer;
vnet_buffer (b)->tcp.flags = 0;
- /* Make and write options */
- memset (snd_opts, 0, sizeof (*snd_opts));
- tcp_opts_len = tcp_make_options (tc, snd_opts, next_state);
- tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
+ if (compute_opts)
+ tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
+
+ /* Write pre-computed options */
+ tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
/* Get rcv window to advertise */
advertise_wnd = tcp_window_to_advertise (tc, next_state);
tc->rcv_nxt, tcp_hdr_opts_len, flags,
advertise_wnd);
- opts_write_len = tcp_options_write ((u8 *) (th + 1), snd_opts);
+ opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
- ASSERT (opts_write_len == tcp_opts_len);
+ ASSERT (opts_write_len == tc->snd_opts_len);
/* Tag the buffer with the connection index */
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
tc->snd_nxt += data_len;
+ tc->rcv_las = tc->rcv_nxt;
/* TODO this is updated in output as well ... */
if (tc->snd_nxt > tc->snd_una_max)
tc->snd_una_max = tc->snd_nxt;
+
+ if (tc->rtt_ts == 0)
+ {
+ tc->rtt_ts = tcp_time_now ();
+ tc->rtt_seq = tc->snd_nxt;
+ }
TCP_EVT_DBG (TCP_EVT_PKTIZE, tc);
}
goto done;
}
+ tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
+
ASSERT (max_bytes <= tc->snd_mss);
n_bytes = stream_session_peek_bytes (&tc->connection,
max_bytes);
ASSERT (n_bytes != 0);
b->current_length = n_bytes;
- tcp_push_hdr_i (tc, b, tc->state);
+ tcp_push_hdr_i (tc, b, tc->state, 0);
+ tc->rtx_bytes += n_bytes;
done:
TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes);
}
/* Start again from the beginning */
- tcp_recovery_on (tc);
+
tc->cwnd = tcp_loss_wnd (tc);
tc->snd_congestion = tc->snd_una_max;
+ tcp_recovery_on (tc);
}
static void
u32 thread_index = vlib_get_thread_index ();
tcp_connection_t *tc;
vlib_buffer_t *b;
- u32 bi, snd_space, n_bytes;
+ u32 bi, n_bytes;
if (is_syn)
{
/* Exponential backoff */
tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
- /* Figure out what and how many bytes we can send */
- snd_space = tcp_available_snd_space (tc);
-
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
- if (snd_space == 0)
- {
- clib_warning ("no wnd to retransmit");
- tcp_return_buffer (tm);
-
- /* Force one segment */
- tcp_retransmit_first_unacked (tc);
+ /* Send one segment. No fancy recovery for now! */
+ n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, tc->snd_mss);
+ scoreboard_clear (&tc->sack_sb);
- /* Re-enable retransmit timer. Output may be unwilling
- * to do it for us */
- tcp_retransmit_timer_set (tc);
-
- return;
- }
- else
+ if (n_bytes == 0)
{
- /* No fancy recovery for now! */
- n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, snd_space);
- scoreboard_clear (&tc->sack_sb);
-
- if (n_bytes == 0)
- return;
-
- tc->rtx_bytes += n_bytes;
+ clib_warning ("could not retransmit");
+ return;
}
}
else
vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
- tcp_push_hdr_i (tc, b, tc->state);
+ tcp_push_hdr_i (tc, b, tc->state, 1);
/* Account for the SYN */
tc->snd_nxt += 1;
/* Try to force the first unsent segment */
tcp_get_free_buffer_index (tm, &bi);
b = vlib_get_buffer (vm, bi);
+ tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
n_bytes = stream_session_peek_bytes (&tc->connection,
vlib_buffer_get_current (b),
tc->snd_una_max - tc->snd_una,
}
b->current_length = n_bytes;
- tcp_push_hdr_i (tc, b, tc->state);
+ tcp_push_hdr_i (tc, b, tc->state, 0);
tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
/* Re-enable persist timer */
n_bytes = tcp_prepare_retransmit_segment (tc, b, 0, tc->snd_mss);
if (n_bytes == 0)
- return;
+ goto done;
tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
- tc->rtx_bytes += n_bytes;
+
+done:
+ tc->snd_nxt = tc->snd_una_max;
}
sack_scoreboard_hole_t *
}
tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
- tc->rtx_bytes += n_written;
snd_space -= n_written;
}
}
/* If not retransmitting
- * 1) update snd_una_max (SYN, SYNACK, new data, FIN)
+ * 1) update snd_una_max (SYN, SYNACK, FIN)
* 2) If we're not tracking an ACK, start tracking */
if (seq_lt (tc0->snd_una_max, tc0->snd_nxt))
{
tcp_connection_t *tc;
tc = (tcp_connection_t *) tconn;
- tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED);
+ tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, 0);
return 0;
}
vlib_buffer_t *b0;
tcp_tx_trace_t *t0;
tcp_header_t *th0;
- tcp_connection_t *tc0;
u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
bi0 = from[0];
th0 = ip4_next_header ((ip4_header_t *) th0);
else
th0 = ip6_next_header ((ip6_header_t *) th0);
- tc0 =
- tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
- my_thread_index);
t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
- clib_memcpy (&t0->tcp_connection, tc0,
- sizeof (t0->tcp_connection));
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,