tcp_update_rcv_wnd (tc);
- if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
tc->flags |= TCP_CONN_TRACK_BURST;
if (tc->snd_una == tc->snd_nxt)
- tcp_cc_event (tc, TCP_CC_EVT_START_TX);
+ {
+ tcp_cc_event (tc, TCP_CC_EVT_START_TX);
+ tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_MSS);
+ }
}
#endif /* CLIB_MARCH_VARIANT */
tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
{
u16 checksum = 0;
- if (PREDICT_FALSE (tc->flags & TCP_CONN_NO_CSUM_OFFLOAD))
+ if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
{
tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
vlib_main_t *vm = wrk->vm;
return checksum;
}
-
/**
* Prepare ACK
*/
ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
&pkt_ih4->src_address, IP_PROTOCOL_TCP,
- (!(tc->flags & TCP_CONN_NO_CSUM_OFFLOAD)));
+ tcp_csum_offload (tc));
th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
}
else
ip4_header_t *ih;
ih = vlib_buffer_push_ip4 (vm, b, &tc->c_lcl_ip4,
&tc->c_rmt_ip4, IP_PROTOCOL_TCP,
- (!(tc->flags & TCP_CONN_NO_CSUM_OFFLOAD)));
+ tcp_csum_offload (tc));
th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih);
}
else
return;
}
+ /* If we have non-dupacks programmed, no need to send them */
+ if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
+ tc->flags &= ~TCP_CONN_SNDACK;
+
tcp_retransmit_timer_force_update (tc);
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
tc->bytes_out += data_len;
tc->data_segs_out += 1;
-
th->checksum = tcp_compute_checksum (tc, b);
TCP_EVT (TCP_EVT_PKTIZE, tc);
max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
- /* Start is beyond snd_congestion */
start = tc->snd_una + offset;
- if (seq_geq (start, tc->snd_congestion))
- return 0;
-
- /* Don't overshoot snd_congestion */
- if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
- {
- max_deq_bytes = tc->snd_congestion - start;
- if (max_deq_bytes == 0)
- return 0;
- }
-
n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
if (!n_bytes)
return 0;
tc->snd_rxt_bytes += n_bytes;
- if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
tcp_bt_track_rxt (tc, start, start + n_bytes);
tc->bytes_retrans += n_bytes;
}
if (tcp_opts_sack_permitted (&tc->rcv_opts))
- scoreboard_init_high_rxt (&tc->sack_sb, tc->snd_una + tc->snd_mss);
+ scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);
tcp_program_retransmit (tc);
}
|| tc->snd_nxt == tc->snd_una_max
|| tc->rto_boff > 1));
- if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
{
tcp_bt_check_app_limited (tc);
tcp_bt_track_tx (tc);
/**
* Estimate send space using proportional rate reduction (RFC6937)
*/
-static int
+int
tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
{
u32 pipe, prr_out;
}
else
{
- int limit = tc->prr_delivered - prr_out + tc->snd_mss;
+ int limit;
+ limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
space = clib_min (tc->ssthresh - pipe, limit);
}
space = clib_max (space, prr_out ? 0 : tc->snd_mss);
return space;
}
+/**
+ * Heuristic: decide whether the segment at snd_una (the head of the
+ * retransmit queue) looks like a lost retransmit that should be resent.
+ *
+ * tx_adv_sack is how far sacked bytes have advanced beyond
+ * snd_congestion; rr scales the bytes delivered since recovery started
+ * (snd_una - prr_start) by the reduction ratio ssthresh / prev_cwnd.
+ * If sacked progress outpaces the scaled delivered bytes, the head is
+ * presumed lost (see caller: "Check if snd_una is a lost retransmit").
+ *
+ * NOTE(review): assumes prev_cwnd != 0 when invoked in congestion
+ * recovery — confirm with callers.
+ */
+static inline u8
+tcp_retransmit_should_retry_head (tcp_connection_t * tc,
+ sack_scoreboard_t * sb)
+{
+ u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
+ f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;
+
+ return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
+}
+
+/**
+ * Bytes sitting in the tx fifo that have not yet been sent, i.e., the
+ * maximum dequeuable amount minus what is already in flight
+ * (snd_nxt - snd_una).
+ *
+ * Returns u32: a u8 return would truncate the count modulo 256 and read
+ * as 0 whenever the unsent backlog is an exact multiple of 256 bytes,
+ * which breaks boolean uses such as "!tcp_max_tx_deq (tc)".
+ */
+static inline u32
+tcp_max_tx_deq (tcp_connection_t * tc)
+{
+ return (transport_max_tx_dequeue (&tc->connection)
+ - (tc->snd_nxt - tc->snd_una));
+}
+
#define scoreboard_rescue_rxt_valid(_sb, _tc) \
(seq_geq (_sb->rescue_rxt, _tc->snd_una) \
&& seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
u32 burst_size)
{
+ u8 snd_limited = 0, can_rescue = 0, reset_pacer = 0;
u32 n_written = 0, offset, max_bytes, n_segs = 0;
+ u32 bi, max_deq, burst_bytes, sent_bytes;
sack_scoreboard_hole_t *hole;
vlib_main_t *vm = wrk->vm;
vlib_buffer_t *b = 0;
sack_scoreboard_t *sb;
- u32 bi, max_deq;
int snd_space;
- u8 snd_limited = 0, can_rescue = 0;
+ u64 time_now;
ASSERT (tcp_in_cong_recovery (tc));
+ time_now = wrk->vm->clib_time.last_cpu_time;
+ burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+ time_now);
+ burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
+ if (!burst_size)
+ {
+ tcp_program_retransmit (tc);
+ return 0;
+ }
+
if (tcp_in_recovery (tc))
snd_space = tcp_available_cc_snd_space (tc);
else
if (snd_space < tc->snd_mss)
{
- /* We're cc constrained so don't accumulate tokens */
- transport_connection_tx_pacer_reset_bucket (&tc->connection,
- vm->
- clib_time.last_cpu_time);
- return 0;
+ reset_pacer = burst_bytes > tc->snd_mss;
+ goto done;
}
- TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
+ reset_pacer = snd_space < burst_bytes;
+
sb = &tc->sack_sb;
+
+ /* Check if snd_una is a lost retransmit */
+ if (pool_elts (sb->holes)
+ && seq_gt (sb->high_sacked, tc->snd_congestion)
+ && tc->rxt_head != tc->snd_una
+ && tcp_retransmit_should_retry_head (tc, sb))
+ {
+ n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss,
+ &b);
+ if (!n_written)
+ {
+ tcp_program_retransmit (tc);
+ goto done;
+ }
+ bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+ n_segs = 1;
+
+ tc->rxt_head = tc->snd_una;
+ tc->rxt_delivered += n_written;
+ tc->prr_delivered += n_written;
+ ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
+ }
+
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
max_deq = transport_max_tx_dequeue (&tc->connection);
if (!hole)
{
/* We are out of lost holes to retransmit so send some new data. */
- if (max_deq)
+ if (max_deq > tc->snd_mss)
{
- u32 n_segs_new, av_window;
- av_window = tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
- snd_space = clib_min (snd_space, av_window);
+ u32 n_segs_new;
+ int av_wnd;
+
+ av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
+ av_wnd = clib_max (av_wnd, 0);
+ snd_space = clib_min (snd_space, av_wnd);
snd_space = clib_min (max_deq, snd_space);
burst_size = clib_min (burst_size - n_segs,
snd_space / tc->snd_mss);
done:
+ if (reset_pacer)
+ {
+ transport_connection_tx_pacer_reset_bucket (&tc->connection,
+ vm->clib_time.
+ last_cpu_time);
+ }
+ else
+ {
+ sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
+ transport_connection_tx_pacer_update_bytes (&tc->connection,
+ sent_bytes);
+ }
+
return n_segs;
}
u32 burst_size)
{
u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now;
+ u32 burst_bytes, sent_bytes;
vlib_main_t *vm = wrk->vm;
int snd_space, n_segs = 0;
+ u8 cc_limited = 0;
vlib_buffer_t *b;
+ u64 time_now;
ASSERT (tcp_in_fastrecovery (tc));
TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
+ time_now = wrk->vm->clib_time.last_cpu_time;
+ burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+ time_now);
+ burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
+ if (!burst_size)
+ {
+ tcp_program_retransmit (tc);
+ return 0;
+ }
+
snd_space = tcp_available_cc_snd_space (tc);
+ cc_limited = snd_space < burst_bytes;
if (!tcp_fastrecovery_first (tc))
goto send_unsent;
done:
tcp_fastrecovery_first_off (tc);
- return n_segs;
-}
-/**
- * Do fast retransmit
- */
-static int
-tcp_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, u32 burst_size)
-{
- if (tcp_opts_sack_permitted (&tc->rcv_opts))
- return tcp_retransmit_sack (wrk, tc, burst_size);
- else
- return tcp_retransmit_no_sack (wrk, tc, burst_size);
+ sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
+ sent_bytes = cc_limited ? burst_bytes : sent_bytes;
+ transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
+
+ return n_segs;
}
static int
if (!tc->pending_dupacks)
{
- tcp_send_ack (tc);
- return 1;
+ if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
+ || tc->state != TCP_STATE_ESTABLISHED)
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+ return 0;
}
/* If we're supposed to send dupacks but have no ooo data
if (!vec_len (tc->snd_sacks))
{
tcp_send_ack (tc);
+ tc->pending_dupacks = 0;
return 1;
}
+/**
+ * Dispatch retransmit to the SACK or non-SACK variant.
+ *
+ * Pacing (burst sizing from the pacer and post-send byte accounting) is
+ * moved into tcp_retransmit_sack/tcp_retransmit_no_sack themselves, so
+ * this wrapper only selects the variant based on whether the peer
+ * permitted SACK, replacing the deleted tcp_retransmit helper.
+ */
static int
tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
{
- u32 n_segs = 0, burst_size, sent_bytes, burst_bytes;
tcp_worker_ctx_t *wrk;
+ u32 n_segs;
wrk = tcp_get_worker (tc->c_thread_index);
- burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
- wrk->vm->
- clib_time.last_cpu_time);
- burst_size = clib_min (max_burst_size, burst_bytes / tc->snd_mss);
- if (!burst_size)
- {
- tcp_program_retransmit (tc);
- return 0;
- }
- n_segs = tcp_retransmit (wrk, tc, burst_size);
- sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
- transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
+ if (tcp_opts_sack_permitted (&tc->rcv_opts))
+ n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
+ else
+ n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);
+
return n_segs;
}
if (is_ip4)
ih0 = vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
- IP_PROTOCOL_TCP,
- (!(tc0->flags & TCP_CONN_NO_CSUM_OFFLOAD)));
+ IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
else
ih0 = vlib_buffer_push_ip6 (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
IP_PROTOCOL_TCP);
always_inline void
tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
{
- if (!tc->is_tso)
+ if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
return;
+
u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;
if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))