tcp_update_rcv_wnd (tc);
if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
- tc->flags |= TCP_CONN_TRACK_BURST;
+ tcp_bt_check_app_limited (tc);
if (tc->snd_una == tc->snd_nxt)
{
return checksum;
}
-
/**
* Prepare ACK
*/
session_flush_frames_main_thread (wrk->vm);
}
-always_inline void
-tcp_enqueue_to_output_i (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
- u8 is_ip4, u8 flush)
+static void
+tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
+ u8 is_ip4)
{
- u32 *to_next, next_index;
- vlib_frame_t *f;
+ session_type_t st;
b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
b->error = 0;
- /* Decide where to send the packet */
- next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
- tcp_trajectory_add_start (b, 2);
-
- /* Get frame to v4/6 output node */
- f = wrk->tx_frames[!is_ip4];
- if (!f)
- {
- f = vlib_get_frame_to_node (wrk->vm, next_index);
- ASSERT (f);
- wrk->tx_frames[!is_ip4] = f;
- }
- to_next = vlib_frame_vector_args (f);
- to_next[f->n_vectors] = bi;
- f->n_vectors += 1;
- if (flush || f->n_vectors == VLIB_FRAME_SIZE)
- {
- vlib_put_frame_to_node (wrk->vm, next_index, f);
- wrk->tx_frames[!is_ip4] = 0;
- }
-}
-
-static void
-tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
- u8 is_ip4)
-{
- tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 0);
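+ /* Queue the buffer on the session layer's per-thread pending tx list;
+ * it is flushed to the tcp output node as part of session node dispatch */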
+ st = session_type_from_proto_and_ip (TRANSPORT_PROTO_TCP, is_ip4);
+ session_add_pending_tx_buffer (st, wrk->vm->thread_index, bi);
}
-static void
-tcp_enqueue_to_output_now (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
- u8 is_ip4)
-{
- tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 1);
-}
#endif /* CLIB_MARCH_VARIANT */
static int
TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
}
-/**
- * Flush tx frame populated by retransmits and timer pops
- */
-void
-tcp_flush_frame_to_output (tcp_worker_ctx_t * wrk, u8 is_ip4)
-{
- if (wrk->tx_frames[!is_ip4])
- {
- u32 next_index;
- next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
- vlib_put_frame_to_node (wrk->vm, next_index, wrk->tx_frames[!is_ip4]);
- wrk->tx_frames[!is_ip4] = 0;
- }
-}
-
/**
* Flush ip lookup tx frames populated by timer pops
*/
void
tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk)
{
- tcp_flush_frame_to_output (wrk, 1);
- tcp_flush_frame_to_output (wrk, 0);
tcp_flush_frame_to_ip_lookup (wrk, 1);
tcp_flush_frame_to_ip_lookup (wrk, 0);
}
return;
}
+ /* If a non-dupack was programmed, no need to send it: the fin will carry the ack */
+ if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
+ tc->flags &= ~TCP_CONN_SNDACK;
+
tcp_retransmit_timer_force_update (tc);
b = vlib_get_buffer (vm, bi);
tcp_init_buffer (vm, b);
tcp_make_fin (tc, b);
- tcp_enqueue_to_output_now (wrk, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
TCP_EVT (TCP_EVT_FIN_SENT, tc);
/* Account for the FIN */
tc->snd_nxt += 1;
tc->bytes_out += data_len;
tc->data_segs_out += 1;
-
th->checksum = tcp_compute_checksum (tc, b);
TCP_EVT (TCP_EVT_PKTIZE, tc);
}
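+/**
+ * Total data length of a buffer, including chained buffers if present
+ */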
+always_inline u32
+tcp_buffer_len (vlib_buffer_t * b)
+{
+ u32 data_len = b->current_length;
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ data_len += b->total_length_not_including_first_buffer;
+ return data_len;
+}
+
u32
tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
{
tcp_connection_t *tc = (tcp_connection_t *) tconn;
- if (tc->flags & TCP_CONN_TRACK_BURST)
- {
- tcp_bt_check_app_limited (tc);
- tcp_bt_track_tx (tc);
- tc->flags &= ~TCP_CONN_TRACK_BURST;
- }
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ tcp_bt_track_tx (tc, tcp_buffer_len (b));
tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
/* update_snd_nxt */ 1);
max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
start = tc->snd_una + offset;
+ ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));
+
n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
if (!n_bytes)
return 0;
/* Send the first unacked segment. If we're short on buffers, return
* as soon as possible */
- n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
+ n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
+ n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
if (!n_bytes)
{
tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
{
tcp_bt_check_app_limited (tc);
- tcp_bt_track_tx (tc);
+ tcp_bt_track_tx (tc, n_bytes);
}
tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
available_wnd = tc->snd_wnd - offset;
burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ tcp_bt_check_app_limited (tc);
+
while (n_segs < burst_size)
{
n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
offset += n_written;
n_segs += 1;
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ tcp_bt_track_tx (tc, n_written);
+
tc->snd_nxt += n_written;
tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
}
return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
}
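+/**
+ * Bytes enqueued for transmission that have not yet been sent,
+ * i.e., tx fifo bytes beyond snd_nxt
+ */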
+static inline u32
+tcp_max_tx_deq (tcp_connection_t * tc)
+{
+ return (transport_max_tx_dequeue (&tc->connection)
+ - (tc->snd_nxt - tc->snd_una));
+}
+
#define scoreboard_rescue_rxt_valid(_sb, _tc) \
(seq_geq (_sb->rescue_rxt, _tc->snd_una) \
&& seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
while (snd_space > 0 && n_segs < burst_size)
{
- hole = scoreboard_next_rxt_hole (sb, hole, max_deq, &can_rescue,
+ hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
&snd_limited);
if (!hole)
{
tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
sb->high_rxt += n_written;
+ ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));
+
snd_space -= n_written;
n_segs += 1;
}
if (!tc->pending_dupacks)
{
- tcp_send_ack (tc);
- return 1;
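+ /* Send the ack now only if in recovery, if there's nothing left to
+ * send, or if not yet established; otherwise defer it, as it can go
+ * out with the pending data */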
+ if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
+ || tc->state != TCP_STATE_ESTABLISHED)
+ {
+ tcp_send_ack (tc);
+ return 1;
+ }
+ return 0;
}
/* If we're supposed to send dupacks but have no ooo data
 * send only one ack */
if (!vec_len (tc->snd_sacks))
{
tcp_send_ack (tc);
+ tc->pending_dupacks = 0;
return 1;
}