+ if (seq_geq (tc->psh_seq, snd_nxt)
+ && seq_lt (tc->psh_seq, snd_nxt + data_len))
+ flags |= TCP_FLAG_PSH;
+ }
+ th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
+ tc->rcv_nxt, tcp_hdr_opts_len, flags,
+ advertise_wnd);
+
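+ /* For bursts, copy the options cached by the worker; otherwise write
+ * the connection's current options */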
+ if (maybe_burst)
+ {
+ clib_memcpy_fast ((u8 *) (th + 1),
+ tm->wrk_ctx[tc->c_thread_index].cached_opts,
+ tc->snd_opts_len);
+ }
+ else
+ {
+ u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
+ ASSERT (len == tc->snd_opts_len);
+ }
+
+ /*
+ * Update connection variables
+ */
+
+ if (update_snd_nxt)
+ tc->snd_nxt += data_len;
+ tc->rcv_las = tc->rcv_nxt;
+
+ tc->bytes_out += data_len;
+ tc->data_segs_out += 1;
+
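+ /* Compute the checksum once all header fields are set */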
+ th->checksum = tcp_compute_checksum (tc, b);
+
+ TCP_EVT (TCP_EVT_PKTIZE, tc);
+}
+
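+/**
+ * Total data length of a buffer, including any chained buffers
+ */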
+always_inline u32
+tcp_buffer_len (vlib_buffer_t * b)
+{
+ u32 data_len = b->current_length;
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ data_len += b->total_length_not_including_first_buffer;
+ return data_len;
+}
+
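+/**
+ * Push a tcp header onto a single buffer
+ *
+ * Tracks the tx bytes for rate sampling if enabled and advances snd_nxt
+ * past the new segment.
+ */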
+always_inline u32
+tcp_push_one_header (tcp_connection_t *tc, vlib_buffer_t *b)
+{
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ tcp_bt_track_tx (tc, tcp_buffer_len (b));
+
+ tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
+ /* update_snd_nxt */ 1);
+
+ tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
+ return 0;
+}
+
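+/**
+ * Push tcp headers onto a batch of buffers handed off by the session layer
+ */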
+u32
+tcp_session_push_header (transport_connection_t *tconn, vlib_buffer_t **bs,
+ u32 n_bufs)
+{
+ tcp_connection_t *tc = (tcp_connection_t *) tconn;
+
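+ /* Process buffers two at a time, prefetching the next pair of headers */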
+ while (n_bufs >= 4)
+ {
+ vlib_prefetch_buffer_header (bs[2], STORE);
+ vlib_prefetch_buffer_header (bs[3], STORE);
+
+ tcp_push_one_header (tc, bs[0]);
+ tcp_push_one_header (tc, bs[1]);
+
+ n_bufs -= 2;
+ bs += 2;
+ }
+ while (n_bufs)
+ {
+ if (n_bufs > 1)
+ vlib_prefetch_buffer_header (bs[1], STORE);
+
+ tcp_push_one_header (tc, bs[0]);
+
+ n_bufs -= 1;
+ bs += 1;
+ }
+
+ /* If not already timing a segment for RTT estimation, start now */
+ if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
+ {
+ tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
+ tc->rtt_seq = tc->snd_nxt;
+ }
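+ /* Arm the retransmit timer if it's not already running */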
+ if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
+ {
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ tcp_retransmit_timer_set (&wrk->timer_wheel, tc);
+ tc->rto_boff = 0;
+ }
+ return 0;
+}
+
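+/**
+ * Send an ack immediately
+ *
+ * If no buffer is available, the receive window is still updated and an
+ * allocation error is counted.
+ */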
+void
+tcp_send_ack (tcp_connection_t * tc)
+{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b;
+ u32 bi;
+
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
+ {
+ tcp_update_rcv_wnd (tc);
+ tcp_worker_stats_inc (wrk, no_buffer, 1);
+ return;
+ }
+ b = vlib_get_buffer (vm, bi);
+ tcp_init_buffer (vm, b);
+ tcp_make_ack (tc, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+}
+
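+/**
+ * Program an ack via a self custom tx event; the SNDACK flag ensures at
+ * most one event is queued at a time
+ */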
+void
+tcp_program_ack (tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ {
+ session_add_self_custom_tx_evt (&tc->connection, 1);
+ tc->flags |= TCP_CONN_SNDACK;
+ }
+}
+
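+/**
+ * Program a duplicate ack; reuses the ack event and counts pending
+ * dupacks, saturating at 255
+ */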
+void
+tcp_program_dupack (tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_SNDACK))
+ {
+ session_add_self_custom_tx_evt (&tc->connection, 1);
+ tc->flags |= TCP_CONN_SNDACK;
+ }
+ if (tc->pending_dupacks < 255)
+ tc->pending_dupacks += 1;
+}
+
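+/**
+ * Program a retransmit via a self custom tx event, guarded by the
+ * RXT_PENDING flag so only one event is queued at a time
+ */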
+void
+tcp_program_retransmit (tcp_connection_t * tc)
+{
+ if (!(tc->flags & TCP_CONN_RXT_PENDING))
+ {
+ session_add_self_custom_tx_evt (&tc->connection, 0);
+ tc->flags |= TCP_CONN_RXT_PENDING;
+ }
+}
+
+/**
+ * Send window update ack
+ *
+ * Ensures that it will be sent only once, after a zero rwnd has been
+ * advertised in a previous ack, and only if rwnd has grown beyond a
+ * configurable value.
+ */
+void
+tcp_send_window_update_ack (tcp_connection_t * tc)
+{
+ if (tcp_zero_rwnd_sent (tc))
+ {
+ tcp_update_rcv_wnd (tc);
+ if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
+ {
+ tcp_zero_rwnd_sent_off (tc);
+ tcp_program_ack (tc);
+ }
+ }
+}
+
+/**
+ * Allocate a new buffer and build a new tcp segment
+ *
+ * @param wrk tcp worker
+ * @param tc connection for which the segment will be allocated
+ * @param offset offset of the first byte in the tx fifo
+ * @param max_deq_bytes segment size
+ * @param[out] b pointer to buffer allocated
+ *
+ * @return the number of bytes in the segment or 0 if a buffer cannot be
+ * allocated or no data is available
+ */
+static int
+tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
+{
+ u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
+ vlib_main_t *vm = wrk->vm;
+ u32 bi, seg_size;
+ int n_bytes = 0;
+ u8 *data;
+
+ seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;
+
+ /*
+ * Prepare options
+ */
+ tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
+
+ /*
+ * Allocate and fill in buffer(s)
+ */
+
+ /* Easy case, segment and headers fit into a single buffer */
+ if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
+ {
+ if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
+ {
+ tcp_worker_stats_inc (wrk, no_buffer, 1);
+ return 0;
+ }
+ *b = vlib_get_buffer (vm, bi);
+ data = tcp_init_buffer (vm, *b);
+ n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
+ max_deq_bytes);
+ ASSERT (n_bytes > 0);
+ b[0]->current_length = n_bytes;
+ tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
+ /* burst */ 0, /* update_snd_nxt */ 0);
+ }
+ /* Split segment into multiple chained buffers */
+ else
+ {
+ u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
+ u16 n_peeked, len_to_deq;
+ vlib_buffer_t *chain_b, *prev_b;
+ int i;
+
+ /* Make sure we have enough buffers */
+ n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
+ vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
+ CLIB_CACHE_LINE_BYTES);
+ n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
+ if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
+ {
+ if (n_bufs)
+ vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
+ tcp_worker_stats_inc (wrk, no_buffer, 1);
+ return 0;
+ }
+
+ *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
+ data = tcp_init_buffer (vm, *b);
+ n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
+ bytes_per_buffer -
+ TRANSPORT_MAX_HDRS_LEN);
+ b[0]->current_length = n_bytes;
+ b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ b[0]->total_length_not_including_first_buffer = 0;
+ max_deq_bytes -= n_bytes;
+
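+ /* Peek the remaining bytes into chained buffers */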
+ chain_b = *b;
+ for (i = 1; i < n_bufs_per_seg; i++)
+ {
+ prev_b = chain_b;
+ len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
+ chain_bi = wrk->tx_buffers[--n_bufs];
+ chain_b = vlib_get_buffer (vm, chain_bi);
+ chain_b->current_data = 0;
+ data = vlib_buffer_get_current (chain_b);
+ n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
+ offset + n_bytes,
+ len_to_deq);
+ ASSERT (n_peeked == len_to_deq);
+ n_bytes += n_peeked;
+ chain_b->current_length = n_peeked;
+ chain_b->next_buffer = 0;
+
+ /* update previous buffer */
+ prev_b->next_buffer = chain_bi;
+ prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ max_deq_bytes -= n_peeked;
+ b[0]->total_length_not_including_first_buffer += n_peeked;
+ }
+
+ tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
+ /* burst */ 0, /* update_snd_nxt */ 0);
+
+ if (PREDICT_FALSE (n_bufs))
+ {
+ clib_warning ("not all buffers consumed");
+ vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
+ }
+ }
+
+ ASSERT (n_bytes > 0);
+ ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
+
+ return n_bytes;
+}
+
+/**
+ * Build a retransmit segment
+ *
+ * @return the number of bytes in the segment or 0 if there's nothing to
+ * retransmit
+ */
+static u32
+tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
+ tcp_connection_t * tc, u32 offset,
+ u32 max_deq_bytes, vlib_buffer_t ** b)
+{
+ u32 start, available_bytes;
+ int n_bytes = 0;
+
+ ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
+ ASSERT (max_deq_bytes != 0);
+
+ /*
+ * Make sure we can retransmit something
+ */
+ available_bytes = transport_max_tx_dequeue (&tc->connection);
+ ASSERT (available_bytes >= offset);
+ available_bytes -= offset;
+ if (!available_bytes)
+ return 0;
+
+ max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
+ max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
+
+ start = tc->snd_una + offset;
+ ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));
+
+ n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
+ if (!n_bytes)
+ return 0;
+
+ tc->snd_rxt_bytes += n_bytes;
+
+ if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
+ tcp_bt_track_rxt (tc, start, start + n_bytes);
+
+ tc->bytes_retrans += n_bytes;
+ tc->segs_retrans += 1;
+ tcp_worker_stats_inc (wrk, rxt_segs, 1);
+ TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);
+
+ return n_bytes;
+}
+
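+/**
+ * Check whether the peer reneged on previously sacked data and, if so,
+ * reset the scoreboard
+ */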
+static void
+tcp_check_sack_reneging (tcp_connection_t * tc)
+{
+ sack_scoreboard_t *sb = &tc->sack_sb;
+ sack_scoreboard_hole_t *hole;
+
+ hole = scoreboard_first_hole (sb);
+ if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
+ return;
+
+ scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
+}
+
+/**
+ * Reset congestion control, switch cwnd to loss window and try again.
+ */
+static void
+tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
+{
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
+
+ tc->prev_ssthresh = tc->ssthresh;
+ tc->prev_cwnd = tc->cwnd;
+
+ /* If we entered loss without fast recovery, notify the cc algo of the
+ * congestion event so it can update ssthresh and its state */
+ if (!tcp_in_fastrecovery (tc))
+ tcp_cc_congestion (tc);
+
+ /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
+ tcp_cc_loss (tc);
+
+ tc->rtt_ts = 0;
+ tc->cwnd_acc_bytes = 0;
+ tc->tr_occurences += 1;
+ tc->sack_sb.reorder = TCP_DUPACK_THRESHOLD;
+ tc->sack_sb.rescue_rxt = tc->snd_una - 1;
+ tcp_recovery_on (tc);
+}
+
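+/**
+ * Retransmit timeout (RTO) handler
+ *
+ * Retransmits the first unacked segment, doubles the rto and, on first
+ * occurrence, moves congestion control to the loss state. Gives up and
+ * resets the connection after TCP_RTO_BOFF_MAX attempts.
+ */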
+void
+tcp_timer_retransmit_handler (tcp_connection_t * tc)
+{
+ tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b = 0;
+ u32 bi, n_bytes;
+
+ tcp_worker_stats_inc (wrk, tr_events, 1);
+
+ /* SYN retransmits are handled by a different handler */
+ if (PREDICT_FALSE (tc->state == TCP_STATE_SYN_SENT))
+ return;
+
+ /* Wait-close and retransmit could pop at the same time */
+ if (tc->state == TCP_STATE_CLOSED)
+ return;
+
+ if (tc->state >= TCP_STATE_ESTABLISHED)
+ {
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
+
+ /* Lost FIN, retransmit and return */
+ if (tc->flags & TCP_CONN_FINSNT)
+ {
+ tcp_send_fin (tc);
+ tc->rto_boff += 1;
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+ return;
+ }
+
+ /* Nothing unacked, shouldn't be here */
+ if (tc->snd_una == tc->snd_nxt)
+ {
+ ASSERT (!tcp_in_recovery (tc));
+ tc->rto_boff = 0;
+ return;
+ }
+
+ /* We're not in recovery so make sure rto_boff is 0. It can be non-zero
+ * due to a persist timer timeout */
+ if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
+ {
+ tc->rto_boff = 0;
+ tcp_update_rto (tc);
+ }
+
+ /* Peer is dead or network connectivity is lost. Close connection.
+ * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
+ * a min rto of 0.2s we need to retry about 8 times. */
+ if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
+ {
+ tcp_send_reset (tc);
+ tcp_connection_set_state (tc, TCP_STATE_CLOSED);
+ session_transport_closing_notify (&tc->connection);
+ session_transport_closed_notify (&tc->connection);
+ tcp_connection_timers_reset (tc);
+ tcp_program_cleanup (wrk, tc);
+ tcp_worker_stats_inc (wrk, tr_abort, 1);
+ return;
+ }
+
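+ /* Sack state may be stale after a timeout, so check for reneging and
+ * update the scoreboard's lost bytes */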
+ if (tcp_opts_sack_permitted (&tc->rcv_opts))
+ {
+ tcp_check_sack_reneging (tc);
+ scoreboard_rxt_mark_lost (&tc->sack_sb, tc->snd_una, tc->snd_nxt);
+ }
+
+ /* Update send congestion to make sure that rxt has data to send */
+ tc->snd_congestion = tc->snd_nxt;
+
+ /* Send the first unacked segment. If we're short on buffers, return
+ * as soon as possible */
+ n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
+ n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
+ if (!n_bytes)
+ {
+ tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT,
+ tcp_cfg.alloc_err_timeout);
+ return;
+ }
+
+ bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+
+ tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
+ tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
+
+ tc->rto_boff += 1;
+ if (tc->rto_boff == 1)
+ {
+ tcp_cc_init_rxt_timeout (tc);
+ /* Record timestamp. Eifel detection algorithm RFC3522 */
+ tc->snd_rxt_ts = tcp_tstamp (tc);
+ }
+
+ if (tcp_opts_sack_permitted (&tc->rcv_opts))
+ scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);
+
+ tcp_program_retransmit (tc);