+ */
+static u32
+tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
+ tcp_connection_t * tc, u32 offset,
+ u32 max_deq_bytes, vlib_buffer_t ** b)
+{
+ u32 start, available_bytes;
+ int n_bytes = 0;
+
+ ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
+ ASSERT (max_deq_bytes != 0);
+
+ /*
+ * Make sure we can retransmit something
+ */
+ available_bytes = transport_max_tx_dequeue (&tc->connection);
+ ASSERT (available_bytes >= offset);
+ available_bytes -= offset;
+ if (!available_bytes)
+ return 0;
+
+ max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
+ max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
+
+ /* Start is beyond snd_congestion */
+ start = tc->snd_una + offset;
+ if (seq_geq (start, tc->snd_congestion))
+ return 0;
+
+ /* Don't overshoot snd_congestion */
+ if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
+ {
+ max_deq_bytes = tc->snd_congestion - start;
+ if (max_deq_bytes == 0)
+ return 0;
+ }
+
+ n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
+ if (!n_bytes)
+ return 0;
+
+ tc->snd_rxt_bytes += n_bytes;
+
+ if (tc->flags & TCP_CONN_RATE_SAMPLE)
+ tcp_bt_track_rxt (tc, start, start + n_bytes);
+
+ tc->bytes_retrans += n_bytes;
+ tc->segs_retrans += 1;
+ TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);
+
+ return n_bytes;
+}
+
+static void
+tcp_check_sack_reneging (tcp_connection_t * tc)
+{
+ sack_scoreboard_t *sb = &tc->sack_sb;
+ sack_scoreboard_hole_t *hole;
+
+ hole = scoreboard_first_hole (sb);
+ if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
+ return;
+
+ scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
+}
+
+/**
+ * Reset congestion control, switch cwnd to loss window and try again.
+ */
+static void
+tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
+{
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
+
+ tc->prev_ssthresh = tc->ssthresh;
+ tc->prev_cwnd = tc->cwnd;
+
+ /* If we entrered loss without fast recovery, notify cc algo of the
+ * congestion event such that it can update ssthresh and its state */
+ if (!tcp_in_fastrecovery (tc))
+ tcp_cc_congestion (tc);
+
+ /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
+ tcp_cc_loss (tc);
+
+ tc->rtt_ts = 0;
+ tc->cwnd_acc_bytes = 0;
+ tc->tr_occurences += 1;
+ tcp_recovery_on (tc);
+}
+
/**
 * Retransmission timeout handler.
 *
 * Pops when the retransmit timer for @a tc_index expires on the current
 * thread. For ESTABLISHED (and later) states it retransmits a lost FIN or
 * the first unacked segment, backs off rto, and on the first timeout of an
 * episode switches congestion control into loss state. For SYN_RCVD it
 * resends the SYN-ACK. After TCP_RTO_BOFF_MAX backoffs the connection is
 * reset and closed.
 */
void
tcp_timer_retransmit_handler (u32 tc_index)
{
  u32 thread_index = vlib_get_thread_index ();
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  vlib_main_t *vm = wrk->vm;
  tcp_connection_t *tc;
  vlib_buffer_t *b = 0;
  u32 bi, n_bytes;

  tc = tcp_connection_get (tc_index, thread_index);

  /* Note: the connection may have been closed and pool_put */
  if (PREDICT_FALSE (tc == 0 || tc->state == TCP_STATE_SYN_SENT))
    return;

  /* Timer has popped, so its stored handle is no longer valid */
  tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;

  /* Wait-close and retransmit could pop at the same time */
  if (tc->state == TCP_STATE_CLOSED)
    return;

  if (tc->state >= TCP_STATE_ESTABLISHED)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      /* Lost FIN, retransmit and return */
      if (tc->flags & TCP_CONN_FINSNT)
	{
	  tcp_send_fin (tc);
	  /* Exponential backoff, capped at TCP_RTO_MAX */
	  tc->rto_boff += 1;
	  tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
	  return;
	}

      /* Shouldn't be here. This condition is tricky because it has to take
       * into account boff > 0 due to persist timeout. */
      if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
	  || (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
	      && !tcp_flight_size (tc)))
	{
	  ASSERT (!tcp_in_recovery (tc));
	  tc->rto_boff = 0;
	  return;
	}

      /* We're not in recovery so make sure rto_boff is 0. Can be non 0 due
       * to persist timer timeout */
      if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
	{
	  tc->rto_boff = 0;
	  tcp_update_rto (tc);
	}

      /* Peer is dead or network connectivity is lost. Close connection.
       * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
       * a min rto of 0.2s we need to retry about 8 times. */
      if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
	{
	  tcp_send_reset (tc);
	  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
	  session_transport_closing_notify (&tc->connection);
	  tcp_connection_timers_reset (tc);
	  tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
	  return;
	}

      /* Check whether the peer reneged on previously SACKed data and, if
       * so, clear the scoreboard's reneged state */
      if (tcp_opts_sack_permitted (&tc->rcv_opts))
	tcp_check_sack_reneging (tc);

      /* Update send congestion to make sure that rxt has data to send */
      tc->snd_congestion = tc->snd_nxt;

      /* Send the first unacked segment. If we're short on buffers, return
       * as soon as possible */
      n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
      if (!n_bytes)
	{
	  /* Retry shortly; buffers may free up */
	  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
	  return;
	}

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
      tcp_retransmit_timer_force_update (tc);

      tc->rto_boff += 1;
      if (tc->rto_boff == 1)
	{
	  /* First timeout of this episode: move cc into loss state */
	  tcp_cc_init_rxt_timeout (tc);
	  /* Record timestamp. Eifel detection algorithm RFC3522 */
	  tc->snd_rxt_ts = tcp_tstamp (tc);
	}

      if (tcp_opts_sack_permitted (&tc->rcv_opts))
	scoreboard_init_high_rxt (&tc->sack_sb, tc->snd_una + tc->snd_mss);

      /* Let the retransmit machinery handle whatever else is outstanding */
      tcp_program_retransmit (tc);
    }
  /* Retransmit SYN-ACK */
  else if (tc->state == TCP_STATE_SYN_RCVD)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      /* Any in-progress rtt measurement is invalid after a retransmit */
      tc->rtt_ts = 0;

      /* Passive open establish timeout */
      if (tc->rto > TCP_ESTABLISH_TIME >> 1)
	{
	  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
	  tcp_connection_timers_reset (tc);
	  tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
	  return;
	}

      if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
	{
	  /* No buffers available; retry shortly */
	  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
	  return;
	}

      /* rto is grown only after TCP_RTO_SYN_RETRIES fixed-rto attempts */
      tc->rto_boff += 1;
      if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
	tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

      tcp_retransmit_timer_force_update (tc);

      b = vlib_get_buffer (vm, bi);
      tcp_init_buffer (vm, b);
      tcp_make_synack (tc, b);
      TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);

      /* Retransmit timer already updated, just enqueue to output */
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
    }
  else
    {
      ASSERT (tc->state == TCP_STATE_CLOSED);
      return;
    }
}
+
+/**
+ * SYN retransmit timer handler. Active open only.
+ */
void
tcp_timer_retransmit_syn_handler (u32 tc_index)
{
  u32 thread_index = vlib_get_thread_index ();
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  vlib_main_t *vm = wrk->vm;
  tcp_connection_t *tc;
  vlib_buffer_t *b = 0;
  u32 bi;

  tc = tcp_half_open_connection_get (tc_index);

  /* Note: the connection may have transitioned to ESTABLISHED... */
  if (PREDICT_FALSE (tc == 0 || tc->state != TCP_STATE_SYN_SENT))
    return;

  /* Timer has popped; invalidate its stored handle */
  tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;

  /* Half-open connection actually moved to established but we were
   * waiting for syn retransmit to pop to call cleanup from the right
   * thread. */
  if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
    {
      if (tcp_half_open_connection_cleanup (tc))
	TCP_DBG ("could not remove half-open connection");
      return;
    }

  TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
  /* Any in-progress rtt measurement is invalid after a retransmit */
  tc->rtt_ts = 0;

  /* Active open establish timeout. NOTE(review): this uses >= while the
   * passive-open path in tcp_timer_retransmit_handler uses > for the same
   * bound — confirm the asymmetry is intentional */
  if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
    {
      /* Give up: tell the app the connect failed and clean up */
      session_stream_connect_notify (&tc->connection, 1 /* fail */ );
      tcp_connection_cleanup (tc);
      return;
    }

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      /* No buffers available; retry shortly */
      tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
      return;
    }

  /* Try without increasing RTO a number of times. If this fails,
   * start growing RTO exponentially */
  tc->rto_boff += 1;
  if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
    tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_syn (tc, b);

  TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);

  /* This goes straight to ipx_lookup */
  tcp_push_ip_hdr (wrk, tc, b);
  tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);

  /* Rearm the SYN retransmit timer with the (possibly backed-off) rto */
  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
		    tc->rto * TCP_TO_TIMER_TICK);
}
+
+/**
+ * Got 0 snd_wnd from peer, try to do something about it.
+ *
+ */
void
tcp_timer_persist_handler (u32 index)
{
  u32 thread_index = vlib_get_thread_index ();
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  u32 bi, max_snd_bytes, available_bytes, offset;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = wrk->vm;
  tcp_connection_t *tc;
  vlib_buffer_t *b;
  int n_bytes = 0;
  u8 *data;

  tc = tcp_connection_get_if_valid (index, thread_index);
  if (!tc)
    return;

  /* Make sure timer handle is set to invalid */
  tc->timers[TCP_TIMER_PERSIST] = TCP_TIMER_HANDLE_INVALID;

  /* Problem already solved or worse: connection closed, window opened
   * past one mss, or FIN already sent */
  if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
      || (tc->flags & TCP_CONN_FINSNT))
    return;

  available_bytes = transport_max_tx_dequeue (&tc->connection);
  /* Bytes already sent but not yet acked */
  offset = tc->snd_nxt - tc->snd_una;

  /* Reprogram persist if no new bytes available to send. We may have data
   * next time */
  if (!available_bytes)
    {
      tcp_persist_timer_set (tc);
      return;
    }

  if (available_bytes <= offset)
    {
      /* Nothing unsent in the fifo; outstanding data is the retransmit
       * timer's job */
      ASSERT (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT));
      return;
    }

  /* Increment RTO backoff */
  tc->rto_boff += 1;
  tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  /*
   * Try to force the first unsent segment (or buffer)
   */
  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      /* Out of buffers; retry on next persist timeout */
      tcp_persist_timer_set (tc);
      return;
    }
  b = vlib_get_buffer (vm, bi);
  data = tcp_init_buffer (vm, b);

  tcp_validate_txf_size (tc, offset);
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
  /* Segment must fit in one buffer alongside the transport headers */
  max_snd_bytes = clib_min (tc->snd_mss,
			    tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
  n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
					max_snd_bytes);
  b->current_length = n_bytes;
  ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
			   || tc->snd_nxt == tc->snd_una_max
			   || tc->rto_boff > 1));

  if (tc->flags & TCP_CONN_RATE_SAMPLE)
    {
      tcp_bt_check_app_limited (tc);
      tcp_bt_track_tx (tc);
    }

  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
		  /* burst */ 0, /* update_snd_nxt */ 1);
  tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

  /* Just sent new data, enable retransmit */
  tcp_retransmit_timer_update (tc);
}
+
+/**
+ * Retransmit first unacked segment
+ */
+int
+tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b;
+ u32 bi, n_bytes;
+
+ TCP_EVT (TCP_EVT_CC_EVT, tc, 1);
+
+ n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
+ if (!n_bytes)
+ return -1;
+
+ bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+
+ return 0;
+}
+
+static int
+tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)
+{
+ u32 offset, n_segs = 0, n_written, bi, available_wnd;
+ vlib_main_t *vm = wrk->vm;
+ vlib_buffer_t *b = 0;
+
+ offset = tc->snd_nxt - tc->snd_una;
+ available_wnd = tc->snd_wnd - offset;
+ burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
+
+ while (n_segs < burst_size)
+ {
+ n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
+ if (!n_written)
+ goto done;
+
+ bi = vlib_get_buffer_index (vm, b);
+ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
+ offset += n_written;
+ n_segs += 1;
+
+ tc->snd_nxt += n_written;
+ tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
+ }
+
+done:
+ return n_segs;
+}
+
+/**
+ * Estimate send space using proportional rate reduction (RFC6937)
+ */
+static int
+tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
+{
+ u32 pipe, prr_out;
+ int space;
+
+ pipe = tcp_flight_size (tc);
+ prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);
+
+ if (pipe > tc->ssthresh)
+ {
+ space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
+ - prr_out;
+ }
+ else
+ {
+ int limit = tc->prr_delivered - prr_out + tc->snd_mss;
+ space = clib_min (tc->ssthresh - pipe, limit);
+ }
+ space = clib_max (space, prr_out ? 0 : tc->snd_mss);
+ return space;
+}
+
/* True if rescue_rxt lies within [snd_una, snd_congestion], i.e. a rescue
 * retransmit was already issued for the current recovery episode (see the
 * rescue branch in tcp_retransmit_sack, which sets rescue_rxt to
 * snd_congestion) */
#define scoreboard_rescue_rxt_valid(_sb, _tc) \
  (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
   && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
+
+/**
+ * Do retransmit with SACKs
+ */
static int
tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		     u32 burst_size)
{
  u32 n_written = 0, offset, max_bytes, n_segs = 0;
  sack_scoreboard_hole_t *hole;
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  sack_scoreboard_t *sb;
  u32 bi, max_deq;
  int snd_space;
  u8 snd_limited = 0, can_rescue = 0;

  ASSERT (tcp_in_cong_recovery (tc));

  /* Send space depends on the recovery phase: plain cc space in timer
   * recovery, PRR (RFC6937) space in fast recovery */
  if (tcp_in_recovery (tc))
    snd_space = tcp_available_cc_snd_space (tc);
  else
    snd_space = tcp_fastrecovery_prr_snd_space (tc);

  if (snd_space < tc->snd_mss)
    {
      /* We're cc constrained so don't accumulate tokens */
      transport_connection_tx_pacer_reset_bucket (&tc->connection,
						  vm->clib_time.last_cpu_time);
      return 0;
    }

  TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
  sb = &tc->sack_sb;
  hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);

  /* Unsent bytes still sitting in the tx fifo */
  max_deq = transport_max_tx_dequeue (&tc->connection);
  max_deq -= tc->snd_nxt - tc->snd_una;

  while (snd_space > 0 && n_segs < burst_size)
    {
      hole = scoreboard_next_rxt_hole (sb, hole, max_deq, &can_rescue,
				       &snd_limited);
      if (!hole)
	{
	  /* We are out of lost holes to retransmit so send some new data. */
	  if (max_deq)
	    {
	      u32 n_segs_new, av_window;
	      /* Clamp to the advertised window and to unsent data */
	      av_window = tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
	      snd_space = clib_min (snd_space, av_window);
	      snd_space = clib_min (max_deq, snd_space);
	      burst_size = clib_min (burst_size - n_segs,
				     snd_space / tc->snd_mss);
	      burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
	      n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
	      /* Not all unsent data fit in this burst; reschedule */
	      if (max_deq > n_segs_new * tc->snd_mss)
		tcp_program_retransmit (tc);

	      n_segs += n_segs_new;
	      goto done;
	    }

	  /* Rescue rxt only in fast recovery, when allowed and not done
	   * already for this episode */
	  if (tcp_in_recovery (tc) || !can_rescue
	      || scoreboard_rescue_rxt_valid (sb, tc))
	    break;

	  /* If rescue rxt undefined or less than snd_una then one segment of
	   * up to SMSS octets that MUST include the highest outstanding
	   * unSACKed sequence number SHOULD be returned, and RescueRxt set to
	   * RecoveryPoint. HighRxt MUST NOT be updated.
	   */
	  max_bytes = clib_min (tc->snd_mss,
				tc->snd_congestion - tc->snd_una);
	  max_bytes = clib_min (max_bytes, snd_space);
	  offset = tc->snd_congestion - tc->snd_una - max_bytes;
	  sb->rescue_rxt = tc->snd_congestion;
	  n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
						      max_bytes, &b);
	  if (!n_written)
	    goto done;

	  bi = vlib_get_buffer_index (vm, b);
	  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
	  n_segs += 1;
	  break;
	}

      /* Retransmit the hole starting at high_rxt; limit to one mss per
       * segment when the scoreboard says we're send limited */
      max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
      max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
      if (max_bytes == 0)
	break;

      offset = sb->high_rxt - tc->snd_una;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
						  &b);
      ASSERT (n_written <= snd_space);

      /* Nothing left to retransmit */
      if (n_written == 0)
	break;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      sb->high_rxt += n_written;
      snd_space -= n_written;
      n_segs += 1;
    }

  /* A hole remains; keep the retransmit machinery going */
  if (hole)
    tcp_program_retransmit (tc);

done:

  return n_segs;
}
+
+/**
+ * Fast retransmit without SACK info
+ */
+static int
+tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
+ u32 burst_size)