+ vec_add1 (wrk->pending_fast_rxt, tc->c_c_index);
+ tc->flags |= TCP_CONN_FRXT_PENDING;
+ }
+}
+
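+/**
+ * Flush this worker's pending and postponed fast retransmits, splitting
+ * the VLIB_FRAME_SIZE segment budget across the scheduled connections.
+ */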
+void
+tcp_do_fastretransmits (tcp_worker_ctx_t * wrk)
+{
+ u32 *ongoing_fast_rxt, burst_bytes, sent_bytes, thread_index;
+ u32 max_burst_size, burst_size, n_segs = 0, n_segs_now;
+ tcp_connection_t *tc;
+ u64 last_cpu_time;
+ int i;
+
+ if (vec_len (wrk->pending_fast_rxt) == 0
+ && vec_len (wrk->postponed_fast_rxt) == 0)
+ return;
+
+ thread_index = wrk->vm->thread_index;
+ last_cpu_time = wrk->vm->clib_time.last_cpu_time;
+ ongoing_fast_rxt = wrk->ongoing_fast_rxt;
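+ /* Drain postponed retransmits first so connections deferred on the
+ * previous dispatch are not starved by newly pending ones */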
+ vec_append (ongoing_fast_rxt, wrk->postponed_fast_rxt);
+ vec_append (ongoing_fast_rxt, wrk->pending_fast_rxt);
+
+ _vec_len (wrk->postponed_fast_rxt) = 0;
+ _vec_len (wrk->pending_fast_rxt) = 0;
+
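+ /* Split the frame budget evenly; the early return above guarantees at
+ * least one scheduled connection, so the division is safe */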
+ max_burst_size = VLIB_FRAME_SIZE / vec_len (ongoing_fast_rxt);
+ max_burst_size = clib_max (max_burst_size, 1);
+
+ for (i = 0; i < vec_len (ongoing_fast_rxt); i++)
+ {
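+ /* Out of frame budget: defer the remaining connections to the next
+ * dispatch round */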
+ if (n_segs >= VLIB_FRAME_SIZE)
+ {
+ vec_add1 (wrk->postponed_fast_rxt, ongoing_fast_rxt[i]);
+ continue;
+ }
+
+ tc = tcp_connection_get (ongoing_fast_rxt[i], thread_index);
+ /* Connection may have been cleaned up since it was scheduled */
+ if (!tc)
+ continue;
+ tc->flags &= ~TCP_CONN_FRXT_PENDING;
+
+ if (!tcp_in_fastrecovery (tc))
+ continue;
+
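+ /* Cap the burst by both the remaining frame budget and the bytes the
+ * pacer currently allows */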
+ burst_size = clib_min (max_burst_size, VLIB_FRAME_SIZE - n_segs);
+ burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+ last_cpu_time);
+ burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
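+ /* Not enough pacer credit for a single segment: reschedule and retry
+ * on a subsequent dispatch */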
+ if (!burst_size)
+ {
+ tcp_program_fastretransmit (wrk, tc);
+ continue;
+ }
+
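+ /* Retransmit, then charge the pacer for the bytes actually sent */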
+ n_segs_now = tcp_fast_retransmit (wrk, tc, burst_size);
+ sent_bytes = clib_min (n_segs_now * tc->snd_mss, burst_bytes);
+ transport_connection_tx_pacer_update_bytes (&tc->connection,
+ sent_bytes);
+ n_segs += n_segs_now;
+ }
+ _vec_len (ongoing_fast_rxt) = 0;
+ wrk->ongoing_fast_rxt = ongoing_fast_rxt;
+}
+
+/**
+ * One function to rule them all ... and in the darkness bind them
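+ *
+ * Congestion control event handler: decides whether to enter fast
+ * recovery, account for bytes that left the network, or process a
+ * partial ack.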
+ */
+static void
+tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
+{
+ u32 rxt_delivered;
+
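+ /* In SACK-based fast recovery even a dupack carries new scoreboard
+ * state, so keep the fast retransmit machinery programmed */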
+ if (tcp_in_fastrecovery (tc) && tcp_opts_sack_permitted (&tc->rcv_opts))
+ {
+ if (tc->bytes_acked)
+ goto partial_ack;
+ tcp_program_fastretransmit (tcp_get_worker (tc->c_thread_index), tc);
+ return;
+ }
+ /*
+ * Duplicate ACK. Check if we should enter fast recovery or, if
+ * already in it, account for the bytes that left the network.
+ */
+ else if (is_dack && !tcp_in_recovery (tc))
+ {
+ TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
+ ASSERT (tc->snd_una != tc->snd_una_max
+ || tc->sack_sb.last_sacked_bytes);
+
+ tc->rcv_dupacks++;
+
+ /* Pure duplicate ack. If some data got acked, it's handled below */
+ if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
+ {
+ ASSERT (tcp_in_fastrecovery (tc));
+ tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
+ return;
+ }
+ else if (tcp_should_fastrecover (tc))
+ {
+ u32 pacer_wnd;
+
+ ASSERT (!tcp_in_fastrecovery (tc));
+
+ /* Heuristic to catch potential late dupacks
+ * after fast retransmit exits */
+ if (is_dack && tc->snd_una == tc->snd_congestion
+ && timestamp_leq (tc->rcv_opts.tsecr, tc->tsecr_last_ack))
+ {
+ tc->rcv_dupacks = 0;
+ return;
+ }
+
+ tcp_cc_init_congestion (tc);
+ tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
+
+ if (tcp_opts_sack_permitted (&tc->rcv_opts))
+ {
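+ /* With SACK, start from ssthresh and let the scoreboard drive
+ * the retransmissions */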
+ tc->cwnd = tc->ssthresh;
+ scoreboard_init_high_rxt (&tc->sack_sb, tc->snd_una);
+ }
+ else
+ {
+ /* Post retransmit, update cwnd to ssthresh and account for the
+ * three segments that have left the network and should've been
+ * buffered at the receiver XXX */
+ tc->cwnd = tc->ssthresh + 3 * tc->snd_mss;
+ }
+
+ /* Constrain rate until we get a partial ack */
+ pacer_wnd = clib_max (0.1 * tc->cwnd, 2 * tc->snd_mss);
+ tcp_connection_tx_pacer_reset (tc, pacer_wnd,
+ 0 /* start bucket */ );
+ tcp_program_fastretransmit (tcp_get_worker (tc->c_thread_index),
+ tc);
+ return;
+ }
+ else if (!tc->bytes_acked || !tcp_in_cong_recovery (tc))
+ {
+ tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
+ return;
+ }
+ else
+ goto partial_ack;
+ }
+ /* Don't allow entry in fast recovery if still in recovery, for now */
+ else if (0 && is_dack && tcp_in_recovery (tc))
+ {
+ /* If one of the two conditions below holds, reset dupacks because
+ * we're probably after a timeout (RFC6582 heuristics).
+ * If the cumulative ack does not cover more than the congestion
+ * threshold, and:
+ * 1) The following doesn't hold: the congestion window is greater
+ * than SMSS bytes and the difference between highest_ack
+ * and prev_highest_ack is at most 4*SMSS bytes
+ * 2) Echoed timestamp in the last non-dup ack does not equal the
+ * stored timestamp
+ */
+ if (seq_leq (tc->snd_una, tc->snd_congestion)
+ && ((!(tc->cwnd > tc->snd_mss
+ && tc->bytes_acked <= 4 * tc->snd_mss))
+ || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))