+tcp_cc_congestion_undo (tcp_connection_t * tc)
+{
+ tc->cwnd = tc->prev_cwnd;
+ tc->ssthresh = tc->prev_ssthresh;
+ tc->snd_nxt = tc->snd_una_max;
+ tc->rcv_dupacks = 0;
+ if (tcp_in_recovery (tc))
+ tcp_cc_recovery_exit (tc);
+ else if (tcp_in_fastrecovery (tc))
+ tcp_cc_fastrecovery_exit (tc);
+ ASSERT (tc->rto_boff == 0);
+ TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 5);
+}
+
+static inline u8
+tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
+{
+  /* A timeout retransmit is deemed spurious iff we are in rto recovery,
+   * this was the first retransmission (rto_boff == 1), timestamps are in
+   * use and the peer's echoed timestamp predates the retransmit time. */
+  if (!tcp_in_recovery (tc) || tc->rto_boff != 1)
+    return 0;
+  if (!tc->snd_rxt_ts || !tcp_opts_tstamp (&tc->rcv_opts))
+    return 0;
+  return timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts) ? 1 : 0;
+}
+
+static inline u8
+tcp_cc_is_spurious_fast_rxt (tcp_connection_t * tc)
+{
+  /* Heuristic: while in fast recovery, if cwnd still exceeds ssthresh by
+   * more than the 3-segment inflation applied on entry, treat the fast
+   * retransmit as spurious. */
+  u32 inflation = 3 * tc->snd_mss;
+  if (!tcp_in_fastrecovery (tc))
+    return 0;
+  return tc->cwnd > tc->ssthresh + inflation;
+}
+
+static u8
+tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
+{
+  /* Spurious if either the timeout-based or the fast-retransmit-based
+   * detection heuristic fires. */
+  if (tcp_cc_is_spurious_timeout_rxt (tc))
+    return 1;
+  return tcp_cc_is_spurious_fast_rxt (tc) ? 1 : 0;
+}
+
+static int
+tcp_cc_recover (tcp_connection_t * tc)
+{
+  /* Complete (or undo) congestion recovery. Must only be called while in
+   * some form of congestion recovery (asserted below).
+   *
+   * Returns 1 if the retransmit was judged spurious and the congestion
+   * state was rolled back, 0 if recovery was exited normally. */
+  ASSERT (tcp_in_cong_recovery (tc));
+  if (tcp_cc_is_spurious_retransmit (tc))
+    {
+      /* Undo cwnd/ssthresh changes and leave whichever recovery state
+       * we were in; tell the caller nothing more needs to be done. */
+      tcp_cc_congestion_undo (tc);
+      return 1;
+    }
+
+  /* Normal completion: leave rto recovery or fast recovery, whichever
+   * applies (the two states are mutually exclusive). */
+  if (tcp_in_recovery (tc))
+    tcp_cc_recovery_exit (tc);
+  else if (tcp_in_fastrecovery (tc))
+    tcp_cc_fastrecovery_exit (tc);
+
+  /* Post-conditions: backoff cleared, no recovery state left, and the
+   * sack scoreboard consistent after recovery. */
+  ASSERT (tc->rto_boff == 0);
+  ASSERT (!tcp_in_cong_recovery (tc));
+  ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
+  return 0;
+}
+
+static void
+tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b)
+{
+  /* Run the congestion-avoidance update for an in-order cumulative ack.
+   * NOTE(review): parameter b is unused here — presumably kept for
+   * signature symmetry with other ack handlers; confirm against callers. */
+  ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));
+
+  /* Congestion avoidance */
+  tcp_cc_rcv_ack (tc);
+
+  /* If a cumulative ack, make sure dupacks is 0 */
+  tc->rcv_dupacks = 0;
+
+  /* When dupacks hits the threshold we only enter fast retransmit if
+   * cumulative ack covers more than snd_congestion. Should snd_una
+   * wrap this test may fail under otherwise valid circumstances.
+   * Therefore, proactively update snd_congestion when wrap detected. */
+  if (PREDICT_FALSE
+      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
+       && seq_gt (tc->snd_congestion, tc->snd_una)))
+    tc->snd_congestion = tc->snd_una - 1;
+}
+
+static u8
+tcp_should_fastrecover_sack (tcp_connection_t * tc)
+{
+  /* Sack-based trigger: enter fast recovery once more than
+   * (TCP_DUPACK_THRESHOLD - 1) segments' worth of bytes are sacked. */
+  u32 threshold_bytes = (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss;
+  return tc->sack_sb.sacked_bytes > threshold_bytes;
+}
+
+static u8
+tcp_should_fastrecover (tcp_connection_t * tc)
+{
+  /* Trigger on the classic third duplicate ack, or on the sack-based
+   * byte-count heuristic. */
+  if (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
+    return 1;
+  return tcp_should_fastrecover_sack (tc);
+}
+
+void
+tcp_program_fastretransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
+{
+  /* Queue the connection on the worker's pending fast-rxt list, at most
+   * once; the pending flag makes repeated calls idempotent. */
+  if (tc->flags & TCP_CONN_FRXT_PENDING)
+    return;
+  tc->flags |= TCP_CONN_FRXT_PENDING;
+  vec_add1 (wrk->pending_fast_rxt, tc->c_c_index);
+}
+
+/**
+ * Flush the worker's pending/postponed fast retransmits as a paced burst.
+ *
+ * Drains postponed then pending connections into the ongoing list, gives
+ * each connection a pacer-limited share of one frame's worth of segments,
+ * and re-programs connections that could not send this round.
+ *
+ * Fix: tcp_connection_get() may return NULL when a connection was closed
+ * and freed after being programmed but before this flush runs; guard the
+ * dereference instead of crashing.
+ */
+void
+tcp_do_fastretransmits (tcp_worker_ctx_t * wrk)
+{
+  u32 *ongoing_fast_rxt, burst_bytes, sent_bytes, thread_index;
+  u32 max_burst_size, burst_size, n_segs = 0, n_segs_now;
+  tcp_connection_t *tc;
+  u64 last_cpu_time;
+  int i;
+
+  if (vec_len (wrk->pending_fast_rxt) == 0
+      && vec_len (wrk->postponed_fast_rxt) == 0)
+    return;
+
+  thread_index = wrk->vm->thread_index;
+  last_cpu_time = wrk->vm->clib_time.last_cpu_time;
+  ongoing_fast_rxt = wrk->ongoing_fast_rxt;
+  /* Postponed entries go first so they are not starved by new arrivals */
+  vec_append (ongoing_fast_rxt, wrk->postponed_fast_rxt);
+  vec_append (ongoing_fast_rxt, wrk->pending_fast_rxt);
+
+  _vec_len (wrk->postponed_fast_rxt) = 0;
+  _vec_len (wrk->pending_fast_rxt) = 0;
+
+  /* Split one frame's worth of segments evenly across connections */
+  max_burst_size = VLIB_FRAME_SIZE / vec_len (ongoing_fast_rxt);
+  max_burst_size = clib_max (max_burst_size, 1);
+
+  for (i = 0; i < vec_len (ongoing_fast_rxt); i++)
+    {
+      if (n_segs >= VLIB_FRAME_SIZE)
+	{
+	  /* Frame budget exhausted; retry this connection next dispatch */
+	  vec_add1 (wrk->postponed_fast_rxt, ongoing_fast_rxt[i]);
+	  continue;
+	}
+
+      tc = tcp_connection_get (ongoing_fast_rxt[i], thread_index);
+      /* Connection may have been closed and freed since it was programmed */
+      if (PREDICT_FALSE (!tc))
+	continue;
+      tc->flags &= ~TCP_CONN_FRXT_PENDING;
+
+      if (!tcp_in_fastrecovery (tc))
+	continue;
+
+      /* Pacer may further limit what this connection can send now */
+      burst_size = clib_min (max_burst_size, VLIB_FRAME_SIZE - n_segs);
+      burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
+							 last_cpu_time);
+      burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
+      if (!burst_size)
+	{
+	  /* Nothing sendable now; re-program for a later flush */
+	  tcp_program_fastretransmit (wrk, tc);
+	  continue;
+	}
+
+      n_segs_now = tcp_fast_retransmit (wrk, tc, burst_size);
+      sent_bytes = clib_min (n_segs_now * tc->snd_mss, burst_bytes);
+      transport_connection_tx_pacer_update_bytes (&tc->connection,
+						  sent_bytes);
+      n_segs += n_segs_now;
+    }
+  _vec_len (ongoing_fast_rxt) = 0;
+  wrk->ongoing_fast_rxt = ongoing_fast_rxt;
+}
+
+/**
+ * One function to rule them all ... and in the darkness bind them
+ */
+static void
+tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)