/*
- * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
#include <vnet/tcp/tcp.h>
-void
+static void
newreno_congestion (tcp_connection_t * tc)
{
- tc->prev_ssthresh = tc->ssthresh;
tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
}
-void
+static void
+newreno_loss (tcp_connection_t * tc)
+{
+ tc->ssthresh = clib_max (tcp_flight_size (tc) / 2, 2 * tc->snd_mss);
+ tc->cwnd = tcp_loss_wnd (tc);
+}
+
+static void
newreno_recovered (tcp_connection_t * tc)
{
tc->cwnd = tc->ssthresh;
}
-void
-newreno_rcv_ack (tcp_connection_t * tc)
+static void
+newreno_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
{
if (tcp_in_slowstart (tc))
{
}
else
{
- /* Round up to 1 if needed */
- tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1);
+ /* Byte-granular accumulation replaces the classic per-ACK
+ * mss*mss/cwnd linear increase. */
+ tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
}
}
void
-newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type)
+newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
+ tcp_rate_sample_t * rs)
{
if (ack_type == TCP_CC_DUPACK)
{
- tc->cwnd += tc->snd_mss;
+ if (!tcp_opts_sack_permitted (&tc->rcv_opts))
+ tc->cwnd += tc->snd_mss;
}
else if (ack_type == TCP_CC_PARTIALACK)
{
- tc->cwnd -= tc->bytes_acked;
- if (tc->bytes_acked > tc->snd_mss)
- tc->bytes_acked += tc->snd_mss;
+ /* RFC 6582 Sec. 3.2 */
+ if (!tcp_opts_sack_permitted (&tc->rcv_opts))
+ {
+ /* Deflate the congestion window by the amount of new data
+ * acknowledged by the Cumulative Acknowledgment field.
+ * If the partial ACK acknowledges at least one SMSS of new data,
+ * then add back SMSS bytes to the congestion window. This
+ * artificially inflates the congestion window in order to reflect
+ * the additional segment that has left the network. This "partial
+ * window deflation" attempts to ensure that, when fast recovery
+ * eventually ends, approximately ssthresh amount of data will be
+ * outstanding in the network.*/
+ tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ?
+ tc->cwnd - tc->bytes_acked : tc->snd_mss;
+ if (tc->bytes_acked > tc->snd_mss)
+ tc->cwnd += tc->snd_mss;
+ }
}
}
-void
+static void
newreno_conn_init (tcp_connection_t * tc)
{
tc->ssthresh = tc->snd_wnd;
}
const static tcp_cc_algorithm_t tcp_newreno = {
+ .name = "newreno",
.congestion = newreno_congestion,
+ .loss = newreno_loss,
.recovered = newreno_recovered,
.rcv_ack = newreno_rcv_ack,
.rcv_cong_ack = newreno_rcv_cong_ack,