/* Congestion control */
u32 cwnd; /**< Congestion window */
+ u32 cwnd_acc_bytes; /**< Bytes accumulated for cwnd increment */
u32 ssthresh; /**< Slow-start threshold */
u32 prev_ssthresh; /**< ssthresh before congestion */
u32 prev_cwnd; /**< cwnd before congestion */
{ \
ELOG_TYPE_DECLARE (_e) = \
{ \
- .format = "rto_stat: rto %u srtt %u rttvar %u ", \
+ .format = "rcv_stat: rto %u srtt %u rttvar %u ", \
.format_args = "i4i4i4", \
}; \
DECLARE_ETD(_tc, _e, 3); \
ed->data[2] = _tc->rttvar; \
} \
}
+/* Rate-limited elog of sender-side CC stats: dupacks, SACKed/lost bytes,
+ * bytes in flight, and retransmitted bytes. */
+#define TCP_EVT_CC_SND_STAT_HANDLER(_tc, ...) \
+{ \
+if (_tc->c_cc_stat_tstamp + STATS_INTERVAL < tcp_time_now()) \
+{ \
+  ELOG_TYPE_DECLARE (_e) = \
+  { \
+    .format = "snd_stat: dack %u sacked %u lost %u out %u rxt %u", \
+    .format_args = "i4i4i4i4i4", \
+  }; \
+  DECLARE_ETD(_tc, _e, 5); \
+  ed->data[0] = _tc->rcv_dupacks; \
+  ed->data[1] = _tc->sack_sb.sacked_bytes; \
+  ed->data[2] = _tc->sack_sb.lost_bytes; \
+  ed->data[3] = tcp_bytes_out (_tc); \
+  /* Five fields are declared above; rxt bytes fills the fifth slot
+   * (was mistakenly written to data[3] a second time). */ \
+  ed->data[4] = _tc->snd_rxt_bytes; \
+} \
+}
#define TCP_EVT_CC_STAT_HANDLER(_tc, ...) \
{ \
{
tcp_fastrecovery_on (tc);
tc->snd_congestion = tc->snd_una_max;
+ tc->cwnd_acc_bytes = 0;
tc->cc_algo->congestion (tc);
TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
}
}
else
  {
-   /* Round up to 1 if needed */
-   tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1);
+   /* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
+   /* Congestion avoidance: grow cwnd by one MSS per cwnd of acked bytes,
+    * tracked exactly via a byte accumulator instead of the per-ack
+    * mss*mss/cwnd approximation. */
+   tc->cwnd_acc_bytes += tc->bytes_acked;
+   if (tc->cwnd_acc_bytes >= tc->cwnd)
+     {
+       u32 inc = tc->cwnd_acc_bytes / tc->cwnd;
+       /* Consume the accumulator against the OLD cwnd before growing it.
+        * Subtracting after the increase would use the enlarged cwnd,
+        * remove more than was accumulated, and underflow the u32
+        * accumulator — inflating cwnd on subsequent acks. */
+       tc->cwnd_acc_bytes -= inc * tc->cwnd;
+       tc->cwnd += inc * tc->snd_mss;
+     }
  }
}