/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * TCP byte tracker that can generate delivery rate estimates. Based on
 * draft-cheng-iccrg-delivery-rate-estimation-00
 */
19 #include <vnet/tcp/tcp.h>
21 static tcp_bt_sample_t *
22 bt_get_sample (tcp_byte_tracker_t * bt, u32 bts_index)
24 if (pool_is_free_index (bt->samples, bts_index))
26 return pool_elt_at_index (bt->samples, bts_index);
29 static tcp_bt_sample_t *
30 bt_next_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
32 return bt_get_sample (bt, bts->next);
35 static tcp_bt_sample_t *
36 bt_prev_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
38 return bt_get_sample (bt, bts->prev);
42 bt_sample_index (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
45 return TCP_BTS_INVALID_INDEX;
46 return bts - bt->samples;
50 bt_seq_lt (u32 a, u32 b)
55 static tcp_bt_sample_t *
56 bt_alloc_sample (tcp_byte_tracker_t * bt, u32 min_seq)
60 pool_get_zero (bt->samples, bts);
61 bts->next = bts->prev = TCP_BTS_INVALID_INDEX;
62 bts->min_seq = min_seq;
63 rb_tree_add_custom (&bt->sample_lookup, bts->min_seq, bts - bt->samples,
69 bt_free_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
71 if (bts->prev != TCP_BTS_INVALID_INDEX)
73 tcp_bt_sample_t *prev = bt_prev_sample (bt, bts);
74 prev->next = bts->next;
79 if (bts->next != TCP_BTS_INVALID_INDEX)
81 tcp_bt_sample_t *next = bt_next_sample (bt, bts);
82 next->prev = bts->prev;
87 rb_tree_del_custom (&bt->sample_lookup, bts->min_seq, bt_seq_lt);
89 memset (bts, 0xfc, sizeof (*bts));
90 pool_put (bt->samples, bts);
93 static tcp_bt_sample_t *
94 bt_lookup_seq (tcp_byte_tracker_t * bt, u32 seq)
96 rb_tree_t *rt = &bt->sample_lookup;
97 rb_node_t *cur, *prev;
100 cur = rb_node (rt, rt->root);
101 if (rb_node_is_tnil (rt, cur))
104 while (seq != cur->key)
107 if (seq_lt (seq, cur->key))
108 cur = rb_node_left (rt, cur);
110 cur = rb_node_right (rt, cur);
112 if (rb_node_is_tnil (rt, cur))
114 /* Hit tnil as a left child. Find predecessor */
115 if (seq_lt (seq, prev->key))
117 cur = rb_tree_predecessor (rt, prev);
118 if (rb_node_is_tnil (rt, cur))
120 bts = bt_get_sample (bt, cur->opaque);
122 /* Hit tnil as a right child */
125 bts = bt_get_sample (bt, prev->opaque);
128 if (seq_geq (seq, bts->min_seq))
135 if (!rb_node_is_tnil (rt, cur))
136 return bt_get_sample (bt, cur->opaque);
142 bt_update_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts, u32 seq)
144 rb_tree_del_custom (&bt->sample_lookup, bts->min_seq, bt_seq_lt);
146 rb_tree_add_custom (&bt->sample_lookup, bts->min_seq,
147 bt_sample_index (bt, bts), bt_seq_lt);
150 static tcp_bt_sample_t *
151 bt_fix_overlapped (tcp_byte_tracker_t * bt, tcp_bt_sample_t * start,
154 tcp_bt_sample_t *cur, *next;
157 while ((next = bt_next_sample (bt, cur)) && seq_lt (next->min_seq, seq))
159 bt_free_sample (bt, cur);
165 bt_free_sample (bt, cur);
169 /* Overlapping current entirely */
172 bt_free_sample (bt, cur);
176 /* Overlapping head of current but not all */
177 bt_update_sample (bt, cur, seq);
182 tcp_bt_is_sane (tcp_byte_tracker_t * bt)
184 tcp_bt_sample_t *bts, *tmp;
186 if (pool_elts (bt->samples) != pool_elts (bt->sample_lookup.nodes) - 1)
189 if (bt->head == TCP_BTS_INVALID_INDEX)
191 if (bt->tail != TCP_BTS_INVALID_INDEX)
193 if (pool_elts (bt->samples) != 0)
198 bts = bt_get_sample (bt, bt->tail);
202 bts = bt_get_sample (bt, bt->head);
203 if (!bts || bts->prev != TCP_BTS_INVALID_INDEX)
208 tmp = bt_lookup_seq (bt, bts->min_seq);
213 tmp = bt_next_sample (bt, bts);
216 if (tmp->prev != bt_sample_index (bt, bts))
218 clib_warning ("next %u thinks prev is %u should be %u",
219 bts->next, tmp->prev, bt_sample_index (bt, bts));
222 if (!seq_lt (bts->min_seq, tmp->min_seq))
227 if (bt->tail != bt_sample_index (bt, bts))
229 if (bts->next != TCP_BTS_INVALID_INDEX)
237 static tcp_bt_sample_t *
238 tcp_bt_alloc_tx_sample (tcp_connection_t * tc, u32 min_seq)
240 tcp_bt_sample_t *bts;
241 bts = bt_alloc_sample (tc->bt, min_seq);
242 bts->delivered = tc->delivered;
243 bts->delivered_time = tc->delivered_time;
244 bts->tx_rate = transport_connection_tx_pacer_rate (&tc->connection);
245 bts->tx_time = tcp_time_now_us (tc->c_thread_index);
246 bts->flags |= tc->app_limited ? TCP_BTS_IS_APP_LIMITED : 0;
251 tcp_bt_check_app_limited (tcp_connection_t * tc)
253 u32 available_bytes, flight_size;
255 available_bytes = transport_max_tx_dequeue (&tc->connection);
256 flight_size = tcp_flight_size (tc);
258 /* Not enough bytes to fill the cwnd */
259 if (available_bytes + flight_size + tc->snd_mss < tc->cwnd
260 /* Bytes considered lost have been retransmitted */
261 && tc->sack_sb.lost_bytes <= tc->snd_rxt_bytes)
262 tc->app_limited = tc->delivered + flight_size ? : 1;
266 tcp_bt_track_tx (tcp_connection_t * tc)
268 tcp_byte_tracker_t *bt = tc->bt;
269 tcp_bt_sample_t *bts, *tail;
272 if (tc->snd_una == tc->snd_nxt)
273 tc->delivered_time = tcp_time_now_us (tc->c_thread_index);
275 bts = tcp_bt_alloc_tx_sample (tc, tc->snd_nxt);
276 bts_index = bt_sample_index (bt, bts);
277 tail = bt_get_sample (bt, bt->tail);
280 tail->next = bts_index;
281 bts->prev = bt->tail;
282 bt->tail = bts_index;
286 bt->tail = bt->head = bts_index;
291 tcp_bt_track_rxt (tcp_connection_t * tc, u32 start, u32 end)
293 tcp_byte_tracker_t *bt = tc->bt;
294 tcp_bt_sample_t *bts, *next, *cur, *prev, *nbts;
295 u32 bts_index, cur_index, next_index, prev_index, min_seq;
296 u8 is_end = end == tc->snd_nxt;
298 bts = bt_get_sample (bt, bt->last_ooo);
299 if (bts && bts->max_seq == start)
302 next = bt_next_sample (bt, bts);
304 bt_fix_overlapped (bt, next, end, is_end);
309 /* Find original tx sample */
310 bts = bt_lookup_seq (bt, start);
312 ASSERT (bts != 0 && seq_geq (start, bts->min_seq));
314 /* Head in the past */
315 if (seq_lt (bts->min_seq, tc->snd_una))
316 bt_update_sample (bt, bts, tc->snd_una);
319 if (bts->min_seq == start)
321 prev_index = bts->prev;
322 next = bt_fix_overlapped (bt, bts, end, is_end);
323 next_index = bt_sample_index (bt, next);
325 cur = tcp_bt_alloc_tx_sample (tc, start);
327 cur->flags |= TCP_BTS_IS_RXT;
328 cur->next = next_index;
329 cur->prev = prev_index;
331 cur_index = bt_sample_index (bt, cur);
333 if (next_index != TCP_BTS_INVALID_INDEX)
335 next = bt_get_sample (bt, next_index);
336 next->prev = cur_index;
340 bt->tail = cur_index;
343 if (prev_index != TCP_BTS_INVALID_INDEX)
345 prev = bt_get_sample (bt, prev_index);
346 prev->next = cur_index;
350 bt->head = cur_index;
353 bt->last_ooo = cur_index;
357 bts_index = bt_sample_index (bt, bts);
358 next = bt_next_sample (bt, bts);
360 next = bt_fix_overlapped (bt, next, end, is_end);
362 min_seq = next ? next->min_seq : tc->snd_nxt;
363 ASSERT (seq_lt (start, min_seq));
365 /* Have to split or tail overlap */
366 cur = tcp_bt_alloc_tx_sample (tc, start);
368 cur->flags |= TCP_BTS_IS_RXT;
369 cur->prev = bts_index;
370 cur_index = bt_sample_index (bt, cur);
372 /* Split. Allocate another sample */
373 if (seq_lt (end, min_seq))
375 nbts = tcp_bt_alloc_tx_sample (tc, end);
376 cur = bt_get_sample (bt, cur_index);
377 bts = bt_get_sample (bt, bts_index);
382 if (nbts->next != TCP_BTS_INVALID_INDEX)
384 next = bt_get_sample (bt, nbts->next);
385 next->prev = bt_sample_index (bt, nbts);
388 bt->tail = bt_sample_index (bt, nbts);
390 bts->next = nbts->prev = cur_index;
391 cur->next = bt_sample_index (bt, nbts);
393 bt->last_ooo = cur_index;
395 /* Tail completely overlapped */
398 bts = bt_get_sample (bt, bts_index);
400 if (bts->next != TCP_BTS_INVALID_INDEX)
402 next = bt_get_sample (bt, bts->next);
403 next->prev = cur_index;
406 bt->tail = cur_index;
408 cur->next = bts->next;
409 bts->next = cur_index;
411 bt->last_ooo = cur_index;
416 tcp_bt_sample_to_rate_sample (tcp_connection_t * tc, tcp_bt_sample_t * bts,
417 tcp_rate_sample_t * rs)
419 if (rs->prior_delivered && rs->prior_delivered >= bts->delivered)
422 rs->prior_delivered = bts->delivered;
423 rs->prior_time = bts->delivered_time;
424 rs->rtt_time = bts->tx_time;
425 rs->tx_rate = bts->tx_rate;
426 rs->flags = bts->flags;
430 tcp_bt_walk_samples (tcp_connection_t * tc, tcp_rate_sample_t * rs)
432 tcp_byte_tracker_t *bt = tc->bt;
433 tcp_bt_sample_t *next, *cur;
435 cur = bt_get_sample (bt, bt->head);
436 tcp_bt_sample_to_rate_sample (tc, cur, rs);
437 while ((next = bt_get_sample (bt, cur->next))
438 && seq_lt (next->min_seq, tc->snd_una))
440 bt_free_sample (bt, cur);
441 tcp_bt_sample_to_rate_sample (tc, next, rs);
445 ASSERT (seq_lt (cur->min_seq, tc->snd_una));
447 /* All samples acked */
448 if (tc->snd_una == tc->snd_nxt)
450 ASSERT (pool_elts (bt->samples) == 1);
451 bt_free_sample (bt, cur);
455 /* Current sample completely consumed */
456 if (next && next->min_seq == tc->snd_una)
458 bt_free_sample (bt, cur);
464 tcp_bt_walk_samples_ooo (tcp_connection_t * tc, tcp_rate_sample_t * rs)
466 sack_block_t *blks = tc->rcv_opts.sacks, *blk;
467 tcp_byte_tracker_t *bt = tc->bt;
468 tcp_bt_sample_t *next, *cur;
471 for (i = 0; i < vec_len (blks); i++)
475 /* Ignore blocks that are already covered by snd_una */
476 if (seq_lt (blk->end, tc->snd_una))
479 cur = bt_lookup_seq (bt, blk->start);
483 tcp_bt_sample_to_rate_sample (tc, cur, rs);
485 /* Current shouldn't be removed */
486 if (cur->min_seq != blk->start)
488 cur = bt_next_sample (bt, cur);
493 while ((next = bt_get_sample (bt, cur->next))
494 && seq_lt (next->min_seq, blk->end))
496 bt_free_sample (bt, cur);
497 tcp_bt_sample_to_rate_sample (tc, next, rs);
501 /* Current consumed entirely */
502 if (next && next->min_seq == blk->end)
503 bt_free_sample (bt, cur);
508 tcp_bt_sample_delivery_rate (tcp_connection_t * tc, tcp_rate_sample_t * rs)
512 if (PREDICT_FALSE (tc->flags & TCP_CONN_FINSNT))
515 delivered = tc->bytes_acked + tc->sack_sb.last_sacked_bytes;
516 if (!delivered || tc->bt->head == TCP_BTS_INVALID_INDEX)
519 /* Do not count bytes that were previously sacked again */
520 tc->delivered += delivered - tc->sack_sb.last_bytes_delivered;
521 tc->delivered_time = tcp_time_now_us (tc->c_thread_index);
523 if (tc->app_limited && tc->delivered > tc->app_limited)
527 tcp_bt_walk_samples (tc, rs);
529 if (tc->sack_sb.last_sacked_bytes)
530 tcp_bt_walk_samples_ooo (tc, rs);
532 rs->interval_time = tc->delivered_time - rs->prior_time;
533 rs->delivered = tc->delivered - rs->prior_delivered;
534 rs->rtt_time = tc->delivered_time - rs->rtt_time;
535 rs->acked_and_sacked = delivered;
536 rs->lost = tc->sack_sb.last_lost_bytes;
540 tcp_bt_flush_samples (tcp_connection_t * tc)
542 tcp_byte_tracker_t *bt = tc->bt;
543 tcp_bt_sample_t *bts;
544 u32 *samples = 0, *si;
546 vec_validate (samples, pool_elts (bt->samples) - 1);
549 pool_foreach (bts, bt->samples, ({
550 vec_add1 (samples, bts - bt->samples);
554 vec_foreach (si, samples)
556 bts = bt_get_sample (bt, *si);
557 bt_free_sample (bt, bts);
564 tcp_bt_cleanup (tcp_connection_t * tc)
566 tcp_byte_tracker_t *bt = tc->bt;
568 rb_tree_free_nodes (&bt->sample_lookup);
569 pool_free (bt->samples);
575 tcp_bt_init (tcp_connection_t * tc)
577 tcp_byte_tracker_t *bt;
579 bt = clib_mem_alloc (sizeof (tcp_byte_tracker_t));
580 clib_memset (bt, 0, sizeof (tcp_byte_tracker_t));
582 rb_tree_init (&bt->sample_lookup);
583 bt->head = bt->tail = TCP_BTS_INVALID_INDEX;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */