/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * TCP byte tracker that can generate delivery rate estimates. Based on
 * draft-cheng-iccrg-delivery-rate-estimation-00
 */
#include <vnet/tcp/tcp_bt.h>
#include <vnet/tcp/tcp.h>
#include <vnet/tcp/tcp_inlines.h>
23 static tcp_bt_sample_t *
24 bt_get_sample (tcp_byte_tracker_t * bt, u32 bts_index)
26 if (pool_is_free_index (bt->samples, bts_index))
28 return pool_elt_at_index (bt->samples, bts_index);
31 static tcp_bt_sample_t *
32 bt_next_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
34 return bt_get_sample (bt, bts->next);
37 static tcp_bt_sample_t *
38 bt_prev_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
40 return bt_get_sample (bt, bts->prev);
44 bt_sample_index (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
47 return TCP_BTS_INVALID_INDEX;
48 return bts - bt->samples;
52 bt_seq_lt (u32 a, u32 b)
57 static tcp_bt_sample_t *
58 bt_alloc_sample (tcp_byte_tracker_t * bt, u32 min_seq, u32 max_seq)
62 pool_get_zero (bt->samples, bts);
63 bts->next = bts->prev = TCP_BTS_INVALID_INDEX;
64 bts->min_seq = min_seq;
65 bts->max_seq = max_seq;
66 rb_tree_add_custom (&bt->sample_lookup, bts->min_seq, bts - bt->samples,
72 bt_free_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts)
74 if (bts->prev != TCP_BTS_INVALID_INDEX)
76 tcp_bt_sample_t *prev = bt_prev_sample (bt, bts);
77 prev->next = bts->next;
82 if (bts->next != TCP_BTS_INVALID_INDEX)
84 tcp_bt_sample_t *next = bt_next_sample (bt, bts);
85 next->prev = bts->prev;
90 rb_tree_del_custom (&bt->sample_lookup, bts->min_seq, bt_seq_lt);
92 memset (bts, 0xfc, sizeof (*bts));
93 pool_put (bt->samples, bts);
96 static tcp_bt_sample_t *
97 bt_split_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts, u32 seq)
99 tcp_bt_sample_t *ns, *next;
102 bts_index = bt_sample_index (bt, bts);
104 ASSERT (seq_leq (bts->min_seq, seq) && seq_lt (seq, bts->max_seq));
106 ns = bt_alloc_sample (bt, seq, bts->max_seq);
107 bts = bt_get_sample (bt, bts_index);
113 next = bt_next_sample (bt, bts);
115 next->prev = bt_sample_index (bt, ns);
117 bt->tail = bt_sample_index (bt, ns);
119 bts->next = bt_sample_index (bt, ns);
120 ns->prev = bt_sample_index (bt, bts);
125 static tcp_bt_sample_t *
126 bt_merge_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * prev,
127 tcp_bt_sample_t * cur)
129 ASSERT (prev->max_seq == cur->min_seq);
130 prev->max_seq = cur->max_seq;
131 if (bt_sample_index (bt, cur) == bt->tail)
132 bt->tail = bt_sample_index (bt, prev);
133 bt_free_sample (bt, cur);
137 static tcp_bt_sample_t *
138 bt_lookup_seq (tcp_byte_tracker_t * bt, u32 seq)
140 rb_tree_t *rt = &bt->sample_lookup;
141 rb_node_t *cur, *prev;
142 tcp_bt_sample_t *bts;
144 cur = rb_node (rt, rt->root);
145 if (rb_node_is_tnil (rt, cur))
148 while (seq != cur->key)
151 if (seq_lt (seq, cur->key))
152 cur = rb_node_left (rt, cur);
154 cur = rb_node_right (rt, cur);
156 if (rb_node_is_tnil (rt, cur))
158 /* Hit tnil as a left child. Find predecessor */
159 if (seq_lt (seq, prev->key))
161 cur = rb_tree_predecessor (rt, prev);
162 if (rb_node_is_tnil (rt, cur))
164 bts = bt_get_sample (bt, cur->opaque);
166 /* Hit tnil as a right child */
169 bts = bt_get_sample (bt, prev->opaque);
172 if (seq_geq (seq, bts->min_seq))
179 if (!rb_node_is_tnil (rt, cur))
180 return bt_get_sample (bt, cur->opaque);
186 bt_update_sample (tcp_byte_tracker_t * bt, tcp_bt_sample_t * bts, u32 seq)
188 rb_tree_del_custom (&bt->sample_lookup, bts->min_seq, bt_seq_lt);
190 rb_tree_add_custom (&bt->sample_lookup, bts->min_seq,
191 bt_sample_index (bt, bts), bt_seq_lt);
194 static tcp_bt_sample_t *
195 bt_fix_overlapped (tcp_byte_tracker_t * bt, tcp_bt_sample_t * start,
198 tcp_bt_sample_t *cur, *next;
201 while (cur && seq_leq (cur->max_seq, seq))
203 next = bt_next_sample (bt, cur);
204 bt_free_sample (bt, cur);
208 if (cur && seq_lt (cur->min_seq, seq))
209 bt_update_sample (bt, cur, seq);
/* Debug consistency checker for the byte tracker.
 * NOTE(review): this listing is truncated — the original function's
 * braces, return statements and loop structure are missing below; the
 * surviving check expressions are kept byte-identical. */
215 tcp_bt_is_sane (tcp_byte_tracker_t * bt)
217 tcp_bt_sample_t *bts, *tmp;
/* Lookup tree and sample pool must stay in sync; the rb-tree pool keeps
 * one extra sentinel (tnil) node, hence the -1. */
219 if (pool_elts (bt->samples) != pool_elts (bt->sample_lookup.nodes) - 1)
/* Empty tracker: head invalid implies tail invalid and an empty pool. */
222 if (bt->head == TCP_BTS_INVALID_INDEX)
224 if (bt->tail != TCP_BTS_INVALID_INDEX)
226 if (pool_elts (bt->samples) != 0)
/* Both list ends must resolve to live samples; head has no predecessor. */
231 bts = bt_get_sample (bt, bt->tail);
235 bts = bt_get_sample (bt, bt->head);
236 if (!bts || bts->prev != TCP_BTS_INVALID_INDEX)
/* Walk the list: each sample must be findable via its min_seq ... */
241 tmp = bt_lookup_seq (bt, bts->min_seq);
/* ... and prev/next links must agree in both directions. */
246 tmp = bt_next_sample (bt, bts);
249 if (tmp->prev != bt_sample_index (bt, bts))
251 clib_warning ("next %u thinks prev is %u should be %u",
252 bts->next, tmp->prev, bt_sample_index (bt, bts));
/* Samples must be strictly ordered by min_seq along the list. */
255 if (!seq_lt (bts->min_seq, tmp->min_seq))
/* Last sample must be the recorded tail with no successor. */
260 if (bt->tail != bt_sample_index (bt, bts))
262 if (bts->next != TCP_BTS_INVALID_INDEX)
270 static tcp_bt_sample_t *
271 tcp_bt_alloc_tx_sample (tcp_connection_t * tc, u32 min_seq, u32 max_seq)
273 tcp_bt_sample_t *bts;
274 bts = bt_alloc_sample (tc->bt, min_seq, max_seq);
275 bts->delivered = tc->delivered;
276 bts->delivered_time = tc->delivered_time;
277 bts->tx_time = tcp_time_now_us (tc->c_thread_index);
278 bts->first_tx_time = tc->first_tx_time;
279 bts->flags |= tc->app_limited ? TCP_BTS_IS_APP_LIMITED : 0;
280 bts->tx_in_flight = tcp_flight_size (tc);
281 bts->tx_lost = tc->lost;
286 tcp_bt_check_app_limited (tcp_connection_t * tc)
288 u32 available_bytes, flight_size;
290 available_bytes = transport_max_tx_dequeue (&tc->connection);
291 flight_size = tcp_flight_size (tc);
293 /* Not enough bytes to fill the cwnd */
294 if (available_bytes + flight_size + tc->snd_mss < tc->cwnd
295 /* Bytes considered lost have been retransmitted */
296 && tc->sack_sb.lost_bytes <= tc->snd_rxt_bytes)
297 tc->app_limited = tc->delivered + flight_size ? : 1;
301 tcp_bt_track_tx (tcp_connection_t * tc, u32 len)
303 tcp_byte_tracker_t *bt = tc->bt;
304 tcp_bt_sample_t *bts, *tail;
307 tail = bt_get_sample (bt, bt->tail);
308 if (tail && tail->max_seq == tc->snd_nxt
309 && !(tail->flags & TCP_BTS_IS_SACKED)
310 && tail->tx_time == tcp_time_now_us (tc->c_thread_index))
312 tail->max_seq += len;
316 if (tc->snd_una == tc->snd_nxt)
318 tc->delivered_time = tcp_time_now_us (tc->c_thread_index);
319 tc->first_tx_time = tc->delivered_time;
322 bts = tcp_bt_alloc_tx_sample (tc, tc->snd_nxt, tc->snd_nxt + len);
323 bts_index = bt_sample_index (bt, bts);
324 tail = bt_get_sample (bt, bt->tail);
327 tail->next = bts_index;
328 bts->prev = bt->tail;
329 bt->tail = bts_index;
333 bt->tail = bt->head = bts_index;
338 tcp_bt_track_rxt (tcp_connection_t * tc, u32 start, u32 end)
340 tcp_byte_tracker_t *bt = tc->bt;
341 tcp_bt_sample_t *bts, *next, *cur, *prev, *nbts;
342 u32 bts_index, cur_index, next_index, prev_index, max_seq;
343 u8 is_end = end == tc->snd_nxt;
344 tcp_bts_flags_t bts_flags;
346 /* Contiguous blocks retransmitted at the same time */
347 bts = bt_get_sample (bt, bt->last_ooo);
348 if (bts && bts->max_seq == start
349 && bts->tx_time == tcp_time_now_us (tc->c_thread_index))
352 next = bt_next_sample (bt, bts);
354 bt_fix_overlapped (bt, next, end, is_end);
359 /* Find original tx sample and cache flags in case the sample
360 * is freed or the pool moves */
361 bts = bt_lookup_seq (bt, start);
362 bts_flags = bts->flags;
364 ASSERT (bts != 0 && seq_geq (start, bts->min_seq));
366 /* Head in the past */
367 if (seq_lt (bts->min_seq, tc->snd_una))
368 bt_update_sample (bt, bts, tc->snd_una);
371 if (bts->min_seq == start)
373 prev_index = bts->prev;
374 next = bt_fix_overlapped (bt, bts, end, is_end);
375 /* bts might no longer be valid from here */
376 next_index = bt_sample_index (bt, next);
378 cur = tcp_bt_alloc_tx_sample (tc, start, end);
379 cur->flags |= TCP_BTS_IS_RXT;
380 if (bts_flags & TCP_BTS_IS_RXT)
381 cur->flags |= TCP_BTS_IS_RXT_LOST;
382 cur->next = next_index;
383 cur->prev = prev_index;
385 cur_index = bt_sample_index (bt, cur);
387 if (next_index != TCP_BTS_INVALID_INDEX)
389 next = bt_get_sample (bt, next_index);
390 next->prev = cur_index;
394 bt->tail = cur_index;
397 if (prev_index != TCP_BTS_INVALID_INDEX)
399 prev = bt_get_sample (bt, prev_index);
400 prev->next = cur_index;
404 bt->head = cur_index;
407 bt->last_ooo = cur_index;
411 bts_index = bt_sample_index (bt, bts);
412 next = bt_next_sample (bt, bts);
414 bt_fix_overlapped (bt, next, end, is_end);
416 max_seq = bts->max_seq;
417 ASSERT (seq_lt (start, max_seq));
419 /* Have to split or tail overlap */
420 cur = tcp_bt_alloc_tx_sample (tc, start, end);
421 cur->flags |= TCP_BTS_IS_RXT;
422 if (bts_flags & TCP_BTS_IS_RXT)
423 cur->flags |= TCP_BTS_IS_RXT_LOST;
424 cur->prev = bts_index;
425 cur_index = bt_sample_index (bt, cur);
427 /* Split. Allocate another sample */
428 if (seq_lt (end, max_seq))
430 nbts = tcp_bt_alloc_tx_sample (tc, end, bts->max_seq);
431 cur = bt_get_sample (bt, cur_index);
432 bts = bt_get_sample (bt, bts_index);
437 if (nbts->next != TCP_BTS_INVALID_INDEX)
439 next = bt_get_sample (bt, nbts->next);
440 next->prev = bt_sample_index (bt, nbts);
443 bt->tail = bt_sample_index (bt, nbts);
445 bts->next = nbts->prev = cur_index;
446 cur->next = bt_sample_index (bt, nbts);
448 bts->max_seq = start;
449 bt->last_ooo = cur_index;
451 /* Tail completely overlapped */
454 bts = bt_get_sample (bt, bts_index);
455 bts->max_seq = start;
457 if (bts->next != TCP_BTS_INVALID_INDEX)
459 next = bt_get_sample (bt, bts->next);
460 next->prev = cur_index;
463 bt->tail = cur_index;
465 cur->next = bts->next;
466 bts->next = cur_index;
468 bt->last_ooo = cur_index;
473 tcp_bt_sample_to_rate_sample (tcp_connection_t * tc, tcp_bt_sample_t * bts,
474 tcp_rate_sample_t * rs)
476 if (bts->flags & TCP_BTS_IS_SACKED)
479 if (rs->prior_delivered && rs->prior_delivered >= bts->delivered)
482 rs->prior_delivered = bts->delivered;
483 rs->prior_time = bts->delivered_time;
484 rs->interval_time = bts->tx_time - bts->first_tx_time;
485 rs->rtt_time = tc->delivered_time - bts->tx_time;
486 rs->flags = bts->flags;
487 rs->tx_in_flight = bts->tx_in_flight;
488 rs->tx_lost = bts->tx_lost;
489 tc->first_tx_time = bts->tx_time;
493 tcp_bt_walk_samples (tcp_connection_t * tc, tcp_rate_sample_t * rs)
495 tcp_byte_tracker_t *bt = tc->bt;
496 tcp_bt_sample_t *next, *cur;
498 cur = bt_get_sample (bt, bt->head);
499 while (cur && seq_leq (cur->max_seq, tc->snd_una))
501 next = bt_next_sample (bt, cur);
502 tcp_bt_sample_to_rate_sample (tc, cur, rs);
503 bt_free_sample (bt, cur);
507 if (cur && seq_lt (cur->min_seq, tc->snd_una))
509 bt_update_sample (bt, cur, tc->snd_una);
510 tcp_bt_sample_to_rate_sample (tc, cur, rs);
515 tcp_bt_walk_samples_ooo (tcp_connection_t * tc, tcp_rate_sample_t * rs)
517 sack_block_t *blks = tc->rcv_opts.sacks, *blk;
518 tcp_byte_tracker_t *bt = tc->bt;
519 tcp_bt_sample_t *cur, *prev, *next;
522 for (i = 0; i < vec_len (blks); i++)
526 /* Ignore blocks that are already covered by snd_una */
527 if (seq_lt (blk->end, tc->snd_una))
530 cur = bt_lookup_seq (bt, blk->start);
534 ASSERT (seq_geq (blk->start, cur->min_seq)
535 && seq_lt (blk->start, cur->max_seq));
537 /* Current should be split. Second part will be consumed */
538 if (PREDICT_FALSE (cur->min_seq != blk->start))
540 cur = bt_split_sample (bt, cur, blk->start);
541 prev = bt_prev_sample (bt, cur);
544 prev = bt_prev_sample (bt, cur);
546 while (cur && seq_leq (cur->max_seq, blk->end))
548 if (!(cur->flags & TCP_BTS_IS_SACKED))
550 tcp_bt_sample_to_rate_sample (tc, cur, rs);
551 cur->flags |= TCP_BTS_IS_SACKED;
552 if (prev && (prev->flags & TCP_BTS_IS_SACKED))
554 cur = bt_merge_sample (bt, prev, cur);
555 next = bt_next_sample (bt, cur);
559 next = bt_next_sample (bt, cur);
560 if (next && (next->flags & TCP_BTS_IS_SACKED))
562 cur = bt_merge_sample (bt, cur, next);
563 next = bt_next_sample (bt, cur);
568 next = bt_next_sample (bt, cur);
574 if (cur && seq_lt (cur->min_seq, blk->end))
576 tcp_bt_sample_to_rate_sample (tc, cur, rs);
577 prev = bt_prev_sample (bt, cur);
578 /* Extend previous to include the newly sacked bytes */
579 if (prev && (prev->flags & TCP_BTS_IS_SACKED))
581 prev->max_seq = blk->end;
582 bt_update_sample (bt, cur, blk->end);
584 /* Split sample into two. First part is consumed */
587 next = bt_split_sample (bt, cur, blk->end);
588 cur = bt_prev_sample (bt, next);
589 cur->flags |= TCP_BTS_IS_SACKED;
596 tcp_bt_sample_delivery_rate (tcp_connection_t * tc, tcp_rate_sample_t * rs)
600 if (PREDICT_FALSE (tc->flags & TCP_CONN_FINSNT))
603 tc->lost += tc->sack_sb.last_lost_bytes;
605 delivered = tc->bytes_acked + tc->sack_sb.last_sacked_bytes;
606 /* Do not count bytes that were previously sacked again */
607 delivered -= tc->sack_sb.last_bytes_delivered;
608 if (!delivered || tc->bt->head == TCP_BTS_INVALID_INDEX)
611 tc->delivered += delivered;
612 tc->delivered_time = tcp_time_now_us (tc->c_thread_index);
614 if (tc->app_limited && tc->delivered > tc->app_limited)
618 tcp_bt_walk_samples (tc, rs);
620 if (tc->sack_sb.last_sacked_bytes)
621 tcp_bt_walk_samples_ooo (tc, rs);
623 rs->interval_time = clib_max ((tc->delivered_time - rs->prior_time),
625 rs->delivered = tc->delivered - rs->prior_delivered;
626 rs->acked_and_sacked = delivered;
627 rs->last_lost = tc->sack_sb.last_lost_bytes;
628 rs->lost = tc->lost - rs->tx_lost;
632 tcp_bt_flush_samples (tcp_connection_t * tc)
634 tcp_byte_tracker_t *bt = tc->bt;
635 tcp_bt_sample_t *bts;
636 u32 *samples = 0, *si;
638 vec_validate (samples, pool_elts (bt->samples) - 1);
639 vec_reset_length (samples);
642 pool_foreach (bts, bt->samples) {
643 vec_add1 (samples, bts - bt->samples);
647 vec_foreach (si, samples)
649 bts = bt_get_sample (bt, *si);
650 bt_free_sample (bt, bts);
657 tcp_bt_cleanup (tcp_connection_t * tc)
659 tcp_byte_tracker_t *bt = tc->bt;
661 rb_tree_free_nodes (&bt->sample_lookup);
662 pool_free (bt->samples);
668 tcp_bt_init (tcp_connection_t * tc)
670 tcp_byte_tracker_t *bt;
672 bt = clib_mem_alloc (sizeof (tcp_byte_tracker_t));
673 clib_memset (bt, 0, sizeof (tcp_byte_tracker_t));
675 rb_tree_init (&bt->sample_lookup);
676 bt->head = bt->tail = TCP_BTS_INVALID_INDEX;
681 format_tcp_bt_sample (u8 * s, va_list * args)
683 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
684 tcp_bt_sample_t *bts = va_arg (*args, tcp_bt_sample_t *);
685 f64 now = tcp_time_now_us (tc->c_thread_index);
686 s = format (s, "[%u, %u] d %u dt %.3f txt %.3f ftxt %.3f flags 0x%x",
687 bts->min_seq - tc->iss, bts->max_seq - tc->iss, bts->delivered,
688 now - bts->delivered_time, now - bts->tx_time,
689 now - bts->first_tx_time, bts->flags);
694 format_tcp_bt (u8 * s, va_list * args)
696 tcp_connection_t *tc = va_arg (*args, tcp_connection_t *);
697 tcp_byte_tracker_t *bt = tc->bt;
698 tcp_bt_sample_t *bts;
700 bts = bt_get_sample (bt, bt->head);
703 s = format (s, "%U\n", format_tcp_bt_sample, tc, bts);
704 bts = bt_next_sample (bt, bts);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */