/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/sparse_vec.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>
static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};
/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next                  \
  _ (DROP4, "ip4-drop")                         \
  _ (DROP6, "ip6-drop")                         \
  _ (TCP4_OUTPUT, "tcp4-output")                \
  _ (TCP6_OUTPUT, "tcp6-output")
typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;

typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;

typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_SYN_SENT_N_NEXT,
} tcp_syn_sent_next_t;

typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_LISTEN_N_NEXT,
} tcp_listen_next_t;

/* Generic, state independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_STATE_N_NEXT,
} tcp_state_next_t;
#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT      \
                                        : TCP_NEXT_TCP6_OUTPUT)

#define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4              \
                                      : TCP_NEXT_DROP6)
vlib_node_registration_t tcp4_established_node;
vlib_node_registration_t tcp6_established_node;
/**
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 *      Length  Window  Test
 *      ------- ------- -------------------------------------------
 *      0       0       SEG.SEQ = RCV.NXT
 *      0       >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *      >0      0       not acceptable
 *      >0      >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                      or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately consists of checking if the segment falls within the
 * window. The one important difference compared to RFC793 is that we use
 * rcv_las, i.e., the rcv_nxt at the last ack sent, instead of rcv_nxt, since
 * that's the peer's reference when computing our receive window.
 *
 * The strict check,
 * seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las),
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * we use rcv_nxt in the right edge window test instead of rcv_las.
 */
static u8
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
{
  return (seq_geq (end_seq, tc->rcv_las)
          && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
}
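/* Worked example of the relaxed window test above (illustrative numbers,
 * not from the source): with rcv_las = 1000, rcv_nxt = 2000, rcv_wnd = 1000,
 * a retransmitted segment [900, 1500) passes, since its end (1500) is geq
 * rcv_las (1000) and its start (900) is leq rcv_nxt + rcv_wnd (3000). The
 * strict RFC793 test would have rejected it because seq 900 < rcv_las. */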
/**
 * Parse TCP header options.
 *
 * @param th TCP header
 * @param to TCP options data structure to be populated
 * @return -1 if parsing failed
 */
static int
tcp_options_parse (tcp_header_t * th, tcp_options_t * to)
{
  const u8 *data;
  u8 opt_len, opts_len, kind;
  int j;
  sack_block_t b;

  opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
  data = (const u8 *) (th + 1);

  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
                | TCP_OPTS_FLAG_SACK);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      kind = data[0];

      /* Get options length */
      if (kind == TCP_OPTION_EOL)
        break;
      else if (kind == TCP_OPTION_NOOP)
        {
          opt_len = 1;
          continue;
        }
      else
        {
          /* broken options */
          if (opts_len < 2)
            return -1;
          opt_len = data[1];

          /* weird option length */
          if (opt_len < 2 || opt_len > opts_len)
            return -1;
        }

      /* Parse options */
      switch (kind)
        {
        case TCP_OPTION_MSS:
          if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_MSS;
              to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
            }
          break;
        case TCP_OPTION_WINDOW_SCALE:
          if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_WSCALE;
              to->wscale = data[2];
              if (to->wscale > TCP_MAX_WND_SCALE)
                {
                  clib_warning ("Illegal window scaling value: %d",
                                to->wscale);
                  to->wscale = TCP_MAX_WND_SCALE;
                }
            }
          break;
        case TCP_OPTION_TIMESTAMP:
          if (opt_len == TCP_OPTION_LEN_TIMESTAMP)
            {
              to->flags |= TCP_OPTS_FLAG_TSTAMP;
              to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
              to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
            }
          break;
        case TCP_OPTION_SACK_PERMITTED:
          if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
            to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
          break;
        case TCP_OPTION_SACK_BLOCK:
          /* If SACK permitted was not advertised or this is a SYN, break */
          if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
            break;

          /* If too short or not correctly formatted, break */
          if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
            break;

          to->flags |= TCP_OPTS_FLAG_SACK;
          to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
          vec_reset_length (to->sacks);
          for (j = 0; j < to->n_sack_blocks; j++)
            {
              b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
              b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
              vec_add1 (to->sacks, b);
            }
          break;
        default:
          /* Nothing to see here */
          continue;
        }
    }
  return 0;
}
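/* For reference: a SACK option carrying one block appears on the wire as
 * kind=5, len=10, followed by two 32-bit sequence numbers. E.g. (illustrative
 * bytes, not from the source) 05 0a 00 00 03 e8 00 00 07 d0 parses to a
 * single block { start = 1000, end = 2000 }, and (10 - 2) / 8 = 1 block. */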
/**
 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have a
 * timestamp to echo and it's less than tsval_recent, drop the segment,
 * but still send an ACK in order to retain TCP's mechanism for detecting
 * and recovering from half-open connections.
 *
 * Or at least that's what the theory says. It seems that this might not work
 * very well with packet reordering and fast retransmit. XXX
 */
static int
tcp_segment_check_paws (tcp_connection_t * tc)
{
  return tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
}
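/* Note: timestamp_lt-style comparisons must be wrap-safe. A minimal sketch
 * of the serial-number arithmetic idea (illustrative; not necessarily the
 * exact vppinfra definition): */
static inline int
example_timestamp_lt (u32 a, u32 b)
{
  /* Signed difference keeps ordering correct across u32 wrap */
  return (i32) (a - b) < 0;
}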
/**
 * Update tsval recent
 */
static void
tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
{
  /*
   * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
   * of an incoming segment:
   *    SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
   * then the TSval from the segment is copied to TS.Recent;
   * otherwise, the TSval is ignored.
   */
  if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
      && seq_leq (tc->rcv_las, seq_end))
    {
      ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
      tc->tsval_recent = tc->rcv_opts.tsval;
      tc->tsval_recent_age = tcp_time_now ();
    }
}
/**
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if the segment has a wrapped sequence number (PAWS) and
 * then does the processing associated with the first four steps (ignoring
 * security and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
 */
static int
tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0,
                      vlib_buffer_t * b0, tcp_header_t * th0,
                      u32 * next0, u32 * error0)
{
  /* We could get a burst of RSTs interleaved with acks */
  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
    {
      tcp_send_reset (tc0);
      *error0 = TCP_ERROR_CONNECTION_CLOSED;
      goto drop;
    }

  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
    {
      *error0 = TCP_ERROR_SEGMENT_INVALID;
      goto drop;
    }

  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts)))
    {
      clib_warning ("options parse error");
      *error0 = TCP_ERROR_OPTIONS;
      goto drop;
    }

  if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
    {
      *error0 = TCP_ERROR_PAWS;
      if (CLIB_DEBUG > 2)
        clib_warning ("paws failed\n%U", format_tcp_connection, tc0, 2);
      TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
                   vnet_buffer (b0)->tcp.seq_end);

      /* If it just so happens that a segment updates tsval_recent for a
       * segment over 24 days old, invalidate tsval_recent. */
      if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
                        tcp_time_now ()))
        {
          /* Age isn't reset until we get a valid tsval (bsd inspired) */
          tc0->tsval_recent = 0;
          clib_warning ("paws failed - really old segment. REALLY?");
        }
      else
        {
          /* Drop after ack if not rst */
          if (!tcp_rst (th0))
            {
              tcp_make_ack (tc0, b0);
              TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
              goto error;
            }
        }
      goto drop;
    }

  /* 1st: check sequence number */
  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
                               vnet_buffer (b0)->tcp.seq_end))
    {
      *error0 = TCP_ERROR_RCV_WND;
      /* If our window is 0 and the packet is in sequence, let it pass
       * through for ack processing. It should be dropped later. */
      if (!(tc0->rcv_wnd == 0
            && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number))
        {
          /* If not RST, send dup ack */
          if (!tcp_rst (th0))
            {
              tcp_make_ack (tc0, b0);
              TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
              goto error;
            }
          goto drop;
        }
    }

  /* 2nd: check the RST bit */
  if (PREDICT_FALSE (tcp_rst (th0)))
    {
      tcp_connection_reset (tc0);
      *error0 = TCP_ERROR_RST_RCVD;
      goto drop;
    }

  /* 3rd: check security and precedence (skip) */

  /* 4th: check the SYN bit */
  if (PREDICT_FALSE (tcp_syn (th0)))
    {
      /* TODO implement RFC 5961 */
      if (tc0->state == TCP_STATE_SYN_RCVD)
        {
          tcp_make_synack (tc0, b0);
          TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
        }
      else
        {
          tcp_make_ack (tc0, b0);
          TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, tc0);
        }
      goto error;
    }

  /* If segment in window, save timestamp */
  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
                        vnet_buffer (b0)->tcp.seq_end);
  return 0;

drop:
  *next0 = tcp_next_drop (tc0->c_is_ip4);
  return -1;
error:
  *next0 = tcp_next_output (tc0->c_is_ip4);
  return -1;
}
always_inline u8
tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0)
{
  /* SND.UNA =< SEG.ACK =< SND.NXT */
  return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number)
          && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt));
}
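/* Illustrative numbers (not from the source): with snd_una = 100 and
 * snd_nxt = 600, any ack_number in [100, 600] is acceptable; 601 would ack
 * data we never sent, and 99 is older than what's already been acked. */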
/**
 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't. Instead, we rely on
 * better precision time measurements.
 *
 * TODO support us rtt resolution
 */
static void
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
{
  int err, diff;

  if (tc->srtt != 0)
    {
      err = mrtt - tc->srtt;

      /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
       * The increase should be bound */
      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
    }
  else
    {
      /* First measurement. */
      tc->srtt = mrtt;
      tc->rttvar = mrtt >> 1;
    }
}
static inline void
tcp_update_rto (tcp_connection_t * tc)
{
  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
  tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
}
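/* Worked example of the two updates above (illustrative numbers, not from
 * the source). First sample mrtt = 80: srtt = 80, rttvar = 40, so
 * rto = min (80 + 4 * 40, TCP_RTO_MAX) = 240, then clamped up to
 * TCP_RTO_MIN if smaller. Next sample mrtt = 40: err = -40, so
 * srtt = 80 + (-40 >> 3) = 75 and diff = (40 - 40) >> 2 = 0, i.e., per
 * RFC6298, srtt += err/8 and rttvar += (|err| - rttvar)/4. */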
/**
 * Update RTT estimate and RTO timer
 *
 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
 * timing. Middle boxes are known to fiddle with TCP options so we
 * should give higher priority to ACK timing.
 *
 * This should be called only if previously sent bytes have been acked.
 *
 * @return 1 if valid rtt, 0 otherwise
 */
static int
tcp_update_rtt (tcp_connection_t * tc, u32 ack)
{
  u32 mrtt = 0;

  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
   * RTT because they're ambiguous. */
  if (tcp_in_cong_recovery (tc) || tc->sack_sb.sacked_bytes)
    {
      if (tcp_in_recovery (tc))
        return 0;
      goto done;
    }

  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
    {
      mrtt = tcp_time_now () - tc->rtt_ts;
    }
  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
   * snd_una, i.e., the left side of the send window:
   * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
    {
      mrtt = tcp_time_now () - tc->rcv_opts.tsecr;
    }

  /* Ignore dubious measurements */
  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
    goto done;

  tcp_estimate_rtt (tc, mrtt);

done:

  /* Allow measuring of a new RTT */
  tc->rtt_ts = 0;

  /* If we got here something must've been ACKed so make sure boff is 0,
   * even if mrtt is not valid, since we update the rto below */
  tc->rto_boff = 0;
  tcp_update_rto (tc);

  return 0;
}
/**
 * Dequeue bytes that have been acked, and while at it update RTT estimates.
 */
static void
tcp_dequeue_acked (tcp_connection_t * tc, u32 ack)
{
  /* Dequeue the newly ACKed and SACKed bytes */
  stream_session_dequeue_drop (&tc->connection,
                               tc->bytes_acked + tc->sack_sb.snd_una_adv);

  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);

  /* Update rtt and rto */
  tcp_update_rtt (tc, ack);

  /* If everything has been acked, stop retransmit timer,
   * otherwise update. */
  tcp_retransmit_timer_update (tc);
}
/**
 * Check if duplicate ack as per RFC5681 Sec. 2
 */
static u8
tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
                   u32 prev_snd_una)
{
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
          && seq_gt (tc->snd_una_max, tc->snd_una)
          && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
          && (prev_snd_wnd == tc->snd_wnd));
}
/**
 * Checks if ack is a congestion control event.
 */
static u8
tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
                     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
{
  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
   * defined to be 'duplicate' */
  *is_dack = tc->sack_sb.last_sacked_bytes
    || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);

  return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc));
}
static u32
scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes));
  return hole - sb->holes;
}

static u32
scoreboard_hole_bytes (sack_scoreboard_hole_t * hole)
{
  return hole->end - hole->start;
}

sack_scoreboard_hole_t *
scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
{
  if (index != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, index);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->next);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->prev);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_first_hole (sack_scoreboard_t * sb)
{
  if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->head);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_last_hole (sack_scoreboard_t * sb)
{
  if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->tail);
  return 0;
}
static void
scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  sack_scoreboard_hole_t *next, *prev;

  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    {
      next = pool_elt_at_index (sb->holes, hole->next);
      next->prev = hole->prev;
    }
  else
    {
      sb->tail = hole->prev;
    }

  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    {
      prev = pool_elt_at_index (sb->holes, hole->prev);
      prev->next = hole->next;
    }
  else
    {
      sb->head = hole->next;
    }

  if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
    sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;

  /* Poison the entry */
  if (CLIB_DEBUG > 0)
    memset (hole, 0xfe, sizeof (*hole));

  pool_put (sb->holes, hole);
}
static sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
                        u32 start, u32 end)
{
  sack_scoreboard_hole_t *hole, *next, *prev;
  u32 hole_index;

  pool_get (sb->holes, hole);
  memset (hole, 0, sizeof (*hole));

  hole->start = start;
  hole->end = end;
  hole_index = scoreboard_hole_index (sb, hole);

  prev = scoreboard_get_hole (sb, prev_index);
  if (prev)
    {
      hole->prev = prev_index;
      hole->next = prev->next;

      if ((next = scoreboard_next_hole (sb, hole)))
        next->prev = hole_index;
      else
        sb->tail = hole_index;

      prev->next = hole_index;
    }
  else
    {
      sb->head = hole_index;
      hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
      hole->next = TCP_INVALID_SACK_HOLE_INDEX;
    }

  return hole;
}
static void
scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
{
  sack_scoreboard_hole_t *left, *right;
  u32 bytes = 0, blks = 0;

  sb->lost_bytes = 0;
  sb->sacked_bytes = 0;
  left = scoreboard_last_hole (sb);
  if (!left)
    return;

  if (seq_gt (sb->high_sacked, left->end))
    {
      bytes = sb->high_sacked - left->end;
      blks = 1;
      if (bytes > (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
          && left->prev == TCP_INVALID_SACK_HOLE_INDEX)
        sb->lost_bytes += scoreboard_hole_bytes (left);
    }

  right = left;
  while ((left = scoreboard_prev_hole (sb, right))
         && (bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
             && blks < TCP_DUPACK_THRESHOLD))
    {
      bytes += right->start - left->end;
      blks++;
      right = left;
    }

  /* Any remaining holes are considered lost */
  while (left)
    {
      bytes += right->start - left->end;
      sb->lost_bytes += scoreboard_hole_bytes (left);
      left->is_lost = 1;
      right = left;
      left = scoreboard_prev_hole (sb, left);
    }

  sb->sacked_bytes = bytes;
}
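/* Scoreboard layout sketch (illustrative, not from the source):
 *
 *   snd_una      hole0        hole1          high_sacked
 *      |....|======|....|======|.....|===========|
 *       lost sacked lost? sacked lost?  sacked
 *
 * sacked_bytes sums the sacked regions between holes (plus the region
 * above the last hole, up to high_sacked); a hole is marked lost once more
 * than (TCP_DUPACK_THRESHOLD - 1) * snd_mss bytes or TCP_DUPACK_THRESHOLD
 * blocks have been sacked above it, in the spirit of RFC6675 IsLost(). */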
/**
 * Figure out the next hole to retransmit
 *
 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
 */
sack_scoreboard_hole_t *
scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
                          sack_scoreboard_hole_t * start,
                          u8 have_sent_1_smss,
                          u8 * can_rescue, u8 * snd_limited)
{
  sack_scoreboard_hole_t *hole = 0;

  hole = start ? start : scoreboard_first_hole (sb);
  while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
    hole = scoreboard_next_hole (sb, hole);

  /* Nothing, return */
  if (!hole)
    {
      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
      return 0;
    }

  /* Rule (1): if higher than rxt, less than high_sacked and lost */
  if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
    {
      sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
    }
  else
    {
      /* Rule (2): output takes care of transmitting new data */
      if (!have_sent_1_smss)
        {
          hole = 0;
          sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
        }
      /* Rule (3): if hole not lost */
      else if (seq_lt (hole->start, sb->high_sacked))
        {
          *snd_limited = 0;
          sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
        }
      /* Rule (4): if hole beyond high_sacked */
      else
        {
          ASSERT (seq_geq (hole->start, sb->high_sacked));
          *snd_limited = 1;
          *can_rescue = 1;
          /* HighRxt MUST NOT be updated */
          return 0;
        }
    }

  if (hole && seq_lt (sb->high_rxt, hole->start))
    sb->high_rxt = hole->start;

  return hole;
}
void
scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 seq)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (sb);
  if (hole)
    {
      seq = seq_gt (seq, hole->start) ? seq : hole->start;
      sb->cur_rxt_hole = sb->head;
    }
  sb->high_rxt = seq;
}
void
scoreboard_init (sack_scoreboard_t * sb)
{
  sb->head = TCP_INVALID_SACK_HOLE_INDEX;
  sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
}

void
scoreboard_clear (sack_scoreboard_t * sb)
{
  sack_scoreboard_hole_t *hole;
  while ((hole = scoreboard_first_hole (sb)))
    {
      scoreboard_remove_hole (sb, hole);
    }
  ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX);
  ASSERT (pool_elts (sb->holes) == 0);
  sb->sacked_bytes = 0;
  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->snd_una_adv = 0;
  sb->high_sacked = 0;
  sb->high_rxt = 0;
  sb->lost_bytes = 0;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
}
/**
 * Test that scoreboard is sane after recovery
 *
 * Returns 1 if scoreboard is empty or if the first hole is beyond
 * snd_una.
 */
static u8
tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (&tc->sack_sb);
  return (!hole || (seq_geq (hole->start, tc->snd_una)
                    && seq_lt (hole->end, tc->snd_una_max)));
}
void
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
{
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_block_t *blk, tmp;
  sack_scoreboard_hole_t *hole, *next_hole, *last_hole;
  u32 blk_index = 0, old_sacked_bytes, hole_index;
  int i, j;

  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->snd_una_adv = 0;

  if (!tcp_opts_sack (&tc->rcv_opts)
      && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    return;

  old_sacked_bytes = sb->sacked_bytes;

  /* Remove invalid blocks */
  blk = tc->rcv_opts.sacks;
  while (blk < vec_end (tc->rcv_opts.sacks))
    {
      if (seq_lt (blk->start, blk->end)
          && seq_gt (blk->start, tc->snd_una)
          && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_una_max))
        {
          blk++;
          continue;
        }
      vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
    }

  /* Add block for cumulative ack */
  if (seq_gt (ack, tc->snd_una))
    {
      tmp.start = tc->snd_una;
      tmp.end = ack;
      vec_add1 (tc->rcv_opts.sacks, tmp);
    }

  if (vec_len (tc->rcv_opts.sacks) == 0)
    return;

  tcp_scoreboard_trace_add (tc, ack);

  /* Make sure blocks are ordered */
  for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
    for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
      if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
        {
          tmp = tc->rcv_opts.sacks[i];
          tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
          tc->rcv_opts.sacks[j] = tmp;
        }

  if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    {
      /* If no holes, insert the first that covers all outstanding bytes */
      last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
                                          tc->snd_una, tc->snd_una_max);
      sb->tail = scoreboard_hole_index (sb, last_hole);
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      sb->high_sacked = tmp.end;
    }
  else
    {
      /* If we have holes but snd_una_max is beyond the last hole, update
       * last hole end */
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      last_hole = scoreboard_last_hole (sb);
      if (seq_gt (tc->snd_una_max, last_hole->end))
        {
          if (seq_geq (last_hole->start, sb->high_sacked))
            {
              last_hole->end = tc->snd_una_max;
            }
          /* New hole after high sacked block */
          else if (seq_lt (sb->high_sacked, tc->snd_una_max))
            {
              scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
                                      tc->snd_una_max);
            }
        }
      /* Keep track of max byte sacked for when the last hole
       * is acked */
      if (seq_gt (tmp.end, sb->high_sacked))
        sb->high_sacked = tmp.end;
    }

  /* Walk the holes with the SACK blocks */
  hole = pool_elt_at_index (sb->holes, sb->head);
  while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
    {
      blk = &tc->rcv_opts.sacks[blk_index];
      if (seq_leq (blk->start, hole->start))
        {
          /* Block covers hole. Remove hole */
          if (seq_geq (blk->end, hole->end))
            {
              next_hole = scoreboard_next_hole (sb, hole);

              /* Byte accounting: snd_una needs to be advanced */
              if (blk->end == ack)
                {
                  if (next_hole)
                    {
                      if (seq_lt (ack, next_hole->start))
                        sb->snd_una_adv = next_hole->start - ack;
                      sb->last_bytes_delivered +=
                        next_hole->start - hole->end;
                    }
                  else
                    {
                      ASSERT (seq_geq (sb->high_sacked, ack));
                      sb->snd_una_adv = sb->high_sacked - ack;
                      sb->last_bytes_delivered += sb->high_sacked - hole->end;
                    }
                }

              scoreboard_remove_hole (sb, hole);
              hole = next_hole;
            }
          /* Partial 'head' overlap */
          else
            {
              if (seq_gt (blk->end, hole->start))
                {
                  hole->start = blk->end;
                }
              blk_index++;
            }
        }
      else
        {
          /* Hole must be split */
          if (seq_lt (blk->end, hole->end))
            {
              hole_index = scoreboard_hole_index (sb, hole);
              next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
                                                  hole->end);

              /* Pool might've moved */
              hole = scoreboard_get_hole (sb, hole_index);
              hole->end = blk->start;
              blk_index++;
              ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
            }
          else if (seq_lt (blk->start, hole->end))
            {
              hole->end = blk->start;
            }
          hole = scoreboard_next_hole (sb, hole);
        }
    }

  if (pool_elts (sb->holes) == 1)
    {
      hole = scoreboard_first_hole (sb);
      if (hole->start == ack + sb->snd_una_adv
          && hole->end == tc->snd_una_max)
        scoreboard_remove_hole (sb, hole);
    }

  scoreboard_update_bytes (tc, sb);
  sb->last_sacked_bytes = sb->sacked_bytes
    - (old_sacked_bytes - sb->last_bytes_delivered);
  ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
  ASSERT (sb->sacked_bytes == 0
          || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack));
  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max
          - seq_max (tc->snd_una, ack));
  ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
          || sb->holes[sb->head].start == ack + sb->snd_una_adv);
  TCP_EVT_DBG (TCP_EVT_CC_SCOREBOARD, tc);
}
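/* Example walk (illustrative numbers, not from the source): with
 * snd_una = 1000 and snd_una_max = 4000, the initial hole is [1000, 4000).
 * A SACK block [2000, 3000) splits it into [1000, 2000) and [3000, 4000)
 * and accounts 1000 sacked bytes; a later cumulative ack of 2000 then
 * removes the first hole without counting those bytes again. */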
/**
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If successful, and new window is 'effectively' 0, activate persist
 * timer.
 */
static void
tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
{
  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
   * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
    {
      tc->snd_wnd = snd_wnd;
      tc->snd_wl1 = seq;
      tc->snd_wl2 = ack;
      TCP_EVT_DBG (TCP_EVT_SND_WND, tc);

      if (tc->snd_wnd < tc->snd_mss)
        {
          /* Set persist timer if not set and we just got 0 wnd */
          if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
              && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
            tcp_persist_timer_set (tc);
        }
      else
        {
          tcp_persist_timer_reset (tc);
          if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
            {
              tc->rto_boff = 0;
              tcp_update_rto (tc);
            }
        }
    }
}
/**
 * Init loss recovery/fast recovery.
 *
 * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
 * updated in @ref tcp_cc_handle_event after fast retransmit
 */
void
tcp_cc_init_congestion (tcp_connection_t * tc)
{
  tcp_fastrecovery_on (tc);
  tc->snd_congestion = tc->snd_una_max;
  tc->cwnd_acc_bytes = 0;
  tc->cc_algo->congestion (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
}
static void
tcp_cc_recovery_exit (tcp_connection_t * tc)
{
  tc->rto_boff = 0;
  tcp_update_rto (tc);
  tc->snd_rxt_ts = 0;
  tc->snd_nxt = tc->snd_una_max;
  tcp_recovery_off (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}

void
tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
{
  tc->cc_algo->recovered (tc);
  tc->snd_rxt_bytes = 0;
  tc->rcv_dupacks = 0;
  tc->snd_nxt = tc->snd_una_max;
  tcp_fastrecovery_off (tc);
  tcp_fastrecovery_1_smss_off (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}
static void
tcp_cc_congestion_undo (tcp_connection_t * tc)
{
  tc->cwnd = tc->prev_cwnd;
  tc->ssthresh = tc->prev_ssthresh;
  tc->snd_nxt = tc->snd_una_max;
  tc->rcv_dupacks = 0;
  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  ASSERT (tc->rto_boff == 0);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 5);
  /* TODO extend for fastrecovery */
}

static u8
tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
{
  return (tcp_in_recovery (tc) && tc->rto_boff == 1
          && tc->snd_rxt_ts
          && tcp_opts_tstamp (&tc->rcv_opts)
          && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
}
static int
tcp_cc_recover (tcp_connection_t * tc)
{
  ASSERT (tcp_in_cong_recovery (tc));
  if (tcp_cc_is_spurious_retransmit (tc))
    {
      tcp_cc_congestion_undo (tc);
      return 1;
    }

  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  else if (tcp_in_fastrecovery (tc))
    tcp_cc_fastrecovery_exit (tc);

  ASSERT (tc->rto_boff == 0);
  ASSERT (!tcp_in_cong_recovery (tc));
  ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
  return 0;
}
static void
tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b)
{
  ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));

  /* Congestion avoidance */
  tc->cc_algo->rcv_ack (tc);
  tc->tsecr_last_ack = tc->rcv_opts.tsecr;

  /* If a cumulative ack, make sure dupacks is 0 */
  tc->rcv_dupacks = 0;

  /* When dupacks hits the threshold we only enter fast retransmit if
   * cumulative ack covers more than snd_congestion. Should snd_una
   * wrap, this test may fail under otherwise valid circumstances.
   * Therefore, proactively update snd_congestion when wrap detected. */
  if (PREDICT_FALSE
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;
}
static u8
tcp_should_fastrecover_sack (tcp_connection_t * tc)
{
  return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
}

static u8
tcp_should_fastrecover (tcp_connection_t * tc)
{
  return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
          || tcp_should_fastrecover_sack (tc));
}
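/* E.g., with TCP_DUPACK_THRESHOLD = 3 and snd_mss = 1460 (illustrative
 * values), fast recovery triggers on the third duplicate ack, or as soon as
 * more than 2 * 1460 = 2920 bytes have been selectively acked, per the
 * RFC6675 extension of the RFC5681 trigger. */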
/**
 * One function to rule them all ... and in the darkness bind them
 */
static void
tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
{
  u32 rxt_delivered;

  if (tcp_in_fastrecovery (tc) && tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      if (tc->bytes_acked)
        goto partial_ack;
      tcp_fast_retransmit (tc);
      return;
    }
  /*
   * Duplicate ACK. Check if we should enter fast recovery, or if already in
   * it, account for the bytes that left the network.
   */
  else if (is_dack && !tcp_in_recovery (tc))
    {
      TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
      ASSERT (tc->snd_una != tc->snd_una_max
              || tc->sack_sb.last_sacked_bytes);

      tc->rcv_dupacks++;

      /* Pure duplicate ack. If some data got acked, it's handled below */
      if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
        {
          ASSERT (tcp_in_fastrecovery (tc));
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
          return;
        }
      else if (tcp_should_fastrecover (tc))
        {
          ASSERT (!tcp_in_fastrecovery (tc));

          /* If the cumulative ack doesn't cover more than snd_congestion
           * and either of the two conditions below holds, reset dupacks
           * because we're probably after a timeout (RFC6582 heuristics):
           * 1) The following doesn't hold: the congestion window is
           *    greater than SMSS bytes and the difference between
           *    highest_ack and prev_highest_ack is at most 4*SMSS bytes
           * 2) The echoed timestamp in the last non-dup ack does not equal
           *    the stored timestamp
           */
          if (seq_leq (tc->snd_una, tc->snd_congestion)
              && ((!(tc->cwnd > tc->snd_mss
                     && tc->bytes_acked <= 4 * tc->snd_mss))
                  || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
            {
              tc->rcv_dupacks = 0;
              return;
            }

          tcp_cc_init_congestion (tc);
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);

          /* The first segment MUST be retransmitted */
          tcp_retransmit_first_unacked (tc);

          /* Post retransmit update cwnd to ssthresh and account for the
           * three segments that have left the network and should've been
           * buffered at the receiver XXX */
          tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss;
          ASSERT (tc->cwnd >= tc->snd_mss);

          /* If cwnd allows, send more data */
          if (tcp_opts_sack_permitted (&tc->rcv_opts))
            {
              scoreboard_init_high_rxt (&tc->sack_sb,
                                        tc->snd_una + tc->snd_mss);
              tcp_fast_retransmit_sack (tc);
            }
          else
            {
              tcp_fast_retransmit_no_sack (tc);
            }
          return;
        }
      else if (!tc->bytes_acked
               || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
        {
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
          return;
        }
      else
        goto partial_ack;
    }

  if (!tc->bytes_acked)
    return;

partial_ack:
  TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);

  /*
   * Legitimate ACK. 1) See if we can exit recovery
   */
  /* XXX limit this only to first partial ack? */
  if (seq_lt (tc->snd_una, tc->snd_congestion))
    tcp_retransmit_timer_force_update (tc);
  else
    tcp_retransmit_timer_update (tc);

  if (seq_geq (tc->snd_una, tc->snd_congestion))
    {
      /* If spurious return, we've already updated everything */
      if (tcp_cc_recover (tc))
        {
          tc->tsecr_last_ack = tc->rcv_opts.tsecr;
          return;
        }

      tc->snd_nxt = tc->snd_una_max;

      /* Treat as congestion avoidance ack */
      tc->cc_algo->rcv_ack (tc);
      tc->tsecr_last_ack = tc->rcv_opts.tsecr;
      return;
    }

  /*
   * Legitimate ACK. 2) If PARTIAL ACK try to retransmit
   */

  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
   * reset dupacks to 0. Also needed if in congestion recovery */
  tc->rcv_dupacks = 0;

  /* Post RTO timeout don't try anything fancy */
  if (tcp_in_recovery (tc))
    {
      tc->cc_algo->rcv_ack (tc);
      tc->tsecr_last_ack = tc->rcv_opts.tsecr;
      transport_add_tx_event (&tc->connection);
      return;
    }

  /* Remove retransmitted bytes that have been delivered */
  ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv
          >= tc->sack_sb.last_bytes_delivered
          || (tc->flags & TCP_CONN_FINSNT));

  if (seq_lt (tc->snd_una, tc->sack_sb.high_rxt))
    {
      /* If we have sacks and we haven't gotten an ack beyond high_rxt,
       * remove sacked bytes delivered */
      rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv
        - tc->sack_sb.last_bytes_delivered;
      ASSERT (tc->snd_rxt_bytes >= rxt_delivered);
      tc->snd_rxt_bytes -= rxt_delivered;
    }
  else
    {
      /* Either all retransmitted holes have been acked, or we're
       * "in the blind" and retransmitting segment by segment */
      tc->snd_rxt_bytes = 0;
    }

  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);

  /*
   * Since this was a partial ack, try to retransmit some more data
   */
  tcp_fast_retransmit (tc);
}
/**
 * Process incoming ACK
 */
static int
tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b,
             tcp_header_t * th, u32 * next, u32 * error)
{
  u32 prev_snd_wnd, prev_snd_una;
  u8 is_dack;

  TCP_EVT_DBG (TCP_EVT_CC_STAT, tc);

  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
    {
      /* When we entered recovery, we reset snd_nxt to snd_una. Seems peer
       * still has the data so accept the ack */
      if (tcp_in_recovery (tc)
          && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_congestion))
        {
          tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
          if (seq_gt (tc->snd_nxt, tc->snd_una_max))
            tc->snd_una_max = tc->snd_nxt;
          goto process_ack;
        }

      /* If we have outstanding data and this is within the window, accept it,
       * probably retransmit has timed out. Otherwise ACK segment and then
       * drop it */
      if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
        {
          tcp_make_ack (tc, b);
          *next = tcp_next_output (tc->c_is_ip4);
          *error = TCP_ERROR_ACK_INVALID;
          TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
                       vnet_buffer (b)->tcp.ack_number);
          return -1;
        }

      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2,
                   vnet_buffer (b)->tcp.ack_number);

      tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
      *error = TCP_ERROR_ACK_FUTURE;
    }

  /* If old ACK, probably it's an old dupack */
  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
    {
      *error = TCP_ERROR_ACK_OLD;
      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
                   vnet_buffer (b)->tcp.ack_number);
      if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
        tcp_cc_handle_event (tc, 1);
      /* Don't drop yet */
      return 0;
    }

  /*
   * Looks okay, process feedback
   */
process_ack:
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);

  prev_snd_wnd = tc->snd_wnd;
  prev_snd_una = tc->snd_una;
  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
                      vnet_buffer (b)->tcp.ack_number,
                      clib_net_to_host_u16 (th->window) << tc->snd_wscale);
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
  tcp_validate_txf_size (tc, tc->bytes_acked);

  if (tc->bytes_acked)
    tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number);

  TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);

  /*
   * Check if we have congestion event
   */

  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
    {
      tcp_cc_handle_event (tc, is_dack);
      if (!tcp_in_cong_recovery (tc))
        return 0;
      *error = TCP_ERROR_ACK_DUP;
      if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
        return 0;
      return -1;
    }

  /*
   * Update congestion control (slow start/congestion avoidance)
   */
  tcp_cc_update (tc, b);
  *error = TCP_ERROR_ACK_OK;
  return 0;
}
static u8
tcp_sack_vector_is_sane (sack_block_t * sacks)
{
  int i;
  for (i = 1; i < vec_len (sacks); i++)
    {
      if (sacks[i - 1].end == sacks[i].start)
        return 0;
    }
  return 1;
}
/**
 * Build SACK list as per RFC2018.
 *
 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the ones most recently reported in SACK
 * blocks.
 *
 * @param tc TCP connection for which the SACK list is updated
 * @param start Start sequence number of the newest SACK block
 * @param end End sequence of the newest SACK block
 */
void
tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
{
  sack_block_t *new_list = 0, *block = 0;
  int i;

  /* If the first segment is ooo add it to the list. Last write might've moved
   * rcv_nxt over the first segment. */
  if (seq_lt (tc->rcv_nxt, start))
    {
      vec_add2 (new_list, block, 1);
      block->start = start;
      block->end = end;
    }

  /* Find the blocks still worth keeping. */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    {
      /* Discard if rcv_nxt advanced beyond current block */
      if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
        continue;

      /* Merge or drop if segment overlapped by the new segment */
      if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
                    && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
        {
          if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
            new_list[0].start = tc->snd_sacks[i].start;
          if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
            new_list[0].end = tc->snd_sacks[i].end;
          continue;
        }

      /* Save to new SACK list if we have space. */
      if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
        {
          vec_add1 (new_list, tc->snd_sacks[i]);
        }
      else
        {
          clib_warning ("sack discarded");
        }
    }

  ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);

  /* Replace old vector with new one */
  vec_free (tc->snd_sacks);
  tc->snd_sacks = new_list;

  /* Segments should not 'touch' */
  ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
}
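/* Example per RFC2018 (illustrative numbers, not from the source): with
 * rcv_nxt = 1000, receiving ooo segment [3000, 3500) makes it the first
 * (most recent) block; an older reported block [2000, 2500) is kept after
 * it, while a block [500, 900) is discarded because rcv_nxt has already
 * advanced past it. */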
u32
tcp_sack_list_bytes (tcp_connection_t * tc)
{
  u32 bytes = 0, i;
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    bytes += tc->snd_sacks[i].end - tc->snd_sacks[i].start;
  return bytes;
}
/** Enqueue data for delivery to application */
static int
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
                          u16 data_len)
{
  int written, error = TCP_ERROR_ENQUEUED;

  ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));

  written = session_enqueue_stream_connection (&tc->connection, b, 0,
                                               1 /* queue event */ , 1);

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written);

  /* Update rcv_nxt */
  if (PREDICT_TRUE (written == data_len))
    {
      tc->rcv_nxt += written;
    }
  /* If more data written than expected, account for out-of-order bytes. */
  else if (written > data_len)
    {
      tc->rcv_nxt += written;

      /* Send ACK confirming the update */
      tc->flags |= TCP_CONN_SNDACK;
      TCP_EVT_DBG (TCP_EVT_CC_INPUT, tc, data_len, written);
    }
  else if (written > 0)
    {
      /* We've written something but FIFO is probably full now */
      tc->rcv_nxt += written;

      /* Depending on how fast the app is, all remaining buffers in burst will
       * not be enqueued. Inform peer */
      tc->flags |= TCP_CONN_SNDACK;

      error = TCP_ERROR_PARTIALLY_ENQUEUED;
    }
  else
    {
      tc->flags |= TCP_CONN_SNDACK;
      return TCP_ERROR_FIFO_FULL;
    }

  /* Update SACK list if need be */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      /* Remove SACK blocks that have been delivered */
      tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
    }

  return error;
}
/** Enqueue out-of-order data */
static int
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
                         u16 data_len)
{
  stream_session_t *s0;
  int rv, offset;

  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));

  /* Enqueue out-of-order data with relative offset */
  rv = session_enqueue_stream_connection (&tc->connection, b,
                                          vnet_buffer (b)->tcp.seq_number -
                                          tc->rcv_nxt, 0 /* queue event */ ,
                                          0);

  /* Nothing written */
  if (rv)
    {
      TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0);
      return TCP_ERROR_FIFO_FULL;
    }

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);

  /* Update SACK list if in use */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      ooo_segment_t *newest;
      u32 start, end;

      s0 = session_get (tc->c_s_index, tc->c_thread_index);

      /* Get the newest segment from the fifo */
      newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo);
      if (newest)
        {
          offset = ooo_segment_offset (s0->server_rx_fifo, newest);
          ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
          start = tc->rcv_nxt + offset;
          end = start + ooo_segment_length (s0->server_rx_fifo, newest);
          tcp_update_sack_list (tc, start, end);
          svm_fifo_newest_ooo_segment_reset (s0->server_rx_fifo);
          TCP_EVT_DBG (TCP_EVT_CC_SACKS, tc);
        }
    }

  return TCP_ERROR_ENQUEUED_OOO;
}
/**
 * Check if ACK could be delayed. If the ack can be delayed, return true for
 * a full frame. If we're always acking, return 0.
 */
always_inline int
tcp_can_delack (tcp_connection_t * tc)
{
  /* Send ack if ... */
  if (TCP_ALWAYS_ACK
      /* just sent a rcv wnd 0 */
      || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0
      /* constrained to send ack */
      || (tc->flags & TCP_CONN_SNDACK) != 0
      /* we're almost out of tx wnd */
      || tcp_available_cc_snd_space (tc) < 4 * tc->snd_mss)
    return 0;

  return 1;
}
static int
tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
{
  u32 discard, first = b->current_length;
  vlib_main_t *vm = vlib_get_main ();

  /* Handle multi-buffer segments */
  if (n_bytes_to_drop > b->current_length)
    {
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        return -1;
      do
        {
          discard = clib_min (n_bytes_to_drop, b->current_length);
          vlib_buffer_advance (b, discard);
          b = vlib_get_buffer (vm, b->next_buffer);
          n_bytes_to_drop -= discard;
        }
      while (n_bytes_to_drop);
      if (n_bytes_to_drop > first)
        b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
    }
  else
    vlib_buffer_advance (b, n_bytes_to_drop);
  vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
  return 0;
}
/**
 * Receive buffer for connection and handle acks
 *
 * It handles both in-order and out-of-order data.
 */
static int
tcp_segment_rcv (tcp_connection_t * tc, vlib_buffer_t * b, u32 * next0)
{
  u32 error, n_bytes_to_drop, n_data_bytes;

  vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
  n_data_bytes = vnet_buffer (b)->tcp.data_len;
  ASSERT (n_data_bytes);

  /* Handle out-of-order data */
  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
    {
      /* Old sequence numbers allowed through because they overlapped
       * the rx window */
      if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
        {
          /* Completely in the past (possible retransmit). Ack
           * retransmissions since we may not have any data to send */
          if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
            {
              tcp_make_ack (tc, b);
              error = TCP_ERROR_SEGMENT_OLD;
              *next0 = tcp_next_output (tc->c_is_ip4);
              goto done;
            }

          /* Chop off the bytes in the past and see if what is left
           * can be enqueued in order */
          n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
          n_data_bytes -= n_bytes_to_drop;
          vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
          if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
            {
              error = TCP_ERROR_SEGMENT_OLD;
              *next0 = tcp_next_drop (tc->c_is_ip4);
              goto done;
            }
          goto in_order;
        }

      /* RFC2581: Enqueue and send DUPACK for fast retransmit */
      error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
      *next0 = tcp_next_output (tc->c_is_ip4);
      tcp_make_ack (tc, b);
      vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK;
      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
      goto done;
    }

in_order:

  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
   * segments can be enqueued after fifo tail offset changes. */
  error = tcp_session_enqueue_data (tc, b, n_data_bytes);
  if (tcp_can_delack (tc))
    {
      *next0 = tcp_next_drop (tc->c_is_ip4);
      if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
        tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME);
      goto done;
    }

  *next0 = tcp_next_output (tc->c_is_ip4);
  tcp_make_ack (tc, b);

done:
  return error;
}
typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_rx_trace_t;
static u8 *
format_tcp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "%U\n%U%U",
              format_tcp_header, &t->tcp_header, 128,
              format_white_space, indent,
              format_tcp_connection, &t->tcp_connection, 1);

  return s;
}

static u8 *
format_tcp_rx_trace_short (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);

  s = format (s, "%d -> %d (%U)",
              clib_net_to_host_u16 (t->tcp_header.dst_port),
              clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
              t->tcp_connection.state);

  return s;
}
static void
tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
                       tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
{
  if (tc0)
    {
      clib_memcpy (&t0->tcp_connection, tc0, sizeof (t0->tcp_connection));
    }
  else
    {
      th0 = tcp_buffer_hdr (b0);
    }
  clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
}
static void
tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame, u8 is_ip4)
{
  u32 *from, n_left;

  n_left = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left >= 1)
    {
      tcp_connection_t *tc0;
      tcp_rx_trace_t *t0;
      tcp_header_t *th0;
      vlib_buffer_t *b0;
      u32 bi0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    vm->thread_index);
          th0 = tcp_buffer_hdr (b0);
          tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
        }

      from += 1;
      n_left -= 1;
    }
}
static void
tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
                        u8 is_ip4, u32 evt, u32 val)
{
  if (is_ip4)
    vlib_node_increment_counter (vm, tcp4_node, evt, val);
  else
    vlib_node_increment_counter (vm, tcp6_node, evt, val);
}

#define tcp_maybe_inc_counter(node_id, err, count)                      \
{                                                                       \
  if (next0 != tcp_next_drop (is_ip4))                                  \
    tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,            \
                            tcp6_##node_id##_node.index, is_ip4, err,   \
                            1);                                         \
}
#define tcp_inc_counter(node_id, err, count)                            \
  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,              \
                          tcp6_##node_id##_node.index, is_ip4,          \
                          err, count)
#define tcp_maybe_inc_err_counter(cnts, err)                            \
{                                                                       \
  cnts[err] += (next0 != tcp_next_drop (is_ip4));                       \
}
#define tcp_inc_err_counter(cnts, err, val)                             \
{                                                                       \
  cnts[err] += val;                                                     \
}
#define tcp_store_err_counters(node_id, cnts)                           \
{                                                                       \
  int i;                                                                \
  for (i = 0; i < TCP_N_ERROR; i++)                                     \
    if (cnts[i])                                                        \
      tcp_inc_counter(node_id, i, cnts[i]);                             \
}
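/* Typical usage in the node loops below (a sketch): accumulate per-error
 * counts on the stack while processing a frame, then flush once per frame,
 *
 *   u16 err_counters[TCP_N_ERROR] = { 0 };
 *   ...
 *   tcp_inc_err_counter (err_counters, TCP_ERROR_FIN_RCVD, 1);
 *   ...
 *   tcp_store_err_counters (established, err_counters);
 *
 * which batches vlib_node_increment_counter calls instead of issuing one
 * per packet. */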
always_inline uword
tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * frame, int is_ip4)
{
  u32 thread_index = vm->thread_index, errors = 0;
  u32 n_left_from, next_index, *from, *to_next;
  u16 err_counters[TCP_N_ERROR] = { 0 };

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    tcp_established_trace_frame (vm, node, frame, is_ip4);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_header_t *th0 = 0;
          tcp_connection_t *tc0;
          u32 next0 = tcp_next_drop (is_ip4), error0 = TCP_ERROR_ACK_OK;
          u8 is_fin;

          if (n_left_from > 1)
            {
              vlib_buffer_t *pb;
              pb = vlib_get_buffer (vm, from[1]);
              vlib_prefetch_buffer_header (pb, LOAD);
              CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            }

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    thread_index);

          if (PREDICT_FALSE (tc0 == 0))
            {
              error0 = TCP_ERROR_INVALID_CONNECTION;
              goto done;
            }

          th0 = tcp_buffer_hdr (b0);
          /* N.B. buffer is rewritten if segment is ooo. Thus, th0 becomes a
           * dangling reference. */
          is_fin = tcp_is_fin (th0);

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
            + tcp_is_syn (th0) + is_fin + vnet_buffer (b0)->tcp.data_len;

          /* TODO header prediction fast path */

          /* 1-4: check SEQ, RST, SYN */
          if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, th0, &next0,
                                                   &error0)))
            {
              tcp_maybe_inc_err_counter (err_counters, error0);
              TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
              goto done;
            }

          /* 5: check the ACK field */
          if (PREDICT_FALSE (tcp_rcv_ack (tc0, b0, th0, &next0, &error0)))
            {
              tcp_maybe_inc_err_counter (err_counters, error0);
              goto done;
            }

          /* 6: check the URG bit TODO */

          /* 7: process the segment text */
          if (vnet_buffer (b0)->tcp.data_len)
            {
              error0 = tcp_segment_rcv (tc0, b0, &next0);
              tcp_maybe_inc_err_counter (err_counters, error0);
            }

          /* 8: check the FIN bit */
          if (PREDICT_FALSE (is_fin))
            {
              /* Enter CLOSE-WAIT and notify session. To avoid lingering
               * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
              /* Account for the FIN if nothing else was received */
              if (vnet_buffer (b0)->tcp.data_len == 0)
                tc0->rcv_nxt += 1;
              tcp_make_ack (tc0, b0);
              next0 = tcp_next_output (tc0->c_is_ip4);
              tc0->state = TCP_STATE_CLOSE_WAIT;
              stream_session_disconnect_notify (&tc0->connection);
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
              TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
              tcp_inc_err_counter (err_counters, TCP_ERROR_FIN_RCVD, 1);
            }

        done:
          b0->error = node->errors[error0];
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
                                                 thread_index);
  err_counters[TCP_ERROR_EVENT_FIFO_FULL] = errors;
  tcp_store_err_counters (established, err_counters);
  tcp_flush_frame_to_output (vm, thread_index, is_ip4);

  return frame->n_vectors;
}
static uword
tcp4_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_established_node) =
{
  .function = tcp4_established,
  .name = "tcp4-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_established_node, tcp4_established);
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_established_node) =
{
  .function = tcp6_established,
  .name = "tcp6-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_established_node, tcp6_established);
vlib_node_registration_t tcp4_syn_sent_node;
vlib_node_registration_t tcp6_syn_sent_node;
static u8
tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
{
  transport_connection_t *tmp = 0;
  u64 handle;

  if (!tc)
    return 1;

  /* Proxy case */
  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
    return 1;

  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
                 && (tc->state == TCP_STATE_LISTEN
                     || tc->c_rmt_port == hdr->src_port));

  if (!is_valid)
    {
      handle = session_lookup_half_open_handle (&tc->connection);
      tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
                                                 tc->c_proto, tc->c_is_ip4);

      if (tmp)
        {
          if (tmp->lcl_port == hdr->dst_port
              && tmp->rmt_port == hdr->src_port)
            {
              TCP_DBG ("half-open is valid!");
            }
        }
    }
  return is_valid;
}
/**
 * Lookup transport connection
 */
static tcp_connection_t *
tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
                       u8 is_ip4)
{
  tcp_header_t *tcp;
  transport_connection_t *tconn;
  tcp_connection_t *tc;
  u8 is_filtered = 0;
  if (is_ip4)
    {
      ip4_header_t *ip4;
      ip4 = vlib_buffer_get_current (b);
      tcp = ip4_next_header (ip4);
      tconn = session_lookup_connection_wt4 (fib_index,
                                             &ip4->dst_address,
                                             &ip4->src_address,
                                             tcp->dst_port,
                                             tcp->src_port,
                                             TRANSPORT_PROTO_TCP,
                                             thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  else
    {
      ip6_header_t *ip6;
      ip6 = vlib_buffer_get_current (b);
      tcp = ip6_next_header (ip6);
      tconn = session_lookup_connection_wt6 (fib_index,
                                             &ip6->dst_address,
                                             &ip6->src_address,
                                             tcp->dst_port,
                                             tcp->src_port,
                                             TRANSPORT_PROTO_TCP,
                                             thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  return tc;
}
2135 tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2136 vlib_frame_t * from_frame, int is_ip4)
2138 tcp_main_t *tm = vnet_get_tcp_main ();
2139 u32 n_left_from, next_index, *from, *to_next;
2140 u32 my_thread_index = vm->thread_index, errors = 0;
2142 from = vlib_frame_vector_args (from_frame);
2143 n_left_from = from_frame->n_vectors;
2145 next_index = node->cached_next_index;
2147 while (n_left_from > 0)
2151 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2153 while (n_left_from > 0 && n_left_to_next > 0)
2155 u32 bi0, ack0, seq0;
2158 tcp_header_t *tcp0 = 0;
2159 tcp_connection_t *tc0;
2160 tcp_connection_t *new_tc0;
2161 u32 next0 = tcp_next_drop (is_ip4), error0 = TCP_ERROR_ENQUEUED;
2168 n_left_to_next -= 1;
2170 b0 = vlib_get_buffer (vm, bi0);
2172 tcp_half_open_connection_get (vnet_buffer (b0)->
2173 tcp.connection_index);
2174 if (PREDICT_FALSE (tc0 == 0))
2176 error0 = TCP_ERROR_INVALID_CONNECTION;
2180 /* Half-open completed recently but the connection was't removed
2181 * yet by the owning thread */
2182 if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
2184 /* Make sure the connection actually exists */
2185 ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
2186 my_thread_index, is_ip4));
2190 ack0 = vnet_buffer (b0)->tcp.ack_number;
2191 seq0 = vnet_buffer (b0)->tcp.seq_number;
2192 tcp0 = tcp_buffer_hdr (b0);
2194 /* Crude check to see if the connection handle does not match
2195 * the packet. Probably connection just switched to established */
2196 if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
2197 || tcp0->src_port != tc0->c_rmt_port))
2201 (!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0)))
2204 /* SYNs, FINs and data consume sequence numbers */
2205 vnet_buffer (b0)->tcp.seq_end = seq0 + tcp_is_syn (tcp0)
2206 + tcp_is_fin (tcp0) + vnet_buffer (b0)->tcp.data_len;
2209 * 1. check the ACK bit
2213 * If the ACK bit is set
2214 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
2215 * the RST bit is set, if so drop the segment and return)
2216 * <SEQ=SEG.ACK><CTL=RST>
2217 * and discard the segment. Return.
2218 * If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
2222 if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
2224 clib_warning ("ack not in rcv wnd");
2225 if (!tcp_rst (tcp0))
2226 tcp_send_reset_w_pkt (tc0, b0, is_ip4);
2230 /* Make sure ACK is valid */
2231 if (seq_gt (tc0->snd_una, ack0))
2233 clib_warning ("ack invalid");
2239 * 2. check the RST bit
2244 /* If ACK is acceptable, signal client that peer is not
2245 * willing to accept connection and drop connection*/
2247 tcp_connection_reset (tc0);
2252 * 3. check the security and precedence (skipped)
2256 * 4. check the SYN bit
2259 /* No SYN flag. Drop. */
2260 if (!tcp_syn (tcp0))
2262 clib_warning ("not synack");
2267 if (tcp_options_parse (tcp0, &tc0->rcv_opts))
2269 clib_warning ("options parse fail");
2273 /* Valid SYN or SYN-ACK. Move connection from half-open pool to
2274 * current thread pool. */
2275 pool_get (tm->connections[my_thread_index], new_tc0);
2276 clib_memcpy (new_tc0, tc0, sizeof (*new_tc0));
2277 new_tc0->c_c_index = new_tc0 - tm->connections[my_thread_index];
2278 new_tc0->c_thread_index = my_thread_index;
2279 new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
2280 new_tc0->irs = seq0;
2281 new_tc0->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID;
2282 new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] =
2283 TCP_TIMER_HANDLE_INVALID;
2284 new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
2286 /* If this is not the owning thread, wait for syn retransmit to
2287 * expire and cleanup then */
2288 if (tcp_half_open_connection_cleanup (tc0))
2289 tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
2291 if (tcp_opts_tstamp (&new_tc0->rcv_opts))
2293 new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
2294 new_tc0->tsval_recent_age = tcp_time_now ();
2297 if (tcp_opts_wscale (&new_tc0->rcv_opts))
2298 new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
2300 new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2301 << new_tc0->snd_wscale;
2302 new_tc0->snd_wl1 = seq0;
2303 new_tc0->snd_wl2 = ack0;
2305 tcp_connection_init_vars (new_tc0);
2307 /* SYN-ACK: See if we can switch to ESTABLISHED state */
2308 if (PREDICT_TRUE (tcp_ack (tcp0)))
2310 /* Our SYN is ACKed: we have iss < ack = snd_una */
2312 /* TODO Dequeue acknowledged segments if we support Fast Open */
2313 new_tc0->snd_una = ack0;
2314 new_tc0->state = TCP_STATE_ESTABLISHED;
2316 /* Make sure las is initialized for the wnd computation */
2317 new_tc0->rcv_las = new_tc0->rcv_nxt;
2319 /* Notify app that we have connection. If session layer can't
2320 * allocate session send reset */
2321 if (session_stream_connect_notify (&new_tc0->connection, 0))
2323 clib_warning ("connect notify fail");
2324 tcp_send_reset_w_pkt (new_tc0, b0, is_ip4);
2325 tcp_connection_cleanup (new_tc0);
2329 /* Make sure after data segment processing ACK is sent */
2330 new_tc0->flags |= TCP_CONN_SNDACK;
2332 /* Update rtt with the syn-ack sample */
2333 tcp_update_rtt (new_tc0, vnet_buffer (b0)->tcp.ack_number);
2334 TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
2336 /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
2339 new_tc0->state = TCP_STATE_SYN_RCVD;
2341 /* Notify app that we have connection */
2342 if (session_stream_connect_notify (&new_tc0->connection, 0))
2344 tcp_connection_cleanup (new_tc0);
2345 tcp_send_reset_w_pkt (tc0, b0, is_ip4);
2346 TCP_EVT_DBG (TCP_EVT_RST_SENT, tc0);
2351 tcp_init_snd_vars (tc0);
2352 tcp_make_synack (new_tc0, b0);
2353 next0 = tcp_next_output (is_ip4);
2358 /* Read data, if any */
2359 if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
2361 clib_warning ("rcvd data in syn-sent");
2362 error0 = tcp_segment_rcv (new_tc0, b0, &next0);
2363 if (error0 == TCP_ERROR_ACK_OK)
2364 error0 = TCP_ERROR_SYN_ACKS_RCVD;
2365 tcp_maybe_inc_counter (syn_sent, error0, 1);
2369 tcp_make_ack (new_tc0, b0);
2370 next0 = tcp_next_output (new_tc0->c_is_ip4);
2375 b0->error = error0 ? node->errors[error0] : 0;
2377 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
2379 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2380 clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
2381 clib_memcpy (&t0->tcp_connection, tc0,
2382 sizeof (t0->tcp_connection));
2385 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2386 n_left_to_next, bi0, next0);
2389 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2392 errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2394 tcp_inc_counter (syn_sent, TCP_ERROR_EVENT_FIFO_FULL, errors);
2395 return from_frame->n_vectors;
2399 tcp4_syn_sent (vlib_main_t * vm, vlib_node_runtime_t * node,
2400 vlib_frame_t * from_frame)
2402 return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2406 tcp6_syn_sent_rcv (vlib_main_t * vm, vlib_node_runtime_t * node,
2407 vlib_frame_t * from_frame)
2409 return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2413 VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
2415 .function = tcp4_syn_sent,
2416 .name = "tcp4-syn-sent",
2417 /* Takes a vector of packets. */
2418 .vector_size = sizeof (u32),
2419 .n_errors = TCP_N_ERROR,
2420 .error_strings = tcp_error_strings,
2421 .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2424 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2425 foreach_tcp_state_next
2428 .format_trace = format_tcp_rx_trace_short,
2432 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_syn_sent_node, tcp4_syn_sent);
2435 VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
2437 .function = tcp6_syn_sent_rcv,
2438 .name = "tcp6-syn-sent",
2439 /* Takes a vector of packets. */
2440 .vector_size = sizeof (u32),
2441 .n_errors = TCP_N_ERROR,
2442 .error_strings = tcp_error_strings,
2443 .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2446 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2447 foreach_tcp_state_next
2450 .format_trace = format_tcp_rx_trace_short,
2454 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv);
2456 vlib_node_registration_t tcp4_rcv_process_node;
2457 vlib_node_registration_t tcp6_rcv_process_node;
2460 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
2461 * as per RFC793 p. 64
2464 tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2465 vlib_frame_t * from_frame, int is_ip4)
2467 u32 n_left_from, next_index, *from, *to_next, n_fins = 0;
2468 u32 my_thread_index = vm->thread_index, errors = 0;
2470 from = vlib_frame_vector_args (from_frame);
2471 n_left_from = from_frame->n_vectors;
2472 next_index = node->cached_next_index;
2474 while (n_left_from > 0)
2478 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2480 while (n_left_from > 0 && n_left_to_next > 0)
2484 tcp_header_t *tcp0 = 0;
2485 tcp_connection_t *tc0;
2486 u32 next0 = tcp_next_drop (is_ip4), error0 = TCP_ERROR_NONE;
2494 n_left_to_next -= 1;
2496 b0 = vlib_get_buffer (vm, bi0);
2497 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2499 if (PREDICT_FALSE (tc0 == 0))
2501 error0 = TCP_ERROR_INVALID_CONNECTION;
2505 tcp0 = tcp_buffer_hdr (b0);
2506 is_fin0 = tcp_is_fin (tcp0);
2508 /* SYNs, FINs and data consume sequence numbers */
2509 vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
2510 + tcp_is_syn (tcp0) + is_fin0 + vnet_buffer (b0)->tcp.data_len;
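 /* Example of the accounting above, with hypothetical numbers: a
  * segment with seq_number 1000 carrying 100 data bytes plus a FIN
  * yields seq_end = 1000 + 0 (no SYN) + 1 (FIN) + 100 = 1101. SYN and
  * FIN each consume one sequence number; a pure ACK consumes none, so
  * its seq_end equals its seq_number. */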
2514 tcp_connection_t *tmp;
2515 tmp = tcp_lookup_connection (tc0->c_fib_index, b0,
2516 my_thread_index, is_ip4);
2517 if (tmp->state != tc0->state)
2519 clib_warning ("state changed");
2525 * Special treatment for CLOSED
2527 if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
2529 error0 = TCP_ERROR_CONNECTION_CLOSED;
2534 * For all other states (except LISTEN)
2537 /* 1-4: check SEQ, RST, SYN */
2538 if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, tcp0,
2541 tcp_maybe_inc_counter (rcv_process, error0, 1);
2545 /* 5: check the ACK field */
2548 case TCP_STATE_SYN_RCVD:
2550 * If the segment acknowledgment is not acceptable, form a reset segment:
2552 * <SEQ=SEG.ACK><CTL=RST>
2555 if (!tcp_rcv_ack_is_acceptable (tc0, b0))
2557 TCP_DBG ("connection not accepted");
2558 tcp_send_reset_w_pkt (tc0, b0, is_ip4);
2559 error0 = TCP_ERROR_ACK_INVALID;
2563 /* Update rtt and rto */
2564 tcp_update_rtt (tc0, vnet_buffer (b0)->tcp.ack_number);
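 /* tcp_update_rtt feeds the retransmission timeout estimator. As a
  * rough sketch, the classic RFC 6298 update it follows, given a new
  * measurement m (names illustrative, not the in-tree fields):
  *
  *   rttvar = 3/4 * rttvar + 1/4 * |srtt - m|;
  *   srtt   = 7/8 * srtt + 1/8 * m;
  *   rto    = srtt + max (clock_granularity, 4 * rttvar);
  *
  * The actual implementation works in scaled fixed-point; see
  * tcp_update_rtt for the authoritative version. */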
2566 /* Switch state to ESTABLISHED */
2567 tc0->state = TCP_STATE_ESTABLISHED;
2568 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2570 /* Initialize session variables */
2571 tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2572 tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2573 << tc0->rcv_opts.wscale;
2574 tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
2575 tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
2577 /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
2578 tcp_retransmit_timer_reset (tc0);
2579 tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH);
2580 stream_session_accept_notify (&tc0->connection);
2581 error0 = TCP_ERROR_ACK_OK;
2583 case TCP_STATE_ESTABLISHED:
2584 /* We can get packets in established state here because they
2585 * were enqueued before the state change */
2586 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2588 tcp_maybe_inc_counter (rcv_process, error0, 1);
2593 case TCP_STATE_FIN_WAIT_1:
2594 /* In addition to the processing for the ESTABLISHED state, if
2595 * our FIN is now acknowledged then enter FIN-WAIT-2 and
2596 * continue processing in that state. */
2597 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2599 tcp_maybe_inc_counter (rcv_process, error0, 1);
2603 /* Still have to send the FIN */
2604 if (tc0->flags & TCP_CONN_FINPNDG)
2606 /* TX fifo finally drained */
2607 if (!session_tx_fifo_max_dequeue (&tc0->connection))
2610 /* If FIN is ACKed */
2611 else if (tc0->snd_una == tc0->snd_una_max)
2613 tc0->state = TCP_STATE_FIN_WAIT_2;
2614 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2616 /* Stop all retransmit timers because we have nothing more
2617 * to send. Enable waitclose though because we're willing to
2618 * wait for peer's FIN but not indefinitely. */
2619 tcp_connection_timers_reset (tc0);
2620 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
2623 case TCP_STATE_FIN_WAIT_2:
2624 /* In addition to the processing for the ESTABLISHED state, if
2625 * the retransmission queue is empty, the user's CLOSE can be
2626 * acknowledged ("ok") but do not delete the TCB. */
2627 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2629 tcp_maybe_inc_counter (rcv_process, error0, 1);
2633 case TCP_STATE_CLOSE_WAIT:
2634 /* Do the same processing as for the ESTABLISHED state. */
2635 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2637 tcp_maybe_inc_counter (rcv_process, error0, 1);
2640 if (tc0->flags & TCP_CONN_FINPNDG)
2642 /* TX fifo finally drained */
2643 if (!session_tx_fifo_max_dequeue (&tc0->connection))
2646 tcp_connection_timers_reset (tc0);
2647 tc0->state = TCP_STATE_LAST_ACK;
2648 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
2653 case TCP_STATE_CLOSING:
2654 /* In addition to the processing for the ESTABLISHED state, if
2655 * the ACK acknowledges our FIN then enter the TIME-WAIT state,
2656 * otherwise ignore the segment. */
2657 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2659 tcp_maybe_inc_counter (rcv_process, error0, 1);
2663 tc0->state = TCP_STATE_TIME_WAIT;
2664 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2665 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2669 case TCP_STATE_LAST_ACK:
2670 /* The only thing that [should] arrive in this state is an
2671 * acknowledgment of our FIN. If our FIN is now acknowledged,
2672 * delete the TCB, enter the CLOSED state, and return. */
2674 if (!tcp_rcv_ack_is_acceptable (tc0, b0))
2676 error0 = TCP_ERROR_ACK_INVALID;
2679 error0 = TCP_ERROR_ACK_OK;
2680 tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2681 /* Apparently our ACK for the peer's FIN was lost */
2682 if (is_fin0 && tc0->snd_una != tc0->snd_una_max)
2688 tc0->state = TCP_STATE_CLOSED;
2689 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2691 /* Don't free the connection from the data path since
2692 * we can't ensure that we have no packets already enqueued
2693 * to output. Rely instead on the waitclose timer */
2694 tcp_connection_timers_reset (tc0);
2695 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, 1);
2700 case TCP_STATE_TIME_WAIT:
2701 /* The only thing that can arrive in this state is a
2702 * retransmission of the remote FIN. Acknowledge it, and restart
2703 * the 2 MSL timeout. */
2705 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2707 tcp_maybe_inc_counter (rcv_process, error0, 1);
2711 tcp_make_ack (tc0, b0);
2712 next0 = tcp_next_output (is_ip4);
2713 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2721 /* 6: check the URG bit TODO */
2723 /* 7: process the segment text */
2726 case TCP_STATE_ESTABLISHED:
2727 case TCP_STATE_FIN_WAIT_1:
2728 case TCP_STATE_FIN_WAIT_2:
2729 if (vnet_buffer (b0)->tcp.data_len)
2731 error0 = tcp_segment_rcv (tc0, b0, &next0);
2732 tcp_maybe_inc_counter (rcv_process, error0, 1);
2737 case TCP_STATE_CLOSE_WAIT:
2738 case TCP_STATE_CLOSING:
2739 case TCP_STATE_LAST_ACK:
2740 case TCP_STATE_TIME_WAIT:
2741 /* This should not occur, since a FIN has been received from the
2742 * remote side. Ignore the segment text. */
2746 /* 8: check the FIN bit */
2752 case TCP_STATE_ESTABLISHED:
2753 case TCP_STATE_SYN_RCVD:
2754 /* Send FIN-ACK, notify app and enter CLOSE-WAIT */
2755 tcp_connection_timers_reset (tc0);
2756 tcp_make_fin (tc0, b0);
2758 tc0->snd_una_max = tc0->snd_nxt;
2759 tcp_retransmit_timer_set (tc0);
2760 next0 = tcp_next_output (tc0->c_is_ip4);
2761 stream_session_disconnect_notify (&tc0->connection);
2762 tc0->state = TCP_STATE_CLOSE_WAIT;
2763 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2765 case TCP_STATE_CLOSE_WAIT:
2766 case TCP_STATE_CLOSING:
2767 case TCP_STATE_LAST_ACK:
2770 case TCP_STATE_FIN_WAIT_1:
2771 tc0->state = TCP_STATE_CLOSING;
2772 tcp_make_ack (tc0, b0);
2773 next0 = tcp_next_output (is_ip4);
2774 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2775 /* Wait for ACK but not forever */
2776 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
2778 case TCP_STATE_FIN_WAIT_2:
2779 /* Got FIN, send ACK! Be more aggressive with resource cleanup */
2780 tc0->state = TCP_STATE_TIME_WAIT;
2781 tcp_connection_timers_reset (tc0);
2782 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2783 tcp_make_ack (tc0, b0);
2784 next0 = tcp_next_output (is_ip4);
2785 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2787 case TCP_STATE_TIME_WAIT:
2788 /* Remain in the TIME-WAIT state. Restart the time-wait timeout */
2791 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2794 TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
2798 b0->error = error0 ? node->errors[error0] : 0;
2800 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2802 tcp_rx_trace_t *t0 =
2803 vlib_add_trace (vm, node, b0, sizeof (*t0));
2804 tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
2807 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2808 n_left_to_next, bi0, next0);
2811 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2814 errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2816 tcp_inc_counter (rcv_process, TCP_ERROR_EVENT_FIFO_FULL, errors);
2817 tcp_inc_counter (rcv_process, TCP_ERROR_FIN_RCVD, n_fins);
2818 return from_frame->n_vectors;
2822 tcp4_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
2823 vlib_frame_t * from_frame)
2825 return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2829 tcp6_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
2830 vlib_frame_t * from_frame)
2832 return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2836 VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
2838 .function = tcp4_rcv_process,
2839 .name = "tcp4-rcv-process",
2840 /* Takes a vector of packets. */
2841 .vector_size = sizeof (u32),
2842 .n_errors = TCP_N_ERROR,
2843 .error_strings = tcp_error_strings,
2844 .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2847 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2848 foreach_tcp_state_next
2851 .format_trace = format_tcp_rx_trace_short,
2855 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_rcv_process_node, tcp4_rcv_process);
2858 VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
2860 .function = tcp6_rcv_process,
2861 .name = "tcp6-rcv-process",
2862 /* Takes a vector of packets. */
2863 .vector_size = sizeof (u32),
2864 .n_errors = TCP_N_ERROR,
2865 .error_strings = tcp_error_strings,
2866 .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2869 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2870 foreach_tcp_state_next
2873 .format_trace = format_tcp_rx_trace_short,
2877 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_rcv_process_node, tcp6_rcv_process);
2879 vlib_node_registration_t tcp4_listen_node;
2880 vlib_node_registration_t tcp6_listen_node;
2883 * LISTEN state processing as per RFC 793 p. 65
2886 tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2887 vlib_frame_t * from_frame, int is_ip4)
2889 u32 n_left_from, next_index, *from, *to_next, n_syns = 0;
2890 u32 my_thread_index = vm->thread_index;
2892 from = vlib_frame_vector_args (from_frame);
2893 n_left_from = from_frame->n_vectors;
2895 next_index = node->cached_next_index;
2897 while (n_left_from > 0)
2901 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2903 while (n_left_from > 0 && n_left_to_next > 0)
2908 tcp_header_t *th0 = 0;
2909 tcp_connection_t *lc0;
2912 tcp_connection_t *child0;
2913 u32 error0 = TCP_ERROR_NONE, next0 = tcp_next_drop (is_ip4);
2920 n_left_to_next -= 1;
2922 b0 = vlib_get_buffer (vm, bi0);
2923 lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);
2927 ip40 = vlib_buffer_get_current (b0);
2928 th0 = ip4_next_header (ip40);
2932 ip60 = vlib_buffer_get_current (b0);
2933 th0 = ip6_next_header (ip60);
2936 /* Create child session. For syn-flood protection use filter */
2938 /* 1. first check for an RST: handled in dispatch */
2939 /* if (tcp_rst (th0))
2942 /* 2. second check for an ACK: handled in dispatch */
2943 /* if (tcp_ack (th0))
2945 tcp_send_reset (b0, is_ip4);
2949 /* 3. check for a SYN (did that already) */
2951 /* Make sure connection wasn't just created */
2952 child0 = tcp_lookup_connection (lc0->c_fib_index, b0,
2953 my_thread_index, is_ip4);
2954 if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
2956 error0 = TCP_ERROR_CREATE_EXISTS;
2960 /* Create child session and send SYN-ACK */
2961 child0 = tcp_connection_new (my_thread_index);
2962 child0->c_lcl_port = th0->dst_port;
2963 child0->c_rmt_port = th0->src_port;
2964 child0->c_is_ip4 = is_ip4;
2965 child0->state = TCP_STATE_SYN_RCVD;
2966 child0->c_fib_index = lc0->c_fib_index;
2970 child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
2971 child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
2975 clib_memcpy (&child0->c_lcl_ip6, &ip60->dst_address,
2976 sizeof (ip6_address_t));
2977 clib_memcpy (&child0->c_rmt_ip6, &ip60->src_address,
2978 sizeof (ip6_address_t));
2981 if (tcp_options_parse (th0, &child0->rcv_opts))
2983 clib_warning ("options parse fail");
2987 child0->irs = vnet_buffer (b0)->tcp.seq_number;
2988 child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
2989 child0->rcv_las = child0->rcv_nxt;
2990 child0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
2992 /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
2993 * segments are used to initialize PAWS. */
2994 if (tcp_opts_tstamp (&child0->rcv_opts))
2996 child0->tsval_recent = child0->rcv_opts.tsval;
2997 child0->tsval_recent_age = tcp_time_now ();
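 /* tsval_recent seeded above is the reference for later PAWS checks.
  * A minimal sketch of the RFC 1323/7323 test subsequently applied to
  * incoming segments (illustrative, not this node's exact code):
  *
  *   if (tcp_opts_tstamp (&tc->rcv_opts)
  *       && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent))
  *     ;  // reject: segment is likely an old duplicate
  *
  * using the same wrap-safe comparison style as sequence numbers. */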
3000 if (tcp_opts_wscale (&child0->rcv_opts))
3001 child0->snd_wscale = child0->rcv_opts.wscale;
3003 child0->snd_wnd = clib_net_to_host_u16 (th0->window)
3004 << child0->snd_wscale;
3005 child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
3006 child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
3008 tcp_connection_init_vars (child0);
3009 TCP_EVT_DBG (TCP_EVT_SYN_RCVD, child0, 1);
3011 if (stream_session_accept (&child0->connection, lc0->c_s_index,
3014 clib_warning ("session accept fail");
3015 tcp_connection_cleanup (child0);
3016 error0 = TCP_ERROR_CREATE_SESSION_FAIL;
3020 /* Reuse buffer to make syn-ack and send */
3021 tcp_make_synack (child0, b0);
3022 next0 = tcp_next_output (is_ip4);
3023 tcp_timer_set (child0, TCP_TIMER_ESTABLISH, TCP_SYN_RCVD_TIME);
3026 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3028 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
3029 clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
3030 clib_memcpy (&t0->tcp_connection, lc0,
3031 sizeof (t0->tcp_connection));
3034 n_syns += (error0 == TCP_ERROR_NONE);
3035 b0->error = node->errors[error0];
3037 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
3038 n_left_to_next, bi0, next0);
3041 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
3044 tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
3045 return from_frame->n_vectors;
3049 tcp4_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
3050 vlib_frame_t * from_frame)
3052 return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
3056 tcp6_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
3057 vlib_frame_t * from_frame)
3059 return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
3063 VLIB_REGISTER_NODE (tcp4_listen_node) =
3065 .function = tcp4_listen,
3066 .name = "tcp4-listen",
3067 /* Takes a vector of packets. */
3068 .vector_size = sizeof (u32),
3069 .n_errors = TCP_N_ERROR,
3070 .error_strings = tcp_error_strings,
3071 .n_next_nodes = TCP_LISTEN_N_NEXT,
3074 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
3075 foreach_tcp_state_next
3078 .format_trace = format_tcp_rx_trace_short,
3082 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_listen_node, tcp4_listen);
3085 VLIB_REGISTER_NODE (tcp6_listen_node) =
3087 .function = tcp6_listen,
3088 .name = "tcp6-listen",
3089 /* Takes a vector of packets. */
3090 .vector_size = sizeof (u32),
3091 .n_errors = TCP_N_ERROR,
3092 .error_strings = tcp_error_strings,
3093 .n_next_nodes = TCP_LISTEN_N_NEXT,
3096 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
3097 foreach_tcp_state_next
3100 .format_trace = format_tcp_rx_trace_short,
3104 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_listen_node, tcp6_listen);
3106 vlib_node_registration_t tcp4_input_node;
3107 vlib_node_registration_t tcp6_input_node;
3109 typedef enum _tcp_input_next
3111 TCP_INPUT_NEXT_DROP,
3112 TCP_INPUT_NEXT_LISTEN,
3113 TCP_INPUT_NEXT_RCV_PROCESS,
3114 TCP_INPUT_NEXT_SYN_SENT,
3115 TCP_INPUT_NEXT_ESTABLISHED,
3116 TCP_INPUT_NEXT_RESET,
3117 TCP_INPUT_NEXT_PUNT,
3121 #define foreach_tcp4_input_next \
3122 _ (DROP, "ip4-drop") \
3123 _ (LISTEN, "tcp4-listen") \
3124 _ (RCV_PROCESS, "tcp4-rcv-process") \
3125 _ (SYN_SENT, "tcp4-syn-sent") \
3126 _ (ESTABLISHED, "tcp4-established") \
3127 _ (RESET, "tcp4-reset") \
3128 _ (PUNT, "ip4-punt")
3130 #define foreach_tcp6_input_next \
3131 _ (DROP, "ip6-drop") \
3132 _ (LISTEN, "tcp6-listen") \
3133 _ (RCV_PROCESS, "tcp6-rcv-process") \
3134 _ (SYN_SENT, "tcp6-syn-sent") \
3135 _ (ESTABLISHED, "tcp6-established") \
3136 _ (RESET, "tcp6-reset") \
3137 _ (PUNT, "ip6-punt")
3139 #define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
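/* How filter_flags is used (illustrative walk-through): dispatch is a
 * table lookup keyed by connection state and by the TCP flags byte
 * masked down to the four flags above. For a SYN-ACK arriving on a
 * connection in SYN_SENT:
 *
 *   flags = tcp->flags & filter_flags;               // SYN|ACK
 *   next = tm->dispatch_table[TCP_STATE_SYN_SENT][flags].next;
 *
 * which, per tcp_dispatch_table_init below, yields
 * TCP_INPUT_NEXT_SYN_SENT. */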
3142 tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
3143 vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
3145 tcp_connection_t *tc;
3150 for (i = 0; i < n_bufs; i++)
3152 if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
3154 t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
3155 tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
3157 tcp = vlib_buffer_get_current (bs[i]);
3158 tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
3164 tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
3166 if (*error == TCP_ERROR_FILTERED)
3168 *next = TCP_INPUT_NEXT_DROP;
3170 else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
3172 *next = TCP_INPUT_NEXT_PUNT;
3173 *error = TCP_ERROR_PUNT;
3177 *next = TCP_INPUT_NEXT_RESET;
3178 *error = TCP_ERROR_NO_LISTENER;
3182 static inline tcp_connection_t *
3183 tcp_input_lookup_buffer (vlib_buffer_t * b, u8 thread_index, u32 * error,
3186 u32 fib_index = vnet_buffer (b)->ip.fib_index;
3187 int n_advance_bytes, n_data_bytes;
3188 transport_connection_t *tc;
3194 ip4_header_t *ip4 = vlib_buffer_get_current (b);
3195 tcp = ip4_next_header (ip4);
3196 vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
3197 n_advance_bytes = (ip4_header_bytes (ip4) + tcp_header_bytes (tcp));
3198 n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;
3200 /* Length check. Checksum already computed by ipx_local; no need to recompute */
3201 if (PREDICT_FALSE (n_data_bytes < 0))
3203 *error = TCP_ERROR_LENGTH;
3207 tc = session_lookup_connection_wt4 (fib_index, &ip4->dst_address,
3208 &ip4->src_address, tcp->dst_port,
3209 tcp->src_port, TRANSPORT_PROTO_TCP,
3210 thread_index, &is_filtered);
3214 ip6_header_t *ip6 = vlib_buffer_get_current (b);
3215 tcp = ip6_next_header (ip6);
3216 vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
3217 n_advance_bytes = tcp_header_bytes (tcp);
3218 n_data_bytes = clib_net_to_host_u16 (ip6->payload_length)
3220 n_advance_bytes += sizeof (ip6[0]);
3222 if (PREDICT_FALSE (n_data_bytes < 0))
3224 *error = TCP_ERROR_LENGTH;
3228 tc = session_lookup_connection_wt6 (fib_index, &ip6->dst_address,
3229 &ip6->src_address, tcp->dst_port,
3230 tcp->src_port, TRANSPORT_PROTO_TCP,
3231 thread_index, &is_filtered);
3234 vnet_buffer (b)->tcp.seq_number = clib_net_to_host_u32 (tcp->seq_number);
3235 vnet_buffer (b)->tcp.ack_number = clib_net_to_host_u32 (tcp->ack_number);
3236 vnet_buffer (b)->tcp.data_offset = n_advance_bytes;
3237 vnet_buffer (b)->tcp.data_len = n_data_bytes;
3238 vnet_buffer (b)->tcp.flags = 0;
3240 *error = is_filtered ? TCP_ERROR_FILTERED : *error;
3242 return tcp_get_connection_from_transport (tc);
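/* Worked example of the length bookkeeping above, for a hypothetical
 * IPv4 packet: a 20-byte IP header (no options) plus a TCP header
 * with data offset 8 (32 bytes) gives n_advance_bytes = 52; with
 * ip4->length = 60 that leaves n_data_bytes = 60 - 52 = 8 bytes of
 * payload. Headers claiming more room than the packet carries leave
 * n_data_bytes negative, which the TCP_ERROR_LENGTH check rejects. */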
3246 tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
3247 vlib_buffer_t * b, u16 * next, u32 * error)
3252 tcp = tcp_buffer_hdr (b);
3253 flags = tcp->flags & filter_flags;
3254 *next = tm->dispatch_table[tc->state][flags].next;
3255 *error = tm->dispatch_table[tc->state][flags].error;
3257 if (PREDICT_FALSE (*error == TCP_ERROR_DISPATCH
3258 || *next == TCP_INPUT_NEXT_RESET))
3260 /* Overload tcp flags to store state */
3261 tcp_state_t state = tc->state;
3262 vnet_buffer (b)->tcp.flags = tc->state;
3264 if (*error == TCP_ERROR_DISPATCH)
3265 clib_warning ("disp error state %U flags %U", format_tcp_state,
3266 state, format_tcp_flags, (int) flags);
3271 tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
3272 vlib_frame_t * frame, int is_ip4)
3274 u32 n_left_from, *from, thread_index = vm->thread_index;
3275 tcp_main_t *tm = vnet_get_tcp_main ();
3276 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
3277 u16 nexts[VLIB_FRAME_SIZE], *next;
3279 tcp_set_time_now (thread_index);
3281 from = vlib_frame_vector_args (frame);
3282 n_left_from = frame->n_vectors;
3283 vlib_get_buffers (vm, from, bufs, n_left_from);
3288 while (n_left_from >= 4)
3290 u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
3291 tcp_connection_t *tc0, *tc1;
3294 vlib_prefetch_buffer_header (b[2], STORE);
3295 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3297 vlib_prefetch_buffer_header (b[3], STORE);
3298 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3301 next[0] = next[1] = TCP_INPUT_NEXT_DROP;
3303 tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4);
3304 tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4);
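	  /* !tc0 + !tc1 == 0 below holds only when both lookups returned a
	   * connection; any miss falls through to per-buffer handling. */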
3306 if (PREDICT_TRUE (!tc0 + !tc1 == 0))
3308 ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
3309 ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
3311 vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3312 vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
3314 tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
3315 tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
3319 if (PREDICT_TRUE (tc0 != 0))
3321 ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
3322 vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3323 tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
3326 tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
3328 if (PREDICT_TRUE (tc1 != 0))
3330 ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
3331 vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
3332 tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
3335 tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
3342 while (n_left_from > 0)
3344 tcp_connection_t *tc0;
3345 u32 error0 = TCP_ERROR_NO_LISTENER;
3347 if (n_left_from > 1)
3349 vlib_prefetch_buffer_header (b[1], STORE);
3350 CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3353 next[0] = TCP_INPUT_NEXT_DROP;
3354 tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4);
3355 if (PREDICT_TRUE (tc0 != 0))
3357 ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
3358 vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3359 tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
3362 tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
3369 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
3370 tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);
3372 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
3373 return frame->n_vectors;
3377 tcp4_input (vlib_main_t * vm, vlib_node_runtime_t * node,
3378 vlib_frame_t * from_frame)
3380 return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ );
3384 tcp6_input (vlib_main_t * vm, vlib_node_runtime_t * node,
3385 vlib_frame_t * from_frame)
3387 return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ );
3391 VLIB_REGISTER_NODE (tcp4_input_node) =
3393 .function = tcp4_input,
3394 .name = "tcp4-input",
3395 /* Takes a vector of packets. */
3396 .vector_size = sizeof (u32),
3397 .n_errors = TCP_N_ERROR,
3398 .error_strings = tcp_error_strings,
3399 .n_next_nodes = TCP_INPUT_N_NEXT,
3402 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3403 foreach_tcp4_input_next
3406 .format_buffer = format_tcp_header,
3407 .format_trace = format_tcp_rx_trace,
3411 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_input_node, tcp4_input);
3414 VLIB_REGISTER_NODE (tcp6_input_node) =
3416 .function = tcp6_input,
3417 .name = "tcp6-input",
3418 /* Takes a vector of packets. */
3419 .vector_size = sizeof (u32),
3420 .n_errors = TCP_N_ERROR,
3421 .error_strings = tcp_error_strings,
3422 .n_next_nodes = TCP_INPUT_N_NEXT,
3425 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3426 foreach_tcp6_input_next
3429 .format_buffer = format_tcp_header,
3430 .format_trace = format_tcp_rx_trace,
3434 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input);
3437 tcp_dispatch_table_init (tcp_main_t * tm)
3440 for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
3441 for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
3443 tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
3444 tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
3447 #define _(t,f,n,e) \
3449 tm->dispatch_table[TCP_STATE_##t][f].next = (n); \
3450 tm->dispatch_table[TCP_STATE_##t][f].error = (e); \
3453 /* SYNs for new connections -> tcp-listen. */
3454 _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
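  /* For illustration, the entry just above expands to:
   *
   *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].next =
   *     TCP_INPUT_NEXT_LISTEN;
   *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].error =
   *     TCP_ERROR_NONE;
   *
   * so each line below binds one (state, flags) pair to a next node
   * and an error counter. */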
3455 _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
3456 _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_RST_RCVD);
3457 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
3459 /* ACK for a SYN-ACK -> tcp-rcv-process. */
3460 _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3461 _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3462 _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3464 _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3465 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3467 /* SYN-ACK for a SYN */
3468 _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3470 _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3471 _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3472 _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3474 /* ACK for established connection -> tcp-established. */
3475 _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3476 /* FIN for established connection -> tcp-established. */
3477 _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3478 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3480 _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3481 _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3483 _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3484 _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3486 /* ACK or FIN-ACK to our FIN */
3487 _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3488 _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
3490 /* FIN in reply to our FIN from the other side */
3491 _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3492 _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3493 _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3494 /* FIN confirming that the peer (app) has closed */
3495 _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3496 _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3497 _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3499 _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3500 _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3502 _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3503 _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3504 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3506 _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3507 _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3508 _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3509 _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3511 _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3512 _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3513 _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
3514 _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
3515 _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3516 TCP_ERROR_CONNECTION_CLOSED);
3520 static clib_error_t *
3521 tcp_input_init (vlib_main_t * vm)
3523 clib_error_t *error = 0;
3524 tcp_main_t *tm = vnet_get_tcp_main ();
3526 if ((error = vlib_call_init_function (vm, tcp_init)))
3529 /* Initialize dispatch table. */
3530 tcp_dispatch_table_init (tm);
3535 VLIB_INIT_FUNCTION (tcp_input_init);
3538 * fd.io coding-style-patch-verification: ON
3541 * eval: (c-set-style "gnu")