2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/sparse_vec.h>
17 #include <vnet/tcp/tcp_packet.h>
18 #include <vnet/tcp/tcp.h>
19 #include <vnet/session/session.h>
22 static char *tcp_error_strings[] = {
23 #define tcp_error(n,s) s,
24 #include <vnet/tcp/tcp_error.def>
28 /* All TCP nodes have the same outgoing arcs */
29 #define foreach_tcp_state_next \
30 _ (DROP, "error-drop") \
31 _ (TCP4_OUTPUT, "tcp4-output") \
32 _ (TCP6_OUTPUT, "tcp6-output")
34 typedef enum _tcp_established_next
36 #define _(s,n) TCP_ESTABLISHED_NEXT_##s,
37 foreach_tcp_state_next
39 TCP_ESTABLISHED_N_NEXT,
40 } tcp_established_next_t;
42 typedef enum _tcp_rcv_process_next
44 #define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
45 foreach_tcp_state_next
47 TCP_RCV_PROCESS_N_NEXT,
48 } tcp_rcv_process_next_t;
50 typedef enum _tcp_syn_sent_next
52 #define _(s,n) TCP_SYN_SENT_NEXT_##s,
53 foreach_tcp_state_next
56 } tcp_syn_sent_next_t;
58 typedef enum _tcp_listen_next
60 #define _(s,n) TCP_LISTEN_NEXT_##s,
61 foreach_tcp_state_next
66 /* Generic, state independent indices */
67 typedef enum _tcp_state_next
69 #define _(s,n) TCP_NEXT_##s,
70 foreach_tcp_state_next
75 #define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \
76 : TCP_NEXT_TCP6_OUTPUT)
78 vlib_node_registration_t tcp4_established_node;
79 vlib_node_registration_t tcp6_established_node;
82 * Validate segment sequence number. As per RFC793:
84 * Segment Length  Receive Window  Test
86 * --------------  --------------  ------------------------------------------
87 *       0               0         SEG.SEQ = RCV.NXT
88 *       0              >0         RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
89 *      >0               0         not acceptable
90 *      >0              >0         RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
91 *                                 or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
93 * This ultimately comes down to checking whether the segment falls within the window.
94 * The one important difference compared to RFC793 is that we use rcv_las,
95 * i.e., rcv_nxt at the last ack sent, instead of rcv_nxt, since that's the
96 * peer's reference when computing our receive window.
99 * seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
100 * however, is too strict when we have retransmits. Instead we just check that
101 * the seq is not beyond the right edge and that the end of the segment is not
102 * less than the left edge.
104 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
105 * use rcv_nxt in the right edge window test instead of rcv_las.
109 tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
111 return (seq_geq (end_seq, tc->rcv_las)
112 && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
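/* Note: the seq_* helpers compare sequence numbers using wrap-safe signed
 * 32-bit arithmetic, so the window test above remains correct across
 * sequence number wraparound. */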
116 * Parse TCP header options.
118 * @param th TCP header
119 * @param to TCP options data structure to be populated
120 * @return -1 if parsing failed
123 tcp_options_parse (tcp_header_t * th, tcp_options_t * to)
126 u8 opt_len, opts_len, kind;
130 opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
131 data = (const u8 *) (th + 1);
133 /* Zero out all flags but those set in SYN */
134 to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE);
136 for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
140 /* Get options length */
141 if (kind == TCP_OPTION_EOL)
143 else if (kind == TCP_OPTION_NOOP)
155 /* weird option length */
156 if (opt_len < 2 || opt_len > opts_len)
164 if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
166 to->flags |= TCP_OPTS_FLAG_MSS;
167 to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
170 case TCP_OPTION_WINDOW_SCALE:
171 if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
173 to->flags |= TCP_OPTS_FLAG_WSCALE;
174 to->wscale = data[2];
175 if (to->wscale > TCP_MAX_WND_SCALE)
177 clib_warning ("Illegal window scaling value: %d",
179 to->wscale = TCP_MAX_WND_SCALE;
183 case TCP_OPTION_TIMESTAMP:
184 if (opt_len == TCP_OPTION_LEN_TIMESTAMP)
186 to->flags |= TCP_OPTS_FLAG_TSTAMP;
187 to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
188 to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
191 case TCP_OPTION_SACK_PERMITTED:
192 if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
193 to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
195 case TCP_OPTION_SACK_BLOCK:
196 /* If SACK permitted was not advertised or a SYN, break */
197 if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
200 /* If too short or not correctly formatted, break */
201 if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
204 to->flags |= TCP_OPTS_FLAG_SACK;
205 to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
206 vec_reset_length (to->sacks);
207 for (j = 0; j < to->n_sack_blocks; j++)
209 b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
210 b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
211 vec_add1 (to->sacks, b);
215 /* Nothing to see here */
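/* Unknown or unsupported options are simply skipped: the enclosing loop
 * advances data by opt_len, which was read from the option's own length
 * byte. */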
223 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have
224 * timestamp to echo and it's less than tsval_recent, drop segment
225 * but still send an ACK in order to retain TCP's mechanism for detecting
226 * and recovering from half-open connections
228 * Or at least that's what the theory says. It seems that this might not work
229 * very well with packet reordering and fast retransmit. XXX
232 tcp_segment_check_paws (tcp_connection_t * tc)
234 return tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
235 && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
239 * Update tsval recent
242 tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
245 * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
246 * of an incoming segment:
247 * SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
248 * then the TSval from the segment is copied to TS.Recent;
249 * otherwise, the TSval is ignored.
251 if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
252 && seq_leq (tc->rcv_las, seq_end))
254 ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
255 tc->tsval_recent = tc->rcv_opts.tsval;
256 tc->tsval_recent_age = tcp_time_now ();
261 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
263 * It first verifies whether the segment has a wrapped sequence number (PAWS)
264 * and then does the processing associated with the first four steps (ignoring
265 * security and precedence): sequence number, rst bit and syn bit checks.
267 * @return 0 if the segment passes validation.
270 tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0,
271 vlib_buffer_t * b0, tcp_header_t * th0, u32 * next0)
273 if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
276 if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts)))
278 clib_warning ("options parse error");
282 if (tcp_segment_check_paws (tc0))
286 clib_warning ("paws failed\n%U", format_tcp_connection, tc0, 2);
287 clib_warning ("seq %u seq_end %u ack %u",
288 vnet_buffer (b0)->tcp.seq_number - tc0->irs,
289 vnet_buffer (b0)->tcp.seq_end - tc0->irs,
290 vnet_buffer (b0)->tcp.ack_number - tc0->iss);
292 TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
293 vnet_buffer (b0)->tcp.seq_end);
295 /* If the PAWS check failed but our stored tsval_recent is over 24 days
296 * old, invalidate tsval_recent. */
297 if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
300 /* Age isn't reset until we get a valid tsval (bsd inspired) */
301 tc0->tsval_recent = 0;
302 clib_warning ("paws failed - really old segment. REALLY?");
306 /* Drop after ack if not rst */
309 tcp_make_ack (tc0, b0);
310 *next0 = tcp_next_output (tc0->c_is_ip4);
311 TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0);
317 /* 1st: check sequence number */
318 if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
319 vnet_buffer (b0)->tcp.seq_end))
321 /* If our window is 0 and the packet is in sequence, let it pass
322 * through for ack processing. It should be dropped later.*/
323 if (tc0->rcv_wnd == 0
324 && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
326 /* TODO Should segment be tagged? */
330 /* If not RST, send dup ack */
333 tcp_make_ack (tc0, b0);
334 *next0 = tcp_next_output (tc0->c_is_ip4);
335 TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0);
341 /* 2nd: check the RST bit */
344 tcp_connection_reset (tc0);
348 /* 3rd: check security and precedence (skip) */
350 /* 4th: check the SYN bit */
353 /* TODO implement RFC 5961 */
354 if (tc0->state == TCP_STATE_SYN_RCVD)
356 tcp_make_synack (tc0, b0);
357 TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
361 tcp_make_ack (tc0, b0);
362 TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, tc0);
364 *next0 = tcp_next_output (tc0->c_is_ip4);
368 /* If segment in window, save timestamp */
369 tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
370 vnet_buffer (b0)->tcp.seq_end);
375 tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0)
377 /* SND.UNA =< SEG.ACK =< SND.NXT */
378 return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number)
379 && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt));
383 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
385 * Note that although in the original article srtt and rttvar are scaled
386 * to minimize round-off errors, here we don't. Instead, we rely on
387 * higher precision time measurements.
389 * TODO support microsecond (us) rtt resolution
392 tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
398 err = mrtt - tc->srtt;
400 /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
401 * The increase should be bounded */
402 tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
403 diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
404 tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
408 /* First measurement. */
410 tc->rttvar = mrtt >> 1;
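/* For reference, the (unscaled) RFC6298 update performed above is roughly:
 *   err    = mrtt - srtt
 *   srtt   <- srtt + err/8
 *   rttvar <- rttvar + (|err| - rttvar)/4
 * with srtt <- mrtt and rttvar <- mrtt/2 on the first measurement. */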
415 tcp_update_rto (tcp_connection_t * tc)
417 tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
418 tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
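/* i.e. rto = clamp (srtt + 4 * rttvar, TCP_RTO_MIN, TCP_RTO_MAX), the
 * RFC6298 rule with the lower bound enforced by TCP_RTO_MIN. */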
422 * Update RTT estimate and RTO timer
424 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
425 * timing. Middle boxes are known to fiddle with TCP options so we
426 * should give higher priority to ACK timing.
428 * This should be called only if previously sent bytes have been acked.
430 * @return 1 if we have a valid rtt estimate, 0 otherwise
433 tcp_update_rtt (tcp_connection_t * tc, u32 ack)
437 /* Karn's rule, part 1. Don't use retransmitted segments to estimate
438 * RTT because they're ambiguous. */
439 if (tcp_in_cong_recovery (tc) || tc->sack_sb.sacked_bytes)
442 if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
444 mrtt = tcp_time_now () - tc->rtt_ts;
446 /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
447 * snd_una, i.e., the left side of the send window:
448 * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
449 else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
451 mrtt = tcp_time_now () - tc->rcv_opts.tsecr;
454 /* Ignore dubious measurements */
455 if (mrtt == 0 || mrtt > TCP_RTT_MAX)
458 tcp_estimate_rtt (tc, mrtt);
462 /* Allow measuring of a new RTT */
465 /* If we got here something must've been ACKed so make sure boff is 0,
466 * even if mrtt is not valid, since we update the rto below */
474 * Dequeue bytes that have been acked and while at it update RTT estimates.
477 tcp_dequeue_acked (tcp_connection_t * tc, u32 ack)
479 /* Dequeue the newly ACKed and SACKed bytes */
480 stream_session_dequeue_drop (&tc->connection,
481 tc->bytes_acked + tc->sack_sb.snd_una_adv);
483 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
485 /* Update rtt and rto */
486 tcp_update_rtt (tc, ack);
488 /* If everything has been acked, stop the retransmit timer,
489 * otherwise update it. */
490 tcp_retransmit_timer_update (tc);
494 * Check if duplicate ack as per RFC5681 Sec. 2
497 tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
500 return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
501 && seq_gt (tc->snd_una_max, tc->snd_una)
502 && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
503 && (prev_snd_wnd == tc->snd_wnd));
507 * Checks if ack is a congestion control event.
510 tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
511 u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
513 /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
514 * defined to be 'duplicate' */
515 *is_dack = tc->sack_sb.last_sacked_bytes
516 || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
518 return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc));
522 scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
524 sack_scoreboard_hole_t *next, *prev;
526 if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
528 next = pool_elt_at_index (sb->holes, hole->next);
529 next->prev = hole->prev;
533 sb->tail = hole->prev;
536 if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
538 prev = pool_elt_at_index (sb->holes, hole->prev);
539 prev->next = hole->next;
543 sb->head = hole->next;
546 if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
547 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
549 /* Poison the entry */
551 memset (hole, 0xfe, sizeof (*hole));
553 pool_put (sb->holes, hole);
556 sack_scoreboard_hole_t *
557 scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
560 sack_scoreboard_hole_t *hole, *next, *prev;
563 pool_get (sb->holes, hole);
564 memset (hole, 0, sizeof (*hole));
568 hole_index = scoreboard_hole_index (sb, hole);
570 prev = scoreboard_get_hole (sb, prev_index);
573 hole->prev = prev_index;
574 hole->next = prev->next;
576 if ((next = scoreboard_next_hole (sb, hole)))
577 next->prev = hole_index;
579 sb->tail = hole_index;
581 prev->next = hole_index;
585 sb->head = hole_index;
586 hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
587 hole->next = TCP_INVALID_SACK_HOLE_INDEX;
594 scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
596 sack_scoreboard_hole_t *hole, *prev;
597 u32 bytes = 0, blks = 0;
600 sb->sacked_bytes = 0;
601 hole = scoreboard_last_hole (sb);
605 if (seq_gt (sb->high_sacked, hole->end))
607 bytes = sb->high_sacked - hole->end;
611 while ((prev = scoreboard_prev_hole (sb, hole))
612 && (bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
613 && blks < TCP_DUPACK_THRESHOLD))
615 bytes += hole->start - prev->end;
622 sb->lost_bytes += scoreboard_hole_bytes (hole);
625 hole = scoreboard_prev_hole (sb, hole);
627 bytes += prev->start - hole->end;
629 sb->sacked_bytes = bytes;
633 * Figure out the next hole to retransmit
635 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
637 sack_scoreboard_hole_t *
638 scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
639 sack_scoreboard_hole_t * start,
641 u8 * can_rescue, u8 * snd_limited)
643 sack_scoreboard_hole_t *hole = 0;
645 hole = start ? start : scoreboard_first_hole (sb);
646 while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
647 hole = scoreboard_next_hole (sb, hole);
649 /* Nothing, return */
652 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
656 /* Rule (1): if higher than rxt, less than high_sacked and lost */
657 if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
659 sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
663 /* Rule (2): output takes care of transmitting new data */
664 if (!have_sent_1_smss)
667 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
669 /* Rule (3): if hole not lost */
670 else if (seq_lt (hole->start, sb->high_sacked))
673 sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
675 /* Rule (4): if hole beyond high_sacked */
678 ASSERT (seq_geq (hole->start, sb->high_sacked));
681 /* HighRxt MUST NOT be updated */
686 if (hole && seq_lt (sb->high_rxt, hole->start))
687 sb->high_rxt = hole->start;
693 scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 seq)
695 sack_scoreboard_hole_t *hole;
696 hole = scoreboard_first_hole (sb);
699 seq = seq_gt (seq, hole->start) ? seq : hole->start;
700 sb->cur_rxt_hole = sb->head;
706 * Test that scoreboard is sane after recovery
708 * Returns 1 if scoreboard is empty or if the first hole is beyond snd_una.
712 tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
714 sack_scoreboard_hole_t *hole;
715 hole = scoreboard_first_hole (&tc->sack_sb);
716 return (!hole || seq_geq (hole->start, tc->snd_una));
720 tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
722 sack_scoreboard_t *sb = &tc->sack_sb;
723 sack_block_t *blk, tmp;
724 sack_scoreboard_hole_t *hole, *next_hole, *last_hole;
725 u32 blk_index = 0, old_sacked_bytes, hole_index;
728 sb->last_sacked_bytes = 0;
730 old_sacked_bytes = sb->sacked_bytes;
731 sb->last_bytes_delivered = 0;
733 if (!tcp_opts_sack (&tc->rcv_opts)
734 && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
737 /* Remove invalid blocks */
738 blk = tc->rcv_opts.sacks;
739 while (blk < vec_end (tc->rcv_opts.sacks))
741 if (seq_lt (blk->start, blk->end)
742 && seq_gt (blk->start, tc->snd_una)
743 && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_una_max))
748 vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
751 /* Add block for cumulative ack */
752 if (seq_gt (ack, tc->snd_una))
754 tmp.start = tc->snd_una;
756 vec_add1 (tc->rcv_opts.sacks, tmp);
759 if (vec_len (tc->rcv_opts.sacks) == 0)
762 tcp_scoreboard_trace_add (tc, ack);
764 /* Make sure blocks are ordered */
765 for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
766 for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
767 if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
769 tmp = tc->rcv_opts.sacks[i];
770 tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
771 tc->rcv_opts.sacks[j] = tmp;
774 if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
776 /* If no holes, insert the first that covers all outstanding bytes */
777 last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
778 tc->snd_una, tc->snd_una_max);
779 sb->tail = scoreboard_hole_index (sb, last_hole);
780 tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
781 sb->high_sacked = tmp.end;
785 /* If we have holes but snd_una_max is beyond the last hole, update the last hole's end */
787 tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
788 last_hole = scoreboard_last_hole (sb);
789 if (seq_gt (tc->snd_una_max, last_hole->end))
791 if (seq_geq (last_hole->start, sb->high_sacked))
793 last_hole->end = tc->snd_una_max;
795 /* New hole after high sacked block */
796 else if (seq_lt (sb->high_sacked, tc->snd_una_max))
798 scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
802 /* Keep track of max byte sacked for when the last hole is removed */
804 if (seq_gt (tmp.end, sb->high_sacked))
805 sb->high_sacked = tmp.end;
808 /* Walk the holes with the SACK blocks */
809 hole = pool_elt_at_index (sb->holes, sb->head);
810 while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
812 blk = &tc->rcv_opts.sacks[blk_index];
813 if (seq_leq (blk->start, hole->start))
815 /* Block covers hole. Remove hole */
816 if (seq_geq (blk->end, hole->end))
818 next_hole = scoreboard_next_hole (sb, hole);
820 /* Byte accounting: snd_una needs to be advanced */
825 if (seq_lt (ack, next_hole->start))
826 sb->snd_una_adv = next_hole->start - ack;
827 sb->last_bytes_delivered +=
828 next_hole->start - hole->end;
832 ASSERT (seq_geq (sb->high_sacked, ack));
833 sb->snd_una_adv = sb->high_sacked - ack;
834 sb->last_bytes_delivered += sb->high_sacked - hole->end;
838 scoreboard_remove_hole (sb, hole);
841 /* Partial 'head' overlap */
844 if (seq_gt (blk->end, hole->start))
846 hole->start = blk->end;
853 /* Hole must be split */
854 if (seq_lt (blk->end, hole->end))
856 hole_index = scoreboard_hole_index (sb, hole);
857 next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
860 /* Pool might've moved */
861 hole = scoreboard_get_hole (sb, hole_index);
862 hole->end = blk->start;
864 ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
866 else if (seq_lt (blk->start, hole->end))
868 hole->end = blk->start;
870 hole = scoreboard_next_hole (sb, hole);
874 scoreboard_update_bytes (tc, sb);
875 sb->last_sacked_bytes = sb->sacked_bytes
876 - (old_sacked_bytes - sb->last_bytes_delivered);
877 ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes);
878 ASSERT (sb->sacked_bytes == 0
879 || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack));
880 ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max
881 - seq_max (tc->snd_una, ack));
882 ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
883 || sb->holes[sb->head].start == ack + sb->snd_una_adv);
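/* Example: with snd_una = 1000, snd_una_max = 5000 and a single SACK block
 * [2000, 3000), the initial hole [1000, 5000) is split into [1000, 2000) and
 * [3000, 5000), and sacked_bytes accounts for the 1000 newly sacked bytes. */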
887 * Try to update snd_wnd based on feedback received from peer.
889 * If successful, and the new window is 'effectively' 0, activate the persist timer.
893 tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
895 /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
896 * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
897 if (seq_lt (tc->snd_wl1, seq)
898 || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
900 tc->snd_wnd = snd_wnd;
903 TCP_EVT_DBG (TCP_EVT_SND_WND, tc);
905 if (tc->snd_wnd < tc->snd_mss)
907 /* Set persist timer if not set and we just got 0 wnd */
908 if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
909 && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
910 tcp_persist_timer_set (tc);
914 tcp_persist_timer_reset (tc);
915 if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
925 tcp_cc_init_congestion (tcp_connection_t * tc)
927 tcp_fastrecovery_on (tc);
928 tc->snd_congestion = tc->snd_una_max;
929 tc->cc_algo->congestion (tc);
930 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
934 tcp_cc_recovery_exit (tcp_connection_t * tc)
940 tc->snd_nxt = tc->snd_una_max;
941 tcp_recovery_off (tc);
942 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
946 tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
948 tc->cc_algo->recovered (tc);
949 tc->snd_rxt_bytes = 0;
951 tc->snd_nxt = tc->snd_una_max;
952 tcp_fastrecovery_off (tc);
953 tcp_fastrecovery_1_smss_off (tc);
954 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
958 tcp_cc_congestion_undo (tcp_connection_t * tc)
960 tc->cwnd = tc->prev_cwnd;
961 tc->ssthresh = tc->prev_ssthresh;
962 tc->snd_nxt = tc->snd_una_max;
964 if (tcp_in_recovery (tc))
965 tcp_cc_recovery_exit (tc);
966 ASSERT (tc->rto_boff == 0);
967 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 5);
968 /* TODO extend for fastrecovery */
972 tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
974 return (tcp_in_recovery (tc) && tc->rto_boff == 1
976 && tcp_opts_tstamp (&tc->rcv_opts)
977 && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
981 tcp_cc_recover (tcp_connection_t * tc)
983 ASSERT (tcp_in_cong_recovery (tc));
984 if (tcp_cc_is_spurious_retransmit (tc))
986 tcp_cc_congestion_undo (tc);
990 if (tcp_in_recovery (tc))
991 tcp_cc_recovery_exit (tc);
992 else if (tcp_in_fastrecovery (tc))
993 tcp_cc_fastrecovery_exit (tc);
995 ASSERT (tc->rto_boff == 0);
996 ASSERT (!tcp_in_cong_recovery (tc));
997 ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
1002 tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b)
1004 ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));
1006 /* Congestion avoidance */
1007 tc->cc_algo->rcv_ack (tc);
1008 tc->tsecr_last_ack = tc->rcv_opts.tsecr;
1010 /* If a cumulative ack, make sure dupacks is 0 */
1011 tc->rcv_dupacks = 0;
1013 /* When the dupack count hits the threshold we only enter fast retransmit if
1014 * the cumulative ack covers more than snd_congestion. Should snd_una
1015 * wrap, this test may fail under otherwise valid circumstances.
1016 * Therefore, proactively update snd_congestion when a wrap is detected. */
1018 (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
1019 && seq_gt (tc->snd_congestion, tc->snd_una)))
1020 tc->snd_congestion = tc->snd_una - 1;
1024 tcp_should_fastrecover_sack (tcp_connection_t * tc)
1026 return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
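/* Example: assuming TCP_DUPACK_THRESHOLD is 3 and snd_mss is 1460, this
 * triggers once more than 2 * 1460 = 2920 bytes have been sacked, mirroring
 * the classic three-dupack entry point. */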
1030 tcp_should_fastrecover (tcp_connection_t * tc)
1032 return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
1033 || tcp_should_fastrecover_sack (tc));
1037 * One function to rule them all ... and in the darkness bind them
1040 tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
1045 * Duplicate ACK. Check if we should enter fast recovery, or, if already in
1046 * it, account for the bytes that left the network.
1050 ASSERT (tc->snd_una != tc->snd_una_max
1051 || tc->sack_sb.last_sacked_bytes);
1055 if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
1057 ASSERT (tcp_in_fastrecovery (tc));
1058 /* Pure duplicate ack. If some data got acked, it's handled below */
1059 tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
1062 else if (tcp_should_fastrecover (tc))
1064 /* Things are already bad */
1065 if (tcp_in_cong_recovery (tc))
1067 tc->rcv_dupacks = 0;
1068 goto partial_ack_test;
1071 /* If either of the two conditions below holds, reset dupacks because
1072 * we're probably after a timeout (RFC6582 heuristics).
1073 * If the cumulative ack does not cover more than the congestion threshold,
1075 * 1) The following doesn't hold: the congestion window is greater
1076 * than SMSS bytes and the difference between highest_ack
1077 * and prev_highest_ack is at most 4*SMSS bytes
1078 * 2) The echoed timestamp in the last non-dup ack does not equal the stored tsecr_last_ack
1081 if (seq_leq (tc->snd_una, tc->snd_congestion)
1082 && ((!(tc->cwnd > tc->snd_mss
1083 && tc->bytes_acked <= 4 * tc->snd_mss))
1084 || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
1086 tc->rcv_dupacks = 0;
1090 tcp_cc_init_congestion (tc);
1091 tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
1093 /* The first segment MUST be retransmitted */
1094 tcp_retransmit_first_unacked (tc);
1096 /* Post retransmit update cwnd to ssthresh and account for the
1097 * three segments that have left the network and should've been
1098 * buffered at the receiver XXX */
1099 tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss;
1100 ASSERT (tc->cwnd >= tc->snd_mss);
1102 /* If cwnd allows, send more data */
1103 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1105 scoreboard_init_high_rxt (&tc->sack_sb,
1106 tc->snd_una + tc->snd_mss);
1107 tcp_fast_retransmit_sack (tc);
1111 tcp_fast_retransmit_no_sack (tc);
1116 else if (!tc->bytes_acked
1117 || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
1119 tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
1128 if (!tc->bytes_acked)
1133 * Legitimate ACK. 1) See if we can exit recovery
1135 /* XXX limit this only to first partial ack? */
1136 tcp_retransmit_timer_update (tc);
1138 if (seq_geq (tc->snd_una, tc->snd_congestion))
1140 /* If the retransmit was spurious, return; we've already updated everything */
1141 if (tcp_cc_recover (tc))
1143 tc->tsecr_last_ack = tc->rcv_opts.tsecr;
1147 tc->snd_nxt = tc->snd_una_max;
1149 /* Treat as congestion avoidance ack */
1150 tc->cc_algo->rcv_ack (tc);
1151 tc->tsecr_last_ack = tc->rcv_opts.tsecr;
1156 * Legitimate ACK. 2) If PARTIAL ACK try to retransmit
1158 TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);
1160 /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
1161 * reset dupacks to 0 */
1162 tc->rcv_dupacks = 0;
1164 tcp_retransmit_first_unacked (tc);
1166 /* Post RTO timeout don't try anything fancy */
1167 if (tcp_in_recovery (tc))
1170 /* Remove retransmitted bytes that have been delivered */
1171 ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv
1172 >= tc->sack_sb.last_bytes_delivered
1173 || (tc->flags & TCP_CONN_FINSNT));
1175 if (seq_lt (tc->snd_una, tc->sack_sb.high_rxt))
1177 /* If we have sacks and we haven't gotten an ack beyond high_rxt,
1178 * remove sacked bytes delivered */
1179 rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv
1180 - tc->sack_sb.last_bytes_delivered;
1181 ASSERT (tc->snd_rxt_bytes >= rxt_delivered);
1182 tc->snd_rxt_bytes -= rxt_delivered;
1186 /* Either all retransmitted holes have been acked, or we're
1187 * "in the blind" and retransmitting segment by segment */
1188 tc->snd_rxt_bytes = 0;
1191 tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);
1194 * Since this was a partial ack, try to retransmit some more data
1196 tcp_fast_retransmit (tc);
1200 tcp_cc_init (tcp_connection_t * tc)
1202 tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO);
1203 tc->cc_algo->init (tc);
1207 * Process incoming ACK
1210 tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b,
1211 tcp_header_t * th, u32 * next, u32 * error)
1213 u32 prev_snd_wnd, prev_snd_una;
1216 TCP_EVT_DBG (TCP_EVT_CC_STAT, tc);
1218 /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
1219 if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
1221 /* If we have outstanding data and this is within the window, accept it,
1222 * probably retransmit has timed out. Otherwise ACK the segment and then drop it */
1224 if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
1226 tcp_make_ack (tc, b);
1227 *next = tcp_next_output (tc->c_is_ip4);
1228 *error = TCP_ERROR_ACK_INVALID;
1229 TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
1230 vnet_buffer (b)->tcp.ack_number);
1234 TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2,
1235 vnet_buffer (b)->tcp.ack_number);
1237 tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
1238 *error = TCP_ERROR_ACK_FUTURE;
1241 /* If old ACK, probably it's an old dupack */
1242 if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
1244 *error = TCP_ERROR_ACK_OLD;
1245 TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
1246 vnet_buffer (b)->tcp.ack_number);
1247 if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
1249 TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc);
1250 tcp_cc_handle_event (tc, 1);
1252 /* Don't drop yet */
1257 * Looks okay, process feedback
1260 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1261 tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
1263 prev_snd_wnd = tc->snd_wnd;
1264 prev_snd_una = tc->snd_una;
1265 tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
1266 vnet_buffer (b)->tcp.ack_number,
1267 clib_net_to_host_u16 (th->window) << tc->snd_wscale);
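	  /* th->window carries the unscaled 16-bit value; e.g. with
	   * snd_wscale of 7 a raw window of 512 corresponds to
	   * 512 << 7 = 65536 bytes. */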
1268 tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
1269 tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
1270 tcp_validate_txf_size (tc, tc->bytes_acked);
1272 if (tc->bytes_acked)
1273 tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number);
1275 TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);
1278 * Check if we have a congestion event
1281 if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
1283 tcp_cc_handle_event (tc, is_dack);
1284 if (!tcp_in_cong_recovery (tc))
1286 *error = TCP_ERROR_ACK_DUP;
1287 TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
1288 return vnet_buffer (b)->tcp.data_len ? 0 : -1;
1292 * Update congestion control (slow start/congestion avoidance)
1294 tcp_cc_update (tc, b);
1300 tcp_sack_vector_is_sane (sack_block_t * sacks)
1303 for (i = 1; i < vec_len (sacks); i++)
1305 if (sacks[i - 1].end == sacks[i].start)
1312 * Build SACK list as per RFC2018.
1314 * Makes sure the first block contains the segment that generated the current
1315 * ACK and the following ones are the ones most recently reported in SACK blocks.
1318 * @param tc TCP connection for which the SACK list is updated
1319 * @param start Start sequence number of the newest SACK block
1320 * @param end End sequence of the newest SACK block
1323 tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
1325 sack_block_t *new_list = 0, *block = 0;
1328 /* If the first segment is ooo add it to the list. Last write might've moved
1329 * rcv_nxt over the first segment. */
1330 if (seq_lt (tc->rcv_nxt, start))
1332 vec_add2 (new_list, block, 1);
1333 block->start = start;
1337 /* Find the blocks still worth keeping. */
1338 for (i = 0; i < vec_len (tc->snd_sacks); i++)
1340 /* Discard if rcv_nxt advanced beyond current block */
1341 if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
1344 /* Merge or drop if segment overlapped by the new segment */
1345 if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
1346 && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
1348 if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
1349 new_list[0].start = tc->snd_sacks[i].start;
1350 if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
1351 new_list[0].end = tc->snd_sacks[i].end;
1355 /* Save to new SACK list if we have space. */
1356 if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
1358 vec_add1 (new_list, tc->snd_sacks[i]);
1362 clib_warning ("sack discarded");
1366 ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);
1368 /* Replace old vector with new one */
1369 vec_free (tc->snd_sacks);
1370 tc->snd_sacks = new_list;
1372 /* Segments should not 'touch' */
1373 ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
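/* Example: with rcv_nxt = 1000, an out-of-order segment [2000, 2500) becomes
 * the first reported block; previously reported blocks not yet covered by
 * rcv_nxt follow it, and anything at or below rcv_nxt is discarded. */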
1376 /** Enqueue data for delivery to application */
1378 tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
1381 int written, error = TCP_ERROR_ENQUEUED;
1383 ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1385 /* Pure ACK. Update rcv_nxt and be done. */
1386 if (PREDICT_FALSE (data_len == 0))
1388 return TCP_ERROR_PURE_ACK;
1391 written = session_enqueue_stream_connection (&tc->connection, b, 0,
1392 1 /* queue event */ , 1);
1394 TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written);
1396 /* Update rcv_nxt */
1397 if (PREDICT_TRUE (written == data_len))
1399 tc->rcv_nxt += written;
1401 /* If more data written than expected, account for out-of-order bytes. */
1402 else if (written > data_len)
1404 tc->rcv_nxt += written;
1406 /* Send ACK confirming the update */
1407 tc->flags |= TCP_CONN_SNDACK;
1409 else if (written > 0)
1411 /* We've written something but FIFO is probably full now */
1412 tc->rcv_nxt += written;
1414 /* Depending on how fast the app is, not all remaining buffers in the burst
1415 * will be enqueued. Inform the peer */
1416 tc->flags |= TCP_CONN_SNDACK;
1418 error = TCP_ERROR_PARTIALLY_ENQUEUED;
1422 tc->flags |= TCP_CONN_SNDACK;
1423 return TCP_ERROR_FIFO_FULL;
1426 /* Update SACK list if need be */
1427 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1429 /* Remove SACK blocks that have been delivered */
1430 tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
1436 /** Enqueue out-of-order data */
1438 tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
1441 stream_session_t *s0;
1444 ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1446 /* Pure ACK. Do nothing */
1447 if (PREDICT_FALSE (data_len == 0))
1449 return TCP_ERROR_PURE_ACK;
1452 /* Enqueue out-of-order data with relative offset */
1453 rv = session_enqueue_stream_connection (&tc->connection, b,
1454 vnet_buffer (b)->tcp.seq_number -
1455 tc->rcv_nxt, 0 /* queue event */ ,
1458 /* Nothing written */
1461 TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0);
1462 return TCP_ERROR_FIFO_FULL;
1465 TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);
1467 /* Update SACK list if in use */
1468 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1470 ooo_segment_t *newest;
1473 s0 = session_get (tc->c_s_index, tc->c_thread_index);
1475 /* Get the newest segment from the fifo */
1476 newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo);
1479 offset = ooo_segment_offset (s0->server_rx_fifo, newest);
1480 ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
1481 start = tc->rcv_nxt + offset;
1482 end = start + ooo_segment_length (s0->server_rx_fifo, newest);
1483 tcp_update_sack_list (tc, start, end);
1484 svm_fifo_newest_ooo_segment_reset (s0->server_rx_fifo);
1488 return TCP_ERROR_ENQUEUED;
1492 * Check whether the ACK can be delayed. Returns 1 (true) if the ACK can be
1493 * delayed, 0 if it should be sent immediately.
1496 tcp_can_delack (tcp_connection_t * tc)
1498 /* Send ack if ... */
1500 /* just sent a rcv wnd 0 */
1501 || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0
1502 /* constrained to send ack */
1503 || (tc->flags & TCP_CONN_SNDACK) != 0
1504 /* we're almost out of tx wnd */
1505 || tcp_available_snd_space (tc) < 4 * tc->snd_mss)
1512 tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
1514 u32 discard, first = b->current_length;
1515 vlib_main_t *vm = vlib_get_main ();
1517 /* Handle multi-buffer segments */
1518 if (n_bytes_to_drop > b->current_length)
1520 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1524 discard = clib_min (n_bytes_to_drop, b->current_length);
1525 vlib_buffer_advance (b, discard);
1526 b = vlib_get_buffer (vm, b->next_buffer);
1527 n_bytes_to_drop -= discard;
1529 while (n_bytes_to_drop);
1530 if (n_bytes_to_drop > first)
1531 b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
1534 vlib_buffer_advance (b, n_bytes_to_drop);
1535 vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
1540 tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b,
1543 u32 error = 0, n_bytes_to_drop, n_data_bytes;
1545 vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
1546 n_data_bytes = vnet_buffer (b)->tcp.data_len;
1547 ASSERT (n_data_bytes);
1549 /* Handle out-of-order data */
1550 if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
1552 /* Old sequence numbers allowed through because they overlapped the rx window */
1554 if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
1556 error = TCP_ERROR_SEGMENT_OLD;
1557 *next0 = TCP_NEXT_DROP;
1559 /* Completely in the past (possible retransmit) */
1560 if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
1562 /* Ack retransmissions since we may not have any data to send */
1563 tcp_make_ack (tc, b);
1564 *next0 = tcp_next_output (tc->c_is_ip4);
1568 /* Chop off the bytes in the past */
1569 n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
1570 n_data_bytes -= n_bytes_to_drop;
1571 vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
1572 if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
1578 error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
1580 /* N.B. Should not filter burst of dupacks. Two issues: 1) dupacks open
1581 * cwnd on the remote peer when congested 2) acks leaving should have the
1582 * latest rcv_wnd since the burst may have eaten up all of it, so only the
1583 * old ones could be filtered.
1586 /* RFC2581: Send DUPACK for fast retransmit */
1587 tcp_make_ack (tc, b);
1588 *next0 = tcp_next_output (tc->c_is_ip4);
1590 /* Mark as DUPACK. We may filter these in output if
1591 * the burst fills the holes. */
1593 vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK;
1595 TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc);
1601 /* In order data, enqueue. Fifo figures out by itself if any out-of-order
1602 * segments can be enqueued after fifo tail offset changes. */
1603 error = tcp_session_enqueue_data (tc, b, n_data_bytes);
1605 /* Check if ACK can be delayed */
1606 if (tcp_can_delack (tc))
1608 if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
1609 tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME);
1613 *next0 = tcp_next_output (tc->c_is_ip4);
1614 tcp_make_ack (tc, b);
1622 tcp_header_t tcp_header;
1623 tcp_connection_t tcp_connection;
1627 format_tcp_rx_trace (u8 * s, va_list * args)
1629 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1630 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1631 tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1632 u32 indent = format_get_indent (s);
1634 s = format (s, "%U\n%U%U",
1635 format_tcp_header, &t->tcp_header, 128,
1636 format_white_space, indent,
1637 format_tcp_connection, &t->tcp_connection, 1);
1643 format_tcp_rx_trace_short (u8 * s, va_list * args)
1645 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1646 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1647 tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1649 s = format (s, "%d -> %d (%U)",
1650 clib_net_to_host_u16 (t->tcp_header.src_port),
1651 clib_net_to_host_u16 (t->tcp_header.dst_port), format_tcp_state,
1652 t->tcp_connection.state);
1658 tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
1659 tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
1663 clib_memcpy (&t0->tcp_connection, tc0, sizeof (t0->tcp_connection));
1667 th0 = tcp_buffer_hdr (b0);
1669 clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
1673 tcp_node_inc_counter (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
1674 u8 is_ip4, u8 evt, u8 val)
1676 if (PREDICT_TRUE (!val))
1680 vlib_node_increment_counter (vm, tcp4_node, evt, val);
1682 vlib_node_increment_counter (vm, tcp6_node, evt, val);
1686 tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1687 vlib_frame_t * from_frame, int is_ip4)
1689 u32 n_left_from, next_index, *from, *to_next;
1690 u32 my_thread_index = vm->thread_index, errors = 0;
1691 tcp_main_t *tm = vnet_get_tcp_main ();
1694 from = vlib_frame_vector_args (from_frame);
1695 n_left_from = from_frame->n_vectors;
1697 next_index = node->cached_next_index;
1699 while (n_left_from > 0)
1703 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1704 while (n_left_from > 0 && n_left_to_next > 0)
1708 tcp_header_t *th0 = 0;
1709 tcp_connection_t *tc0;
1710 u32 next0 = TCP_ESTABLISHED_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;
1717 n_left_to_next -= 1;
1719 b0 = vlib_get_buffer (vm, bi0);
1720 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
1723 if (PREDICT_FALSE (tc0 == 0))
1725 error0 = TCP_ERROR_INVALID_CONNECTION;
1729 th0 = tcp_buffer_hdr (b0);
1730 /* N.B. buffer is rewritten if segment is ooo. Thus, th0 becomes a
1731 * dangling reference. */
1732 is_fin = tcp_is_fin (th0);
1734 /* SYNs, FINs and data consume sequence numbers */
1735 vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
1736 + tcp_is_syn (th0) + is_fin + vnet_buffer (b0)->tcp.data_len;
1738 /* TODO header prediction fast path */
1740 /* 1-4: check SEQ, RST, SYN */
1741 if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, th0, &next0)))
1743 error0 = TCP_ERROR_SEGMENT_INVALID;
1744 TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0,
1745 vnet_buffer (b0)->tcp.seq_number,
1746 vnet_buffer (b0)->tcp.seq_end);
1750 /* 5: check the ACK field */
1751 if (tcp_rcv_ack (tc0, b0, th0, &next0, &error0))
1754 /* 6: check the URG bit TODO */
1756 /* 7: process the segment text */
1757 if (vnet_buffer (b0)->tcp.data_len)
1758 error0 = tcp_segment_rcv (tm, tc0, b0, &next0);
1760 /* 8: check the FIN bit */
1761 if (PREDICT_FALSE (is_fin))
1763 /* Enter CLOSE-WAIT and notify session. To avoid lingering
1764 * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
1765 /* Account for the FIN if nothing else was received */
1766 if (vnet_buffer (b0)->tcp.data_len == 0)
1768 tcp_make_ack (tc0, b0);
1769 next0 = tcp_next_output (tc0->c_is_ip4);
1770 tc0->state = TCP_STATE_CLOSE_WAIT;
1771 stream_session_disconnect_notify (&tc0->connection);
1772 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
1773 TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
1777 b0->error = node->errors[error0];
1778 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1780 tcp_rx_trace_t *t0 =
1781 vlib_add_trace (vm, node, b0, sizeof (*t0));
1782 tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
1785 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1786 n_left_to_next, bi0, next0);
1789 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1792 errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
1794 tcp_node_inc_counter (vm, tcp4_established_node.index,
1795 tcp6_established_node.index, is_ip4,
1796 TCP_ERROR_EVENT_FIFO_FULL, errors);
1797 tcp_flush_frame_to_output (vm, my_thread_index, is_ip4);
1799 return from_frame->n_vectors;
1803 tcp4_established (vlib_main_t * vm, vlib_node_runtime_t * node,
1804 vlib_frame_t * from_frame)
1806 return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
1810 tcp6_established (vlib_main_t * vm, vlib_node_runtime_t * node,
1811 vlib_frame_t * from_frame)
1813 return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
1817 VLIB_REGISTER_NODE (tcp4_established_node) =
1819 .function = tcp4_established,
1820 .name = "tcp4-established",
1821 /* Takes a vector of packets. */
1822 .vector_size = sizeof (u32),
1823 .n_errors = TCP_N_ERROR,
1824 .error_strings = tcp_error_strings,
1825 .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
1828 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
1829 foreach_tcp_state_next
1832 .format_trace = format_tcp_rx_trace_short,
1836 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_established_node, tcp4_established);
1839 VLIB_REGISTER_NODE (tcp6_established_node) =
1841 .function = tcp6_established,
1842 .name = "tcp6-established",
1843 /* Takes a vector of packets. */
1844 .vector_size = sizeof (u32),
1845 .n_errors = TCP_N_ERROR,
1846 .error_strings = tcp_error_strings,
1847 .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
1850 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
1851 foreach_tcp_state_next
1854 .format_trace = format_tcp_rx_trace_short,
1859 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_established_node, tcp6_established);
1861 vlib_node_registration_t tcp4_syn_sent_node;
1862 vlib_node_registration_t tcp6_syn_sent_node;
1865 tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
1867 transport_connection_t *tmp = 0;
1873 u8 is_valid = (tc->c_lcl_port == hdr->dst_port
1874 && (tc->state == TCP_STATE_LISTEN
1875 || tc->c_rmt_port == hdr->src_port));
1879 handle = session_lookup_half_open_handle (&tc->connection);
1880 tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
1881 tc->c_proto, tc->c_is_ip4);
1885 if (tmp->lcl_port == hdr->dst_port
1886 && tmp->rmt_port == hdr->src_port)
1888 TCP_DBG ("half-open is valid!");
1896 * Lookup transport connection
1898 static tcp_connection_t *
1899 tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
1903 transport_connection_t *tconn;
1904 tcp_connection_t *tc;
1908 ip4 = vlib_buffer_get_current (b);
1909 tcp = ip4_next_header (ip4);
1910 tconn = session_lookup_connection_wt4 (fib_index,
1915 TRANSPORT_PROTO_TCP,
1917 tc = tcp_get_connection_from_transport (tconn);
1918 ASSERT (tcp_lookup_is_valid (tc, tcp));
1923 ip6 = vlib_buffer_get_current (b);
1924 tcp = ip6_next_header (ip6);
1925 tconn = session_lookup_connection_wt6 (fib_index,
1930 TRANSPORT_PROTO_TCP,
1932 tc = tcp_get_connection_from_transport (tconn);
1933 ASSERT (tcp_lookup_is_valid (tc, tcp));
1939 tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
1940 vlib_frame_t * from_frame, int is_ip4)
1942 tcp_main_t *tm = vnet_get_tcp_main ();
1943 u32 n_left_from, next_index, *from, *to_next;
1944 u32 my_thread_index = vm->thread_index, errors = 0;
1946 from = vlib_frame_vector_args (from_frame);
1947 n_left_from = from_frame->n_vectors;
1949 next_index = node->cached_next_index;
1951 while (n_left_from > 0)
1955 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1957 while (n_left_from > 0 && n_left_to_next > 0)
1959 u32 bi0, ack0, seq0;
1962 tcp_header_t *tcp0 = 0;
1963 tcp_connection_t *tc0;
1964 tcp_connection_t *new_tc0;
1965 u32 next0 = TCP_SYN_SENT_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;
1972 n_left_to_next -= 1;
1974 b0 = vlib_get_buffer (vm, bi0);
1976 tcp_half_open_connection_get (vnet_buffer (b0)->
1977 tcp.connection_index);
1978 if (PREDICT_FALSE (tc0 == 0))
1980 error0 = TCP_ERROR_INVALID_CONNECTION;
1984 /* Half-open completed recently but the connection wasn't removed
1985 * yet by the owning thread */
1986 if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
1988 /* Make sure the connection actually exists */
1989 ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
1990 my_thread_index, is_ip4));
1994 ack0 = vnet_buffer (b0)->tcp.ack_number;
1995 seq0 = vnet_buffer (b0)->tcp.seq_number;
1996 tcp0 = tcp_buffer_hdr (b0);
1998 /* Crude check to see if the connection handle does not match
1999 * the packet. Probably connection just switched to established */
2000 if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
2001 || tcp0->src_port != tc0->c_rmt_port))
2005 (!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0)))
2008 /* SYNs, FINs and data consume sequence numbers */
2009 vnet_buffer (b0)->tcp.seq_end = seq0 + tcp_is_syn (tcp0)
2010 + tcp_is_fin (tcp0) + vnet_buffer (b0)->tcp.data_len;
2013 * 1. check the ACK bit
2017 * If the ACK bit is set
2018 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
2019 * the RST bit is set, if so drop the segment and return)
2020 * <SEQ=SEG.ACK><CTL=RST>
2021 * and discard the segment. Return.
2022 * If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
2026 if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
2028 clib_warning ("ack not in rcv wnd");
2029 if (!tcp_rst (tcp0))
2030 tcp_send_reset_w_pkt (tc0, b0, is_ip4);
2034 /* Make sure ACK is valid */
2035 if (seq_gt (tc0->snd_una, ack0))
2037 clib_warning ("ack invalid");
2043 * 2. check the RST bit
2048 /* If ACK is acceptable, signal client that peer is not
2049 * willing to accept connection and drop connection*/
2051 tcp_connection_reset (tc0);
2056 * 3. check the security and precedence (skipped)
2060 * 4. check the SYN bit
2063 /* No SYN flag. Drop. */
2064 if (!tcp_syn (tcp0))
2066 clib_warning ("not synack");
2071 if (tcp_options_parse (tcp0, &tc0->rcv_opts))
2073 clib_warning ("options parse fail");
2077 /* Valid SYN or SYN-ACK. Move connection from half-open pool to
2078 * current thread pool. */
2079 pool_get (tm->connections[my_thread_index], new_tc0);
2080 clib_memcpy (new_tc0, tc0, sizeof (*new_tc0));
2081 new_tc0->c_c_index = new_tc0 - tm->connections[my_thread_index];
2082 new_tc0->c_thread_index = my_thread_index;
2083 new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
2084 new_tc0->irs = seq0;
2085 new_tc0->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID;
2086 new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] =
2087 TCP_TIMER_HANDLE_INVALID;
2089 /* If this is not the owning thread, wait for the syn retransmit to
2090 * expire and clean up then */
2091 if (tcp_half_open_connection_cleanup (tc0))
2092 tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
2094 if (tcp_opts_tstamp (&new_tc0->rcv_opts))
2096 new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
2097 new_tc0->tsval_recent_age = tcp_time_now ();
2100 if (tcp_opts_wscale (&new_tc0->rcv_opts))
2101 new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
2103 /* RFC1323: SYN and SYN-ACK wnd not scaled */
2104 new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window);
2105 new_tc0->snd_wl1 = seq0;
2106 new_tc0->snd_wl2 = ack0;
2108 tcp_connection_init_vars (new_tc0);
2110 /* SYN-ACK: See if we can switch to ESTABLISHED state */
2111 if (PREDICT_TRUE (tcp_ack (tcp0)))
2113 /* Our SYN is ACKed: we have iss < ack = snd_una */
2115 /* TODO Dequeue acknowledged segments if we support Fast Open */
2116 new_tc0->snd_una = ack0;
2117 new_tc0->state = TCP_STATE_ESTABLISHED;
2119 /* Make sure las is initialized for the wnd computation */
2120 new_tc0->rcv_las = new_tc0->rcv_nxt;
2122 /* Notify app that we have a connection. If the session layer can't
2123 * allocate a session, send a reset */
2124 if (session_stream_connect_notify (&new_tc0->connection, 0))
2126 clib_warning ("connect notify fail");
2127 tcp_send_reset_w_pkt (new_tc0, b0, is_ip4);
2128 tcp_connection_cleanup (new_tc0);
2132 /* Make sure after data segment processing ACK is sent */
2133 new_tc0->flags |= TCP_CONN_SNDACK;
2135 /* Update rtt with the syn-ack sample */
2136 tcp_update_rtt (new_tc0, vnet_buffer (b0)->tcp.ack_number);
2137 TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
2139 /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
2142 new_tc0->state = TCP_STATE_SYN_RCVD;
2144 /* Notify app that we have connection */
2145 if (session_stream_connect_notify (&new_tc0->connection, 0))
2147 tcp_connection_cleanup (new_tc0);
2148 tcp_send_reset_w_pkt (tc0, b0, is_ip4);
2149 TCP_EVT_DBG (TCP_EVT_RST_SENT, tc0);
2154 tcp_init_snd_vars (tc0);
2155 tcp_make_synack (new_tc0, b0);
2156 next0 = tcp_next_output (is_ip4);
2161 /* Read data, if any */
2162 if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
2165 error0 = tcp_segment_rcv (tm, new_tc0, b0, &next0);
2166 if (error0 == TCP_ERROR_PURE_ACK)
2167 error0 = TCP_ERROR_SYN_ACKS_RCVD;
2171 tcp_make_ack (new_tc0, b0);
2172 next0 = tcp_next_output (new_tc0->c_is_ip4);
2177 b0->error = error0 ? node->errors[error0] : 0;
2179 ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
2181 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2182 clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
2183 clib_memcpy (&t0->tcp_connection, tc0,
2184 sizeof (t0->tcp_connection));
2187 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2188 n_left_to_next, bi0, next0);
2191 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2194 errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2196 tcp_node_inc_counter (vm, tcp4_syn_sent_node.index,
2197 tcp6_syn_sent_node.index, is_ip4,
2198 TCP_ERROR_EVENT_FIFO_FULL, errors);
2199 return from_frame->n_vectors;
2203 tcp4_syn_sent (vlib_main_t * vm, vlib_node_runtime_t * node,
2204 vlib_frame_t * from_frame)
2206 return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2210 tcp6_syn_sent_rcv (vlib_main_t * vm, vlib_node_runtime_t * node,
2211 vlib_frame_t * from_frame)
2213 return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2217 VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
2219 .function = tcp4_syn_sent,
2220 .name = "tcp4-syn-sent",
2221 /* Takes a vector of packets. */
2222 .vector_size = sizeof (u32),
2223 .n_errors = TCP_N_ERROR,
2224 .error_strings = tcp_error_strings,
2225 .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2228 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2229 foreach_tcp_state_next
2232 .format_trace = format_tcp_rx_trace_short,
2236 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_syn_sent_node, tcp4_syn_sent);
2239 VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
2241 .function = tcp6_syn_sent_rcv,
2242 .name = "tcp6-syn-sent",
2243 /* Takes a vector of packets. */
2244 .vector_size = sizeof (u32),
2245 .n_errors = TCP_N_ERROR,
2246 .error_strings = tcp_error_strings,
2247 .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2250 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2251 foreach_tcp_state_next
2254 .format_trace = format_tcp_rx_trace_short,
2258 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv);
2260 vlib_node_registration_t tcp4_rcv_process_node;
2261 vlib_node_registration_t tcp6_rcv_process_node;
2264 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
2265 * as per RFC793 p. 64
2268 tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2269 vlib_frame_t * from_frame, int is_ip4)
2271 tcp_main_t *tm = vnet_get_tcp_main ();
2272 u32 n_left_from, next_index, *from, *to_next;
2273 u32 my_thread_index = vm->thread_index, errors = 0;
2275 from = vlib_frame_vector_args (from_frame);
2276 n_left_from = from_frame->n_vectors;
2278 next_index = node->cached_next_index;
2280 while (n_left_from > 0)
2284 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2286 while (n_left_from > 0 && n_left_to_next > 0)
2290 tcp_header_t *tcp0 = 0;
2291 tcp_connection_t *tc0;
2292 u32 next0 = TCP_RCV_PROCESS_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;
2300 n_left_to_next -= 1;
2302 b0 = vlib_get_buffer (vm, bi0);
2303 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2305 if (PREDICT_FALSE (tc0 == 0))
2307 error0 = TCP_ERROR_INVALID_CONNECTION;
2311 tcp0 = tcp_buffer_hdr (b0);
2312 is_fin0 = tcp_is_fin (tcp0);
2314 /* SYNs, FINs and data consume sequence numbers */
2315 vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
2316 + tcp_is_syn (tcp0) + is_fin0 + vnet_buffer (b0)->tcp.data_len;
2320 tcp_connection_t *tmp;
2322 tcp_lookup_connection (tc0->c_fib_index, b0, my_thread_index,
2324 if (tmp->state != tc0->state)
2326 clib_warning ("state changed");
2333 * Special treatment for CLOSED
2337 case TCP_STATE_CLOSED:
2343 * For all other states (except LISTEN)
2346 /* 1-4: check SEQ, RST, SYN */
2347 if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, tcp0,
2350 error0 = TCP_ERROR_SEGMENT_INVALID;
2354 /* 5: check the ACK field */
2357 case TCP_STATE_SYN_RCVD:
2359 * If the segment acknowledgment is not acceptable, form a reset segment,
2361 * <SEQ=SEG.ACK><CTL=RST>
2364 if (!tcp_rcv_ack_is_acceptable (tc0, b0))
2366 TCP_DBG ("connection not accepted");
2367 tcp_send_reset_w_pkt (tc0, b0, is_ip4);
2371 /* Update rtt and rto */
2372 tcp_update_rtt (tc0, vnet_buffer (b0)->tcp.ack_number);
2374 /* Switch state to ESTABLISHED */
2375 tc0->state = TCP_STATE_ESTABLISHED;
2377 /* Initialize session variables */
2378 tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2379 tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2380 << tc0->rcv_opts.wscale;
2381 tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
2382 tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
2383 stream_session_accept_notify (&tc0->connection);
2385 /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
2386 tcp_retransmit_timer_reset (tc0);
2387 tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH);
2388 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2390 case TCP_STATE_ESTABLISHED:
2391 /* We can still get packets in the ESTABLISHED state here because
2392 * they were enqueued before the state change */
2393 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2397 case TCP_STATE_FIN_WAIT_1:
2398 /* In addition to the processing for the ESTABLISHED state, if
2399 * our FIN is now acknowledged then enter FIN-WAIT-2 and
2400 * continue processing in that state. */
2401 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2404 /* Still have to send the FIN */
2405 if (tc0->flags & TCP_CONN_FINPNDG)
2407 /* TX fifo finally drained */
2408 if (!stream_session_tx_fifo_max_dequeue (&tc0->connection))
2411 /* If FIN is ACKed */
2412 else if (tc0->snd_una == tc0->snd_una_max)
2414 tc0->state = TCP_STATE_FIN_WAIT_2;
2415 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2417 /* Stop all retransmit timers because we have nothing more
2418 * to send. Enable waitclose though because we're willing to
2419 * wait for peer's FIN but not indefinitely. */
2420 tcp_connection_timers_reset (tc0);
2421 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
2424 case TCP_STATE_FIN_WAIT_2:
2425 /* In addition to the processing for the ESTABLISHED state, if
2426 * the retransmission queue is empty, the user's CLOSE can be
2427 * acknowledged ("ok") but do not delete the TCB. */
2428 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2431 case TCP_STATE_CLOSE_WAIT:
2432 /* Do the same processing as for the ESTABLISHED state. */
2433 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2436 case TCP_STATE_CLOSING:
2437 /* In addition to the processing for the ESTABLISHED state, if
2438 * the ACK acknowledges our FIN then enter the TIME-WAIT state,
2439 * otherwise ignore the segment. */
2440 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2443 tc0->state = TCP_STATE_TIME_WAIT;
2444 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2445 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2449 case TCP_STATE_LAST_ACK:
2450 /* The only thing that [should] arrive in this state is an
2451 * acknowledgment of our FIN. If our FIN is now acknowledged,
2452 * delete the TCB, enter the CLOSED state, and return. */
2454 if (!tcp_rcv_ack_is_acceptable (tc0, b0))
2456 error0 = TCP_ERROR_ACK_INVALID;
2460 tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2461 /* Apparently our ACK for the peer's FIN was lost */
2462 if (is_fin0 && tc0->snd_una != tc0->snd_una_max)
2468 tc0->state = TCP_STATE_CLOSED;
2469 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2470 tcp_connection_timers_reset (tc0);
2472 /* Don't delete the connection/session yet. Instead, wait a
2473 * reasonable amount of time until the pipes are cleared. In
2474 * particular, this makes sure that we won't have dead sessions
2475 * when processing events on the tx path */
2476 tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
2481 case TCP_STATE_TIME_WAIT:
2482 /* The only thing that can arrive in this state is a
2483 * retransmission of the remote FIN. Acknowledge it, and restart
2484 * the 2 MSL timeout. */
2486 if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
2489 tcp_make_ack (tc0, b0);
2490 next0 = tcp_next_output (is_ip4);
2491 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2500 /* 6: check the URG bit TODO */
2502 /* 7: process the segment text */
2505 case TCP_STATE_ESTABLISHED:
2506 case TCP_STATE_FIN_WAIT_1:
2507 case TCP_STATE_FIN_WAIT_2:
2508 if (vnet_buffer (b0)->tcp.data_len)
2509 error0 = tcp_segment_rcv (tm, tc0, b0, &next0);
2513 case TCP_STATE_CLOSE_WAIT:
2514 case TCP_STATE_CLOSING:
2515 case TCP_STATE_LAST_ACK:
2516 case TCP_STATE_TIME_WAIT:
2517 /* This should not occur, since a FIN has been received from the
2518 * remote side. Ignore the segment text. */
2522 /* 8: check the FIN bit */
2528 case TCP_STATE_ESTABLISHED:
2529 case TCP_STATE_SYN_RCVD:
2530 /* Send FIN-ACK, notify the app and enter CLOSE-WAIT */
2531 tcp_connection_timers_reset (tc0);
2532 tcp_make_fin (tc0, b0);
2534 next0 = tcp_next_output (tc0->c_is_ip4);
2535 stream_session_disconnect_notify (&tc0->connection);
2536 tc0->state = TCP_STATE_CLOSE_WAIT;
2537 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2539 case TCP_STATE_CLOSE_WAIT:
2540 case TCP_STATE_CLOSING:
2541 case TCP_STATE_LAST_ACK:
2544 case TCP_STATE_FIN_WAIT_1:
2545 tc0->state = TCP_STATE_CLOSING;
2546 tcp_make_ack (tc0, b0);
2547 next0 = tcp_next_output (is_ip4);
2548 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2549 /* Wait for ACK but not forever */
2550 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
2552 case TCP_STATE_FIN_WAIT_2:
2553 /* Got FIN, send ACK! Be more aggressive with resource cleanup */
2554 tc0->state = TCP_STATE_TIME_WAIT;
2555 tcp_connection_timers_reset (tc0);
2556 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2557 tcp_make_ack (tc0, b0);
2558 next0 = tcp_next_output (is_ip4);
2559 TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
2561 case TCP_STATE_TIME_WAIT:
2562 /* Remain in the TIME-WAIT state. Restart the time-wait timeout. */
2565 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
2568 TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
2571 b0->error = error0 ? node->errors[error0] : 0;
2573 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2575 tcp_rx_trace_t *t0 =
2576 vlib_add_trace (vm, node, b0, sizeof (*t0));
2577 tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
2580 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2581 n_left_to_next, bi0, next0);
2584 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
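/* Flush any rx enqueue events accumulated for the session layer; enqueues
 * that failed are counted as event-fifo-full errors below */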
2587 errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2589 tcp_node_inc_counter (vm, is_ip4, tcp4_rcv_process_node.index,
2590 tcp6_rcv_process_node.index,
2591 TCP_ERROR_EVENT_FIFO_FULL, errors);
2593 return from_frame->n_vectors;
2597 tcp4_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
2598 vlib_frame_t * from_frame)
2600 return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2604 tcp6_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
2605 vlib_frame_t * from_frame)
2607 return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2611 VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
2613 .function = tcp4_rcv_process,
2614 .name = "tcp4-rcv-process",
2615 /* Takes a vector of packets. */
2616 .vector_size = sizeof (u32),
2617 .n_errors = TCP_N_ERROR,
2618 .error_strings = tcp_error_strings,
2619 .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2622 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2623 foreach_tcp_state_next
2626 .format_trace = format_tcp_rx_trace_short,
2630 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_rcv_process_node, tcp4_rcv_process);
2633 VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
2635 .function = tcp6_rcv_process,
2636 .name = "tcp6-rcv-process",
2637 /* Takes a vector of packets. */
2638 .vector_size = sizeof (u32),
2639 .n_errors = TCP_N_ERROR,
2640 .error_strings = tcp_error_strings,
2641 .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
2644 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
2645 foreach_tcp_state_next
2648 .format_trace = format_tcp_rx_trace_short,
2652 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_rcv_process_node, tcp6_rcv_process);
2654 vlib_node_registration_t tcp4_listen_node;
2655 vlib_node_registration_t tcp6_listen_node;
2658 * LISTEN state processing as per RFC 793 p. 65
2661 tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2662 vlib_frame_t * from_frame, int is_ip4)
2664 u32 n_left_from, next_index, *from, *to_next;
2665 u32 my_thread_index = vm->thread_index;
2667 from = vlib_frame_vector_args (from_frame);
2668 n_left_from = from_frame->n_vectors;
2670 next_index = node->cached_next_index;
2672 while (n_left_from > 0)
2676 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2678 while (n_left_from > 0 && n_left_to_next > 0)
2683 tcp_header_t *th0 = 0;
2684 tcp_connection_t *lc0;
2687 tcp_connection_t *child0;
2688 u32 error0 = TCP_ERROR_SYNS_RCVD, next0 = TCP_LISTEN_NEXT_DROP;
2695 n_left_to_next -= 1;
2697 b0 = vlib_get_buffer (vm, bi0);
2698 lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);
2702 ip40 = vlib_buffer_get_current (b0);
2703 th0 = ip4_next_header (ip40);
2707 ip60 = vlib_buffer_get_current (b0);
2708 th0 = ip6_next_header (ip60);
2711 /* Create a child session. For SYN-flood protection use a filter */
2713 /* 1. first check for an RST: handled in dispatch */
2714 /* if (tcp_rst (th0))
2717 /* 2. second check for an ACK: handled in dispatch */
2718 /* if (tcp_ack (th0))
2720 tcp_send_reset (b0, is_ip4);
2724 /* 3. check for a SYN (did that already) */
2726 /* Make sure a child connection hasn't already been created for this SYN */
2728 tcp_lookup_connection (lc0->c_fib_index, b0, my_thread_index,
2730 if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
2732 error0 = TCP_ERROR_CREATE_EXISTS;
2736 /* Create child session and send SYN-ACK */
2737 child0 = tcp_connection_new (my_thread_index);
2738 child0->c_lcl_port = th0->dst_port;
2739 child0->c_rmt_port = th0->src_port;
2740 child0->c_is_ip4 = is_ip4;
2741 child0->state = TCP_STATE_SYN_RCVD;
2745 child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
2746 child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
2750 clib_memcpy (&child0->c_lcl_ip6, &ip60->dst_address,
2751 sizeof (ip6_address_t));
2752 clib_memcpy (&child0->c_rmt_ip6, &ip60->src_address,
2753 sizeof (ip6_address_t));
2756 if (tcp_options_parse (th0, &child0->rcv_opts))
2758 clib_warning ("options parse fail");
2762 child0->irs = vnet_buffer (b0)->tcp.seq_number;
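/* The peer's SYN consumes one sequence number, so start receiving at irs + 1 */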
2763 child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
2764 child0->rcv_las = child0->rcv_nxt;
2766 /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
2767 * segments are used to initialize PAWS. */
2768 if (tcp_opts_tstamp (&child0->rcv_opts))
2770 child0->tsval_recent = child0->rcv_opts.tsval;
2771 child0->tsval_recent_age = tcp_time_now ();
2774 if (tcp_opts_wscale (&child0->rcv_opts))
2775 child0->snd_wscale = child0->rcv_opts.wscale;
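/* From here on the peer's advertised window is interpreted using its
 * window scale, if one was negotiated */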
2777 child0->snd_wnd = clib_net_to_host_u16 (th0->window)
2778 << child0->snd_wscale;
2779 child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
2780 child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
2782 tcp_connection_init_vars (child0);
2783 TCP_EVT_DBG (TCP_EVT_SYN_RCVD, child0, 1);
2785 if (stream_session_accept (&child0->connection, lc0->c_s_index,
2788 clib_warning ("session accept fail");
2789 tcp_connection_cleanup (child0);
2790 error0 = TCP_ERROR_CREATE_SESSION_FAIL;
2794 /* Reuse the buffer to build the SYN-ACK and send it */
2795 tcp_make_synack (child0, b0);
2796 next0 = tcp_next_output (is_ip4);
2797 tcp_timer_set (child0, TCP_TIMER_ESTABLISH, TCP_SYN_RCVD_TIME);
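/* The establish timer armed above limits how long we keep the half-open
 * child connection around if the handshake never completes */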
2800 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2802 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2803 clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
2804 clib_memcpy (&t0->tcp_connection, lc0,
2805 sizeof (t0->tcp_connection));
2808 b0->error = node->errors[error0];
2810 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2811 n_left_to_next, bi0, next0);
2814 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2816 return from_frame->n_vectors;
2820 tcp4_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
2821 vlib_frame_t * from_frame)
2823 return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2827 tcp6_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
2828 vlib_frame_t * from_frame)
2830 return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2834 VLIB_REGISTER_NODE (tcp4_listen_node) =
2836 .function = tcp4_listen,
2837 .name = "tcp4-listen",
2838 /* Takes a vector of packets. */
2839 .vector_size = sizeof (u32),
2840 .n_errors = TCP_N_ERROR,
2841 .error_strings = tcp_error_strings,
2842 .n_next_nodes = TCP_LISTEN_N_NEXT,
2845 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
2846 foreach_tcp_state_next
2849 .format_trace = format_tcp_rx_trace_short,
2853 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_listen_node, tcp4_listen);
2856 VLIB_REGISTER_NODE (tcp6_listen_node) =
2858 .function = tcp6_listen,
2859 .name = "tcp6-listen",
2860 /* Takes a vector of packets. */
2861 .vector_size = sizeof (u32),
2862 .n_errors = TCP_N_ERROR,
2863 .error_strings = tcp_error_strings,
2864 .n_next_nodes = TCP_LISTEN_N_NEXT,
2867 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
2868 foreach_tcp_state_next
2871 .format_trace = format_tcp_rx_trace_short,
2875 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_listen_node, tcp6_listen);
2877 vlib_node_registration_t tcp4_input_node;
2878 vlib_node_registration_t tcp6_input_node;
2880 typedef enum _tcp_input_next
2882 TCP_INPUT_NEXT_DROP,
2883 TCP_INPUT_NEXT_LISTEN,
2884 TCP_INPUT_NEXT_RCV_PROCESS,
2885 TCP_INPUT_NEXT_SYN_SENT,
2886 TCP_INPUT_NEXT_ESTABLISHED,
2887 TCP_INPUT_NEXT_RESET,
2888 TCP_INPUT_NEXT_PUNT,
2892 #define foreach_tcp4_input_next \
2893 _ (DROP, "error-drop") \
2894 _ (LISTEN, "tcp4-listen") \
2895 _ (RCV_PROCESS, "tcp4-rcv-process") \
2896 _ (SYN_SENT, "tcp4-syn-sent") \
2897 _ (ESTABLISHED, "tcp4-established") \
2898 _ (RESET, "tcp4-reset") \
2899 _ (PUNT, "error-punt")
2901 #define foreach_tcp6_input_next \
2902 _ (DROP, "error-drop") \
2903 _ (LISTEN, "tcp6-listen") \
2904 _ (RCV_PROCESS, "tcp6-rcv-process") \
2905 _ (SYN_SENT, "tcp6-syn-sent") \
2906 _ (ESTABLISHED, "tcp6-established") \
2907 _ (RESET, "tcp6-reset") \
2908 _ (PUNT, "error-punt")
2910 #define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
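/* Only these flag bits select a dispatch table entry; the remaining bits
 * (PSH, URG, ECE, CWR) are masked off before the lookup, i.e.
 *   flags0 = tcp0->flags & filter_flags;
 *   next0 = tm->dispatch_table[tc0->state][flags0].next; */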
2913 tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2914 vlib_frame_t * from_frame, int is_ip4)
2916 u32 n_left_from, next_index, *from, *to_next;
2917 u32 my_thread_index = vm->thread_index;
2918 tcp_main_t *tm = vnet_get_tcp_main ();
2920 from = vlib_frame_vector_args (from_frame);
2921 n_left_from = from_frame->n_vectors;
2922 next_index = node->cached_next_index;
2923 tcp_set_time_now (my_thread_index);
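/* Update the per-thread timestamp once per frame rather than per packet */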
2925 while (n_left_from > 0)
2929 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2931 while (n_left_from > 0 && n_left_to_next > 0)
2933 int n_advance_bytes0, n_data_bytes0;
2934 u32 bi0, fib_index0;
2936 tcp_header_t *tcp0 = 0;
2937 tcp_connection_t *tc0;
2938 transport_connection_t *tconn;
2941 u32 error0 = TCP_ERROR_NO_LISTENER, next0 = TCP_INPUT_NEXT_DROP;
2949 n_left_to_next -= 1;
2951 b0 = vlib_get_buffer (vm, bi0);
2952 vnet_buffer (b0)->tcp.flags = 0;
2953 fib_index0 = vnet_buffer (b0)->ip.fib_index;
2955 /* Checksum was already computed by ipX-local; no need to compute it again */
2959 ip40 = vlib_buffer_get_current (b0);
2960 tcp0 = ip4_next_header (ip40);
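/* Offset to the TCP payload and its length, derived from the IPv4 and
 * TCP header lengths */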
2961 n_advance_bytes0 = (ip4_header_bytes (ip40)
2962 + tcp_header_bytes (tcp0));
2963 n_data_bytes0 = clib_net_to_host_u16 (ip40->length)
2965 tconn = session_lookup_connection_wt4 (fib_index0,
2970 TRANSPORT_PROTO_TCP,
2972 tc0 = tcp_get_connection_from_transport (tconn);
2973 ASSERT (tcp_lookup_is_valid (tc0, tcp0));
2977 ip60 = vlib_buffer_get_current (b0);
2978 tcp0 = ip6_next_header (ip60);
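/* No IPv6 extension header parsing here; the TCP header is assumed to
 * immediately follow the fixed IPv6 header */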
2979 n_advance_bytes0 = tcp_header_bytes (tcp0);
2980 n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length)
2982 n_advance_bytes0 += sizeof (ip60[0]);
2983 tconn = session_lookup_connection_wt6 (fib_index0,
2988 TRANSPORT_PROTO_TCP,
2990 tc0 = tcp_get_connection_from_transport (tconn);
2991 ASSERT (tcp_lookup_is_valid (tc0, tcp0));
2995 if (PREDICT_FALSE (n_advance_bytes0 < 0))
2997 error0 = TCP_ERROR_LENGTH;
3001 /* Session exists */
3002 if (PREDICT_TRUE (0 != tc0))
3004 /* Save connection index */
3005 vnet_buffer (b0)->tcp.connection_index = tc0->c_c_index;
3006 vnet_buffer (b0)->tcp.seq_number =
3007 clib_net_to_host_u32 (tcp0->seq_number);
3008 vnet_buffer (b0)->tcp.ack_number =
3009 clib_net_to_host_u32 (tcp0->ack_number);
3011 vnet_buffer (b0)->tcp.hdr_offset = (u8 *) tcp0
3012 - (u8 *) vlib_buffer_get_current (b0);
3013 vnet_buffer (b0)->tcp.data_offset = n_advance_bytes0;
3014 vnet_buffer (b0)->tcp.data_len = n_data_bytes0;
3016 flags0 = tcp0->flags & filter_flags;
3017 next0 = tm->dispatch_table[tc0->state][flags0].next;
3018 error0 = tm->dispatch_table[tc0->state][flags0].error;
3020 if (PREDICT_FALSE (error0 == TCP_ERROR_DISPATCH
3021 || next0 == TCP_INPUT_NEXT_RESET))
3023 /* Overload the buffer's tcp flags field to store the connection state */
3024 tcp_state_t state0 = tc0->state;
3025 vnet_buffer (b0)->tcp.flags = tc0->state;
3027 if (error0 == TCP_ERROR_DISPATCH)
3028 clib_warning ("disp error state %U flags %U",
3029 format_tcp_state, state0, format_tcp_flags,
3035 if ((is_ip4 && tm->punt_unknown4) ||
3036 (!is_ip4 && tm->punt_unknown6))
3038 next0 = TCP_INPUT_NEXT_PUNT;
3039 error0 = TCP_ERROR_PUNT;
3044 next0 = TCP_INPUT_NEXT_RESET;
3045 error0 = TCP_ERROR_NO_LISTENER;
3050 b0->error = error0 ? node->errors[error0] : 0;
3052 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3054 tcp_rx_trace_t *t0 =
3055 vlib_add_trace (vm, node, b0, sizeof (*t0));
3056 tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
3058 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
3059 n_left_to_next, bi0, next0);
3062 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
3065 return from_frame->n_vectors;
3069 tcp4_input (vlib_main_t * vm, vlib_node_runtime_t * node,
3070 vlib_frame_t * from_frame)
3072 return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ );
3076 tcp6_input (vlib_main_t * vm, vlib_node_runtime_t * node,
3077 vlib_frame_t * from_frame)
3079 return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ );
3083 VLIB_REGISTER_NODE (tcp4_input_node) =
3085 .function = tcp4_input,
3086 .name = "tcp4-input",
3087 /* Takes a vector of packets. */
3088 .vector_size = sizeof (u32),
3089 .n_errors = TCP_N_ERROR,
3090 .error_strings = tcp_error_strings,
3091 .n_next_nodes = TCP_INPUT_N_NEXT,
3094 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3095 foreach_tcp4_input_next
3098 .format_buffer = format_tcp_header,
3099 .format_trace = format_tcp_rx_trace,
3103 VLIB_NODE_FUNCTION_MULTIARCH (tcp4_input_node, tcp4_input);
3106 VLIB_REGISTER_NODE (tcp6_input_node) =
3108 .function = tcp6_input,
3109 .name = "tcp6-input",
3110 /* Takes a vector of packets. */
3111 .vector_size = sizeof (u32),
3112 .n_errors = TCP_N_ERROR,
3113 .error_strings = tcp_error_strings,
3114 .n_next_nodes = TCP_INPUT_N_NEXT,
3117 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3118 foreach_tcp6_input_next
3121 .format_buffer = format_tcp_header,
3122 .format_trace = format_tcp_rx_trace,
3126 VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input);
3129 tcp_dispatch_table_init (tcp_main_t * tm)
3132 for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
3133 for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
3135 tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
3136 tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
3139 #define _(t,f,n,e) \
3141 tm->dispatch_table[TCP_STATE_##t][f].next = (n); \
3142 tm->dispatch_table[TCP_STATE_##t][f].error = (e); \
3145 /* SYNs for new connections -> tcp-listen. */
3146 _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3147 _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
3148 _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_NONE);
3149 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
3151 /* ACK for a SYN-ACK -> tcp-rcv-process. */
3152 _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3153 _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3154 _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3155 /* SYN-ACK for a SYN */
3156 _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3158 _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3159 _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3160 _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3162 /* ACK for an established connection -> tcp-established. */
3163 _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3164 /* FIN for an established connection -> tcp-established. */
3165 _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3166 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3168 _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3169 _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3171 _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3172 _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3174 /* ACK or FIN-ACK to our FIN */
3175 _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3176 _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
3178 /* FIN in reply to our FIN from the other side */
3179 _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3180 _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3181 /* FIN confirming that the peer (app) has closed */
3182 _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3183 _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3184 _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3186 _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3187 _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3189 _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3190 _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3191 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3193 _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3194 _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3195 _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3197 _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3198 _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3199 _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
3200 _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
3201 _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3202 TCP_ERROR_CONNECTION_CLOSED);
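/* All other (state, flags) combinations keep the defaults set above:
 * drop the packet and count a dispatch error */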
3207 tcp_input_init (vlib_main_t * vm)
3209 clib_error_t *error = 0;
3210 tcp_main_t *tm = vnet_get_tcp_main ();
3212 if ((error = vlib_call_init_function (vm, tcp_init)))
3215 /* Initialize dispatch table. */
3216 tcp_dispatch_table_init (tm);
3221 VLIB_INIT_FUNCTION (tcp_input_init);
3224 * fd.io coding-style-patch-verification: ON
3227 * eval: (c-set-style "gnu")