/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/sparse_vec.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>

static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};

/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next                  \
  _ (DROP4, "ip4-drop")                         \
  _ (DROP6, "ip6-drop")                         \
  _ (TCP4_OUTPUT, "tcp4-output")                \
  _ (TCP6_OUTPUT, "tcp6-output")

typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;

typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;

typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_SYN_SENT_N_NEXT,
} tcp_syn_sent_next_t;

typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_LISTEN_N_NEXT,
} tcp_listen_next_t;

/* Generic, state independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_STATE_N_NEXT,
} tcp_state_next_t;

#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT      \
                                        : TCP_NEXT_TCP6_OUTPUT)

#define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4              \
                                      : TCP_NEXT_DROP6)

vlib_node_registration_t tcp4_established_node;
vlib_node_registration_t tcp6_established_node;
/**
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 *      Length  Window
 *      ------- -------  -------------------------------------------
 *      0       0        SEG.SEQ = RCV.NXT
 *      0       >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *      >0      0        not acceptable
 *      >0      >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                       or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately consists in checking if the segment falls within the
 * window. The one important difference compared to RFC793 is that we use
 * rcv_las, or the rcv_nxt at last ack sent, instead of rcv_nxt since that's
 * the peer's reference when computing our receive window.
 *
 * This:
 *  seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * use rcv_nxt in the right edge window test instead of rcv_las.
 */
always_inline u8
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
{
  return (seq_geq (end_seq, tc->rcv_las)
          && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
}
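
/* Illustrative only: a worked example of the acceptance test above, with
 * hypothetical values. Assume rcv_las = 1000, rcv_nxt = 1100 and
 * rcv_wnd = 500, so the right edge is 1100 + 500 = 1600:
 *
 *  - seq = 900,  end_seq = 1050: accepted. The segment starts before the
 *    left edge but its end covers new data (1050 >= 1000), which is
 *    exactly the retransmit overlap the relaxed check is meant to allow.
 *  - seq = 1700, end_seq = 1800: rejected, seq is beyond the right edge.
 *  - seq = 800,  end_seq = 900:  rejected, entirely in the past
 *    (end_seq < rcv_las).
 */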
/**
 * Parse TCP header options.
 *
 * @param th TCP header
 * @param to TCP options data structure to be populated
 * @return -1 if parsing failed
 */
static int
tcp_options_parse (tcp_header_t * th, tcp_options_t * to)
{
  const u8 *data;
  u8 opt_len, opts_len, kind;
  int j;
  sack_block_t b;

  opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
  data = (const u8 *) (th + 1);

  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      kind = data[0];

      /* Get options length */
      if (kind == TCP_OPTION_EOL)
        break;
      else if (kind == TCP_OPTION_NOOP)
        {
          opt_len = 1;
          continue;
        }
      else
        {
          /* broken options */
          if (opts_len < 2)
            return -1;
          opt_len = data[1];

          /* weird option length */
          if (opt_len < 2 || opt_len > opts_len)
            return -1;
        }

      /* Parse options */
      switch (kind)
        {
        case TCP_OPTION_MSS:
          if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_MSS;
              to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
            }
          break;
        case TCP_OPTION_WINDOW_SCALE:
          if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_WSCALE;
              to->wscale = data[2];
              if (to->wscale > TCP_MAX_WND_SCALE)
                {
                  clib_warning ("Illegal window scaling value: %d",
                                to->wscale);
                  to->wscale = TCP_MAX_WND_SCALE;
                }
            }
          break;
        case TCP_OPTION_TIMESTAMP:
          if (opt_len == TCP_OPTION_LEN_TIMESTAMP)
            {
              to->flags |= TCP_OPTS_FLAG_TSTAMP;
              to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
              to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
            }
          break;
        case TCP_OPTION_SACK_PERMITTED:
          if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
            to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
          break;
        case TCP_OPTION_SACK_BLOCK:
          /* If SACK permitted was not advertised or a SYN, break */
          if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
            break;

          /* If too short or not correctly formatted, break */
          if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
            break;

          to->flags |= TCP_OPTS_FLAG_SACK;
          to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
          vec_reset_length (to->sacks);
          for (j = 0; j < to->n_sack_blocks; j++)
            {
              b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
              b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
              vec_add1 (to->sacks, b);
            }
          break;
        default:
          /* Nothing to see here */
          continue;
        }
    }
  return 0;
}
/**
 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have
 * a timestamp to echo and it's less than tsval_recent, drop the segment
 * but still send an ACK in order to retain TCP's mechanism for detecting
 * and recovering from half-open connections.
 *
 * Or at least that's what the theory says. It seems that this might not work
 * very well with packet reordering and fast retransmit. XXX
 */
always_inline int
tcp_segment_check_paws (tcp_connection_t * tc)
{
  return tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
}
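
/* Illustrative only, with hypothetical tick values: assume
 * tsval_recent = 1000. An arriving segment carrying tsval = 900 fails PAWS
 * (timestamp_lt (900, 1000)) and is dropped after acking, whereas
 * tsval = 1000 or higher passes. The comparison uses wrap-safe
 * serial-number arithmetic, so wrapped tsvals still compare correctly as
 * long as the peer's timestamp clock doesn't stall for longer than
 * TCP_PAWS_IDLE (~24 days), the case handled in tcp_segment_validate ()
 * below. */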
/**
 * Update tsval recent
 */
always_inline void
tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
{
  /*
   * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
   * of an incoming segment:
   *    SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
   * then the TSval from the segment is copied to TS.Recent;
   * otherwise, the TSval is ignored.
   */
  if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
      && seq_leq (tc->rcv_las, seq_end))
    {
      ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
      tc->tsval_recent = tc->rcv_opts.tsval;
      tc->tsval_recent_age = tcp_time_now ();
    }
}

/**
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if the segment has a wrapped sequence number (PAWS) and
 * then does the processing associated with the first four steps (ignoring
 * security and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
 */
static int
tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0,
                      vlib_buffer_t * b0, tcp_header_t * th0,
                      u32 * next0, u32 * error0)
{
  /* We could get a burst of RSTs interleaved with acks */
  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
    {
      tcp_send_reset (tc0);
      *error0 = TCP_ERROR_CONNECTION_CLOSED;
      goto drop;
    }

  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
    {
      *error0 = TCP_ERROR_SEGMENT_INVALID;
      goto drop;
    }

  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts)))
    {
      clib_warning ("options parse error");
      *error0 = TCP_ERROR_OPTIONS;
      goto drop;
    }

  if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
    {
      *error0 = TCP_ERROR_PAWS;
      if (CLIB_DEBUG > 2)
        clib_warning ("paws failed\n%U", format_tcp_connection, tc0, 2);
      TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
                   vnet_buffer (b0)->tcp.seq_end);

      /* If it just so happens that a segment updates tsval_recent for a
       * segment over 24 days old, invalidate tsval_recent. */
      if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
                        tcp_time_now ()))
        {
          /* Age isn't reset until we get a valid tsval (bsd inspired) */
          tc0->tsval_recent = 0;
          clib_warning ("paws failed - really old segment. REALLY?");
        }
      else
        {
          /* Drop after ack if not rst */
          if (!tcp_rst (th0))
            {
              tcp_make_ack (tc0, b0);
              TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
              goto error;
            }
        }
      goto drop;
    }

  /* 1st: check sequence number */
  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
                               vnet_buffer (b0)->tcp.seq_end))
    {
      *error0 = TCP_ERROR_RCV_WND;
      /* If our window is 0 and the packet is in sequence, let it pass
       * through for ack processing. It should be dropped later. */
      if (!(tc0->rcv_wnd == 0
            && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number))
        {
          /* If not RST, send dup ack */
          if (!tcp_rst (th0))
            {
              tcp_make_ack (tc0, b0);
              TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
              goto error;
            }
          goto drop;
        }
    }

  /* 2nd: check the RST bit */
  if (PREDICT_FALSE (tcp_rst (th0)))
    {
      tcp_connection_reset (tc0);
      *error0 = TCP_ERROR_RST_RCVD;
      goto drop;
    }

  /* 3rd: check security and precedence (skip) */

  /* 4th: check the SYN bit */
  if (PREDICT_FALSE (tcp_syn (th0)))
    {
      /* TODO implement RFC 5961 */
      if (tc0->state == TCP_STATE_SYN_RCVD)
        {
          tcp_make_synack (tc0, b0);
          TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
        }
      else
        {
          tcp_make_ack (tc0, b0);
          TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, tc0);
        }
      goto error;
    }

  /* If segment in window, save timestamp */
  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
                        vnet_buffer (b0)->tcp.seq_end);
  return 0;

drop:
  *next0 = tcp_next_drop (tc0->c_is_ip4);
  return -1;
error:
  *next0 = tcp_next_output (tc0->c_is_ip4);
  return -1;
}
always_inline int
tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0)
{
  /* SND.UNA =< SEG.ACK =< SND.NXT */
  return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number)
          && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt));
}

/**
 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't. Instead, we rely on
 * better precision time measurements.
 *
 * TODO support us rtt resolution
 */
static void
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
{
  int err, diff;

  if (tc->srtt != 0)
    {
      err = mrtt - tc->srtt;

      /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
       * The increase should be bound */
      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
    }
  else
    {
      /* First measurement. */
      tc->srtt = mrtt;
      tc->rttvar = mrtt >> 1;
    }
}
void
tcp_update_rto (tcp_connection_t * tc)
{
  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
  tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
}
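
/* Illustrative only: a worked update with hypothetical values, following
 * the two functions above. Take srtt = 80, rttvar = 20 and a new
 * measurement mrtt = 120 (all in the same clock units):
 *
 *   err     = 120 - 80 = 40
 *   srtt   += err >> 3                -> 80 + 5 = 85
 *   rttvar += (|err| - rttvar) >> 2   -> 20 + (40 - 20) / 4 = 25
 *   rto     = srtt + 4 * rttvar       -> 85 + 100 = 185
 *
 * i.e., the standard RFC6298 gains g = 1/8 and h = 1/4, with the result
 * clamped to [TCP_RTO_MIN, TCP_RTO_MAX]. */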
/**
 * Update RTT estimate and RTO timer
 *
 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
 * timing. Middle boxes are known to fiddle with TCP options so we
 * should give higher priority to ACK timing.
 *
 * This should be called only if previously sent bytes have been acked.
 *
 * return 1 if valid rtt 0 otherwise
 */
static int
tcp_update_rtt (tcp_connection_t * tc, u32 ack)
{
  u32 mrtt = 0;

  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
   * RTT because they're ambiguous. */
  if (tcp_in_cong_recovery (tc) || tc->sack_sb.sacked_bytes)
    goto done;

  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
    {
      mrtt = tcp_time_now () - tc->rtt_ts;
    }
  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
   * snd_una, i.e., the left side of the send window:
   * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
    {
      mrtt = tcp_time_now () - tc->rcv_opts.tsecr;
    }

  /* Ignore dubious measurements */
  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
    goto done;

  tcp_estimate_rtt (tc, mrtt);

done:

  /* Allow measuring of a new RTT */
  tc->rtt_ts = 0;

  /* If we got here something must've been ACKed so make sure boff is 0,
   * even if mrtt is not valid, since we update the rto lower */
  tc->rto_boff = 0;
  tcp_update_rto (tc);

  return 0;
}

/**
 * Dequeue bytes that have been acked and while at it update RTT estimates.
 */
static void
tcp_dequeue_acked (tcp_connection_t * tc, u32 ack)
{
  /* Dequeue the newly ACKed and SACKed bytes */
  stream_session_dequeue_drop (&tc->connection,
                               tc->bytes_acked + tc->sack_sb.snd_una_adv);

  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);

  /* Update rtt and rto */
  tcp_update_rtt (tc, ack);

  /* If everything has been acked, stop retransmit timer
   * otherwise update. */
  tcp_retransmit_timer_update (tc);
}

/**
 * Check if duplicate ack as per RFC5681 Sec. 2
 */
static u8
tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
                   u32 prev_snd_una)
{
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
          && seq_gt (tc->snd_una_max, tc->snd_una)
          && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
          && (prev_snd_wnd == tc->snd_wnd));
}

/**
 * Checks if ack is a congestion control event.
 */
static u8
tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
                     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
{
  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
   * defined to be 'duplicate' */
  *is_dack = tc->sack_sb.last_sacked_bytes
    || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);

  return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc));
}

void
scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  sack_scoreboard_hole_t *next, *prev;

  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    {
      next = pool_elt_at_index (sb->holes, hole->next);
      next->prev = hole->prev;
    }
  else
    {
      sb->tail = hole->prev;
    }

  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    {
      prev = pool_elt_at_index (sb->holes, hole->prev);
      prev->next = hole->next;
    }
  else
    {
      sb->head = hole->next;
    }

  if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
    sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;

  /* Poison the entry */
  if (CLIB_DEBUG > 0)
    memset (hole, 0xfe, sizeof (*hole));

  pool_put (sb->holes, hole);
}

sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
                        u32 start, u32 end)
{
  sack_scoreboard_hole_t *hole, *next, *prev;
  u32 hole_index;

  pool_get (sb->holes, hole);
  memset (hole, 0, sizeof (*hole));

  hole->start = start;
  hole->end = end;
  hole_index = scoreboard_hole_index (sb, hole);

  prev = scoreboard_get_hole (sb, prev_index);
  if (prev)
    {
      hole->prev = prev_index;
      hole->next = prev->next;

      if ((next = scoreboard_next_hole (sb, hole)))
        next->prev = hole_index;
      else
        sb->tail = hole_index;

      prev->next = hole_index;
    }
  else
    {
      sb->head = hole_index;
      hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
      hole->next = TCP_INVALID_SACK_HOLE_INDEX;
    }

  return hole;
}

void
scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
{
  sack_scoreboard_hole_t *hole, *prev;
  u32 bytes = 0, blks = 0;

  sb->lost_bytes = 0;
  sb->sacked_bytes = 0;
  hole = scoreboard_last_hole (sb);
  if (!hole)
    return;

  if (seq_gt (sb->high_sacked, hole->end))
    {
      bytes = sb->high_sacked - hole->end;
      blks = 1;
    }

  while ((prev = scoreboard_prev_hole (sb, hole))
         && (bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
             && blks < TCP_DUPACK_THRESHOLD))
    {
      bytes += hole->start - prev->end;
      blks++;
      hole = prev;
    }

  while (hole)
    {
      sb->lost_bytes += scoreboard_hole_bytes (hole);
      hole->is_lost = 1;
      prev = hole;
      hole = scoreboard_prev_hole (sb, hole);
      if (hole)
        bytes += prev->start - hole->end;
    }
  sb->sacked_bytes = bytes;
}

/**
 * Figure out the next hole to retransmit
 *
 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
 */
sack_scoreboard_hole_t *
scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
                          sack_scoreboard_hole_t * start,
                          u8 have_sent_1_smss,
                          u8 * can_rescue, u8 * snd_limited)
{
  sack_scoreboard_hole_t *hole = 0;

  hole = start ? start : scoreboard_first_hole (sb);
  while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
    hole = scoreboard_next_hole (sb, hole);

  /* Nothing, return */
  if (!hole)
    {
      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
      return 0;
    }

  /* Rule (1): if higher than rxt, less than high_sacked and lost */
  if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
    {
      sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
    }
  else
    {
      /* Rule (2): output takes care of transmitting new data */
      if (!have_sent_1_smss)
        {
          hole = 0;
          sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
        }
      /* Rule (3): if hole not lost */
      else if (seq_lt (hole->start, sb->high_sacked))
        {
          *snd_limited = 0;
          sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
        }
      /* Rule (4): if hole beyond high_sacked */
      else
        {
          ASSERT (seq_geq (hole->start, sb->high_sacked));
          *snd_limited = 1;
          *can_rescue = 1;
          /* HighRxt MUST NOT be updated */
          return 0;
        }
    }

  if (hole && seq_lt (sb->high_rxt, hole->start))
    sb->high_rxt = hole->start;

  return hole;
}
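
/* Illustrative only: how the rules above play out on a hypothetical
 * scoreboard with holes [100, 200) marked lost and [300, 400) not lost,
 * high_sacked = 450, high_rxt = 100:
 *
 *  - Rule (1) fires for [100, 200): it is lost and starts below
 *    high_sacked, so it is retransmitted first.
 *  - Once it is covered, [300, 400) is not lost, so rule (2) defers to
 *    the output path until at least 1 SMSS of new data has been sent,
 *    after which rule (3) allows retransmitting it (snd_limited = 0).
 *  - A hole at or above high_sacked would hit rule (4): only a rescue
 *    retransmit is allowed and high_rxt must not advance. */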
void
scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 seq)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (sb);
  if (hole)
    {
      seq = seq_gt (seq, hole->start) ? seq : hole->start;
      sb->cur_rxt_hole = sb->head;
    }
  sb->high_rxt = seq;
}

/**
 * Test that scoreboard is sane after recovery
 *
 * Returns 1 if scoreboard is empty or if first hole beyond
 * snd_una.
 */
static u8
tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (&tc->sack_sb);
  return (!hole || seq_geq (hole->start, tc->snd_una));
}

void
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
{
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_block_t *blk, tmp;
  sack_scoreboard_hole_t *hole, *next_hole, *last_hole;
  u32 blk_index = 0, old_sacked_bytes, hole_index;
  int i, j;

  sb->last_sacked_bytes = 0;
  sb->snd_una_adv = 0;
  old_sacked_bytes = sb->sacked_bytes;
  sb->last_bytes_delivered = 0;

  if (!tcp_opts_sack (&tc->rcv_opts)
      && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    return;

  /* Remove invalid blocks */
  blk = tc->rcv_opts.sacks;
  while (blk < vec_end (tc->rcv_opts.sacks))
    {
      if (seq_lt (blk->start, blk->end)
          && seq_gt (blk->start, tc->snd_una)
          && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_una_max))
        {
          blk++;
          continue;
        }
      vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
    }

  /* Add block for cumulative ack */
  if (seq_gt (ack, tc->snd_una))
    {
      tmp.start = tc->snd_una;
      tmp.end = ack;
      vec_add1 (tc->rcv_opts.sacks, tmp);
    }

  if (vec_len (tc->rcv_opts.sacks) == 0)
    return;

  tcp_scoreboard_trace_add (tc, ack);

  /* Make sure blocks are ordered */
  for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
    for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
      if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
        {
          tmp = tc->rcv_opts.sacks[i];
          tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
          tc->rcv_opts.sacks[j] = tmp;
        }

  if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    {
      /* If no holes, insert the first that covers all outstanding bytes */
      last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
                                          tc->snd_una, tc->snd_una_max);
      sb->tail = scoreboard_hole_index (sb, last_hole);
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      sb->high_sacked = tmp.end;
    }
  else
    {
      /* If we have holes but snd_una_max is beyond the last hole, update
       * last hole end */
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      last_hole = scoreboard_last_hole (sb);
      if (seq_gt (tc->snd_una_max, last_hole->end))
        {
          if (seq_geq (last_hole->start, sb->high_sacked))
            {
              last_hole->end = tc->snd_una_max;
            }
          /* New hole after high sacked block */
          else if (seq_lt (sb->high_sacked, tc->snd_una_max))
            {
              scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
                                      tc->snd_una_max);
            }
        }
      /* Keep track of max byte sacked for when the last hole
       * is acked */
      if (seq_gt (tmp.end, sb->high_sacked))
        sb->high_sacked = tmp.end;
    }

  /* Walk the holes with the SACK blocks */
  hole = pool_elt_at_index (sb->holes, sb->head);
  while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
    {
      blk = &tc->rcv_opts.sacks[blk_index];
      if (seq_leq (blk->start, hole->start))
        {
          /* Block covers hole. Remove hole */
          if (seq_geq (blk->end, hole->end))
            {
              next_hole = scoreboard_next_hole (sb, hole);

              /* Byte accounting: snd_una needs to be advanced */
              if (blk->end == ack)
                {
                  if (next_hole)
                    {
                      if (seq_lt (ack, next_hole->start))
                        sb->snd_una_adv = next_hole->start - ack;
                      sb->last_bytes_delivered +=
                        next_hole->start - hole->end;
                    }
                  else
                    {
                      ASSERT (seq_geq (sb->high_sacked, ack));
                      sb->snd_una_adv = sb->high_sacked - ack;
                      sb->last_bytes_delivered += sb->high_sacked - hole->end;
                    }
                }

              scoreboard_remove_hole (sb, hole);
              hole = next_hole;
            }
          /* Partial 'head' overlap */
          else
            {
              if (seq_gt (blk->end, hole->start))
                {
                  hole->start = blk->end;
                }
              blk_index++;
            }
        }
      else
        {
          /* Hole must be split */
          if (seq_lt (blk->end, hole->end))
            {
              hole_index = scoreboard_hole_index (sb, hole);
              next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
                                                  hole->end);

              /* Pool might've moved */
              hole = scoreboard_get_hole (sb, hole_index);
              hole->end = blk->start;
              blk_index++;
              ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
            }
          else if (seq_lt (blk->start, hole->end))
            {
              hole->end = blk->start;
            }
          hole = scoreboard_next_hole (sb, hole);
        }
    }

  scoreboard_update_bytes (tc, sb);
  sb->last_sacked_bytes = sb->sacked_bytes
    - (old_sacked_bytes - sb->last_bytes_delivered);
  ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
  ASSERT (sb->sacked_bytes == 0
          || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack));
  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max
          - seq_max (tc->snd_una, ack));
  ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
          || sb->holes[sb->head].start == ack + sb->snd_una_adv);
  TCP_EVT_DBG (TCP_EVT_CC_SCOREBOARD, tc);
}
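
/* Illustrative only: one pass of the walk above with hypothetical values.
 * With snd_una = 100 and snd_una_max = 1000, the first SACK option ever
 * received, say block [300, 400), finds an empty scoreboard:
 *
 *  1. A single hole [100, 1000) covering all outstanding bytes is
 *     inserted and high_sacked becomes 400.
 *  2. Block start 300 is past the hole start, and block end 400 is below
 *     the hole end, so the hole is split: [100, 300) and [400, 1000).
 *  3. scoreboard_update_bytes () then accounts 100 sacked bytes.
 *
 * A later cumulative ack of 300 would shrink the first hole away without
 * touching the second. */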
/**
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If successful, and new window is 'effectively' 0, activate persist
 * timer.
 */
static void
tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
{
  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
   * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
    {
      tc->snd_wnd = snd_wnd;
      tc->snd_wl1 = seq;
      tc->snd_wl2 = ack;
      TCP_EVT_DBG (TCP_EVT_SND_WND, tc);

      if (tc->snd_wnd < tc->snd_mss)
        {
          /* Set persist timer if not set and we just got 0 wnd */
          if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
              && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
            tcp_persist_timer_set (tc);
        }
      else
        {
          tcp_persist_timer_reset (tc);
          if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
            {
              tc->rto_boff = 0;
              tcp_update_rto (tc);
            }
        }
    }
}

/**
 * Init loss recovery/fast recovery.
 *
 * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
 * updated in @ref tcp_cc_handle_event after fast retransmit
 */
void
tcp_cc_init_congestion (tcp_connection_t * tc)
{
  tcp_fastrecovery_on (tc);
  tc->snd_congestion = tc->snd_una_max;
  tc->cwnd_acc_bytes = 0;
  tc->cc_algo->congestion (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
}

static void
tcp_cc_recovery_exit (tcp_connection_t * tc)
{
  tc->rto_boff = 0;
  tcp_update_rto (tc);
  tc->snd_rxt_ts = 0;
  tc->snd_nxt = tc->snd_una_max;
  tcp_recovery_off (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}

void
tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
{
  tc->cc_algo->recovered (tc);
  tc->snd_rxt_bytes = 0;
  tc->rcv_dupacks = 0;
  tc->snd_nxt = tc->snd_una_max;
  tcp_fastrecovery_off (tc);
  tcp_fastrecovery_1_smss_off (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}

static void
tcp_cc_congestion_undo (tcp_connection_t * tc)
{
  tc->cwnd = tc->prev_cwnd;
  tc->ssthresh = tc->prev_ssthresh;
  tc->snd_nxt = tc->snd_una_max;
  tc->rcv_dupacks = 0;
  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  ASSERT (tc->rto_boff == 0);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 5);
  /* TODO extend for fastrecovery */
}

static u8
tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
{
  return (tcp_in_recovery (tc) && tc->rto_boff == 1
          && tc->snd_rxt_ts
          && tcp_opts_tstamp (&tc->rcv_opts)
          && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
}

static int
tcp_cc_recover (tcp_connection_t * tc)
{
  ASSERT (tcp_in_cong_recovery (tc));
  if (tcp_cc_is_spurious_retransmit (tc))
    {
      clib_warning ("here");
      tcp_cc_congestion_undo (tc);
      return 1;
    }

  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  else if (tcp_in_fastrecovery (tc))
    tcp_cc_fastrecovery_exit (tc);

  ASSERT (tc->rto_boff == 0);
  ASSERT (!tcp_in_cong_recovery (tc));
  ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
  return 0;
}

static void
tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b)
{
  ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));

  /* Congestion avoidance */
  tc->cc_algo->rcv_ack (tc);
  tc->tsecr_last_ack = tc->rcv_opts.tsecr;

  /* If a cumulative ack, make sure dupacks is 0 */
  tc->rcv_dupacks = 0;

  /* When dupacks hits the threshold we only enter fast retransmit if
   * cumulative ack covers more than snd_congestion. Should snd_una
   * wrap this test may fail under otherwise valid circumstances.
   * Therefore, proactively update snd_congestion when wrap detected. */
  if (PREDICT_FALSE
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;
}

static u8
tcp_should_fastrecover_sack (tcp_connection_t * tc)
{
  return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
}

static u8
tcp_should_fastrecover (tcp_connection_t * tc)
{
  return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
          || tcp_should_fastrecover_sack (tc));
}
/**
 * One function to rule them all ... and in the darkness bind them
 */
static void
tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
{
  u32 rxt_delivered;

  if (tcp_in_fastrecovery (tc) && tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      if (tc->bytes_acked)
        goto partial_ack;
      tcp_fast_retransmit (tc);
      return;
    }
  /*
   * Duplicate ACK. Check if we should enter fast recovery, or if already in
   * it account for the bytes that left the network.
   */
  else if (is_dack && !tcp_in_recovery (tc))
    {
      TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
      ASSERT (tc->snd_una != tc->snd_una_max
              || tc->sack_sb.last_sacked_bytes);

      tc->rcv_dupacks++;

      /* Pure duplicate ack. If some data got acked, it's handled lower */
      if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
        {
          ASSERT (tcp_in_fastrecovery (tc));
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
          return;
        }
      else if (tcp_should_fastrecover (tc))
        {
          ASSERT (!tcp_in_fastrecovery (tc));

          /* If either of the two conditions below holds, reset dupacks
           * because we're probably after timeout (RFC6582 heuristics).
           * If cumulative ack does not cover more than congestion threshold,
           * and:
           * 1) The following doesn't hold: The congestion window is greater
           *    than SMSS bytes and the difference between highest_ack
           *    and prev_highest_ack is at most 4*SMSS bytes
           * 2) Echoed timestamp in the last non-dup ack does not equal the
           *    stored timestamp
           */
          if (seq_leq (tc->snd_una, tc->snd_congestion)
              && ((!(tc->cwnd > tc->snd_mss
                     && tc->bytes_acked <= 4 * tc->snd_mss))
                  || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
            {
              tc->rcv_dupacks = 0;
              return;
            }

          tcp_cc_init_congestion (tc);
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);

          /* The first segment MUST be retransmitted */
          tcp_retransmit_first_unacked (tc);

          /* Post retransmit update cwnd to ssthresh and account for the
           * three segments that have left the network and should've been
           * buffered at the receiver XXX */
          tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss;
          ASSERT (tc->cwnd >= tc->snd_mss);

          /* If cwnd allows, send more data */
          if (tcp_opts_sack_permitted (&tc->rcv_opts))
            {
              scoreboard_init_high_rxt (&tc->sack_sb,
                                        tc->snd_una + tc->snd_mss);
              tcp_fast_retransmit_sack (tc);
            }
          else
            {
              tcp_fast_retransmit_no_sack (tc);
            }
          return;
        }
      else if (!tc->bytes_acked
               || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
        {
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
          return;
        }
      else
        goto partial_ack;
    }

  if (!tc->bytes_acked)
    return;

partial_ack:
  TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);

  /*
   * Legitimate ACK. 1) See if we can exit recovery
   */
  /* XXX limit this only to first partial ack? */
  tcp_retransmit_timer_update (tc);

  if (seq_geq (tc->snd_una, tc->snd_congestion))
    {
      /* If spurious return, we've already updated everything */
      if (tcp_cc_recover (tc))
        {
          tc->tsecr_last_ack = tc->rcv_opts.tsecr;
          return;
        }

      tc->snd_nxt = tc->snd_una_max;

      /* Treat as congestion avoidance ack */
      tc->cc_algo->rcv_ack (tc);
      tc->tsecr_last_ack = tc->rcv_opts.tsecr;
      return;
    }

  /*
   * Legitimate ACK. 2) If PARTIAL ACK try to retransmit
   */

  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
   * reset dupacks to 0. Also needed if in congestion recovery */
  tc->rcv_dupacks = 0;

  /* Post RTO timeout don't try anything fancy */
  if (tcp_in_recovery (tc))
    {
      tc->cc_algo->rcv_ack (tc);
      tc->tsecr_last_ack = tc->rcv_opts.tsecr;
      return;
    }

  /* Remove retransmitted bytes that have been delivered */
  ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv
          >= tc->sack_sb.last_bytes_delivered
          || (tc->flags & TCP_CONN_FINSNT));

  if (seq_lt (tc->snd_una, tc->sack_sb.high_rxt))
    {
      /* If we have sacks and we haven't gotten an ack beyond high_rxt,
       * remove sacked bytes delivered */
      rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv
        - tc->sack_sb.last_bytes_delivered;
      ASSERT (tc->snd_rxt_bytes >= rxt_delivered);
      tc->snd_rxt_bytes -= rxt_delivered;
    }
  else
    {
      /* Either all retransmitted holes have been acked, or we're
       * "in the blind" and retransmitting segment by segment */
      tc->snd_rxt_bytes = 0;
    }

  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);

  /*
   * Since this was a partial ack, try to retransmit some more data
   */
  tcp_fast_retransmit (tc);
}
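
/* Illustrative only: a typical NewReno episode as handled above, with
 * hypothetical numbers. With snd_mss = 1000 and cwnd = 10000, three
 * duplicate acks arrive (rcv_dupacks hits TCP_DUPACK_THRESHOLD):
 * congestion is initialized (snd_congestion = snd_una_max), the first
 * unacked segment is retransmitted, and cwnd becomes
 * ssthresh + 3 * snd_mss. Each further dupack is charged to the cc
 * algorithm as TCP_CC_DUPACK. Partial acks (snd_una still below
 * snd_congestion) take the partial_ack path and keep retransmitting,
 * while the first ack with snd_una >= snd_congestion exits recovery via
 * tcp_cc_recover (). */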
void
tcp_cc_init (tcp_connection_t * tc)
{
  tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO);
  tc->cc_algo->init (tc);
}

/**
 * Process incoming ACK
 */
static int
tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b,
             tcp_header_t * th, u32 * next, u32 * error)
{
  u32 prev_snd_wnd, prev_snd_una;
  u8 is_dack;

  TCP_EVT_DBG (TCP_EVT_CC_STAT, tc);

  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
    {
      /* When we entered recovery, we reset snd_nxt to snd_una. Seems peer
       * still has the data so accept the ack */
      if (tcp_in_recovery (tc)
          && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_congestion)
          && seq_geq (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
        {
          tc->snd_una_max = tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
          goto process_ack;
        }

      /* If we have outstanding data and this is within the window, accept it,
       * probably retransmit has timed out. Otherwise ACK segment and then
       * drop it */
      if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
        {
          tcp_make_ack (tc, b);
          *next = tcp_next_output (tc->c_is_ip4);
          *error = TCP_ERROR_ACK_INVALID;
          TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
                       vnet_buffer (b)->tcp.ack_number);
          return -1;
        }

      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2,
                   vnet_buffer (b)->tcp.ack_number);

      tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
      *error = TCP_ERROR_ACK_FUTURE;
    }

  /* If old ACK, probably it's an old dupack */
  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
    {
      *error = TCP_ERROR_ACK_OLD;
      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
                   vnet_buffer (b)->tcp.ack_number);
      if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
        tcp_cc_handle_event (tc, 1);
      /* Don't drop yet */
      return 0;
    }

  /*
   * Looks okay, process feedback
   */
process_ack:
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);

  prev_snd_wnd = tc->snd_wnd;
  prev_snd_una = tc->snd_una;
  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
                      vnet_buffer (b)->tcp.ack_number,
                      clib_net_to_host_u16 (th->window) << tc->snd_wscale);
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
  tcp_validate_txf_size (tc, tc->bytes_acked);

  if (tc->bytes_acked)
    tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number);

  TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);

  /*
   * Check if we have congestion event
   */
  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
    {
      tcp_cc_handle_event (tc, is_dack);
      if (!tcp_in_cong_recovery (tc))
        return 0;
      *error = TCP_ERROR_ACK_DUP;
      return vnet_buffer (b)->tcp.data_len ? 0 : -1;
    }

  /*
   * Update congestion control (slow start/congestion avoidance)
   */
  tcp_cc_update (tc, b);
  *error = TCP_ERROR_ACK_OK;
  return 0;
}
static u8
tcp_sack_vector_is_sane (sack_block_t * sacks)
{
  int i;
  for (i = 1; i < vec_len (sacks); i++)
    {
      if (sacks[i - 1].end == sacks[i].start)
        return 0;
    }
  return 1;
}

/**
 * Build SACK list as per RFC2018.
 *
 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the ones most recently reported in SACK
 * blocks.
 *
 * @param tc TCP connection for which the SACK list is updated
 * @param start Start sequence number of the newest SACK block
 * @param end End sequence of the newest SACK block
 */
void
tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
{
  sack_block_t *new_list = 0, *block = 0;
  int i;

  /* If the first segment is ooo add it to the list. Last write might've moved
   * rcv_nxt over the first segment. */
  if (seq_lt (tc->rcv_nxt, start))
    {
      vec_add2 (new_list, block, 1);
      block->start = start;
      block->end = end;
    }

  /* Find the blocks still worth keeping. */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    {
      /* Discard if rcv_nxt advanced beyond current block */
      if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
        continue;

      /* Merge or drop if segment overlapped by the new segment */
      if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
                    && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
        {
          if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
            new_list[0].start = tc->snd_sacks[i].start;
          if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
            new_list[0].end = tc->snd_sacks[i].end;
          continue;
        }

      /* Save to new SACK list if we have space. */
      if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
        {
          vec_add1 (new_list, tc->snd_sacks[i]);
        }
      else
        {
          clib_warning ("sack discarded");
        }
    }

  ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);

  /* Replace old vector with new one */
  vec_free (tc->snd_sacks);
  tc->snd_sacks = new_list;

  /* Segments should not 'touch' */
  ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
}
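
/* Illustrative only: the receiver-side list evolving over three
 * out-of-order arrivals, assuming rcv_nxt stays at 100:
 *
 *   [200, 300) arrives -> snd_sacks = {[200, 300)}
 *   [400, 500) arrives -> snd_sacks = {[400, 500), [200, 300)}
 *   [300, 400) arrives -> all three touch/overlap and the list collapses
 *                         to {[200, 500)}
 *
 * The newest block always ends up first, per RFC2018, so the peer sees
 * the most recent information even if older blocks get dropped. */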
u32
tcp_sack_list_bytes (tcp_connection_t * tc)
{
  u32 bytes = 0, i;
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    bytes += tc->snd_sacks[i].end - tc->snd_sacks[i].start;
  return bytes;
}

/** Enqueue data for delivery to application */
static int
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
                          u16 data_len)
{
  int written, error = TCP_ERROR_ENQUEUED;

  ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));

  written = session_enqueue_stream_connection (&tc->connection, b, 0,
                                               1 /* queue event */ , 1);

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written);

  /* Update rcv_nxt */
  if (PREDICT_TRUE (written == data_len))
    {
      tc->rcv_nxt += written;
    }
  /* If more data written than expected, account for out-of-order bytes. */
  else if (written > data_len)
    {
      tc->rcv_nxt += written;

      /* Send ACK confirming the update */
      tc->flags |= TCP_CONN_SNDACK;
      TCP_EVT_DBG (TCP_EVT_CC_INPUT, tc, data_len, written);
    }
  else if (written > 0)
    {
      /* We've written something but FIFO is probably full now */
      tc->rcv_nxt += written;

      /* Depending on how fast the app is, all remaining buffers in burst will
       * not be enqueued. Inform peer */
      tc->flags |= TCP_CONN_SNDACK;

      error = TCP_ERROR_PARTIALLY_ENQUEUED;
    }
  else
    {
      tc->flags |= TCP_CONN_SNDACK;
      return TCP_ERROR_FIFO_FULL;
    }

  /* Update SACK list if need be */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      /* Remove SACK blocks that have been delivered */
      tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
    }

  return error;
}

/** Enqueue out-of-order data */
static int
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
                         u16 data_len)
{
  stream_session_t *s0;
  int rv, offset;

  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));

  /* Enqueue out-of-order data with relative offset */
  rv = session_enqueue_stream_connection (&tc->connection, b,
                                          vnet_buffer (b)->tcp.seq_number -
                                          tc->rcv_nxt, 0 /* queue event */ ,
                                          0);

  /* Nothing written */
  if (rv)
    {
      TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0);
      return TCP_ERROR_FIFO_FULL;
    }

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);

  /* Update SACK list if in use */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      ooo_segment_t *newest;
      u32 start, end;

      s0 = session_get (tc->c_s_index, tc->c_thread_index);

      /* Get the newest segment from the fifo */
      newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo);
      if (newest)
        {
          offset = ooo_segment_offset (s0->server_rx_fifo, newest);
          ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
          start = tc->rcv_nxt + offset;
          end = start + ooo_segment_length (s0->server_rx_fifo, newest);
          tcp_update_sack_list (tc, start, end);
          svm_fifo_newest_ooo_segment_reset (s0->server_rx_fifo);
          TCP_EVT_DBG (TCP_EVT_CC_SACKS, tc);
        }
    }

  return TCP_ERROR_ENQUEUED_OOO;
}

/**
 * Check if ACK could be delayed. If ack can be delayed, it should return
 * true for a full frame. If we're always acking return 0.
 */
always_inline int
tcp_can_delack (tcp_connection_t * tc)
{
  /* Send ack if ... */
  if (TCP_ALWAYS_ACK
      /* just sent a rcv wnd 0 */
      || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0
      /* constrained to send ack */
      || (tc->flags & TCP_CONN_SNDACK) != 0
      /* we're almost out of tx wnd */
      || tcp_available_cc_snd_space (tc) < 4 * tc->snd_mss)
    return 0;

  return 1;
}

static int
tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
{
  u32 discard, first = b->current_length;
  vlib_main_t *vm = vlib_get_main ();

  /* Handle multi-buffer segments */
  if (n_bytes_to_drop > b->current_length)
    {
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        return -1;
      do
        {
          discard = clib_min (n_bytes_to_drop, b->current_length);
          vlib_buffer_advance (b, discard);
          b = vlib_get_buffer (vm, b->next_buffer);
          n_bytes_to_drop -= discard;
        }
      while (n_bytes_to_drop);
      if (n_bytes_to_drop > first)
        b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
    }
  else
    vlib_buffer_advance (b, n_bytes_to_drop);
  vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
  return 0;
}
/**
 * Receive buffer for connection and handle acks
 *
 * It handles both in-order and out-of-order data.
 */
always_inline int
tcp_segment_rcv (tcp_connection_t * tc, vlib_buffer_t * b, u32 * next0)
{
  u32 error, n_bytes_to_drop, n_data_bytes;

  vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
  n_data_bytes = vnet_buffer (b)->tcp.data_len;
  ASSERT (n_data_bytes);

  /* Handle out-of-order data */
  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
    {
      /* Old sequence numbers allowed through because they overlapped
       * the rx window */
      if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
        {
          /* Completely in the past (possible retransmit). Ack
           * retransmissions since we may not have any data to send */
          if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
            {
              tcp_make_ack (tc, b);
              error = TCP_ERROR_SEGMENT_OLD;
              *next0 = tcp_next_output (tc->c_is_ip4);
              goto done;
            }

          /* Chop off the bytes in the past and see if what is left
           * can be enqueued in order */
          n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
          n_data_bytes -= n_bytes_to_drop;
          vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
          if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
            {
              error = TCP_ERROR_SEGMENT_OLD;
              *next0 = tcp_next_drop (tc->c_is_ip4);
              goto done;
            }
          goto in_order;
        }

      /* RFC2581: Enqueue and send DUPACK for fast retransmit */
      error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
      *next0 = tcp_next_output (tc->c_is_ip4);
      tcp_make_ack (tc, b);
      vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK;
      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
      goto done;
    }

in_order:

  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
   * segments can be enqueued after fifo tail offset changes. */
  error = tcp_session_enqueue_data (tc, b, n_data_bytes);
  if (tcp_can_delack (tc))
    {
      *next0 = tcp_next_drop (tc->c_is_ip4);
      if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
        tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME);
      goto done;
    }

  *next0 = tcp_next_output (tc->c_is_ip4);
  tcp_make_ack (tc, b);

done:
  return error;
}

typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_rx_trace_t;

u8 *
format_tcp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "%U\n%U%U",
              format_tcp_header, &t->tcp_header, 128,
              format_white_space, indent,
              format_tcp_connection, &t->tcp_connection, 1);

  return s;
}

u8 *
format_tcp_rx_trace_short (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);

  s = format (s, "%d -> %d (%U)",
              clib_net_to_host_u16 (t->tcp_header.src_port),
              clib_net_to_host_u16 (t->tcp_header.dst_port), format_tcp_state,
              t->tcp_connection.state);

  return s;
}

void
tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
                       tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
{
  if (tc0)
    {
      clib_memcpy (&t0->tcp_connection, tc0, sizeof (t0->tcp_connection));
    }
  else
    {
      th0 = tcp_buffer_hdr (b0);
    }
  clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
}

always_inline void
tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
                        u8 is_ip4, u32 evt, u32 val)
{
  if (is_ip4)
    vlib_node_increment_counter (vm, tcp4_node, evt, val);
  else
    vlib_node_increment_counter (vm, tcp6_node, evt, val);
}

#define tcp_maybe_inc_counter(node_id, err, count)              \
{                                                               \
  if (next0 != tcp_next_drop (is_ip4))                          \
    tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,    \
                            tcp6_##node_id##_node.index,        \
                            is_ip4, err, 1);                    \
}
#define tcp_inc_counter(node_id, err, count)                    \
  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,      \
                          tcp6_##node_id##_node.index, is_ip4,  \
                          err, count)
#define tcp_maybe_inc_err_counter(cnts, err)                    \
{                                                               \
  cnts[err] += (next0 != tcp_next_drop (is_ip4));               \
}
#define tcp_inc_err_counter(cnts, err, val)                     \
{                                                               \
  cnts[err] += val;                                             \
}
#define tcp_store_err_counters(node_id, cnts)                   \
{                                                               \
  int i;                                                        \
  for (i = 0; i < TCP_N_ERROR; i++)                             \
    if (cnts[i])                                                \
      tcp_inc_counter(node_id, i, cnts[i]);                     \
}
always_inline uword
tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip4)
{
  u32 my_thread_index = vm->thread_index, errors = 0;
  u32 n_left_from, next_index, *from, *to_next;
  u16 err_counters[TCP_N_ERROR] = { 0 };

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_header_t *th0 = 0;
          tcp_connection_t *tc0;
          u32 next0 = tcp_next_drop (is_ip4), error0 = TCP_ERROR_ACK_OK;
          u8 is_fin = 0;

          if (n_left_from > 1)
            {
              vlib_buffer_t *pb;
              pb = vlib_get_buffer (vm, from[1]);
              vlib_prefetch_buffer_header (pb, LOAD);
              CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            }

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    my_thread_index);

          if (PREDICT_FALSE (tc0 == 0))
            {
              error0 = TCP_ERROR_INVALID_CONNECTION;
              goto done;
            }

          th0 = tcp_buffer_hdr (b0);
          /* N.B. buffer is rewritten if segment is ooo. Thus, th0 becomes a
           * dangling reference. */
          is_fin = tcp_is_fin (th0);

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
            + tcp_is_syn (th0) + is_fin + vnet_buffer (b0)->tcp.data_len;

          /* TODO header prediction fast path */

          /* 1-4: check SEQ, RST, SYN */
          if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, th0, &next0,
                                                   &error0)))
            {
              tcp_maybe_inc_err_counter (err_counters, error0);
              TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
              goto done;
            }

          /* 5: check the ACK field */
          if (PREDICT_FALSE (tcp_rcv_ack (tc0, b0, th0, &next0, &error0)))
            {
              tcp_maybe_inc_err_counter (err_counters, error0);
              goto done;
            }

          /* 6: check the URG bit TODO */

          /* 7: process the segment text */
          if (vnet_buffer (b0)->tcp.data_len)
            {
              error0 = tcp_segment_rcv (tc0, b0, &next0);
              tcp_maybe_inc_err_counter (err_counters, error0);
            }

          /* 8: check the FIN bit */
          if (PREDICT_FALSE (is_fin))
            {
              /* Enter CLOSE-WAIT and notify session. To avoid lingering
               * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
              /* Account for the FIN if nothing else was received */
              if (vnet_buffer (b0)->tcp.data_len == 0)
                tc0->rcv_nxt += 1;
              tcp_make_ack (tc0, b0);
              next0 = tcp_next_output (tc0->c_is_ip4);
              tc0->state = TCP_STATE_CLOSE_WAIT;
              stream_session_disconnect_notify (&tc0->connection);
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
              TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
              tcp_inc_err_counter (err_counters, TCP_ERROR_FIN_RCVD, 1);
            }

        done:
          b0->error = node->errors[error0];
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0,
                                                   sizeof (*t0));
              tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
                                                 my_thread_index);
  err_counters[TCP_ERROR_EVENT_FIFO_FULL] = errors;
  tcp_store_err_counters (established, err_counters);
  tcp_flush_frame_to_output (vm, my_thread_index, is_ip4);
  return from_frame->n_vectors;
}

static uword
tcp4_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

VLIB_REGISTER_NODE (tcp4_established_node) =
{
  .function = tcp4_established,
  .name = "tcp4-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_established_node, tcp4_established);

VLIB_REGISTER_NODE (tcp6_established_node) =
{
  .function = tcp6_established,
  .name = "tcp6-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_established_node, tcp6_established);
vlib_node_registration_t tcp4_syn_sent_node;
vlib_node_registration_t tcp6_syn_sent_node;

static u8
tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
{
  transport_connection_t *tmp = 0;
  u64 handle;

  if (!tc)
    return 1;

  /* Proxy case */
  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
    return 1;

  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
                 && (tc->state == TCP_STATE_LISTEN
                     || tc->c_rmt_port == hdr->src_port));

  if (!is_valid)
    {
      handle = session_lookup_half_open_handle (&tc->connection);
      tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
                                                 tc->c_proto, tc->c_is_ip4);

      if (tmp)
        {
          if (tmp->lcl_port == hdr->dst_port
              && tmp->rmt_port == hdr->src_port)
            {
              TCP_DBG ("half-open is valid!");
            }
        }
    }
  return is_valid;
}

/**
 * Lookup transport connection
 */
static tcp_connection_t *
tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
                       u8 is_ip4)
{
  tcp_header_t *tcp;
  transport_connection_t *tconn;
  tcp_connection_t *tc;
  u8 is_filtered = 0;
  if (is_ip4)
    {
      ip4_header_t *ip4;
      ip4 = vlib_buffer_get_current (b);
      tcp = ip4_next_header (ip4);
      tconn = session_lookup_connection_wt4 (fib_index,
                                             &ip4->dst_address,
                                             &ip4->src_address,
                                             tcp->dst_port,
                                             tcp->src_port,
                                             TRANSPORT_PROTO_TCP,
                                             thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  else
    {
      ip6_header_t *ip6;
      ip6 = vlib_buffer_get_current (b);
      tcp = ip6_next_header (ip6);
      tconn = session_lookup_connection_wt6 (fib_index,
                                             &ip6->dst_address,
                                             &ip6->src_address,
                                             tcp->dst_port,
                                             tcp->src_port,
                                             TRANSPORT_PROTO_TCP,
                                             thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  return tc;
}
always_inline uword
tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * from_frame, int is_ip4)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index, errors = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, ack0, seq0;
          vlib_buffer_t *b0;
          tcp_rx_trace_t *t0;
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          tcp_connection_t *new_tc0;
          u32 next0 = tcp_next_drop (is_ip4), error0 = TCP_ERROR_ENQUEUED;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 =
            tcp_half_open_connection_get (vnet_buffer (b0)->
                                          tcp.connection_index);
          if (PREDICT_FALSE (tc0 == 0))
            {
              error0 = TCP_ERROR_INVALID_CONNECTION;
              goto drop;
            }

          /* Half-open completed recently but the connection wasn't removed
           * yet by the owning thread */
          if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
            {
              /* Make sure the connection actually exists */
              ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
                                             my_thread_index, is_ip4));
              goto drop;
            }

          ack0 = vnet_buffer (b0)->tcp.ack_number;
          seq0 = vnet_buffer (b0)->tcp.seq_number;
          tcp0 = tcp_buffer_hdr (b0);

          /* Crude check to see if the connection handle does not match
           * the packet. Probably connection just switched to established */
          if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
                             || tcp0->src_port != tc0->c_rmt_port))
            goto drop;

          if (PREDICT_FALSE
              (!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0)))
            goto drop;

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = seq0 + tcp_is_syn (tcp0)
            + tcp_is_fin (tcp0) + vnet_buffer (b0)->tcp.data_len;

          /*
           *  1. check the ACK bit
           */

          /*
           *   If the ACK bit is set
           *     If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
           *     the RST bit is set, if so drop the segment and return)
           *       <SEQ=SEG.ACK><CTL=RST>
           *     and discard the segment. Return.
           *     If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
           */
          if (tcp_ack (tcp0))
            {
              if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
                {
                  clib_warning ("ack not in rcv wnd");
                  if (!tcp_rst (tcp0))
                    tcp_send_reset_w_pkt (tc0, b0, is_ip4);
                  goto drop;
                }

              /* Make sure ACK is valid */
              if (seq_gt (tc0->snd_una, ack0))
                {
                  clib_warning ("ack invalid");
                  goto drop;
                }
            }

          /*
           * 2. check the RST bit
           */

          if (tcp_rst (tcp0))
            {
              /* If ACK is acceptable, signal client that peer is not
               * willing to accept connection and drop connection*/
              if (tcp_ack (tcp0))
                tcp_connection_reset (tc0);
              goto drop;
            }

          /*
           * 3. check the security and precedence (skipped)
           */

          /*
           * 4. check the SYN bit
           */

          /* No SYN flag. Drop. */
          if (!tcp_syn (tcp0))
            {
              clib_warning ("not synack");
              goto drop;
            }

          /* Parse options */
          if (tcp_options_parse (tcp0, &tc0->rcv_opts))
            {
              clib_warning ("options parse fail");
              goto drop;
            }

          /* Valid SYN or SYN-ACK. Move connection from half-open pool to
           * current thread pool. */
          pool_get (tm->connections[my_thread_index], new_tc0);
          clib_memcpy (new_tc0, tc0, sizeof (*new_tc0));
          new_tc0->c_c_index = new_tc0 - tm->connections[my_thread_index];
          new_tc0->c_thread_index = my_thread_index;
          new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
          new_tc0->irs = seq0;
          new_tc0->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID;
          new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] =
            TCP_TIMER_HANDLE_INVALID;
          new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          /* If this is not the owning thread, wait for syn retransmit to
           * expire and cleanup then */
          if (tcp_half_open_connection_cleanup (tc0))
            tc0->flags |= TCP_CONN_HALF_OPEN_DONE;

          if (tcp_opts_tstamp (&new_tc0->rcv_opts))
            {
              new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
              new_tc0->tsval_recent_age = tcp_time_now ();
            }

          if (tcp_opts_wscale (&new_tc0->rcv_opts))
            new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;

          /* RFC1323: SYN and SYN-ACK wnd not scaled */
          new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window);
          new_tc0->snd_wl1 = seq0;
          new_tc0->snd_wl2 = ack0;

          tcp_connection_init_vars (new_tc0);

          /* SYN-ACK: See if we can switch to ESTABLISHED state */
          if (PREDICT_TRUE (tcp_ack (tcp0)))
            {
              /* Our SYN is ACKed: we have iss < ack = snd_una */

              /* TODO Dequeue acknowledged segments if we support Fast Open */
              new_tc0->snd_una = ack0;
              new_tc0->state = TCP_STATE_ESTABLISHED;

              /* Make sure las is initialized for the wnd computation */
              new_tc0->rcv_las = new_tc0->rcv_nxt;

              /* Notify app that we have connection. If session layer can't
               * allocate session send reset */
              if (session_stream_connect_notify (&new_tc0->connection, 0))
                {
                  clib_warning ("connect notify fail");
                  tcp_send_reset_w_pkt (new_tc0, b0, is_ip4);
                  tcp_connection_cleanup (new_tc0);
                  goto drop;
                }

              /* Make sure after data segment processing ACK is sent */
              new_tc0->flags |= TCP_CONN_SNDACK;

              /* Update rtt with the syn-ack sample */
              tcp_update_rtt (new_tc0, vnet_buffer (b0)->tcp.ack_number);
              TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
            }
          /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
          else
            {
              new_tc0->state = TCP_STATE_SYN_RCVD;

              /* Notify app that we have connection */
              if (session_stream_connect_notify (&new_tc0->connection, 0))
                {
                  tcp_connection_cleanup (new_tc0);
                  tcp_send_reset_w_pkt (tc0, b0, is_ip4);
                  TCP_EVT_DBG (TCP_EVT_RST_SENT, tc0);
                  goto drop;
                }

              tc0->rtt_ts = 0;
              tcp_init_snd_vars (tc0);
              tcp_make_synack (new_tc0, b0);
              next0 = tcp_next_output (is_ip4);

              goto drop;
            }

          /* Read data, if any */
          if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
            {
              clib_warning ("rcvd data in syn-sent");
              error0 = tcp_segment_rcv (new_tc0, b0, &next0);
              if (error0 == TCP_ERROR_ACK_OK)
                error0 = TCP_ERROR_SYN_ACKS_RCVD;
              tcp_maybe_inc_counter (syn_sent, error0, 1);
            }
          else
            {
              tcp_make_ack (new_tc0, b0);
              next0 = tcp_next_output (new_tc0->c_is_ip4);
            }

        drop:

          b0->error = error0 ? node->errors[error0] : 0;
          if (PREDICT_FALSE
              ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
            {
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
              clib_memcpy (&t0->tcp_connection, tc0,
                           sizeof (t0->tcp_connection));
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
                                                 my_thread_index);
  tcp_inc_counter (syn_sent, TCP_ERROR_EVENT_FIFO_FULL, errors);
  return from_frame->n_vectors;
}

static uword
tcp4_syn_sent (vlib_main_t * vm, vlib_node_runtime_t * node,
               vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_syn_sent_rcv (vlib_main_t * vm, vlib_node_runtime_t * node,
                   vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
{
  .function = tcp4_syn_sent,
  .name = "tcp4-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_syn_sent_node, tcp4_syn_sent);

VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
{
  .function = tcp6_syn_sent_rcv,
  .name = "tcp6-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv);
vlib_node_registration_t tcp4_rcv_process_node;
vlib_node_registration_t tcp6_rcv_process_node;

/**
 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
 * as per RFC793 p. 64
 */
always_inline uword
tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next, n_fins = 0;
  u32 my_thread_index = vm->thread_index, errors = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          u32 next0 = tcp_next_drop (is_ip4), error0 = TCP_ERROR_NONE;
          u8 is_fin0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    my_thread_index);
          if (PREDICT_FALSE (tc0 == 0))
            {
              error0 = TCP_ERROR_INVALID_CONNECTION;
              goto drop;
            }

          tcp0 = tcp_buffer_hdr (b0);
          is_fin0 = tcp_is_fin (tcp0);

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
            + tcp_is_syn (tcp0) + is_fin0 + vnet_buffer (b0)->tcp.data_len;
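
          /* Worked example with hypothetical numbers: a segment with
           * seq_number 1000 carrying 100 data bytes and the FIN flag set
           * yields seq_end = 1000 + 0 (no SYN) + 1 (FIN) + 100 = 1101, so
           * the FIN itself occupies sequence number 1100. */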
          if (CLIB_DEBUG)
            {
              tcp_connection_t *tmp;
              tmp = tcp_lookup_connection (tc0->c_fib_index, b0,
                                           my_thread_index, is_ip4);
              if (tmp->state != tc0->state)
                {
                  clib_warning ("state changed");
                  goto drop;
                }
            }

          /*
           * Special treatment for CLOSED
           */
          if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
            {
              error0 = TCP_ERROR_CONNECTION_CLOSED;
              goto drop;
            }

          /*
           * For all other states (except LISTEN)
           */

          /* 1-4: check SEQ, RST, SYN */
          if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, tcp0,
                                                   &error0)))
            {
              tcp_maybe_inc_counter (rcv_process, error0, 1);
              goto drop;
            }

          /* 5: check the ACK field */
          switch (tc0->state)
            {
            case TCP_STATE_SYN_RCVD:
              /*
               * If the segment acknowledgment is not acceptable, form a
               * reset segment,
               *  <SEQ=SEG.ACK><CTL=RST>
               * and send it.
               */
              if (!tcp_rcv_ack_is_acceptable (tc0, b0))
                {
                  TCP_DBG ("connection not accepted");
                  tcp_send_reset_w_pkt (tc0, b0, is_ip4);
                  error0 = TCP_ERROR_ACK_INVALID;
                  goto drop;
                }

              /* Update rtt and rto */
              tcp_update_rtt (tc0, vnet_buffer (b0)->tcp.ack_number);

              /* Switch state to ESTABLISHED */
              tc0->state = TCP_STATE_ESTABLISHED;
              TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);

              /* Initialize session variables */
              tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
              tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
                << tc0->rcv_opts.wscale;
              tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
              tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
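
              /* Per RFC793, snd_wl1/snd_wl2 record the seq/ack numbers of
               * the segment last used to update snd_wnd, so that only
               * newer segments are allowed to change the send window. */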
              /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
              tcp_retransmit_timer_reset (tc0);
              tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH);
              stream_session_accept_notify (&tc0->connection);
              error0 = TCP_ERROR_ACK_OK;
              break;
            case TCP_STATE_ESTABLISHED:
              /* We can get packets in established state here because they
               * were enqueued before state change */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                {
                  tcp_maybe_inc_counter (rcv_process, error0, 1);
                  goto drop;
                }
              break;
            case TCP_STATE_FIN_WAIT_1:
              /* In addition to the processing for the ESTABLISHED state, if
               * our FIN is now acknowledged then enter FIN-WAIT-2 and
               * continue processing in that state. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                {
                  tcp_maybe_inc_counter (rcv_process, error0, 1);
                  goto drop;
                }

              /* Still have to send the FIN */
              if (tc0->flags & TCP_CONN_FINPNDG)
                {
                  /* TX fifo finally drained */
                  if (!stream_session_tx_fifo_max_dequeue (&tc0->connection))
                    tcp_send_fin (tc0);
                }
              /* If FIN is ACKed */
              else if (tc0->snd_una == tc0->snd_una_max)
                {
                  tc0->state = TCP_STATE_FIN_WAIT_2;
                  TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);

                  /* Stop all retransmit timers because we have nothing more
                   * to send. Enable waitclose though because we're willing to
                   * wait for peer's FIN but not indefinitely. */
                  tcp_connection_timers_reset (tc0);
                  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
                }
              break;
            case TCP_STATE_FIN_WAIT_2:
              /* In addition to the processing for the ESTABLISHED state, if
               * the retransmission queue is empty, the user's CLOSE can be
               * acknowledged ("ok") but do not delete the TCB. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                {
                  tcp_maybe_inc_counter (rcv_process, error0, 1);
                  goto drop;
                }
              break;
            case TCP_STATE_CLOSE_WAIT:
              /* Do the same processing as for the ESTABLISHED state. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                {
                  tcp_maybe_inc_counter (rcv_process, error0, 1);
                  goto drop;
                }
              break;
            case TCP_STATE_CLOSING:
              /* In addition to the processing for the ESTABLISHED state, if
               * the ACK acknowledges our FIN then enter the TIME-WAIT state,
               * otherwise ignore the segment. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                {
                  tcp_maybe_inc_counter (rcv_process, error0, 1);
                  goto drop;
                }

              tc0->state = TCP_STATE_TIME_WAIT;
              TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
              break;
            case TCP_STATE_LAST_ACK:
              /* The only thing that [should] arrive in this state is an
               * acknowledgment of our FIN. If our FIN is now acknowledged,
               * delete the TCB, enter the CLOSED state, and return. */
              if (!tcp_rcv_ack_is_acceptable (tc0, b0))
                {
                  error0 = TCP_ERROR_ACK_INVALID;
                  goto drop;
                }
              error0 = TCP_ERROR_ACK_OK;
              tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
              /* Apparently our ACK for the peer's FIN was lost */
              if (is_fin0 && tc0->snd_una != tc0->snd_una_max)
                {
                  tcp_send_fin (tc0);
                  goto drop;
                }

              tc0->state = TCP_STATE_CLOSED;
              TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
              tcp_connection_timers_reset (tc0);

              /* Don't delete the connection/session yet. Instead, wait a
               * reasonable amount of time until the pipes are cleared. In
               * particular, this makes sure that we won't have dead sessions
               * when processing events on the tx path */
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
              goto drop;
              break;
            case TCP_STATE_TIME_WAIT:
              /* The only thing that can arrive in this state is a
               * retransmission of the remote FIN. Acknowledge it, and restart
               * the 2 MSL timeout. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                {
                  tcp_maybe_inc_counter (rcv_process, error0, 1);
                  goto drop;
                }

              tcp_make_ack (tc0, b0);
              next0 = tcp_next_output (is_ip4);
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
              goto drop;
              break;
            default:
              ASSERT (0);
            }

          /* 6: check the URG bit TODO */

          /* 7: process the segment text */
          switch (tc0->state)
            {
            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_FIN_WAIT_1:
            case TCP_STATE_FIN_WAIT_2:
              if (vnet_buffer (b0)->tcp.data_len)
                {
                  error0 = tcp_segment_rcv (tc0, b0, &next0);
                  tcp_maybe_inc_counter (rcv_process, error0, 1);
                }
              else if (is_fin0)
                tc0->rcv_nxt += 1;
              break;
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
            case TCP_STATE_TIME_WAIT:
              /* This should not occur, since a FIN has been received from the
               * remote side. Ignore the segment text. */
              break;
            }
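
          /* Note: tcp_segment_rcv above both enqueues in-order data to the
           * session layer and buffers out-of-order segments, and it selects
           * the ACK behavior (immediate or delayed) for the segment. */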
          /* 8: check the FIN bit */
          if (!is_fin0)
            goto drop;

          switch (tc0->state)
            {
            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_SYN_RCVD:
              /* Send FIN-ACK, notify app and enter CLOSE-WAIT */
              tcp_connection_timers_reset (tc0);
              tcp_make_fin (tc0, b0);
              tc0->snd_nxt += 1;
              next0 = tcp_next_output (tc0->c_is_ip4);
              stream_session_disconnect_notify (&tc0->connection);
              tc0->state = TCP_STATE_CLOSE_WAIT;
              TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
              break;
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
              /* move along .. */
              break;
            case TCP_STATE_FIN_WAIT_1:
              tc0->state = TCP_STATE_CLOSING;
              tcp_make_ack (tc0, b0);
              next0 = tcp_next_output (is_ip4);
              TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
              /* Wait for ACK but not forever */
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
              break;
            case TCP_STATE_FIN_WAIT_2:
              /* Got FIN, send ACK! Be more aggressive with resource cleanup */
              tc0->state = TCP_STATE_TIME_WAIT;
              tcp_connection_timers_reset (tc0);
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
              tcp_make_ack (tc0, b0);
              next0 = tcp_next_output (is_ip4);
              TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);
              break;
            case TCP_STATE_TIME_WAIT:
              /* Remain in the TIME-WAIT state. Restart the time-wait
               * timeout. */
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
              break;
            }
          TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
          n_fins += 1;

        drop:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              tcp_rx_trace_t *t0 =
                vlib_add_trace (vm, node, b0, sizeof (*t0));
              tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
                                                 my_thread_index);
  tcp_inc_counter (rcv_process, TCP_ERROR_EVENT_FIFO_FULL, errors);
  tcp_inc_counter (rcv_process, TCP_ERROR_FIN_RCVD, n_fins);
  return from_frame->n_vectors;
}
static uword
tcp4_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
{
  .function = tcp4_rcv_process,
  .name = "tcp4-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_rcv_process_node, tcp4_rcv_process);

VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
{
  .function = tcp6_rcv_process,
  .name = "tcp6-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_rcv_process_node, tcp6_rcv_process);
vlib_node_registration_t tcp4_listen_node;
vlib_node_registration_t tcp6_listen_node;

/**
 * LISTEN state processing as per RFC 793 p. 65
 */
always_inline uword
tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next, n_syns = 0;
  u32 my_thread_index = vm->thread_index;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_rx_trace_t *t0;
          tcp_header_t *th0 = 0;
          tcp_connection_t *lc0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          tcp_connection_t *child0;
          u32 error0 = TCP_ERROR_NONE, next0 = tcp_next_drop (is_ip4);

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);

          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              th0 = ip4_next_header (ip40);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              th0 = ip6_next_header (ip60);
            }

          /* Create child session. For syn-flood protection use filter */

          /* 1. first check for an RST: handled in dispatch */
          /* if (tcp_rst (th0))
             goto drop; */

          /* 2. second check for an ACK: handled in dispatch */
          /* if (tcp_ack (th0))
             {
             tcp_send_reset (b0, is_ip4);
             goto drop;
             }
           */

          /* 3. check for a SYN (did that already) */

          /* Make sure connection wasn't just created */
          child0 = tcp_lookup_connection (lc0->c_fib_index, b0,
                                          my_thread_index, is_ip4);
          if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
            {
              error0 = TCP_ERROR_CREATE_EXISTS;
              goto drop;
            }

          /* Create child session and send SYN-ACK */
          child0 = tcp_connection_new (my_thread_index);
          child0->c_lcl_port = th0->dst_port;
          child0->c_rmt_port = th0->src_port;
          child0->c_is_ip4 = is_ip4;
          child0->state = TCP_STATE_SYN_RCVD;
          child0->c_fib_index = lc0->c_fib_index;

          if (is_ip4)
            {
              child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
              child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
            }
          else
            {
              clib_memcpy (&child0->c_lcl_ip6, &ip60->dst_address,
                           sizeof (ip6_address_t));
              clib_memcpy (&child0->c_rmt_ip6, &ip60->src_address,
                           sizeof (ip6_address_t));
            }

          if (tcp_options_parse (th0, &child0->rcv_opts))
            {
              clib_warning ("options parse fail");
              goto drop;
            }

          child0->irs = vnet_buffer (b0)->tcp.seq_number;
          child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
          child0->rcv_las = child0->rcv_nxt;
          child0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

          /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
           * segments are used to initialize PAWS. */
          if (tcp_opts_tstamp (&child0->rcv_opts))
            {
              child0->tsval_recent = child0->rcv_opts.tsval;
              child0->tsval_recent_age = tcp_time_now ();
            }

          if (tcp_opts_wscale (&child0->rcv_opts))
            child0->snd_wscale = child0->rcv_opts.wscale;

          child0->snd_wnd = clib_net_to_host_u16 (th0->window)
            << child0->snd_wscale;
          child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
          child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

          tcp_connection_init_vars (child0);
          TCP_EVT_DBG (TCP_EVT_SYN_RCVD, child0, 1);

          if (stream_session_accept (&child0->connection, lc0->c_s_index,
                                     0 /* notify */ ))
            {
              clib_warning ("session accept fail");
              tcp_connection_cleanup (child0);
              error0 = TCP_ERROR_CREATE_SESSION_FAIL;
              goto drop;
            }

          /* Reuse buffer to make syn-ack and send */
          tcp_make_synack (child0, b0);
          next0 = tcp_next_output (is_ip4);
          tcp_timer_set (child0, TCP_TIMER_ESTABLISH, TCP_SYN_RCVD_TIME);
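
          /* The ESTABLISH timer bounds the time spent in SYN-RCVD: if the
           * handshake-completing ACK never arrives, the timer expiry cleans
           * up the half-open connection. */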
        drop:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
              clib_memcpy (&t0->tcp_connection, lc0,
                           sizeof (t0->tcp_connection));
            }

          n_syns += (error0 == TCP_ERROR_NONE);
          b0->error = node->errors[error0];

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
  return from_frame->n_vectors;
}
static uword
tcp4_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_listen_node) =
{
  .function = tcp4_listen,
  .name = "tcp4-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_listen_node, tcp4_listen);

VLIB_REGISTER_NODE (tcp6_listen_node) =
{
  .function = tcp6_listen,
  .name = "tcp6-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_listen_node, tcp6_listen);
vlib_node_registration_t tcp4_input_node;
vlib_node_registration_t tcp6_input_node;

typedef enum _tcp_input_next
{
  TCP_INPUT_NEXT_DROP,
  TCP_INPUT_NEXT_LISTEN,
  TCP_INPUT_NEXT_RCV_PROCESS,
  TCP_INPUT_NEXT_SYN_SENT,
  TCP_INPUT_NEXT_ESTABLISHED,
  TCP_INPUT_NEXT_RESET,
  TCP_INPUT_NEXT_PUNT,
  TCP_INPUT_N_NEXT
} tcp_input_next_t;

#define foreach_tcp4_input_next                 \
  _ (DROP, "ip4-drop")                          \
  _ (LISTEN, "tcp4-listen")                     \
  _ (RCV_PROCESS, "tcp4-rcv-process")           \
  _ (SYN_SENT, "tcp4-syn-sent")                 \
  _ (ESTABLISHED, "tcp4-established")           \
  _ (RESET, "tcp4-reset")                       \
  _ (PUNT, "ip4-punt")

#define foreach_tcp6_input_next                 \
  _ (DROP, "ip6-drop")                          \
  _ (LISTEN, "tcp6-listen")                     \
  _ (RCV_PROCESS, "tcp6-rcv-process")           \
  _ (SYN_SENT, "tcp6-syn-sent")                 \
  _ (ESTABLISHED, "tcp6-established")           \
  _ (RESET, "tcp6-reset")                       \
  _ (PUNT, "ip6-punt")

#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
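
/* The dispatch table consulted below is indexed by [state][flags], where
 * flags is the TCP header flag byte masked with filter_flags. Restricting
 * the mask to SYN/ACK/RST/FIN keeps the table small: only 16 flag
 * combinations per state need entries; everything else falls back to the
 * defaults installed in tcp_dispatch_table_init. */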
always_inline uword
tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  next_index = node->cached_next_index;
  tcp_set_time_now (my_thread_index);
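
  /* Refresh this thread's cached view of the clock once per frame; the
   * per-packet work below reads time through this cached value rather
   * than querying the clock repeatedly. */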
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          int n_advance_bytes0, n_data_bytes0;
          u32 bi0, fib_index0;
          vlib_buffer_t *b0;
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          transport_connection_t *tconn;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          u32 error0 = TCP_ERROR_NO_LISTENER, next0 = TCP_INPUT_NEXT_DROP;
          u8 flags0, is_filtered = 0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          vnet_buffer (b0)->tcp.flags = 0;
          fib_index0 = vnet_buffer (b0)->ip.fib_index;

          /* Checksum was computed by ipx_local, no need to compute again */

          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              tcp0 = ip4_next_header (ip40);
              n_advance_bytes0 = (ip4_header_bytes (ip40)
                                  + tcp_header_bytes (tcp0));
              n_data_bytes0 = clib_net_to_host_u16 (ip40->length)
                - n_advance_bytes0;
              tconn = session_lookup_connection_wt4 (fib_index0,
                                                     &ip40->dst_address,
                                                     &ip40->src_address,
                                                     tcp0->dst_port,
                                                     tcp0->src_port,
                                                     TRANSPORT_PROTO_TCP,
                                                     my_thread_index,
                                                     &is_filtered);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              tcp0 = ip6_next_header (ip60);
              n_advance_bytes0 = tcp_header_bytes (tcp0);
              n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length)
                - n_advance_bytes0;
              n_advance_bytes0 += sizeof (ip60[0]);
              tconn = session_lookup_connection_wt6 (fib_index0,
                                                     &ip60->dst_address,
                                                     &ip60->src_address,
                                                     tcp0->dst_port,
                                                     tcp0->src_port,
                                                     TRANSPORT_PROTO_TCP,
                                                     my_thread_index,
                                                     &is_filtered);
            }

          /* Length check */
          if (PREDICT_FALSE (n_advance_bytes0 < 0))
            {
              error0 = TCP_ERROR_LENGTH;
              goto done;
            }

          vnet_buffer (b0)->tcp.hdr_offset = (u8 *) tcp0
            - (u8 *) vlib_buffer_get_current (b0);

          /* Session exists */
          if (PREDICT_TRUE (0 != tconn))
            {
              tc0 = tcp_get_connection_from_transport (tconn);
              ASSERT (tcp_lookup_is_valid (tc0, tcp0));

              /* Save connection index */
              vnet_buffer (b0)->tcp.connection_index = tc0->c_c_index;
              vnet_buffer (b0)->tcp.seq_number =
                clib_net_to_host_u32 (tcp0->seq_number);
              vnet_buffer (b0)->tcp.ack_number =
                clib_net_to_host_u32 (tcp0->ack_number);

              vnet_buffer (b0)->tcp.data_offset = n_advance_bytes0;
              vnet_buffer (b0)->tcp.data_len = n_data_bytes0;

              flags0 = tcp0->flags & filter_flags;
              next0 = tm->dispatch_table[tc0->state][flags0].next;
              error0 = tm->dispatch_table[tc0->state][flags0].error;
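
              /* Example: a pure ACK (flags0 == TCP_FLAG_ACK) on an
               * ESTABLISHED connection dispatches to tcp4/6-established,
               * while a SYN on a LISTEN connection dispatches to
               * tcp4/6-listen; see tcp_dispatch_table_init below. */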
              if (PREDICT_FALSE (error0 == TCP_ERROR_DISPATCH
                                 || next0 == TCP_INPUT_NEXT_RESET))
                {
                  /* Overload tcp flags to store state */
                  tcp_state_t state0 = tc0->state;
                  vnet_buffer (b0)->tcp.flags = tc0->state;

                  if (error0 == TCP_ERROR_DISPATCH)
                    clib_warning ("disp error state %U flags %U",
                                  format_tcp_state, state0, format_tcp_flags,
                                  (int) flags0);
                }
            }
          else
            {
              if (is_filtered)
                {
                  next0 = TCP_INPUT_NEXT_DROP;
                  error0 = TCP_ERROR_FILTERED;
                }
              else if ((is_ip4 && tm->punt_unknown4) ||
                       (!is_ip4 && tm->punt_unknown6))
                {
                  next0 = TCP_INPUT_NEXT_PUNT;
                  error0 = TCP_ERROR_PUNT;
                }
              else
                {
                  /* Send reset */
                  next0 = TCP_INPUT_NEXT_RESET;
                  error0 = TCP_ERROR_NO_LISTENER;
                }
              tc0 = 0;
            }

        done:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              tcp_rx_trace_t *t0;
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return from_frame->n_vectors;
}
static uword
tcp4_input (vlib_main_t * vm, vlib_node_runtime_t * node,
            vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_input (vlib_main_t * vm, vlib_node_runtime_t * node,
            vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  .function = tcp4_input,
  .name = "tcp4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_input_node, tcp4_input);

VLIB_REGISTER_NODE (tcp6_input_node) =
{
  .function = tcp6_input,
  .name = "tcp6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input);
static void
tcp_dispatch_table_init (tcp_main_t * tm)
{
  int i, j;

  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
        tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
        tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)                                              \
do {                                                            \
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);            \
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);           \
} while (0)
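
  /* For instance, _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN,
   * TCP_ERROR_NONE) expands to the two assignments that route SYNs
   * received in LISTEN state to the tcp-listen node with no error. */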
  /* SYNs for new connections -> tcp-listen. */
  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_RST_RCVD);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_NONE);
  /* ACK for a SYN-ACK -> tcp-rcv-process. */
  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* SYN-ACK for a SYN */
  _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  /* ACK for established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* FIN for established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  /* ACK or FIN-ACK to our FIN */
  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  /* FIN in reply to our FIN from the other side */
  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* FIN confirming that the peer (app) has closed */
  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_CONNECTION_CLOSED);
#undef _
}
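
/* Any state/flag combination without an explicit entry above keeps the
 * defaults from the initialization loop: the packet is dropped and counted
 * as TCP_ERROR_DISPATCH, which tcp46_input_inline also logs. */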
clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  if ((error = vlib_call_init_function (vm, tcp_init)))
    return error;

  /* Initialize dispatch table. */
  tcp_dispatch_table_init (tm);

  return error;
}

VLIB_INIT_FUNCTION (tcp_input_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */