/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vppinfra/sparse_vec.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>

static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};
/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next                  \
  _ (DROP, "error-drop")                        \
  _ (TCP4_OUTPUT, "tcp4-output")                \
  _ (TCP6_OUTPUT, "tcp6-output")

typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;

typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;

typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_SYN_SENT_N_NEXT,
} tcp_syn_sent_next_t;

typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_LISTEN_N_NEXT,
} tcp_listen_next_t;

/* Generic, state independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_STATE_N_NEXT,
} tcp_state_next_t;

#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT          \
                                        : TCP_NEXT_TCP6_OUTPUT)

vlib_node_registration_t tcp4_established_node;
vlib_node_registration_t tcp6_established_node;

/**
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 *      Length  Window
 *      ------- -------  -------------------------------------------
 *      0       0        SEG.SEQ = RCV.NXT
 *      0       >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *      >0      0        not acceptable
 *      >0      >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                       or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately consists of checking if the segment falls within the
 * window. The one important difference compared to RFC793 is that we use
 * rcv_las, i.e., the rcv_nxt at the last ack sent, instead of rcv_nxt,
 * since that's the peer's reference when computing our receive window.
 *
 * This:
 *  seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * use rcv_nxt in the right edge window test instead of rcv_las.
 */
always_inline u8
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
{
  return (seq_geq (end_seq, tc->rcv_las)
          && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
}
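
/* Worked example (illustrative, added for clarity): sequence space
 * arithmetic is mod 2^32, so the window test must use seq_geq/seq_leq
 * rather than plain comparisons. With rcv_las = rcv_nxt = 0xfffffff0 and
 * rcv_wnd = 0x100, a segment with seq = 0xfffffffa and end_seq = 0x0000000a
 * wraps past zero yet is accepted: seq_geq (0x0000000a, 0xfffffff0) and
 * seq_leq (0xfffffffa, 0xfffffff0 + 0x100) both hold, because the signed
 * difference is what gets tested. */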
void
tcp_options_parse (tcp_header_t * th, tcp_options_t * to)
{
  const u8 *data;
  u8 opt_len, opts_len, kind;
  int j;
  sack_block_t b;

  opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
  data = (const u8 *) (th + 1);

  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      kind = data[0];

      /* Get options length */
      if (kind == TCP_OPTION_EOL)
        break;
      else if (kind == TCP_OPTION_NOOP)
        opt_len = 1;
      else
        {
          /* broken options */
          if (opts_len < 2)
            break;
          opt_len = data[1];

          /* weird option length */
          if (opt_len < 2 || opt_len > opts_len)
            break;
        }

      /* Parse options */
      switch (kind)
        {
        case TCP_OPTION_MSS:
          if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_MSS;
              to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
            }
          break;
        case TCP_OPTION_WINDOW_SCALE:
          if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_WSCALE;
              to->wscale = data[2];
              if (to->wscale > TCP_MAX_WND_SCALE)
                {
                  clib_warning ("Illegal window scaling value: %d",
                                to->wscale);
                  to->wscale = TCP_MAX_WND_SCALE;
                }
            }
          break;
        case TCP_OPTION_TIMESTAMP:
          if (opt_len == TCP_OPTION_LEN_TIMESTAMP)
            {
              to->flags |= TCP_OPTS_FLAG_TSTAMP;
              to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
              to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
            }
          break;
        case TCP_OPTION_SACK_PERMITTED:
          if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
            to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
          break;
        case TCP_OPTION_SACK_BLOCK:
          /* If SACK permitted was not advertised or a SYN, break */
          if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
            break;

          /* If too short or not correctly formatted, break */
          if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
            break;

          to->flags |= TCP_OPTS_FLAG_SACK;
          to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
          vec_reset_length (to->sacks);
          for (j = 0; j < to->n_sack_blocks; j++)
            {
              b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 4 * j));
              b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 4 * j));
              vec_add1 (to->sacks, b);
            }
          break;
        default:
          /* Nothing to see here */
          continue;
        }
    }
}
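
/* Usage sketch (illustrative; mirrors how callers in this file use the
 * parser, assuming the tcp_opts_* accessor macros from tcp_packet.h;
 * peer_mss is a hypothetical variable for the example):
 *
 *   tcp_options_t opts = { 0 };
 *   tcp_options_parse (th, &opts);
 *   if (tcp_opts_mss (&opts))          // MSS is only parsed on SYNs
 *     peer_mss = opts.mss;
 *   if (tcp_opts_wscale (&opts))       // likewise SYN-only
 *     tc->snd_wscale = opts.wscale;
 */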
always_inline int
tcp_segment_check_paws (tcp_connection_t * tc)
{
  return tcp_opts_tstamp (&tc->opt) && tc->tsval_recent
    && timestamp_lt (tc->opt.tsval, tc->tsval_recent);
}

/**
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if the segment has a wrapped sequence number (PAWS) and
 * then does the processing associated with the first four steps (ignoring
 * security and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
 */
static int
tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0,
                      vlib_buffer_t * b0, tcp_header_t * th0, u32 * next0)
{
  u8 paws_failed;

  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
    return -1;

  tcp_options_parse (th0, &tc0->opt);

  /* RFC1323: Check against wrapped sequence numbers (PAWS). If we have
   * timestamp to echo and it's less than tsval_recent, drop segment
   * but still send an ACK in order to retain TCP's mechanism for detecting
   * and recovering from half-open connections */
  paws_failed = tcp_segment_check_paws (tc0);
  if (paws_failed)
    {
      clib_warning ("paws failed");

      /* If it just so happens that a segment updates tsval_recent for a
       * segment over 24 days old, invalidate tsval_recent. */
      if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
                        tcp_time_now ()))
        {
          /* Age isn't reset until we get a valid tsval (bsd inspired) */
          tc0->tsval_recent = 0;
        }
      else
        {
          /* Drop after ack if not rst */
          if (!tcp_rst (th0))
            {
              tcp_make_ack (tc0, b0);
              *next0 = tcp_next_output (tc0->c_is_ip4);
              TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0);
            }
          return -1;
        }
    }

  /* 1st: check sequence number */
  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
                               vnet_buffer (b0)->tcp.seq_end))
    {
      /* If our window is 0 and the packet is in sequence, let it pass
       * through for ack processing. It should be dropped later. */
      if (tc0->rcv_wnd == 0
          && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
        {
          /* TODO Should segment be tagged? */
        }
      else
        {
          /* If not RST, send dup ack */
          if (!tcp_rst (th0))
            {
              tcp_make_ack (tc0, b0);
              *next0 = tcp_next_output (tc0->c_is_ip4);
              TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0);
            }
          return -1;
        }
    }

  /* 2nd: check the RST bit */
  if (tcp_rst (th0))
    {
      tcp_connection_reset (tc0);
      return -1;
    }

  /* 3rd: check security and precedence (skip) */

  /* 4th: check the SYN bit */
  if (tcp_syn (th0))
    {
      tcp_send_reset (b0, tc0->c_is_ip4);
      return -1;
    }

  /* If PAWS passed and segment in window, save timestamp */
  if (!paws_failed)
    {
      tc0->tsval_recent = tc0->opt.tsval;
      tc0->tsval_recent_age = tcp_time_now ();
    }

  return 0;
}
always_inline int
tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0)
{
  /* SND.UNA =< SEG.ACK =< SND.NXT */
  return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number)
          && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt));
}

/**
 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't. Instead, we rely on
 * better precision time measurements.
 *
 * TODO support us rtt resolution
 */
static void
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
{
  int err;

  if (tc->srtt != 0)
    {
      err = mrtt - tc->srtt;
      tc->srtt += err >> 3;

      /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
       * The increase should be bound */
      tc->rttvar += ((int) clib_abs (err) - (int) tc->rttvar) >> 2;
    }
  else
    {
      /* First measurement. */
      tc->srtt = mrtt;
      tc->rttvar = mrtt >> 1;
    }
}
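
/* Worked example (illustrative, units are timer ticks): take srtt = 8,
 * rttvar = 2 and a new measurement mrtt = 4. Then
 *   err = 4 - 8 = -4
 *   srtt += err >> 3                -> srtt = 8 + (-1) = 7 (arithmetic shift)
 *   rttvar += (|err| - rttvar) >> 2 -> rttvar = 2 + (2 >> 2) = 2
 * and tcp_update_rtt below derives rto = srtt + 4 * rttvar = 15, capped at
 * TCP_RTO_MAX. The 1/8 and 1/4 gains are RFC6298's alpha and beta, minus
 * the fixed-point scaling used in the original paper. */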
/** Update RTT estimate and RTO timer
 *
 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
 * timing. Middle boxes are known to fiddle with TCP options so we
 * should give higher priority to ACK timing.
 *
 * @return 1 if a valid RTT measurement was taken, 0 otherwise
 */
static int
tcp_update_rtt (tcp_connection_t * tc, u32 ack)
{
  u32 mrtt = 0;

  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
   * RTT because they're ambiguous. */
  if (tc->rtt_seq && seq_gt (ack, tc->rtt_seq) && !tc->rto_boff)
    {
      mrtt = tcp_time_now () - tc->rtt_ts;
    }

  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
   * snd_una, i.e., the left side of the send window:
   * seq_lt (tc->snd_una, ack). Note: last condition could be dropped, we don't
   * try to update rtt for dupacks */
  else if (tcp_opts_tstamp (&tc->opt) && tc->opt.tsecr && tc->bytes_acked)
    {
      mrtt = tcp_time_now () - tc->opt.tsecr;
    }

  /* Ignore dubious measurements */
  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
    return 0;

  tcp_estimate_rtt (tc, mrtt);

  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);

  /* Allow measuring of RTT and make sure boff is 0 */
  tc->rtt_seq = 0;
  tc->rto_boff = 0;

  return 1;
}
/**
 * Dequeue bytes that have been acked and while at it update RTT estimates.
 */
static void
tcp_dequeue_acked (tcp_connection_t * tc, u32 ack)
{
  /* Dequeue the newly ACKed bytes */
  stream_session_dequeue_drop (&tc->connection, tc->bytes_acked);

  /* Update rtt and rto */
  tcp_update_rtt (tc, ack);
}

/**
 * Check if ACK is a dupack, as per RFC5681 Sec. 2.
 *
 * This works only if called before updating snd_wnd.
 */
always_inline u8
tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 new_snd_wnd)
{
  return ((vnet_buffer (b)->tcp.ack_number == tc->snd_una)
          && seq_gt (tc->snd_una_max, tc->snd_una)
          && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
          && (new_snd_wnd == tc->snd_wnd));
}
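
/* Example (illustrative): all four RFC5681 conditions must hold for a
 * dupack: the ack acks nothing new (ack == snd_una), data is still in
 * flight (snd_una_max > snd_una), the segment carries no payload and no
 * SYN/FIN (seq_end == seq_number), and the advertised window is unchanged.
 * A pure window update with ack == snd_una is thus, correctly, not counted
 * towards fast retransmit. */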
void
scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  sack_scoreboard_hole_t *next, *prev;

  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    {
      next = pool_elt_at_index (sb->holes, hole->next);
      next->prev = hole->prev;
    }

  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    {
      prev = pool_elt_at_index (sb->holes, hole->prev);
      prev->next = hole->next;
    }
  else
    {
      sb->head = hole->next;
    }

  pool_put (sb->holes, hole);
}

sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
                        u32 start, u32 end)
{
  sack_scoreboard_hole_t *hole, *next, *prev;
  u32 hole_index;

  pool_get (sb->holes, hole);
  memset (hole, 0, sizeof (*hole));

  hole->start = start;
  hole->end = end;
  hole_index = hole - sb->holes;

  prev = scoreboard_get_hole (sb, prev_index);
  if (prev)
    {
      hole->prev = prev - sb->holes;
      hole->next = prev->next;

      if ((next = scoreboard_next_hole (sb, hole)))
        next->prev = hole_index;

      prev->next = hole_index;
    }
  else
    {
      sb->head = hole_index;
      hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
      hole->next = TCP_INVALID_SACK_HOLE_INDEX;
    }

  return hole;
}
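
/* Scoreboard sketch (illustrative): holes are pool elements chained via
 * prev/next indices in sequence order, covering the not-yet-SACKed gaps
 * between snd_una and snd_una_max. E.g., after SACK blocks [4,6) and
 * [8,9) arrive for a window [2,10):
 *
 *   head -> [2,4) <-> [6,8) <-> [9,10) <- tail
 *
 * scoreboard_insert_hole links the new hole after prev_index, or at the
 * head when prev_index is TCP_INVALID_SACK_HOLE_INDEX. */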
void
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
{
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_block_t *blk, tmp;
  sack_scoreboard_hole_t *hole, *next_hole, *last_hole, *new_hole;
  u32 blk_index = 0, old_sacked_bytes, hole_index;
  int i, j;

  sb->last_sacked_bytes = 0;
  sb->snd_una_adv = 0;
  old_sacked_bytes = sb->sacked_bytes;

  if (!tcp_opts_sack (&tc->opt) && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    return;

  /* Remove invalid blocks */
  blk = tc->opt.sacks;
  while (blk < vec_end (tc->opt.sacks))
    {
      if (seq_lt (blk->start, blk->end)
          && seq_gt (blk->start, tc->snd_una)
          && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_nxt))
        {
          blk++;
          continue;
        }
      vec_del1 (tc->opt.sacks, blk - tc->opt.sacks);
    }

  /* Add block for cumulative ack */
  if (seq_gt (ack, tc->snd_una))
    {
      tmp.start = tc->snd_una;
      tmp.end = ack;
      vec_add1 (tc->opt.sacks, tmp);
    }

  if (vec_len (tc->opt.sacks) == 0)
    return;

  /* Make sure blocks are ordered */
  for (i = 0; i < vec_len (tc->opt.sacks); i++)
    for (j = i + 1; j < vec_len (tc->opt.sacks); j++)
      if (seq_lt (tc->opt.sacks[j].start, tc->opt.sacks[i].start))
        {
          tmp = tc->opt.sacks[i];
          tc->opt.sacks[i] = tc->opt.sacks[j];
          tc->opt.sacks[j] = tmp;
        }

  if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    {
      /* If no holes, insert the first that covers all outstanding bytes */
      last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
                                          tc->snd_una, tc->snd_una_max);
      sb->tail = scoreboard_hole_index (sb, last_hole);
    }
  else
    {
      /* If we have holes but snd_una_max is beyond the last hole, update
       * last hole end */
      tmp = tc->opt.sacks[vec_len (tc->opt.sacks) - 1];
      last_hole = scoreboard_last_hole (sb);
      if (seq_gt (tc->snd_una_max, sb->max_byte_sacked)
          && seq_gt (tc->snd_una_max, last_hole->end))
        last_hole->end = tc->snd_una_max;
    }

  /* Walk the holes with the SACK blocks */
  hole = pool_elt_at_index (sb->holes, sb->head);
  while (hole && blk_index < vec_len (tc->opt.sacks))
    {
      blk = &tc->opt.sacks[blk_index];

      if (seq_leq (blk->start, hole->start))
        {
          /* Block covers hole. Remove hole */
          if (seq_geq (blk->end, hole->end))
            {
              next_hole = scoreboard_next_hole (sb, hole);

              /* Byte accounting */
              if (seq_leq (hole->end, ack))
                {
                  /* Bytes lost because snd_wnd left edge advances */
                  if (next_hole && seq_leq (next_hole->start, ack))
                    sb->sacked_bytes -= next_hole->start - hole->end;
                  else
                    sb->sacked_bytes -= ack - hole->end;
                }
              else
                {
                  sb->sacked_bytes += scoreboard_hole_bytes (hole);
                }

              /* snd_una needs to be advanced */
              if (seq_geq (ack, hole->end))
                {
                  if (next_hole && seq_lt (ack, next_hole->start))
                    sb->snd_una_adv = next_hole->start - ack;
                  else
                    sb->snd_una_adv = sb->max_byte_sacked - ack;

                  /* all these can be delivered */
                  sb->sacked_bytes -= sb->snd_una_adv;
                }

              /* About to remove last hole */
              if (hole == last_hole)
                {
                  sb->tail = hole->prev;
                  last_hole = scoreboard_last_hole (sb);
                  /* keep track of max byte sacked in case the last hole
                   * is acked */
                  if (seq_gt (hole->end, sb->max_byte_sacked))
                    sb->max_byte_sacked = hole->end;
                }
              scoreboard_remove_hole (sb, hole);
              hole = next_hole;
            }
          /* Partial 'head' overlap */
          else
            {
              if (seq_gt (blk->end, hole->start))
                {
                  sb->sacked_bytes += blk->end - hole->start;
                  hole->start = blk->end;
                }
              blk_index++;
            }
        }
      else
        {
          /* Hole must be split */
          if (seq_lt (blk->end, hole->end))
            {
              sb->sacked_bytes += blk->end - blk->start;
              hole_index = scoreboard_hole_index (sb, hole);
              new_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
                                                 hole->end);

              /* Pool might've moved */
              hole = scoreboard_get_hole (sb, hole_index);
              hole->end = blk->start;

              /* New or split of tail */
              if ((last_hole->end == new_hole->end)
                  || seq_lt (last_hole->end, new_hole->start))
                {
                  last_hole = new_hole;
                  sb->tail = scoreboard_hole_index (sb, new_hole);
                }

              blk_index++;
              hole = scoreboard_next_hole (sb, hole);
            }
          else
            {
              sb->sacked_bytes += hole->end - blk->start;
              hole->end = blk->start;
              hole = scoreboard_next_hole (sb, hole);
            }
        }
    }

  sb->last_sacked_bytes = sb->sacked_bytes + sb->snd_una_adv
    - old_sacked_bytes;
}
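
/* Worked example (illustrative): say snd_una = 100, snd_una_max = 300 and
 * the first SACK block [200,250) arrives. The initial hole [100,300) gets
 * split: the head hole shrinks to [100,200), a new tail hole [250,300) is
 * inserted, and sacked_bytes/last_sacked_bytes grow by 50. If a later ack
 * cumulatively acks 200 while SACKing [200,300), the remaining holes are
 * consumed and snd_una_adv advances snd_una across the sacked range. */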
/**
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
 * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
static void
tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
{
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
    {
      tc->snd_wnd = snd_wnd;
      tc->snd_wl1 = seq;
      tc->snd_wl2 = ack;
      TCP_EVT_DBG (TCP_EVT_SND_WND, tc);

      /* Set probe timer if we just got 0 wnd */
      if (tc->snd_wnd < tc->snd_mss
          && !tcp_timer_is_active (tc, TCP_TIMER_PERSIST))
        tcp_persist_timer_set (tc);
      else
        tcp_persist_timer_reset (tc);
    }
}
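
/* Example (illustrative): the RFC793 test above rejects stale updates from
 * reordered segments. If snd_wl1 = 1000 and an older segment with seq = 900
 * arrives, its advertised window is ignored even if larger than the current
 * snd_wnd; only segments that are newer (or equally new with a newer ack)
 * may change the send window. */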
void
tcp_cc_congestion (tcp_connection_t * tc)
{
  tc->snd_congestion = tc->snd_nxt;
  tc->cc_algo->congestion (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
}

void
tcp_cc_recover (tcp_connection_t * tc)
{
  /* TODO: check if time to recover was small. It might be that RTO popped
   * and we retransmitted the whole window anyway */

  if (tcp_in_fastrecovery (tc))
    tc->cc_algo->recovered (tc);

  tc->rtx_bytes = 0;
  tc->rcv_dupacks = 0;
  tc->snd_nxt = tc->snd_una;

  tc->cc_algo->rcv_ack (tc);
  tc->tsecr_last_ack = tc->opt.tsecr;

  tcp_cong_recovery_off (tc);

  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}
static void
tcp_cc_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u8 partial_ack;

  if (tcp_in_cong_recovery (tc))
    {
      partial_ack = seq_lt (tc->snd_una, tc->snd_congestion);
      if (!partial_ack)
        {
          /* Clear retransmitted bytes. */
          tcp_cc_recover (tc);
        }
      else
        {
          TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);

          /* Clear retransmitted bytes. XXX should we clear all? */
          tc->rtx_bytes = 0;
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);

          /* In case snd_nxt is still in the past and output tries to
           * shove some new bytes */
          tc->snd_nxt = tc->snd_una_max;

          /* XXX need proper RFC6675 support */
          if (tc->sack_sb.last_sacked_bytes && !tcp_in_recovery (tc))
            {
              tcp_fast_retransmit (tc);
            }
          else
            {
              /* Retransmit first unacked segment */
              tcp_retransmit_first_unacked (tc);
            }
        }
    }
  else
    {
      tc->cc_algo->rcv_ack (tc);
      tc->tsecr_last_ack = tc->opt.tsecr;
      tc->rcv_dupacks = 0;
    }
}
static void
tcp_cc_rcv_dupack (tcp_connection_t * tc, u32 ack)
{
//  ASSERT (seq_geq(tc->snd_una, ack));

  tc->rcv_dupacks++;
  if (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
    {
      /* RFC6582 NewReno heuristic to avoid multiple fast retransmits */
      if (tc->opt.tsecr != tc->tsecr_last_ack)
        {
          tc->rcv_dupacks = 0;
          return;
        }

      tcp_fastrecovery_on (tc);

      /* Handle congestion and dupack */
      tcp_cc_congestion (tc);
      tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);

      tcp_fast_retransmit (tc);

      /* Post retransmit update cwnd to ssthresh and account for the
       * three segments that have left the network and should've been
       * buffered at the receiver */
      tc->cwnd = tc->ssthresh + TCP_DUPACK_THRESHOLD * tc->snd_mss;
    }
  else if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD)
    {
      ASSERT (tcp_in_fastrecovery (tc));

      tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
    }
}
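
/* Worked example (illustrative, assuming a NewReno-style congestion hook
 * that halves the window): with snd_mss = 1448 and 20 * mss in flight, the
 * third dupack triggers fast retransmit; the congestion hook would leave
 * ssthresh = 10 * mss, and the inflation above sets
 * cwnd = ssthresh + 3 * mss = 13 * mss, crediting the three segments that
 * already left the network. Subsequent dupacks are handed to rcv_cong_ack,
 * which typically inflates cwnd by another mss each. */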
void
tcp_cc_init (tcp_connection_t * tc)
{
  tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO);
  tc->cc_algo->init (tc);
}
static int
tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b,
             tcp_header_t * th, u32 * next, u32 * error)
{
  u32 new_snd_wnd;

  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
  if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt))
    {
      /* If we have outstanding data and this is within the window, accept it,
       * probably retransmit has timed out. Otherwise ACK segment and then
       * drop it */
      if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
        {
          tcp_make_ack (tc, b);
          *next = tcp_next_output (tc->c_is_ip4);
          *error = TCP_ERROR_ACK_INVALID;
          TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
                       vnet_buffer (b)->tcp.ack_number);
          return -1;
        }

      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2,
                   vnet_buffer (b)->tcp.ack_number);

      tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
      *error = TCP_ERROR_ACK_FUTURE;
    }

  /* If old ACK, probably it's an old dupack */
  if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
    {
      *error = TCP_ERROR_ACK_OLD;
      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
                   vnet_buffer (b)->tcp.ack_number);
      if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
        {
          TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc);
          tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number);
        }
      /* Don't drop yet */
      return 0;
    }

  if (tcp_opts_sack_permitted (&tc->opt))
    tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);

  new_snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale;

  if (tcp_ack_is_dupack (tc, b, new_snd_wnd))
    {
      TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
      tcp_cc_rcv_dupack (tc, vnet_buffer (b)->tcp.ack_number);
      *error = TCP_ERROR_ACK_DUP;
      return -1;
    }

  /*
   * Valid ACK
   */

  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;

  /* Dequeue ACKed data and update RTT */
  tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number);
  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
                      vnet_buffer (b)->tcp.ack_number, new_snd_wnd);

  /* If some of our sent bytes have been acked, update cc and retransmit
   * timer. */
  if (tc->bytes_acked)
    {
      TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);

      /* Updates congestion control (slow start/congestion avoidance) */
      tcp_cc_rcv_ack (tc, b);

      /* If everything has been acked, stop retransmit timer
       * otherwise update */
      if (tc->snd_una == tc->snd_una_max)
        tcp_retransmit_timer_reset (tc);
      else
        tcp_retransmit_timer_update (tc);
    }

  return 0;
}
/**
 * Build SACK list as per RFC2018.
 *
 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the ones most recently reported in SACK
 * blocks.
 *
 * @param tc TCP connection for which the SACK list is updated
 * @param start Start sequence number of the newest SACK block
 * @param end End sequence of the newest SACK block
 */
void
tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
{
  sack_block_t *new_list = 0, *block = 0;
  int i;

  /* If the first segment is ooo add it to the list. Last write might've moved
   * rcv_nxt over the first segment. */
  if (seq_lt (tc->rcv_nxt, start))
    {
      vec_add2 (new_list, block, 1);
      block->start = start;
      block->end = end;
    }

  /* Find the blocks still worth keeping. */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    {
      /* Discard if rcv_nxt advanced beyond current block */
      if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
        continue;

      /* Merge or drop if segment overlapped by the new segment */
      if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
                    && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
        {
          if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
            new_list[0].start = tc->snd_sacks[i].start;
          if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
            new_list[0].end = tc->snd_sacks[i].end;
          continue;
        }

      /* Save to new SACK list if we have space. */
      if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
        {
          vec_add1 (new_list, tc->snd_sacks[i]);
        }
      else
        {
          clib_warning ("dropped sack blocks");
        }
    }

  ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);

  /* Replace old vector with new one */
  vec_free (tc->snd_sacks);
  tc->snd_sacks = new_list;
}
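
/* Worked example (illustrative): with rcv_nxt = 1000, segments [1100,1200)
 * and then [1300,1400) arrive out of order. After the first,
 * snd_sacks = {[1100,1200)}; after the second the newest block is reported
 * first: {[1300,1400), [1100,1200)}. Once [1000,1100) finally lands,
 * rcv_nxt jumps to 1200, the first block is swallowed by the cumulative
 * ack and only [1300,1400) survives the rebuild above. */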
/** Enqueue data for delivery to application */
always_inline int
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
                          u16 data_len)
{
  int written;

  /* Pure ACK. Update rcv_nxt and be done. */
  if (PREDICT_FALSE (data_len == 0))
    {
      tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end;
      return TCP_ERROR_PURE_ACK;
    }

  written = stream_session_enqueue_data (&tc->connection,
                                         vlib_buffer_get_current (b),
                                         data_len, 1 /* queue event */ );

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written);

  /* Update rcv_nxt */
  if (PREDICT_TRUE (written == data_len))
    {
      tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end;
    }
  /* If more data written than expected, account for out-of-order bytes. */
  else if (written > data_len)
    {
      tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end + written - data_len;

      /* Send ACK confirming the update */
      tc->flags |= TCP_CONN_SNDACK;
    }
  else if (written > 0)
    {
      /* We've written something but FIFO is probably full now */
      tc->rcv_nxt += written;

      /* Depending on how fast the app is, all remaining buffers in burst will
       * not be enqueued. Inform peer */
      tc->flags |= TCP_CONN_SNDACK;

      return TCP_ERROR_PARTIALLY_ENQUEUED;
    }
  else
    {
      tc->flags |= TCP_CONN_SNDACK;
      return TCP_ERROR_FIFO_FULL;
    }

  /* Update SACK list if need be */
  if (tcp_opts_sack_permitted (&tc->opt))
    {
      /* Remove SACK blocks that have been delivered */
      tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
    }

  return TCP_ERROR_ENQUEUED;
}
/** Enqueue out-of-order data */
always_inline int
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
                         u16 data_len)
{
  stream_session_t *s0;
  u32 offset;
  int rv;

  /* Pure ACK. Do nothing */
  if (PREDICT_FALSE (data_len == 0))
    {
      return TCP_ERROR_PURE_ACK;
    }

  s0 = stream_session_get (tc->c_s_index, tc->c_thread_index);
  offset = vnet_buffer (b)->tcp.seq_number - tc->irs;

  clib_warning ("ooo: offset %d len %d", offset, data_len);

  rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, offset, data_len,
                                     vlib_buffer_get_current (b));

  /* Nothing written */
  if (rv)
    {
      TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0);
      return TCP_ERROR_FIFO_FULL;
    }

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);

  /* Update SACK list if in use */
  if (tcp_opts_sack_permitted (&tc->opt))
    {
      ooo_segment_t *newest;
      u32 start, end;

      /* Get the newest segment from the fifo */
      newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo);
      start = ooo_segment_offset (s0->server_rx_fifo, newest);
      end = ooo_segment_end_offset (s0->server_rx_fifo, newest);

      tcp_update_sack_list (tc, start, end);
    }

  return TCP_ERROR_ENQUEUED;
}
/**
 * Check if ACK could be delayed. Returns 1 for a full frame when the ack
 * can be delayed, and 0 if we're always acking or constrained to ack now.
 */
always_inline int
tcp_can_delack (tcp_connection_t * tc)
{
  /* Send ack if ... */
  if (TCP_ALWAYS_ACK
      /* just sent a rcv wnd 0 */
      || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0
      /* constrained to send ack */
      || (tc->flags & TCP_CONN_SNDACK) != 0
      /* we're almost out of tx wnd */
      || tcp_available_snd_space (tc) < 2 * tc->snd_mss)
    return 0;

  return 1;
}
static int
tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b,
                 u16 n_data_bytes, u32 * next0)
{
  u32 error = 0;

  /* Handle out-of-order data */
  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
    {
      /* Old sequence numbers allowed through because they overlapped
       * the rx window */
      if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
        {
          error = TCP_ERROR_SEGMENT_OLD;
          *next0 = TCP_NEXT_DROP;
          goto done;
        }

      error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);

      /* N.B. Should not filter burst of dupacks. Two issues: 1) dupacks open
       * cwnd on remote peer when congested; 2) acks leaving should have the
       * latest rcv_wnd since the burst may have eaten up all of it, so only
       * the old ones could be filtered.
       */

      /* RFC2581: Send DUPACK for fast retransmit */
      tcp_make_ack (tc, b);
      *next0 = tcp_next_output (tc->c_is_ip4);

      /* Mark as DUPACK. We may filter these in output if
       * the burst fills the holes. */
      if (n_data_bytes)
        vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK;

      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc);
      goto done;
    }

  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
   * segments can be enqueued after fifo tail offset changes. */
  error = tcp_session_enqueue_data (tc, b, n_data_bytes);

  if (n_data_bytes == 0)
    {
      *next0 = TCP_NEXT_DROP;
      goto done;
    }

  /* Check if ACK can be delayed */
  if (tcp_can_delack (tc))
    {
      if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
        tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME);
      goto done;
    }

  *next0 = tcp_next_output (tc->c_is_ip4);
  tcp_make_ack (tc, b);

done:
  return error;
}
typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_rx_trace_t;

u8 *
format_tcp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
  uword indent = format_get_indent (s);

  s = format (s, "%U\n%U%U",
              format_tcp_header, &t->tcp_header, 128,
              format_white_space, indent,
              format_tcp_connection_verbose, &t->tcp_connection);

  return s;
}

u8 *
format_tcp_rx_trace_short (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);

  s = format (s, "%d -> %d (%U)",
              clib_net_to_host_u16 (t->tcp_header.src_port),
              clib_net_to_host_u16 (t->tcp_header.dst_port), format_tcp_state,
              &t->tcp_connection.state);

  return s;
}

always_inline void
tcp_established_inc_counter (vlib_main_t * vm, u8 is_ip4, u8 evt, u8 val)
{
  if (PREDICT_TRUE (!val))
    return;

  if (is_ip4)
    vlib_node_increment_counter (vm, tcp4_established_node.index, evt, val);
  else
    vlib_node_increment_counter (vm, tcp6_established_node.index, evt, val);
}
always_inline uword
tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index, errors = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();
  u8 is_fin = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_rx_trace_t *t0;
          tcp_header_t *th0 = 0;
          tcp_connection_t *tc0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          u32 n_advance_bytes0, n_data_bytes0;
          u32 next0 = TCP_ESTABLISHED_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    my_thread_index);

          if (PREDICT_FALSE (tc0 == 0))
            {
              error0 = TCP_ERROR_INVALID_CONNECTION;
              goto done;
            }

          /* Checksum computed by ipx_local no need to compute again */

          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              th0 = ip4_next_header (ip40);
              n_advance_bytes0 = (ip4_header_bytes (ip40)
                                  + tcp_header_bytes (th0));
              n_data_bytes0 = clib_net_to_host_u16 (ip40->length)
                - n_advance_bytes0;
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              th0 = ip6_next_header (ip60);
              n_advance_bytes0 = tcp_header_bytes (th0);
              n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length)
                - n_advance_bytes0;
              n_advance_bytes0 += sizeof (ip60[0]);
            }

          is_fin = (th0->flags & TCP_FLAG_FIN) != 0;

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
            + tcp_is_syn (th0) + is_fin + n_data_bytes0;

          /* TODO header prediction fast path */

          /* 1-4: check SEQ, RST, SYN */
          if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, th0, &next0)))
            {
              error0 = TCP_ERROR_SEGMENT_INVALID;
              TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0,
                           vnet_buffer (b0)->tcp.seq_number,
                           vnet_buffer (b0)->tcp.seq_end);
              goto done;
            }

          /* 5: check the ACK field */
          if (tcp_rcv_ack (tc0, b0, th0, &next0, &error0))
            {
              goto done;
            }

          /* 6: check the URG bit TODO */

          /* 7: process the segment text */

          vlib_buffer_advance (b0, n_advance_bytes0);
          error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0);

          /* N.B. buffer is rewritten if segment is ooo. Thus, th0 becomes a
           * dangling reference. */

          /* 8: check the FIN bit */
          if (is_fin)
            {
              /* Enter CLOSE-WAIT and notify session. Don't send ACK, instead
               * wait for session to call close. To avoid lingering
               * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
              tc0->state = TCP_STATE_CLOSE_WAIT;
              TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
              stream_session_disconnect_notify (&tc0->connection);
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
            }

        done:
          b0->error = node->errors[error0];
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
              clib_memcpy (&t0->tcp_connection, tc0,
                           sizeof (t0->tcp_connection));
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  errors = session_manager_flush_enqueue_events (my_thread_index);
  tcp_established_inc_counter (vm, is_ip4, TCP_ERROR_EVENT_FIFO_FULL, errors);

  return from_frame->n_vectors;
}
static uword
tcp4_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_established_node) =
{
  .function = tcp4_established,
  .name = "tcp4-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_established_node, tcp4_established);

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_established_node) =
{
  .function = tcp6_established,
  .name = "tcp6-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_established_node, tcp6_established);

vlib_node_registration_t tcp4_syn_sent_node;
vlib_node_registration_t tcp6_syn_sent_node;
always_inline uword
tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * from_frame, int is_ip4)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index, errors = 0;
  u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, ack0, seq0;
          vlib_buffer_t *b0;
          tcp_rx_trace_t *t0;
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          u32 n_advance_bytes0, n_data_bytes0;
          tcp_connection_t *new_tc0;
          u32 next0 = TCP_SYN_SENT_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 =
            tcp_half_open_connection_get (vnet_buffer (b0)->
                                          tcp.connection_index);

          ack0 = vnet_buffer (b0)->tcp.ack_number;
          seq0 = vnet_buffer (b0)->tcp.seq_number;

          /* Checksum computed by ipx_local no need to compute again */

          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              tcp0 = ip4_next_header (ip40);
              n_advance_bytes0 = (ip4_header_bytes (ip40)
                                  + tcp_header_bytes (tcp0));
              n_data_bytes0 = clib_net_to_host_u16 (ip40->length)
                - n_advance_bytes0;
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              tcp0 = ip6_next_header (ip60);
              n_advance_bytes0 = tcp_header_bytes (tcp0);
              n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length)
                - n_advance_bytes0;
              n_advance_bytes0 += sizeof (ip60[0]);
            }

          if (PREDICT_FALSE
              (!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0)))
            goto drop;

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = seq0 + tcp_is_syn (tcp0)
            + tcp_is_fin (tcp0) + n_data_bytes0;

          /*
           * 1. check the ACK bit
           */

          /*
           *   If the ACK bit is set
           *     If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
           *     the RST bit is set, if so drop the segment and return)
           *       <SEQ=SEG.ACK><CTL=RST>
           *     and discard the segment. Return.
           *     If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
           */
          if (tcp_ack (tcp0))
            {
              if (ack0 <= tc0->iss || ack0 > tc0->snd_nxt)
                {
                  if (!tcp_rst (tcp0))
                    tcp_send_reset (b0, is_ip4);

                  goto drop;
                }

              /* Make sure ACK is valid */
              if (tc0->snd_una > ack0)
                goto drop;
            }

          /*
           * 2. check the RST bit
           */

          if (tcp_rst (tcp0))
            {
              /* If ACK is acceptable, signal client that peer is not
               * willing to accept connection and drop connection */
              if (tcp_ack (tcp0))
                {
                  stream_session_connect_notify (&tc0->connection, sst,
                                                 1 /* fail */ );
                  tcp_connection_cleanup (tc0);
                }
              goto drop;
            }

          /*
           * 3. check the security and precedence (skipped)
           */

          /*
           * 4. check the SYN bit
           */

          /* No SYN flag. Drop. */
          if (!tcp_syn (tcp0))
            goto drop;

          /* Stop connection establishment and retransmit timers */
          tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH);
          tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT_SYN);

          /* Valid SYN or SYN-ACK. Move connection from half-open pool to
           * current thread pool. */
          pool_get (tm->connections[my_thread_index], new_tc0);
          clib_memcpy (new_tc0, tc0, sizeof (*new_tc0));

          new_tc0->c_thread_index = my_thread_index;

          /* Cleanup half-open connection XXX lock */
          pool_put (tm->half_open_connections, tc0);

          new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
          new_tc0->irs = seq0;

          /* Parse options */
          tcp_options_parse (tcp0, &new_tc0->opt);

          if (tcp_opts_tstamp (&new_tc0->opt))
            {
              new_tc0->tsval_recent = new_tc0->opt.tsval;
              new_tc0->tsval_recent_age = tcp_time_now ();
            }

          if (tcp_opts_wscale (&new_tc0->opt))
            new_tc0->snd_wscale = new_tc0->opt.wscale;

          /* No scaling */
          new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window);
          new_tc0->snd_wl1 = seq0;
          new_tc0->snd_wl2 = ack0;

          tcp_connection_init_vars (new_tc0);

          /* SYN-ACK: See if we can switch to ESTABLISHED state */
          if (tcp_ack (tcp0))
            {
              /* Our SYN is ACKed: we have iss < ack = snd_una */

              /* TODO Dequeue acknowledged segments if we support Fast Open */
              new_tc0->snd_una = ack0;
              new_tc0->state = TCP_STATE_ESTABLISHED;

              /* Make sure las is initialized for the wnd computation */
              new_tc0->rcv_las = new_tc0->rcv_nxt;

              /* Notify app that we have connection */
              stream_session_connect_notify (&new_tc0->connection, sst, 0);

              /* Make sure after data segment processing ACK is sent */
              new_tc0->flags |= TCP_CONN_SNDACK;
            }
          /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
          else
            {
              new_tc0->state = TCP_STATE_SYN_RCVD;

              /* Notify app that we have connection */
              stream_session_connect_notify (&new_tc0->connection, sst, 0);

              tcp_make_synack (new_tc0, b0);
              next0 = tcp_next_output (is_ip4);

              goto drop;
            }

          /* Read data, if any */
          if (n_data_bytes0)
            {
              error0 =
                tcp_segment_rcv (tm, new_tc0, b0, n_data_bytes0, &next0);
              if (error0 == TCP_ERROR_PURE_ACK)
                error0 = TCP_ERROR_SYN_ACKS_RCVD;
            }
          else
            {
              tcp_make_ack (new_tc0, b0);
              next0 = tcp_next_output (new_tc0->c_is_ip4);
            }

        drop:
          b0->error = error0 ? node->errors[error0] : 0;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
              clib_memcpy (&t0->tcp_connection, tc0,
                           sizeof (t0->tcp_connection));
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  errors = session_manager_flush_enqueue_events (my_thread_index);
  if (errors)
    {
      if (is_ip4)
        vlib_node_increment_counter (vm, tcp4_established_node.index,
                                     TCP_ERROR_EVENT_FIFO_FULL, errors);
      else
        vlib_node_increment_counter (vm, tcp6_established_node.index,
                                     TCP_ERROR_EVENT_FIFO_FULL, errors);
    }

  return from_frame->n_vectors;
}
static uword
tcp4_syn_sent (vlib_main_t * vm, vlib_node_runtime_t * node,
               vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_syn_sent_rcv (vlib_main_t * vm, vlib_node_runtime_t * node,
                   vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
{
  .function = tcp4_syn_sent,
  .name = "tcp4-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_syn_sent_node, tcp4_syn_sent);

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
{
  .function = tcp6_syn_sent_rcv,
  .name = "tcp6-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv);

/**
 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
 * as per RFC793 p. 64
 */
always_inline uword
tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip4)
{
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index, errors = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_rx_trace_t *t0;
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          u32 n_advance_bytes0, n_data_bytes0;
          u32 next0 = TCP_RCV_PROCESS_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    my_thread_index);
          if (PREDICT_FALSE (tc0 == 0))
            {
              error0 = TCP_ERROR_INVALID_CONNECTION;
              goto drop;
            }

          /* Checksum computed by ipx_local no need to compute again */

          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              tcp0 = ip4_next_header (ip40);
              n_advance_bytes0 = (ip4_header_bytes (ip40)
                                  + tcp_header_bytes (tcp0));
              n_data_bytes0 = clib_net_to_host_u16 (ip40->length)
                - n_advance_bytes0;
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              tcp0 = ip6_next_header (ip60);
              n_advance_bytes0 = tcp_header_bytes (tcp0);
              n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length)
                - n_advance_bytes0;
              n_advance_bytes0 += sizeof (ip60[0]);
            }

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
            + tcp_is_syn (tcp0) + tcp_is_fin (tcp0) + n_data_bytes0;

          /*
           * Special treatment for CLOSED
           */
          switch (tc0->state)
            {
            case TCP_STATE_CLOSED:
              goto drop;
              break;
            }

          /*
           * For all other states (except LISTEN)
           */

          /* 1-4: check SEQ, RST, SYN */
          if (PREDICT_FALSE
              (tcp_segment_validate (vm, tc0, b0, tcp0, &next0)))
            {
              error0 = TCP_ERROR_SEGMENT_INVALID;
              goto drop;
            }

          /* 5: check the ACK field */
          switch (tc0->state)
            {
            case TCP_STATE_SYN_RCVD:
              /*
               * If the segment acknowledgment is not acceptable, form a
               * reset segment,
               *  <SEQ=SEG.ACK><CTL=RST>
               * and send it.
               */
              if (!tcp_rcv_ack_is_acceptable (tc0, b0))
                {
                  tcp_send_reset (b0, is_ip4);
                  goto drop;
                }
              /* Switch state to ESTABLISHED */
              tc0->state = TCP_STATE_ESTABLISHED;

              /* Initialize session variables */
              tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
              tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
                << tc0->opt.wscale;
              tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
              tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

              /* Shoulder tap the server */
              stream_session_accept_notify (&tc0->connection);

              /* Reset SYN-ACK retransmit timer */
              tcp_retransmit_timer_reset (tc0);
              break;
            case TCP_STATE_ESTABLISHED:
              /* We can get packets in established state here because they
               * were enqueued before state change */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                goto drop;
              break;
            case TCP_STATE_FIN_WAIT_1:
              /* In addition to the processing for the ESTABLISHED state, if
               * our FIN is now acknowledged then enter FIN-WAIT-2 and
               * continue processing in that state. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                goto drop;

              /* If FIN is ACKed */
              if (tc0->snd_una == tc0->snd_una_max)
                {
                  tc0->state = TCP_STATE_FIN_WAIT_2;
                  /* Stop all timers, 2MSL will be set lower */
                  tcp_connection_timers_reset (tc0);
                }
              break;
            case TCP_STATE_FIN_WAIT_2:
              /* In addition to the processing for the ESTABLISHED state, if
               * the retransmission queue is empty, the user's CLOSE can be
               * acknowledged ("ok") but do not delete the TCB. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                goto drop;
              /* check if rtx queue is empty and ack CLOSE TODO */
              break;
            case TCP_STATE_CLOSE_WAIT:
              /* Do the same processing as for the ESTABLISHED state. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                goto drop;
              break;
            case TCP_STATE_CLOSING:
              /* In addition to the processing for the ESTABLISHED state, if
               * the ACK acknowledges our FIN then enter the TIME-WAIT state,
               * otherwise ignore the segment. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
                goto drop;

              /* XXX test that send queue empty */
              tc0->state = TCP_STATE_TIME_WAIT;
              goto drop;

              break;
            case TCP_STATE_LAST_ACK:
              /* The only thing that can arrive in this state is an
               * acknowledgment of our FIN. If our FIN is now acknowledged,
               * delete the TCB, enter the CLOSED state, and return. */

              if (!tcp_rcv_ack_is_acceptable (tc0, b0))
                goto drop;

              tc0->state = TCP_STATE_CLOSED;

              /* Don't delete the connection/session yet. Instead, wait a
               * reasonable amount of time until the pipes are cleared. In
               * particular, this makes sure that we won't have dead sessions
               * when processing events on the tx path */
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);

              /* Stop retransmit */
              tcp_retransmit_timer_reset (tc0);

              goto drop;

              break;
            case TCP_STATE_TIME_WAIT:
              /* The only thing that can arrive in this state is a
               * retransmission of the remote FIN. Acknowledge it, and restart
               * the 2 MSL timeout. */

              /* TODO */
              goto drop;
              break;
            default:
              ASSERT (0);
            }

          /* 6: check the URG bit TODO */

          /* 7: process the segment text */
          switch (tc0->state)
            {
            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_FIN_WAIT_1:
            case TCP_STATE_FIN_WAIT_2:
              vlib_buffer_advance (b0, n_advance_bytes0);
              error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0);
              break;
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
            case TCP_STATE_TIME_WAIT:
              /* This should not occur, since a FIN has been received from the
               * remote side. Ignore the segment text. */
              break;
            }

          /* 8: check the FIN bit */
          if (!tcp_fin (tcp0))
            goto drop;

          switch (tc0->state)
            {
            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_SYN_RCVD:
              /* Send FIN-ACK notify app and enter CLOSE-WAIT */
              tcp_connection_timers_reset (tc0);
              tcp_make_fin (tc0, b0);
              next0 = tcp_next_output (tc0->c_is_ip4);
              stream_session_disconnect_notify (&tc0->connection);
              tc0->state = TCP_STATE_CLOSE_WAIT;
              break;
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
              /* move along .. */
              break;
            case TCP_STATE_FIN_WAIT_1:
              tc0->state = TCP_STATE_TIME_WAIT;
              tcp_connection_timers_reset (tc0);
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
              break;
            case TCP_STATE_FIN_WAIT_2:
              /* Got FIN, send ACK! */
              tc0->state = TCP_STATE_TIME_WAIT;
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
              tcp_make_ack (tc0, b0);
              next0 = tcp_next_output (is_ip4);
              break;
            case TCP_STATE_TIME_WAIT:
              /* Remain in the TIME-WAIT state. Restart the 2 MSL time-wait
               * timeout. */
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
              break;
            }
          TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);

        drop:
          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
              clib_memcpy (&t0->tcp_connection, tc0,
                           sizeof (t0->tcp_connection));
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  errors = session_manager_flush_enqueue_events (my_thread_index);
  if (errors)
    {
      if (is_ip4)
        vlib_node_increment_counter (vm, tcp4_established_node.index,
                                     TCP_ERROR_EVENT_FIFO_FULL, errors);
      else
        vlib_node_increment_counter (vm, tcp6_established_node.index,
                                     TCP_ERROR_EVENT_FIFO_FULL, errors);
    }

  return from_frame->n_vectors;
}
static uword
tcp4_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
{
  .function = tcp4_rcv_process,
  .name = "tcp4-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_rcv_process_node, tcp4_rcv_process);

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
{
  .function = tcp6_rcv_process,
  .name = "tcp6-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_rcv_process_node, tcp6_rcv_process);

vlib_node_registration_t tcp4_listen_node;
vlib_node_registration_t tcp6_listen_node;

/**
 * LISTEN state processing as per RFC 793 p. 65
 */
always_inline uword
tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();
  u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_rx_trace_t *t0;
          tcp_header_t *th0 = 0;
          tcp_connection_t *lc0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          tcp_connection_t *child0;
          u32 error0 = TCP_ERROR_SYNS_RCVD, next0 = TCP_LISTEN_NEXT_DROP;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);

          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              th0 = ip4_next_header (ip40);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              th0 = ip6_next_header (ip60);
            }

          /* Create child session. For syn-flood protection use filter */

          /* 1. first check for an RST */
          if (tcp_rst (th0))
            goto drop;

          /* 2. second check for an ACK */
          if (tcp_ack (th0))
            {
              tcp_send_reset (b0, is_ip4);
              goto drop;
            }

          /* 3. check for a SYN (did that already) */

          /* Create child session and send SYN-ACK */
          pool_get (tm->connections[my_thread_index], child0);
          memset (child0, 0, sizeof (*child0));

          child0->c_c_index = child0 - tm->connections[my_thread_index];
          child0->c_lcl_port = lc0->c_lcl_port;
          child0->c_rmt_port = th0->src_port;
          child0->c_is_ip4 = is_ip4;
          child0->c_thread_index = my_thread_index;

          if (is_ip4)
            {
              child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
              child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
            }
          else
            {
              clib_memcpy (&child0->c_lcl_ip6, &ip60->dst_address,
                           sizeof (ip6_address_t));
              clib_memcpy (&child0->c_rmt_ip6, &ip60->src_address,
                           sizeof (ip6_address_t));
            }

          if (stream_session_accept (&child0->connection, lc0->c_s_index, sst,
                                     0 /* notify */ ))
            {
              error0 = TCP_ERROR_CREATE_SESSION_FAIL;
              goto drop;
            }

          tcp_options_parse (th0, &child0->opt);

          child0->irs = vnet_buffer (b0)->tcp.seq_number;
          child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
          child0->rcv_las = child0->rcv_nxt;
          child0->state = TCP_STATE_SYN_RCVD;

          /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
           * segments are used to initialize PAWS. */
          if (tcp_opts_tstamp (&child0->opt))
            {
              child0->tsval_recent = child0->opt.tsval;
              child0->tsval_recent_age = tcp_time_now ();
            }

          if (tcp_opts_wscale (&child0->opt))
            child0->snd_wscale = child0->opt.wscale;

          /* No scaling */
          child0->snd_wnd = clib_net_to_host_u16 (th0->window);
          child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
          child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

          tcp_connection_init_vars (child0);

          TCP_EVT_DBG (TCP_EVT_SYN_RCVD, child0);

          /* Reuse buffer to make syn-ack and send */
          tcp_make_synack (child0, b0);
          next0 = tcp_next_output (is_ip4);

        drop:
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
              clib_memcpy (&t0->tcp_connection, lc0,
                           sizeof (t0->tcp_connection));
            }

          b0->error = node->errors[error0];

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}
static uword
tcp4_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_listen_node) =
{
  .function = tcp4_listen,
  .name = "tcp4-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_listen_node, tcp4_listen);

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_listen_node) =
{
  .function = tcp6_listen,
  .name = "tcp6-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_listen_node, tcp6_listen);

vlib_node_registration_t tcp4_input_node;
vlib_node_registration_t tcp6_input_node;

typedef enum _tcp_input_next
{
  TCP_INPUT_NEXT_DROP,
  TCP_INPUT_NEXT_LISTEN,
  TCP_INPUT_NEXT_RCV_PROCESS,
  TCP_INPUT_NEXT_SYN_SENT,
  TCP_INPUT_NEXT_ESTABLISHED,
  TCP_INPUT_NEXT_RESET,
  TCP_INPUT_N_NEXT
} tcp_input_next_t;

#define foreach_tcp4_input_next                 \
  _ (DROP, "error-drop")                        \
  _ (LISTEN, "tcp4-listen")                     \
  _ (RCV_PROCESS, "tcp4-rcv-process")           \
  _ (SYN_SENT, "tcp4-syn-sent")                 \
  _ (ESTABLISHED, "tcp4-established")           \
  _ (RESET, "tcp4-reset")

#define foreach_tcp6_input_next                 \
  _ (DROP, "error-drop")                        \
  _ (LISTEN, "tcp6-listen")                     \
  _ (RCV_PROCESS, "tcp6-rcv-process")           \
  _ (SYN_SENT, "tcp6-syn-sent")                 \
  _ (ESTABLISHED, "tcp6-established")           \
  _ (RESET, "tcp6-reset")

#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
always_inline uword
tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          tcp_rx_trace_t *t0;
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          ip4_header_t *ip40;
          ip6_header_t *ip60;
          u32 error0 = TCP_ERROR_NO_LISTENER, next0 = TCP_INPUT_NEXT_DROP;
          u8 flags0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          vnet_buffer (b0)->tcp.flags = 0;

          if (is_ip4)
            {
              ip40 = vlib_buffer_get_current (b0);
              tcp0 = ip4_next_header (ip40);

              /* lookup session */
              tc0 =
                (tcp_connection_t *)
                stream_session_lookup_transport4 (&ip40->dst_address,
                                                  &ip40->src_address,
                                                  tcp0->dst_port,
                                                  tcp0->src_port,
                                                  SESSION_TYPE_IP4_TCP,
                                                  my_thread_index);
            }
          else
            {
              ip60 = vlib_buffer_get_current (b0);
              tcp0 = ip6_next_header (ip60);
              tc0 =
                (tcp_connection_t *)
                stream_session_lookup_transport6 (&ip60->src_address,
                                                  &ip60->dst_address,
                                                  tcp0->dst_port,
                                                  tcp0->src_port,
                                                  SESSION_TYPE_IP6_TCP,
                                                  my_thread_index);
            }

          /* Session exists */
          if (PREDICT_TRUE (0 != tc0))
            {
              /* Save connection index */
              vnet_buffer (b0)->tcp.connection_index = tc0->c_c_index;
              vnet_buffer (b0)->tcp.seq_number =
                clib_net_to_host_u32 (tcp0->seq_number);
              vnet_buffer (b0)->tcp.ack_number =
                clib_net_to_host_u32 (tcp0->ack_number);

              flags0 = tcp0->flags & filter_flags;
              next0 = tm->dispatch_table[tc0->state][flags0].next;
              error0 = tm->dispatch_table[tc0->state][flags0].error;

              if (PREDICT_FALSE (error0 == TCP_ERROR_DISPATCH))
                {
                  tcp_state_t state0 = tc0->state;
                  /* Overload tcp flags to store state */
                  vnet_buffer (b0)->tcp.flags = tc0->state;
                  clib_warning ("disp error state %U flags %U",
                                format_tcp_state, &state0,
                                format_tcp_flags, (int) flags0);
                }
            }
          else
            {
              /* Send reset */
              next0 = TCP_INPUT_NEXT_RESET;
              error0 = TCP_ERROR_NO_LISTENER;
            }

          b0->error = error0 ? node->errors[error0] : 0;

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
              if (tc0)
                clib_memcpy (&t0->tcp_connection, tc0, sizeof (*tc0));
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return from_frame->n_vectors;
}

static uword
tcp4_input (vlib_main_t * vm, vlib_node_runtime_t * node,
            vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_input (vlib_main_t * vm, vlib_node_runtime_t * node,
            vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  .function = tcp4_input,
  .name = "tcp4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_input_node, tcp4_input);

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_input_node) =
{
  .function = tcp6_input,
  .name = "tcp6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input);
static void
tcp_dispatch_table_init (tcp_main_t * tm)
{
  int i, j;
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
        tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
        tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)                                              \
do {                                                            \
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);            \
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);           \
} while (0)

  /* SYNs for new connections -> tcp-listen. */
  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
  /* ACK for a SYN-ACK -> tcp-rcv-process. */
  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* SYN-ACK for a SYN */
  _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  /* ACK for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* FIN for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* ACK or FIN-ACK to our FIN */
  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  /* FIN in reply to our FIN from the other side */
  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* FIN confirming that the peer (app) has closed */
  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
#undef _
}
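
/* Example (illustrative): for a SYN-ACK received in SYN_SENT state, the
 * lookup in tcp46_input_inline resolves to
 *
 *   tm->dispatch_table[TCP_STATE_SYN_SENT][TCP_FLAG_SYN | TCP_FLAG_ACK]
 *     .next  == TCP_INPUT_NEXT_SYN_SENT   ("tcp4/6-syn-sent")
 *     .error == TCP_ERROR_NONE
 *
 * Any state/flags pair not whitelisted above keeps the default
 * TCP_INPUT_NEXT_DROP / TCP_ERROR_DISPATCH and gets logged. */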
clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  if ((error = vlib_call_init_function (vm, tcp_init)))
    return error;

  /* Initialize dispatch table. */
  tcp_dispatch_table_init (tm);

  return error;
}

VLIB_INIT_FUNCTION (tcp_input_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */