/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/sparse_vec.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>
static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};
/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next                  \
  _ (DROP4, "ip4-drop")                         \
  _ (DROP6, "ip6-drop")                         \
  _ (TCP4_OUTPUT, "tcp4-output")                \
  _ (TCP6_OUTPUT, "tcp6-output")
typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;
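/* For reference, a sketch of what the preprocessor generates from the list
 * macro above (illustrative, not part of the original source):
 *
 *   typedef enum _tcp_established_next
 *   {
 *     TCP_ESTABLISHED_NEXT_DROP4,
 *     TCP_ESTABLISHED_NEXT_DROP6,
 *     TCP_ESTABLISHED_NEXT_TCP4_OUTPUT,
 *     TCP_ESTABLISHED_NEXT_TCP6_OUTPUT,
 *     TCP_ESTABLISHED_N_NEXT,
 *   } tcp_established_next_t;
 *
 * Each per-state enum below expands the same way with its own prefix, so all
 * TCP input nodes share the same set of outgoing arcs. */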
typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;
typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_SYN_SENT_N_NEXT,
} tcp_syn_sent_next_t;
typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_LISTEN_N_NEXT,
} tcp_listen_next_t;
/* Generic, state independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_STATE_N_NEXT,
} tcp_state_next_t;
#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT          \
                                        : TCP_NEXT_TCP6_OUTPUT)

#define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4                  \
                                      : TCP_NEXT_DROP6)
vlib_node_registration_t tcp4_established_node;
vlib_node_registration_t tcp6_established_node;
/**
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 *      Length  Window
 *      ------- -------  -------------------------------------------
 *      0       0       SEG.SEQ = RCV.NXT
 *      0       >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *      >0      0       not acceptable
 *      >0      >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                      or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately consists of checking whether the segment falls within the
 * window. The one important difference compared to RFC793 is that we use
 * rcv_las, i.e., the rcv_nxt at the time the last ack was sent, instead of
 * rcv_nxt, since that's the peer's reference when computing our receive
 * window.
 *
 * This:
 *  seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * use rcv_nxt in the right edge window test instead of rcv_las.
 */
always_inline u8
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
{
  return (seq_geq (end_seq, tc->rcv_las)
	  && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
}
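/* Worked example (illustrative, not from the original source): assume
 * rcv_las = 1000, rcv_nxt = 2000 and rcv_wnd = 3000. A segment with
 * seq = 1500 and end_seq = 2500 is accepted since end_seq >= rcv_las and
 * seq <= rcv_nxt + rcv_wnd = 5000. A stale segment with end_seq = 900 fails
 * the left edge test; one with seq = 6000 fails the right edge test. The
 * seq_geq/seq_leq helpers use serial number arithmetic, so the checks stay
 * correct across sequence number wrap. */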
/**
 * Parse TCP header options.
 *
 * @param th TCP header
 * @param to TCP options data structure to be populated
 * @param is_syn set if packet is syn
 * @return -1 if parsing failed
 */
static int
tcp_options_parse (tcp_header_t * th, tcp_options_t * to, u8 is_syn)
{
  const u8 *data;
  u8 opt_len, opts_len, kind;
  int j;
  sack_block_t b;

  opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
  data = (const u8 *) (th + 1);

  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
		| TCP_OPTS_FLAG_TSTAMP | TCP_OPTS_FLAG_MSS);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      kind = data[0];

      /* Get options length */
      if (kind == TCP_OPTION_EOL)
	break;
      else if (kind == TCP_OPTION_NOOP)
	{
	  opt_len = 1;
	  continue;
	}
      else
	{
	  /* broken options */
	  if (opts_len < 2)
	    return -1;
	  opt_len = data[1];
	  /* weird option length */
	  if (opt_len < 2 || opt_len > opts_len)
	    return -1;
	}

      /* Parse options */
      switch (kind)
	{
	case TCP_OPTION_MSS:
	  if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
	    {
	      to->flags |= TCP_OPTS_FLAG_MSS;
	      to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
	    }
	  break;
	case TCP_OPTION_WINDOW_SCALE:
	  if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
	    {
	      to->flags |= TCP_OPTS_FLAG_WSCALE;
	      to->wscale = data[2];
	      if (to->wscale > TCP_MAX_WND_SCALE)
		to->wscale = TCP_MAX_WND_SCALE;
	    }
	  break;
	case TCP_OPTION_TIMESTAMP:
	  if (is_syn)
	    to->flags |= TCP_OPTS_FLAG_TSTAMP;
	  if ((to->flags & TCP_OPTS_FLAG_TSTAMP)
	      && opt_len == TCP_OPTION_LEN_TIMESTAMP)
	    {
	      to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
	      to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
	    }
	  break;
	case TCP_OPTION_SACK_PERMITTED:
	  if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
	    to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
	  break;
	case TCP_OPTION_SACK_BLOCK:
	  /* If SACK permitted was not advertised or a SYN, break */
	  if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
	    break;
	  /* If too short or not correctly formatted, break */
	  if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
	    break;

	  to->flags |= TCP_OPTS_FLAG_SACK;
	  to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
	  vec_reset_length (to->sacks);
	  for (j = 0; j < to->n_sack_blocks; j++)
	    {
	      b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
	      b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
	      vec_add1 (to->sacks, b);
	    }
	  break;
	default:
	  /* Nothing to see here */
	  continue;
	}
    }
  return 0;
}
/**
 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have
 * timestamp to echo and it's less than tsval_recent, drop segment
 * but still send an ACK in order to retain TCP's mechanism for detecting
 * and recovering from half-open connections
 *
 * Or at least that's what the theory says. It seems that this might not work
 * very well with packet reordering and fast retransmit. XXX
 */
static u8
tcp_segment_check_paws (tcp_connection_t * tc)
{
  return tcp_opts_tstamp (&tc->rcv_opts)
    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
}
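/* Illustrative example (not from the original source): with
 * tsval_recent = 5000, a segment echoing tsval = 4900 fails PAWS and is
 * dropped (a dup ack is still sent in tcp_segment_validate), whereas
 * tsval >= 5000 passes. timestamp_lt() uses serial number arithmetic, so the
 * comparison stays valid when the peer's timestamp clock wraps. */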
/**
 * Update tsval recent
 */
static void
tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
{
  /*
   * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
   * of an incoming segment:
   *    SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
   * then the TSval from the segment is copied to TS.Recent;
   * otherwise, the TSval is ignored.
   */
  if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
      && seq_leq (tc->rcv_las, seq_end))
    {
      ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
      tc->tsval_recent = tc->rcv_opts.tsval;
      tc->tsval_recent_age = tcp_time_now_w_thread (tc->c_thread_index);
    }
}
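/* Illustrative example (not from the original source): with rcv_las = 1000,
 * a segment with seq = 900 and seq_end = 1100 satisfies
 * seq <= rcv_las <= seq_end, so its tsval becomes the new tsval_recent. An
 * out-of-order segment with seq = 1200 leaves tsval_recent unchanged, as
 * RFC1323 prescribes. */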
/**
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if segment has a wrapped sequence number (PAWS) and then
 * does the processing associated with the first four steps (ignoring security
 * and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
 */
static int
tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
		      vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
{
  /* We could get a burst of RSTs interleaved with acks */
  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
    {
      tcp_send_reset (tc0);
      *error0 = TCP_ERROR_CONNECTION_CLOSED;
      goto error;
    }

  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
    {
      *error0 = TCP_ERROR_SEGMENT_INVALID;
      goto error;
    }

  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
    {
      *error0 = TCP_ERROR_OPTIONS;
      goto error;
    }

  if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
    {
      *error0 = TCP_ERROR_PAWS;
      TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
		   vnet_buffer (b0)->tcp.seq_end);

      /* If it just so happens that a segment updates tsval_recent for a
       * segment over 24 days old, invalidate tsval_recent. */
      if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
			tcp_time_now_w_thread (tc0->c_thread_index)))
	{
	  tc0->tsval_recent = tc0->rcv_opts.tsval;
	  clib_warning ("paws failed: 24-day old segment");
	}
      /* Drop after ack if not rst. Resets can fail paws check as per
       * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
       * be subjected to the PAWS check by verifying an acceptable value in
       * SEG.TSval */
      else if (!tcp_rst (th0))
	{
	  tcp_program_ack (wrk, tc0);
	  TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
	  goto error;
	}
    }

  /* 1st: check sequence number */
  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
			       vnet_buffer (b0)->tcp.seq_end))
    {
      *error0 = TCP_ERROR_RCV_WND;
      /* If our window is 0 and the packet is in sequence, let it pass
       * through for ack processing. It should be dropped later. */
      if (!(tc0->rcv_wnd == 0
	    && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number))
	{
	  /* If not RST, send dup ack */
	  if (!tcp_rst (th0))
	    {
	      tcp_program_dupack (wrk, tc0);
	      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
	    }
	  goto error;
	}
    }

  /* 2nd: check the RST bit */
  if (PREDICT_FALSE (tcp_rst (th0)))
    {
      tcp_connection_reset (tc0);
      *error0 = TCP_ERROR_RST_RCVD;
      goto error;
    }

  /* 3rd: check security and precedence (skip) */

  /* 4th: check the SYN bit */
  if (PREDICT_FALSE (tcp_syn (th0)))
    {
      *error0 = tcp_ack (th0) ? TCP_ERROR_SYN_ACKS_RCVD : TCP_ERROR_SYNS_RCVD;
      /* TODO implement RFC 5961 */
      if (tc0->state == TCP_STATE_SYN_RCVD)
	{
	  tcp_options_parse (th0, &tc0->rcv_opts, 1);
	  tcp_send_synack (tc0);
	  TCP_EVT_DBG (TCP_EVT_SYN_RCVD, tc0, 0);
	}
      else
	{
	  tcp_program_ack (wrk, tc0);
	  TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, tc0);
	}
      goto error;
    }

  /* If segment in window, save timestamp */
  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
			vnet_buffer (b0)->tcp.seq_end);
  return 0;

error:
  return -1;
}
always_inline int
tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0)
{
  /* SND.UNA =< SEG.ACK =< SND.NXT */
  return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number)
	  && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt));
}
/**
 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't. Instead, we rely on
 * better precision time measurements.
 *
 * TODO support us rtt resolution
 */
static void
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
{
  int err, diff;

  if (tc->srtt != 0)
    {
      err = mrtt - tc->srtt;

      /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
       * The increase should be bounded */
      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
    }
  else
    {
      /* First measurement. */
      tc->srtt = mrtt;
      tc->rttvar = mrtt >> 1;
    }
}

static inline void
tcp_update_rto (tcp_connection_t * tc)
{
  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
  tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
}
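/* Worked example (illustrative, not from the original source), in clock
 * ticks: with srtt = 80 and rttvar = 20, a new measurement mrtt = 120 gives
 * err = 40, so srtt becomes 80 + 40/8 = 85 and rttvar becomes
 * 20 + (|40| - 20)/4 = 25. The RTO is then 85 + 4 * 25 = 185 ticks, clamped
 * to [TCP_RTO_MIN, TCP_RTO_MAX]. The shifts above implement the standard
 * RFC6298 gains of 1/8 for srtt and 1/4 for rttvar. */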
/**
 * Update RTT estimate and RTO timer
 *
 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
 * timing. Middle boxes are known to fiddle with TCP options so we
 * should give higher priority to ACK timing.
 *
 * This should be called only if previously sent bytes have been acked.
 *
 * @return 1 if valid rtt, 0 otherwise
 */
static int
tcp_update_rtt (tcp_connection_t * tc, u32 ack)
{
  u32 mrtt = 0;

  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
   * RTT because they're ambiguous. */
  if (tcp_in_cong_recovery (tc) || tc->sack_sb.sacked_bytes)
    {
      if (tcp_in_recovery (tc))
	return 0;
      goto done;
    }

  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
    {
      f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
      tc->mrtt_us = tc->mrtt_us + (sample - tc->mrtt_us) * 0.125;
      mrtt = clib_max ((u32) (sample * THZ), 1);
      /* Allow measuring of a new RTT */
      tc->rtt_ts = 0;
    }
  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
   * snd_una, i.e., the left side of the send window:
   * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
    {
      u32 now = tcp_time_now_w_thread (tc->c_thread_index);
      mrtt = clib_max (now - tc->rcv_opts.tsecr, 1);
    }

  /* Ignore dubious measurements */
  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
    goto done;

  tcp_estimate_rtt (tc, mrtt);

done:
  /* If we got here something must've been ACKed so make sure boff is 0,
   * even if mrtt is not valid since we update the rto lower */
  tc->rto_boff = 0;
  tcp_update_rto (tc);

  return 0;
}
static void
tcp_estimate_initial_rtt (tcp_connection_t * tc)
{
  u8 thread_index = vlib_num_workers () ? 1 : 0;
  int mrtt;

  if (tc->rtt_ts)
    {
      tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
      mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
      tc->rtt_ts = 0;
    }
  else
    {
      mrtt = tcp_time_now_w_thread (thread_index) - tc->rcv_opts.tsecr;
      mrtt = clib_max (mrtt, 1);
      tc->mrtt_us = (f64) mrtt * TCP_TICK;
    }

  if (mrtt > 0 && mrtt < TCP_RTT_MAX)
    tcp_estimate_rtt (tc, mrtt);
  tcp_update_rto (tc);
}
/**
 * Dequeue bytes for connections that have received acks in last burst
 */
static void
tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
{
  u32 thread_index = wrk->vm->thread_index;
  u32 *pending_deq_acked;
  tcp_connection_t *tc;
  int i;

  if (!vec_len (wrk->pending_deq_acked))
    return;

  pending_deq_acked = wrk->pending_deq_acked;
  for (i = 0; i < vec_len (pending_deq_acked); i++)
    {
      tc = tcp_connection_get (pending_deq_acked[i], thread_index);
      tc->flags &= ~TCP_CONN_DEQ_PENDING;

      if (PREDICT_FALSE (!tc->burst_acked))
	continue;

      /* Dequeue the newly ACKed bytes */
      stream_session_dequeue_drop (&tc->connection, tc->burst_acked);
      tc->burst_acked = 0;
      tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);

      if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
	{
	  if (seq_leq (tc->psh_seq, tc->snd_una))
	    tc->flags &= ~TCP_CONN_PSH_PENDING;
	}

      /* If everything has been acked, stop retransmit timer
       * otherwise update. */
      tcp_retransmit_timer_update (tc);

      /* If not congested, update pacer based on our new
       * cwnd estimate */
      if (!tcp_in_fastrecovery (tc))
	tcp_connection_tx_pacer_update (tc);
    }
  _vec_len (wrk->pending_deq_acked) = 0;
}
static void
tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_DEQ_PENDING))
    {
      vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
      tc->flags |= TCP_CONN_DEQ_PENDING;
    }
  tc->burst_acked += tc->bytes_acked + tc->sack_sb.snd_una_adv;
}
/**
 * Check if duplicate ack as per RFC5681 Sec. 2
 */
static u8
tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
		   u32 prev_snd_una)
{
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
	  && seq_gt (tc->snd_una_max, tc->snd_una)
	  && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
	  && (prev_snd_wnd == tc->snd_wnd));
}
/**
 * Checks if ack is a congestion control event.
 */
static u8
tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
		     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
{
  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
   * defined to be 'duplicate' */
  *is_dack = tc->sack_sb.last_sacked_bytes
    || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);

  return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc));
}
static u32
scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes));
  return hole - sb->holes;
}

static u32
scoreboard_hole_bytes (sack_scoreboard_hole_t * hole)
{
  return hole->end - hole->start;
}

sack_scoreboard_hole_t *
scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
{
  if (index != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, index);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->next);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->prev);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_first_hole (sack_scoreboard_t * sb)
{
  if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->head);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_last_hole (sack_scoreboard_t * sb)
{
  if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->tail);
  return 0;
}
static void
scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  sack_scoreboard_hole_t *next, *prev;

  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    {
      next = pool_elt_at_index (sb->holes, hole->next);
      next->prev = hole->prev;
    }
  else
    {
      sb->tail = hole->prev;
    }

  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    {
      prev = pool_elt_at_index (sb->holes, hole->prev);
      prev->next = hole->next;
    }
  else
    {
      sb->head = hole->next;
    }

  if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
    sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;

  /* Poison the entry */
  if (CLIB_DEBUG > 0)
    clib_memset (hole, 0xfe, sizeof (*hole));

  pool_put (sb->holes, hole);
}
static sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
			u32 start, u32 end)
{
  sack_scoreboard_hole_t *hole, *next, *prev;
  u32 hole_index;

  pool_get (sb->holes, hole);
  clib_memset (hole, 0, sizeof (*hole));

  hole->start = start;
  hole->end = end;
  hole_index = scoreboard_hole_index (sb, hole);

  prev = scoreboard_get_hole (sb, prev_index);
  if (prev)
    {
      hole->prev = prev_index;
      hole->next = prev->next;

      if ((next = scoreboard_next_hole (sb, hole)))
	next->prev = hole_index;
      else
	sb->tail = hole_index;

      prev->next = hole_index;
    }
  else
    {
      sb->head = hole_index;
      hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
      hole->next = TCP_INVALID_SACK_HOLE_INDEX;
    }

  return hole;
}
static void
scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
{
  sack_scoreboard_hole_t *left, *right;
  u32 bytes = 0, blks = 0;

  sb->lost_bytes = 0;
  sb->sacked_bytes = 0;
  left = scoreboard_last_hole (sb);
  if (!left)
    return;

  if (seq_gt (sb->high_sacked, left->end))
    {
      bytes = sb->high_sacked - left->end;
      blks = 1;
    }

  while ((right = left)
	 && bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
	 && blks < TCP_DUPACK_THRESHOLD
	 /* left not updated if above conditions fail */
	 && (left = scoreboard_prev_hole (sb, right)))
    {
      bytes += right->start - left->end;
      blks++;
    }

  /* left is first lost */
  if (left)
    {
      do
	{
	  sb->lost_bytes += scoreboard_hole_bytes (right);
	  left->is_lost = 1;
	  left = scoreboard_prev_hole (sb, right);
	  if (left)
	    bytes += right->start - left->end;
	}
      while ((right = left));
    }

  sb->sacked_bytes = bytes;
}
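/* Illustrative scoreboard layout (not from the original source). Holes are
 * the un-SACKed gaps between snd_una and high_sacked:
 *
 *   snd_una      hole0       SACKed      hole1      SACKed   high_sacked
 *      |-----------|============|----------|===========|
 *
 * sacked_bytes accumulates the SACKed regions between holes. Holes lying
 * more than (TCP_DUPACK_THRESHOLD - 1) * snd_mss bytes, or
 * TCP_DUPACK_THRESHOLD blocks, below high_sacked are flagged is_lost and
 * counted in lost_bytes, mirroring RFC6675's IsLost() heuristic. */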
/**
 * Figure out the next hole to retransmit
 *
 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
 */
sack_scoreboard_hole_t *
scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
			  sack_scoreboard_hole_t * start,
			  u8 have_unsent, u8 * can_rescue, u8 * snd_limited)
{
  sack_scoreboard_hole_t *hole = 0;

  hole = start ? start : scoreboard_first_hole (sb);
  while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
    hole = scoreboard_next_hole (sb, hole);

  /* Nothing, return */
  if (!hole)
    {
      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
      return 0;
    }

  /* Rule (1): if higher than rxt, less than high_sacked and lost */
  if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
    {
      sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
    }
  else
    {
      /* Rule (2): available unsent data */
      if (have_unsent)
	{
	  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
	  return 0;
	}
      /* Rule (3): if hole not lost */
      else if (seq_lt (hole->start, sb->high_sacked))
	{
	  *snd_limited = 0;
	  sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
	}
      /* Rule (4): if hole beyond high_sacked */
      else
	{
	  ASSERT (seq_geq (hole->start, sb->high_sacked));
	  *snd_limited = 1;
	  *can_rescue = 1;
	  /* HighRxt MUST NOT be updated */
	  return 0;
	}
    }

  if (hole && seq_lt (sb->high_rxt, hole->start))
    sb->high_rxt = hole->start;

  return hole;
}
static void
scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 snd_una)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (sb);
  if (hole)
    {
      snd_una = seq_gt (snd_una, hole->start) ? snd_una : hole->start;
      sb->cur_rxt_hole = sb->head;
    }
  sb->high_rxt = snd_una;
  sb->rescue_rxt = snd_una - 1;
}
void
scoreboard_init (sack_scoreboard_t * sb)
{
  sb->head = TCP_INVALID_SACK_HOLE_INDEX;
  sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
}

void
scoreboard_clear (sack_scoreboard_t * sb)
{
  sack_scoreboard_hole_t *hole;
  while ((hole = scoreboard_first_hole (sb)))
    {
      scoreboard_remove_hole (sb, hole);
    }
  ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX);
  ASSERT (pool_elts (sb->holes) == 0);
  sb->sacked_bytes = 0;
  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->snd_una_adv = 0;
  sb->high_sacked = 0;
  sb->high_rxt = 0;
  sb->lost_bytes = 0;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
}
/**
 * Test that scoreboard is sane after recovery
 *
 * Returns 1 if scoreboard is empty or if first hole beyond
 * snd_una.
 */
static u8
tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (&tc->sack_sb);
  return (!hole || (seq_geq (hole->start, tc->snd_una)
		    && seq_lt (hole->end, tc->snd_una_max)));
}
static void
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
{
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_block_t *blk, tmp;
  sack_scoreboard_hole_t *hole, *next_hole, *last_hole;
  u32 blk_index = 0, old_sacked_bytes, hole_index;
  int i, j;

  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->snd_una_adv = 0;

  if (!tcp_opts_sack (&tc->rcv_opts)
      && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    return;

  old_sacked_bytes = sb->sacked_bytes;

  /* Remove invalid blocks */
  blk = tc->rcv_opts.sacks;
  while (blk < vec_end (tc->rcv_opts.sacks))
    {
      if (seq_lt (blk->start, blk->end)
	  && seq_gt (blk->start, tc->snd_una)
	  && seq_gt (blk->start, ack)
	  && seq_lt (blk->start, tc->snd_una_max)
	  && seq_leq (blk->end, tc->snd_una_max))
	{
	  blk++;
	  continue;
	}
      vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
    }

  /* Add block for cumulative ack */
  if (seq_gt (ack, tc->snd_una))
    {
      tmp.start = tc->snd_una;
      tmp.end = ack;
      vec_add1 (tc->rcv_opts.sacks, tmp);
    }

  if (vec_len (tc->rcv_opts.sacks) == 0)
    return;

  tcp_scoreboard_trace_add (tc, ack);

  /* Make sure blocks are ordered */
  for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
    for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
      if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
	{
	  tmp = tc->rcv_opts.sacks[i];
	  tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
	  tc->rcv_opts.sacks[j] = tmp;
	}

  if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    {
      /* If no holes, insert the first that covers all outstanding bytes */
      last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
					  tc->snd_una, tc->snd_una_max);
      sb->tail = scoreboard_hole_index (sb, last_hole);
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      sb->high_sacked = tmp.end;
    }
  else
    {
      /* If we have holes but snd_una_max is beyond the last hole, update
       * last hole end */
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      last_hole = scoreboard_last_hole (sb);
      if (seq_gt (tc->snd_una_max, last_hole->end))
	{
	  if (seq_geq (last_hole->start, sb->high_sacked))
	    {
	      last_hole->end = tc->snd_una_max;
	    }
	  /* New hole after high sacked block */
	  else if (seq_lt (sb->high_sacked, tc->snd_una_max))
	    {
	      scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
				      tc->snd_una_max);
	    }
	}
      /* Keep track of max byte sacked for when the last hole
       * is acked */
      if (seq_gt (tmp.end, sb->high_sacked))
	sb->high_sacked = tmp.end;
    }

  /* Walk the holes with the SACK blocks */
  hole = pool_elt_at_index (sb->holes, sb->head);
  while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
    {
      blk = &tc->rcv_opts.sacks[blk_index];
      if (seq_leq (blk->start, hole->start))
	{
	  /* Block covers hole. Remove hole */
	  if (seq_geq (blk->end, hole->end))
	    {
	      next_hole = scoreboard_next_hole (sb, hole);

	      /* Byte accounting: snd_una needs to be advanced */
	      if (blk->end == ack)
		{
		  if (next_hole)
		    {
		      if (seq_lt (ack, next_hole->start))
			sb->snd_una_adv = next_hole->start - ack;
		      sb->last_bytes_delivered +=
			next_hole->start - hole->end;
		    }
		  else
		    {
		      ASSERT (seq_geq (sb->high_sacked, ack));
		      sb->snd_una_adv = sb->high_sacked - ack;
		      sb->last_bytes_delivered += sb->high_sacked - hole->end;
		    }
		}

	      scoreboard_remove_hole (sb, hole);
	      hole = next_hole;
	    }
	  /* Partial 'head' overlap */
	  else
	    {
	      if (seq_gt (blk->end, hole->start))
		{
		  hole->start = blk->end;
		}
	      blk_index++;
	    }
	}
      else
	{
	  /* Hole must be split */
	  if (seq_lt (blk->end, hole->end))
	    {
	      hole_index = scoreboard_hole_index (sb, hole);
	      next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
						  hole->end);

	      /* Pool might've moved */
	      hole = scoreboard_get_hole (sb, hole_index);
	      hole->end = blk->start;
	      blk_index++;
	      ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
	    }
	  else if (seq_lt (blk->start, hole->end))
	    {
	      hole->end = blk->start;
	    }
	  hole = scoreboard_next_hole (sb, hole);
	}
    }

  if (pool_elts (sb->holes) == 1)
    {
      hole = scoreboard_first_hole (sb);
      if (hole->start == ack + sb->snd_una_adv
	  && hole->end == tc->snd_una_max)
	scoreboard_remove_hole (sb, hole);
    }

  scoreboard_update_bytes (tc, sb);
  sb->last_sacked_bytes = sb->sacked_bytes
    - (old_sacked_bytes - sb->last_bytes_delivered);
  ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
  ASSERT (sb->sacked_bytes == 0 || tcp_in_recovery (tc)
	  || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack));
  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max
	  - seq_max (tc->snd_una, ack) || tcp_in_recovery (tc));
  ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
	  || sb->holes[sb->head].start == ack + sb->snd_una_adv);
  TCP_EVT_DBG (TCP_EVT_CC_SCOREBOARD, tc);
}
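/* Illustrative walk-through (not from the original source): with
 * snd_una = 1000, snd_una_max = 5000 and an incoming SACK block [2000, 3000)
 * on ack = 1000, the initial hole [1000, 5000) is split into [1000, 2000)
 * and [3000, 5000); high_sacked becomes 3000 and last_sacked_bytes 1000. If
 * ack = 2000 arrives next, the synthesized cumulative-ack block [1000, 2000)
 * removes hole [1000, 2000); since the next hole starts at 3000,
 * snd_una_adv = 1000 lets snd_una jump over the sacked region to 3000, and
 * the bytes in between are counted in last_bytes_delivered. */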
/**
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If successful, and new window is 'effectively' 0, activate persist
 * timer.
 */
static void
tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
{
  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
   * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
    {
      tc->snd_wnd = snd_wnd;
      tc->snd_wl1 = seq;
      tc->snd_wl2 = ack;
      TCP_EVT_DBG (TCP_EVT_SND_WND, tc);

      if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
	{
	  /* Set persist timer if not set and we just got 0 wnd */
	  if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
	      && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
	    tcp_persist_timer_set (tc);
	}
      else
	{
	  tcp_persist_timer_reset (tc);
	  if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
	    {
	      tc->rto_boff = 0;
	      tcp_update_rto (tc);
	    }
	}
    }
}
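/* Illustrative example (not from the original source): with snd_wl1 = 100
 * and snd_wl2 = 50, a segment with seq = 150 always refreshes snd_wnd, while
 * one with seq = 100 refreshes it only if its ack >= 50. The RFC793 rule
 * keeps the newest window advertisement and ignores those carried by old,
 * reordered segments. */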
/**
 * Init loss recovery/fast recovery.
 *
 * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
 * updated in @ref tcp_cc_handle_event after fast retransmit
 */
static void
tcp_cc_init_congestion (tcp_connection_t * tc)
{
  tcp_fastrecovery_on (tc);
  tc->snd_congestion = tc->snd_una_max;
  tc->cwnd_acc_bytes = 0;
  tc->snd_rxt_bytes = 0;
  tc->prev_ssthresh = tc->ssthresh;
  tc->prev_cwnd = tc->cwnd;
  tc->cc_algo->congestion (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);
}
static void
tcp_cc_recovery_exit (tcp_connection_t * tc)
{
  tc->rto_boff = 0;
  tcp_update_rto (tc);
  tc->snd_rxt_ts = 0;
  tc->snd_nxt = tc->snd_una_max;
  tc->rtt_ts = 0;
  tcp_recovery_off (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}

void
tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
{
  tc->cc_algo->recovered (tc);
  tc->snd_rxt_bytes = 0;
  tc->rcv_dupacks = 0;
  tc->snd_nxt = tc->snd_una_max;
  tc->snd_rxt_bytes = 0;
  tc->rtt_ts = 0;

  tcp_fastrecovery_off (tc);
  tcp_fastrecovery_first_off (tc);

  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
}
static void
tcp_cc_congestion_undo (tcp_connection_t * tc)
{
  tc->cwnd = tc->prev_cwnd;
  tc->ssthresh = tc->prev_ssthresh;
  tc->snd_nxt = tc->snd_una_max;
  tc->rcv_dupacks = 0;
  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  else if (tcp_in_fastrecovery (tc))
    tcp_cc_fastrecovery_exit (tc);
  ASSERT (tc->rto_boff == 0);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 5);
}
static inline u8
tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
{
  return (tcp_in_recovery (tc) && tc->rto_boff == 1
	  && tc->snd_rxt_ts
	  && tcp_opts_tstamp (&tc->rcv_opts)
	  && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
}

static inline u8
tcp_cc_is_spurious_fast_rxt (tcp_connection_t * tc)
{
  return (tcp_in_fastrecovery (tc)
	  && tc->cwnd > tc->ssthresh + 3 * tc->snd_mss);
}

static u8
tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
{
  return (tcp_cc_is_spurious_timeout_rxt (tc)
	  || tcp_cc_is_spurious_fast_rxt (tc));
}
static int
tcp_cc_recover (tcp_connection_t * tc)
{
  ASSERT (tcp_in_cong_recovery (tc));
  if (tcp_cc_is_spurious_retransmit (tc))
    {
      tcp_cc_congestion_undo (tc);
      return 1;
    }

  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  else if (tcp_in_fastrecovery (tc))
    tcp_cc_fastrecovery_exit (tc);

  ASSERT (tc->rto_boff == 0);
  ASSERT (!tcp_in_cong_recovery (tc));
  ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
  return 0;
}
static void
tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b)
{
  ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));

  /* Congestion avoidance */
  tcp_cc_rcv_ack (tc);

  /* If a cumulative ack, make sure dupacks is 0 */
  tc->rcv_dupacks = 0;

  /* When dupacks hits the threshold we only enter fast retransmit if
   * cumulative ack covers more than snd_congestion. Should snd_una
   * wrap this test may fail under otherwise valid circumstances.
   * Therefore, proactively update snd_congestion when wrap detected. */
  if (PREDICT_FALSE
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;
}
static u8
tcp_should_fastrecover_sack (tcp_connection_t * tc)
{
  return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
}

static u8
tcp_should_fastrecover (tcp_connection_t * tc)
{
  return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
	  || tcp_should_fastrecover_sack (tc));
}
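/* Illustrative numbers (not from the original source): with
 * TCP_DUPACK_THRESHOLD = 3 and snd_mss = 1460, fast recovery is entered on
 * the third duplicate ack, or as soon as more than 2 * 1460 = 2920 bytes sit
 * SACKed in the scoreboard, the byte-based equivalent of three dupacks used
 * by RFC6675. */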
static void
tcp_program_fastretransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_FRXT_PENDING))
    {
      vec_add1 (wrk->pending_fast_rxt, tc->c_c_index);
      tc->flags |= TCP_CONN_FRXT_PENDING;
    }
}

void
tcp_do_fastretransmits (tcp_worker_ctx_t * wrk)
{
  u32 *ongoing_fast_rxt, burst_bytes, sent_bytes, thread_index;
  u32 max_burst_size, burst_size, n_segs = 0, n_segs_now;
  tcp_connection_t *tc;
  u64 last_cpu_time;
  int i;

  if (vec_len (wrk->pending_fast_rxt) == 0
      && vec_len (wrk->postponed_fast_rxt) == 0)
    return;

  thread_index = wrk->vm->thread_index;
  last_cpu_time = wrk->vm->clib_time.last_cpu_time;
  ongoing_fast_rxt = wrk->ongoing_fast_rxt;
  vec_append (ongoing_fast_rxt, wrk->postponed_fast_rxt);
  vec_append (ongoing_fast_rxt, wrk->pending_fast_rxt);

  _vec_len (wrk->postponed_fast_rxt) = 0;
  _vec_len (wrk->pending_fast_rxt) = 0;

  max_burst_size = VLIB_FRAME_SIZE / vec_len (ongoing_fast_rxt);
  max_burst_size = clib_max (max_burst_size, 1);

  for (i = 0; i < vec_len (ongoing_fast_rxt); i++)
    {
      if (n_segs >= VLIB_FRAME_SIZE)
	{
	  vec_add1 (wrk->postponed_fast_rxt, ongoing_fast_rxt[i]);
	  continue;
	}

      tc = tcp_connection_get (ongoing_fast_rxt[i], thread_index);
      tc->flags &= ~TCP_CONN_FRXT_PENDING;

      if (!tcp_in_fastrecovery (tc))
	continue;

      burst_size = clib_min (max_burst_size, VLIB_FRAME_SIZE - n_segs);
      burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
							 last_cpu_time);
      burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
      if (!burst_size)
	{
	  tcp_program_fastretransmit (wrk, tc);
	  continue;
	}

      n_segs_now = tcp_fast_retransmit (wrk, tc, burst_size);
      sent_bytes = clib_min (n_segs_now * tc->snd_mss, burst_bytes);
      transport_connection_tx_pacer_update_bytes (&tc->connection,
						  sent_bytes);
      n_segs += n_segs_now;
    }
  _vec_len (ongoing_fast_rxt) = 0;
  wrk->ongoing_fast_rxt = ongoing_fast_rxt;
}
/**
 * One function to rule them all ... and in the darkness bind them
 */
static void
tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
{
  u32 rxt_delivered;

  if (tcp_in_fastrecovery (tc) && tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      if (tc->bytes_acked)
	goto partial_ack;
      tcp_program_fastretransmit (tcp_get_worker (tc->c_thread_index), tc);
      return;
    }
  /*
   * Duplicate ACK. Check if we should enter fast recovery, or if already in
   * it account for the bytes that left the network.
   */
  else if (is_dack && !tcp_in_recovery (tc))
    {
      TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
      ASSERT (tc->snd_una != tc->snd_una_max
	      || tc->sack_sb.last_sacked_bytes);

      tc->rcv_dupacks++;

      /* Pure duplicate ack. If some data got acked, it's handled lower */
      if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
	{
	  ASSERT (tcp_in_fastrecovery (tc));
	  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
	  return;
	}
      else if (tcp_should_fastrecover (tc))
	{
	  u32 pacer_wnd;

	  ASSERT (!tcp_in_fastrecovery (tc));

	  /* Heuristic to catch potential late dupacks
	   * after fast retransmit exits */
	  if (is_dack && tc->snd_una == tc->snd_congestion
	      && timestamp_leq (tc->rcv_opts.tsecr, tc->tsecr_last_ack))
	    {
	      tc->rcv_dupacks = 0;
	      return;
	    }

	  tcp_cc_init_congestion (tc);
	  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);

	  if (tcp_opts_sack_permitted (&tc->rcv_opts))
	    {
	      tc->cwnd = tc->ssthresh;
	      scoreboard_init_high_rxt (&tc->sack_sb, tc->snd_una);
	    }
	  else
	    {
	      /* Post retransmit update cwnd to ssthresh and account for the
	       * three segments that have left the network and should've been
	       * buffered at the receiver XXX */
	      tc->cwnd = tc->ssthresh + 3 * tc->snd_mss;
	    }

	  /* Constrain rate until we get a partial ack */
	  pacer_wnd = clib_max (0.1 * tc->cwnd, 2 * tc->snd_mss);
	  tcp_connection_tx_pacer_reset (tc, pacer_wnd,
					 0 /* start bucket */ );
	  tcp_program_fastretransmit (tcp_get_worker (tc->c_thread_index),
				      tc);
	  return;
	}
      else if (!tc->bytes_acked
	       || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
	{
	  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
	  return;
	}
      else
	goto partial_ack;
    }
  /* Don't allow entry in fast recovery if still in recovery, for now */
  else if (0 && is_dack && tcp_in_recovery (tc))
    {
      /* If one of the two conditions below holds, reset dupacks because
       * we're probably after timeout (RFC6582 heuristics).
       * If cumulative ack does not cover more than congestion threshold,
       * and:
       * 1) The following doesn't hold: The congestion window is greater
       *    than SMSS bytes and the difference between highest_ack
       *    and prev_highest_ack is at most 4*SMSS bytes
       * 2) Echoed timestamp in the last non-dup ack does not equal the
       *    stored timestamp
       */
      if (seq_leq (tc->snd_una, tc->snd_congestion)
	  && ((!(tc->cwnd > tc->snd_mss
		 && tc->bytes_acked <= 4 * tc->snd_mss))
	      || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
	{
	  tc->rcv_dupacks = 0;
	  return;
	}
    }

  if (!tc->bytes_acked)
    return;

partial_ack:
  TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);

  /*
   * Legitimate ACK. 1) See if we can exit recovery
   */

  /* Update the pacing rate. For the first partial ack we move from
   * the artificially constrained rate to the one after congestion */
  tcp_connection_tx_pacer_update (tc);

  if (seq_geq (tc->snd_una, tc->snd_congestion))
    {
      tcp_retransmit_timer_update (tc);

      /* If spurious return, we've already updated everything */
      if (tcp_cc_recover (tc))
	{
	  tc->tsecr_last_ack = tc->rcv_opts.tsecr;
	  return;
	}

      tc->snd_nxt = tc->snd_una_max;

      /* Treat as congestion avoidance ack */
      tcp_cc_rcv_ack (tc);
      return;
    }

  /*
   * Legitimate ACK. 2) If PARTIAL ACK try to retransmit
   */

  /* XXX limit this only to first partial ack? */
  tcp_retransmit_timer_update (tc);

  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
   * reset dupacks to 0. Also needed if in congestion recovery */
  tc->rcv_dupacks = 0;

  /* Post RTO timeout don't try anything fancy */
  if (tcp_in_recovery (tc))
    {
      tcp_cc_rcv_ack (tc);
      transport_add_tx_event (&tc->connection);
      return;
    }

  /* Remove retransmitted bytes that have been delivered */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv
	      >= tc->sack_sb.last_bytes_delivered
	      || (tc->flags & TCP_CONN_FINSNT));

      /* If we have sacks and we haven't gotten an ack beyond high_rxt,
       * remove sacked bytes delivered */
      if (seq_lt (tc->snd_una, tc->sack_sb.high_rxt))
	{
	  rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv
	    - tc->sack_sb.last_bytes_delivered;
	  ASSERT (tc->snd_rxt_bytes >= rxt_delivered);
	  tc->snd_rxt_bytes -= rxt_delivered;
	}
      else
	{
	  /* Apparently all retransmitted holes have been acked */
	  tc->snd_rxt_bytes = 0;
	  tc->sack_sb.high_rxt = tc->snd_una;
	}
    }
  else
    {
      tcp_fastrecovery_first_on (tc);
      /* Reuse last bytes delivered to track total bytes acked */
      tc->sack_sb.last_bytes_delivered += tc->bytes_acked;
      if (tc->snd_rxt_bytes > tc->bytes_acked)
	tc->snd_rxt_bytes -= tc->bytes_acked;
      else
	tc->snd_rxt_bytes = 0;
    }

  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);

  /*
   * Since this was a partial ack, try to retransmit some more data
   */
  tcp_program_fastretransmit (tcp_get_worker (tc->c_thread_index), tc);
}
/**
 * Process incoming ACK
 */
static int
tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
	     tcp_header_t * th, u32 * error)
{
  u32 prev_snd_wnd, prev_snd_una;
  u8 is_dack;

  TCP_EVT_DBG (TCP_EVT_CC_STAT, tc);

  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
    {
      /* When we entered recovery, we reset snd_nxt to snd_una. Seems peer
       * still has the data so accept the ack */
      if (tcp_in_recovery (tc)
	  && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_congestion))
	{
	  tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
	  if (seq_gt (tc->snd_nxt, tc->snd_una_max))
	    tc->snd_una_max = tc->snd_nxt;
	  goto process_ack;
	}

      /* If we have outstanding data and this is within the window, accept it,
       * probably retransmit has timed out. Otherwise ACK segment and then
       * drop it */
      if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
	{
	  tcp_program_ack (wrk, tc);
	  *error = TCP_ERROR_ACK_FUTURE;
	  TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
		       vnet_buffer (b)->tcp.ack_number);
	  return -1;
	}

      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2,
		   vnet_buffer (b)->tcp.ack_number);

      tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
    }

  /* If old ACK, probably it's an old dupack */
  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
    {
      *error = TCP_ERROR_ACK_OLD;
      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
		   vnet_buffer (b)->tcp.ack_number);
      if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
	tcp_cc_handle_event (tc, 1);
      /* Don't drop yet */
      return 0;
    }

process_ack:

  /*
   * Looks okay, process feedback
   */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);

  prev_snd_wnd = tc->snd_wnd;
  prev_snd_una = tc->snd_una;
  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
		      vnet_buffer (b)->tcp.ack_number,
		      clib_net_to_host_u16 (th->window) << tc->snd_wscale);
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
  tcp_validate_txf_size (tc, tc->bytes_acked);

  if (tc->bytes_acked)
    {
      tcp_program_dequeue (wrk, tc);
      tcp_update_rtt (tc, vnet_buffer (b)->tcp.ack_number);
    }

  TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);

  /*
   * Check if we have congestion event
   */
  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
    {
      tcp_cc_handle_event (tc, is_dack);
      if (!tcp_in_cong_recovery (tc))
	{
	  *error = TCP_ERROR_ACK_OK;
	  return 0;
	}
      *error = TCP_ERROR_ACK_DUP;
      if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
	return 0;
      return -1;
    }

  /*
   * Update congestion control (slow start/congestion avoidance)
   */
  tcp_cc_update (tc, b);
  *error = TCP_ERROR_ACK_OK;
  return 0;
}
static void
tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  if (!tcp_disconnect_pending (tc))
    {
      vec_add1 (wrk->pending_disconnects, tc->c_c_index);
      tcp_disconnect_pending_on (tc);
    }
}

static void
tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
{
  u32 thread_index, *pending_disconnects;
  tcp_connection_t *tc;
  int i;

  if (!vec_len (wrk->pending_disconnects))
    return;

  thread_index = wrk->vm->thread_index;
  pending_disconnects = wrk->pending_disconnects;
  for (i = 0; i < vec_len (pending_disconnects); i++)
    {
      tc = tcp_connection_get (pending_disconnects[i], thread_index);
      tcp_disconnect_pending_off (tc);
      session_transport_closing_notify (&tc->connection);
    }
  _vec_len (wrk->pending_disconnects) = 0;
}
static void
tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
	     u32 * error)
{
  /* Account for the FIN and send ack */
  tc->rcv_nxt += 1;
  tcp_program_ack (wrk, tc);
  /* Enter CLOSE-WAIT and notify session. To avoid lingering
   * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
  tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
  tcp_program_disconnect (wrk, tc);
  tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
  TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc);
  *error = TCP_ERROR_FIN_RCVD;
}
static u8
tcp_sack_vector_is_sane (sack_block_t * sacks)
{
  int i;
  for (i = 1; i < vec_len (sacks); i++)
    {
      if (sacks[i - 1].end == sacks[i].start)
	return 0;
    }
  return 1;
}
/**
 * Build SACK list as per RFC2018.
 *
 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the ones most recently reported in SACK
 * blocks.
 *
 * @param tc TCP connection for which the SACK list is updated
 * @param start Start sequence number of the newest SACK block
 * @param end End sequence of the newest SACK block
 */
void
tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
{
  sack_block_t *new_list = 0, *block = 0;
  int i;

  /* If the first segment is ooo add it to the list. Last write might've moved
   * rcv_nxt over the first segment. */
  if (seq_lt (tc->rcv_nxt, start))
    {
      vec_add2 (new_list, block, 1);
      block->start = start;
      block->end = end;
    }

  /* Find the blocks still worth keeping. */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    {
      /* Discard if rcv_nxt advanced beyond current block */
      if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
	continue;

      /* Merge or drop if segment overlapped by the new segment */
      if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
		    && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
	{
	  if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
	    new_list[0].start = tc->snd_sacks[i].start;
	  if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
	    new_list[0].end = tc->snd_sacks[i].end;
	  continue;
	}

      /* Save to new SACK list if we have space. */
      if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
	{
	  vec_add1 (new_list, tc->snd_sacks[i]);
	}
      else
	{
	  clib_warning ("sack discarded");
	}
    }

  ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);

  /* Replace old vector with new one */
  vec_free (tc->snd_sacks);
  tc->snd_sacks = new_list;

  /* Segments should not 'touch' */
  ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
}
u32
tcp_sack_list_bytes (tcp_connection_t * tc)
{
  u32 bytes = 0, i;
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    bytes += tc->snd_sacks[i].end - tc->snd_sacks[i].start;
  return bytes;
}
/** Enqueue data for delivery to application */
static int
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
			  u16 data_len)
{
  int written, error = TCP_ERROR_ENQUEUED;

  ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
  ASSERT (data_len);
  written = session_enqueue_stream_connection (&tc->connection, b, 0,
					       1 /* queue event */ , 1);

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written);

  /* Update rcv_nxt */
  if (PREDICT_TRUE (written == data_len))
    {
      tc->rcv_nxt += written;
    }
  /* If more data written than expected, account for out-of-order bytes. */
  else if (written > data_len)
    {
      tc->rcv_nxt += written;
      TCP_EVT_DBG (TCP_EVT_CC_INPUT, tc, data_len, written);
    }
  else if (written > 0)
    {
      /* We've written something but FIFO is probably full now */
      tc->rcv_nxt += written;
      error = TCP_ERROR_PARTIALLY_ENQUEUED;
    }
  else
    {
      return TCP_ERROR_FIFO_FULL;
    }

  /* Update SACK list if need be */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      /* Remove SACK blocks that have been delivered */
      tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
    }

  return error;
}
/** Enqueue out-of-order data */
static int
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
			 u16 data_len)
{
  stream_session_t *s0;
  int rv, offset;

  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
  ASSERT (data_len);

  /* Enqueue out-of-order data with relative offset */
  rv = session_enqueue_stream_connection (&tc->connection, b,
					  vnet_buffer (b)->tcp.seq_number -
					  tc->rcv_nxt, 0 /* queue event */ ,
					  0);

  /* Nothing written */
  if (rv)
    {
      TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0);
      return TCP_ERROR_FIFO_FULL;
    }

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);

  /* Update SACK list if in use */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      ooo_segment_t *newest;
      u32 start, end;

      s0 = session_get (tc->c_s_index, tc->c_thread_index);

      /* Get the newest segment from the fifo */
      newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
      if (newest)
	{
	  offset = ooo_segment_offset (s0->rx_fifo, newest);
	  ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
	  start = tc->rcv_nxt + offset;
	  end = start + ooo_segment_length (s0->rx_fifo, newest);
	  tcp_update_sack_list (tc, start, end);
	  svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
	  TCP_EVT_DBG (TCP_EVT_CC_SACKS, tc);
	}
    }

  return TCP_ERROR_ENQUEUED_OOO;
}
/**
 * Check if ACK could be delayed. If ack can be delayed, it should return
 * true for a full frame. If we're always acking, return 0.
 */
always_inline int
tcp_can_delack (tcp_connection_t * tc)
{
  /* Send ack if ... */
  if (TCP_ALWAYS_ACK
      /* just sent a rcv wnd 0 */
      || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0
      /* constrained to send ack */
      || (tc->flags & TCP_CONN_SNDACK) != 0
      /* we're almost out of tx wnd */
      || tcp_available_cc_snd_space (tc) < 4 * tc->snd_mss)
    return 0;

  return 1;
}
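/* Illustrative reading (not from the original source): with snd_mss = 1460,
 * an ack is forced out whenever less than 4 * 1460 = 5840 bytes of usable
 * send space remain, so the peer's view of our window stays fresh; otherwise
 * the DELACK timer may batch acks. */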
static int
tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
{
  u32 discard, first = b->current_length;
  vlib_main_t *vm = vlib_get_main ();

  /* Handle multi-buffer segments */
  if (n_bytes_to_drop > b->current_length)
    {
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
	return -1;
      do
	{
	  discard = clib_min (n_bytes_to_drop, b->current_length);
	  vlib_buffer_advance (b, discard);
	  b = vlib_get_buffer (vm, b->next_buffer);
	  n_bytes_to_drop -= discard;
	}
      while (n_bytes_to_drop);
      if (n_bytes_to_drop > first)
	b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
    }
  else
    vlib_buffer_advance (b, n_bytes_to_drop);
  vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
  return 0;
}
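/* Illustrative scenario (not from the original source): a retransmission
 * whose first 1000 bytes were already delivered arrives as a 2900-byte
 * chained segment. The stale head is stripped, walking to the next buffer in
 * the chain when the first buffer holds fewer bytes than must be dropped, so
 * the remaining bytes can be enqueued in order. */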
/**
 * Receive buffer for connection and handle acks
 *
 * It handles both in-order and out-of-order data.
 */
always_inline int
tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		 vlib_buffer_t * b)
{
  u32 error, n_bytes_to_drop, n_data_bytes;

  vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
  n_data_bytes = vnet_buffer (b)->tcp.data_len;
  ASSERT (n_data_bytes);

  /* Handle out-of-order data */
  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
    {
      /* Old sequence numbers allowed through because they overlapped
       * the rx window */
      if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
	{
	  /* Completely in the past (possible retransmit). Ack
	   * retransmissions since we may not have any data to send */
	  if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
	    {
	      tcp_program_ack (wrk, tc);
	      error = TCP_ERROR_SEGMENT_OLD;
	      goto done;
	    }

	  /* Chop off the bytes in the past and see if what is left
	   * can be enqueued in order */
	  n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
	  n_data_bytes -= n_bytes_to_drop;
	  vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
	  if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
	    {
	      error = TCP_ERROR_SEGMENT_OLD;
	      goto done;
	    }
	  goto in_order;
	}

      /* RFC2581: Enqueue and send DUPACK for fast retransmit */
      error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
      tcp_program_dupack (wrk, tc);
      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
      goto done;
    }

in_order:

  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
   * segments can be enqueued after fifo tail offset changes. */
  error = tcp_session_enqueue_data (tc, b, n_data_bytes);
  if (tcp_can_delack (tc))
    {
      if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
	tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME);
      goto done;
    }

  tcp_program_ack (wrk, tc);

done:
  return error;
}
typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_rx_trace_t;

static u8 *
format_tcp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "%U\n%U%U",
	      format_tcp_header, &t->tcp_header, 128,
	      format_white_space, indent,
	      format_tcp_connection, &t->tcp_connection, 1);

  return s;
}

static u8 *
format_tcp_rx_trace_short (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);

  s = format (s, "%d -> %d (%U)",
	      clib_net_to_host_u16 (t->tcp_header.dst_port),
	      clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
	      t->tcp_connection.state);

  return s;
}
static void
tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
		       tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
{
  if (tc0)
    {
      clib_memcpy_fast (&t0->tcp_connection, tc0,
			sizeof (t0->tcp_connection));
    }
  else
    {
      th0 = tcp_buffer_hdr (b0);
    }
  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
}
static void
tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame, u8 is_ip4)
{
  u32 *from, n_left;

  n_left = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left >= 1)
    {
      tcp_connection_t *tc0;
      tcp_rx_trace_t *t0;
      tcp_header_t *th0;
      vlib_buffer_t *b0;
      u32 bi0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	  tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
				    vm->thread_index);
	  th0 = tcp_buffer_hdr (b0);
	  tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
	}

      from += 1;
      n_left -= 1;
    }
}
always_inline void
tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
			u8 is_ip4, u32 evt, u32 val)
{
  if (is_ip4)
    vlib_node_increment_counter (vm, tcp4_node, evt, val);
  else
    vlib_node_increment_counter (vm, tcp6_node, evt, val);
}

#define tcp_maybe_inc_counter(node_id, err, count)			\
{									\
  if (next0 != tcp_next_drop (is_ip4))					\
    tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,		\
			    tcp6_##node_id##_node.index, is_ip4, err,	\
			    1);						\
}
#define tcp_inc_counter(node_id, err, count)				\
  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,		\
			  tcp6_##node_id##_node.index, is_ip4,		\
			  err, count)
#define tcp_maybe_inc_err_counter(cnts, err)				\
{									\
  cnts[err] += (next0 != tcp_next_drop (is_ip4));			\
}
#define tcp_inc_err_counter(cnts, err, val)				\
{									\
  cnts[err] += val;							\
}
#define tcp_store_err_counters(node_id, cnts)				\
{									\
  int i;								\
  for (i = 0; i < TCP_N_ERROR; i++)					\
    if (cnts[i])							\
      tcp_inc_counter(node_id, i, cnts[i]);				\
}
always_inline uword
tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			  vlib_frame_t * frame, int is_ip4)
{
  u32 thread_index = vm->thread_index, errors = 0;
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  u32 n_left_from, *from, *first_buffer;
  u16 err_counters[TCP_N_ERROR] = { 0 };

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    tcp_established_trace_frame (vm, node, frame, is_ip4);

  first_buffer = from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_ACK_OK;
      vlib_buffer_t *b0;
      tcp_header_t *th0;
      tcp_connection_t *tc0;

      if (n_left_from > 1)
	{
	  vlib_buffer_t *pb;
	  pb = vlib_get_buffer (vm, from[1]);
	  vlib_prefetch_buffer_header (pb, LOAD);
	  CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	}

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
				thread_index);

      if (PREDICT_FALSE (tc0 == 0))
	{
	  error0 = TCP_ERROR_INVALID_CONNECTION;
	  goto done;
	}

      th0 = tcp_buffer_hdr (b0);

      /* TODO header prediction fast path */

      /* 1-4: check SEQ, RST, SYN */
      if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
	{
	  TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
	  goto done;
	}

      /* 5: check the ACK field */
      if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc0, b0, th0, &error0)))
	goto done;

      /* 6: check the URG bit TODO */

      /* 7: process the segment text */
      if (vnet_buffer (b0)->tcp.data_len)
	error0 = tcp_segment_rcv (wrk, tc0, b0);

      /* 8: check the FIN bit */
      if (PREDICT_FALSE (tcp_is_fin (th0)))
	tcp_rcv_fin (wrk, tc0, b0, &error0);

    done:
      tcp_inc_err_counter (err_counters, error0, 1);
    }

  errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
						 thread_index);
  err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
  tcp_store_err_counters (established, err_counters);
  tcp_handle_postponed_dequeues (wrk);
  tcp_handle_disconnects (wrk);
  vlib_buffer_free (vm, first_buffer, frame->n_vectors);

  return frame->n_vectors;
}
static uword
tcp4_established (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_established (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_established_node) =
{
  .function = tcp4_established,
  .name = "tcp4-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_established_node, tcp4_established);

VLIB_REGISTER_NODE (tcp6_established_node) =
{
  .function = tcp6_established,
  .name = "tcp6-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_established_node, tcp6_established);
vlib_node_registration_t tcp4_syn_sent_node;
vlib_node_registration_t tcp6_syn_sent_node;

static u8
tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
{
  transport_connection_t *tmp = 0;
  u64 handle;

  if (!tc)
    return 1;

  /* Proxy case */
  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
    return 1;

  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
		 && (tc->state == TCP_STATE_LISTEN
		     || tc->c_rmt_port == hdr->src_port));

  if (!is_valid)
    {
      handle = session_lookup_half_open_handle (&tc->connection);
      tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
						 tc->c_proto, tc->c_is_ip4);

      if (tmp)
	{
	  if (tmp->lcl_port == hdr->dst_port
	      && tmp->rmt_port == hdr->src_port)
	    {
	      TCP_DBG ("half-open is valid!");
	    }
	}
    }
  return is_valid;
}
/**
 * Lookup transport connection
 */
static tcp_connection_t *
tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
		       u8 is_ip4)
{
  tcp_header_t *tcp;
  transport_connection_t *tconn;
  tcp_connection_t *tc;
  u8 is_filtered = 0;
  if (is_ip4)
    {
      ip4_header_t *ip4;
      ip4 = vlib_buffer_get_current (b);
      tcp = ip4_next_header (ip4);
      tconn = session_lookup_connection_wt4 (fib_index,
					     &ip4->dst_address,
					     &ip4->src_address,
					     tcp->dst_port,
					     tcp->src_port,
					     TRANSPORT_PROTO_TCP,
					     thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  else
    {
      ip6_header_t *ip6;
      ip6 = vlib_buffer_get_current (b);
      tcp = ip6_next_header (ip6);
      tconn = session_lookup_connection_wt6 (fib_index,
					     &ip6->dst_address,
					     &ip6->src_address,
					     tcp->dst_port,
					     tcp->src_port,
					     TRANSPORT_PROTO_TCP,
					     thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  return tc;
}
2315 tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2316 vlib_frame_t * from_frame, int is_ip4)
2318 tcp_main_t *tm = vnet_get_tcp_main ();
2319 u32 n_left_from, *from, *first_buffer, errors = 0;
2320 u32 my_thread_index = vm->thread_index;
2321 tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);
2323 from = first_buffer = vlib_frame_vector_args (from_frame);
2324 n_left_from = from_frame->n_vectors;
2326 while (n_left_from > 0)
2328 u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
2329 tcp_connection_t *tc0, *new_tc0;
2330 tcp_header_t *tcp0 = 0;
2338 b0 = vlib_get_buffer (vm, bi0);
2340 tcp_half_open_connection_get (vnet_buffer (b0)->tcp.connection_index);
2341 if (PREDICT_FALSE (tc0 == 0))
2343 error0 = TCP_ERROR_INVALID_CONNECTION;
2347 /* Half-open completed recently but the connection was't removed
2348 * yet by the owning thread */
2349 if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
2351 /* Make sure the connection actually exists */
2352 ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
2353 my_thread_index, is_ip4));
      ack0 = vnet_buffer (b0)->tcp.ack_number;
      seq0 = vnet_buffer (b0)->tcp.seq_number;
      tcp0 = tcp_buffer_hdr (b0);

      /* Crude check to see if the connection handle does not match
       * the packet. Probably connection just switched to established */
      if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
			 || tcp0->src_port != tc0->c_rmt_port))
	{
	  error0 = TCP_ERROR_INVALID_CONNECTION;
	  goto drop;
	}

      if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0)
			 && !tcp_syn (tcp0)))
	{
	  error0 = TCP_ERROR_SEGMENT_INVALID;
	  goto drop;
	}

      /* SYNs consume sequence numbers */
      vnet_buffer (b0)->tcp.seq_end += tcp_is_syn (tcp0);
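      /* Illustrative note: because of the line above, a SYN-ACK carrying
       * sequence number S and no payload ends up with tcp.seq_end == S + 1,
       * so the rcv_nxt taken from seq_end further down already accounts for
       * the SYN occupying one sequence number. */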
      /*
       * 1. check the ACK bit
       *
       *   If the ACK bit is set
       *     If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
       *     the RST bit is set, if so drop the segment and return)
       *       <SEQ=SEG.ACK><CTL=RST>
       *     and discard the segment. Return.
       *     If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
       */
      if (tcp_ack (tcp0))
	{
	  if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
	    {
	      if (!tcp_rst (tcp0))
		tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
	      error0 = TCP_ERROR_RCV_WND;
	      goto drop;
	    }

	  /* Make sure ACK is valid */
	  if (seq_gt (tc0->snd_una, ack0))
	    {
	      error0 = TCP_ERROR_ACK_INVALID;
	      goto drop;
	    }
	}
      /*
       * 2. check the RST bit
       */
      if (tcp_rst (tcp0))
	{
	  /* If ACK is acceptable, signal client that peer is not
	   * willing to accept connection and drop connection */
	  if (tcp_ack (tcp0))
	    tcp_connection_reset (tc0);
	  error0 = TCP_ERROR_RST_RCVD;
	  goto drop;
	}
      /* 3. check the security and precedence (skipped) */

      /* 4. check the SYN bit */

      /* No SYN flag. Drop. */
      if (!tcp_syn (tcp0))
	{
	  clib_warning ("not synack");
	  error0 = TCP_ERROR_SEGMENT_INVALID;
	  goto drop;
	}

      /* Parse options */
      if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
	{
	  clib_warning ("options parse fail");
	  error0 = TCP_ERROR_OPTIONS;
	  goto drop;
	}
      /* Valid SYN or SYN-ACK. Move connection from half-open pool to
       * current thread pool. */
      pool_get (tm->connections[my_thread_index], new_tc0);
      clib_memcpy_fast (new_tc0, tc0, sizeof (*new_tc0));
      new_tc0->c_c_index = new_tc0 - tm->connections[my_thread_index];
      new_tc0->c_thread_index = my_thread_index;
      new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
      new_tc0->irs = seq0;
      new_tc0->timers[TCP_TIMER_ESTABLISH_AO] = TCP_TIMER_HANDLE_INVALID;
      new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
      new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

      /* If this is not the owning thread, wait for syn retransmit to
       * expire and cleanup then */
      if (tcp_half_open_connection_cleanup (tc0))
	tc0->flags |= TCP_CONN_HALF_OPEN_DONE;

      if (tcp_opts_tstamp (&new_tc0->rcv_opts))
	{
	  new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
	  new_tc0->tsval_recent_age = tcp_time_now ();
	}

      if (tcp_opts_wscale (&new_tc0->rcv_opts))
	new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
      else
	new_tc0->rcv_wscale = 0;

      new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
	<< new_tc0->snd_wscale;
      new_tc0->snd_wl1 = seq0;
      new_tc0->snd_wl2 = ack0;
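      /* Worked example (illustrative values): if the peer advertised a raw
       * window of 0x2000 (8192) together with a window-scale option of 7,
       * then snd_wnd = 8192 << 7 = 1048576 bytes; without a wscale option
       * the shift is 0 and snd_wnd is just the raw 16-bit value. */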
      tcp_connection_init_vars (new_tc0);
      /* SYN-ACK: See if we can switch to ESTABLISHED state */
      if (PREDICT_TRUE (tcp_ack (tcp0)))
	{
	  /* Our SYN is ACKed: we have iss < ack = snd_una */

	  /* TODO Dequeue acknowledged segments if we support Fast Open */
	  new_tc0->snd_una = ack0;
	  new_tc0->state = TCP_STATE_ESTABLISHED;

	  /* Make sure las is initialized for the wnd computation */
	  new_tc0->rcv_las = new_tc0->rcv_nxt;

	  /* Notify app that we have connection. If session layer can't
	   * allocate session send reset */
	  if (session_stream_connect_notify (&new_tc0->connection, 0))
	    {
	      clib_warning ("connect notify fail");
	      tcp_send_reset_w_pkt (new_tc0, b0, my_thread_index, is_ip4);
	      tcp_connection_cleanup (new_tc0);
	      goto drop;
	    }

	  new_tc0->tx_fifo_size =
	    transport_tx_fifo_size (&new_tc0->connection);
	  /* Update rtt with the syn-ack sample */
	  tcp_estimate_initial_rtt (new_tc0);
	  TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
	  error0 = TCP_ERROR_SYN_ACKS_RCVD;
	}
      /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
      else
	{
	  new_tc0->state = TCP_STATE_SYN_RCVD;

	  /* Notify app that we have connection */
	  if (session_stream_connect_notify (&new_tc0->connection, 0))
	    {
	      tcp_connection_cleanup (new_tc0);
	      tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
	      TCP_EVT_DBG (TCP_EVT_RST_SENT, tc0);
	      goto drop;
	    }

	  new_tc0->tx_fifo_size =
	    transport_tx_fifo_size (&new_tc0->connection);
	  new_tc0->rtt_ts = 0;
	  tcp_init_snd_vars (new_tc0);
	  tcp_send_synack (new_tc0);
	  error0 = TCP_ERROR_SYNS_RCVD;
	  goto drop;
	}
      /* Read data, if any */
      if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
	{
	  clib_warning ("rcvd data in syn-sent");
	  error0 = tcp_segment_rcv (wrk, new_tc0, b0);
	  if (error0 == TCP_ERROR_ACK_OK)
	    error0 = TCP_ERROR_SYN_ACKS_RCVD;
	}
      else
	{
	  tcp_program_ack (wrk, new_tc0);
	}
    drop:

      tcp_inc_counter (syn_sent, error0, 1);
      if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
	{
	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	  clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
	  clib_memcpy_fast (&t0->tcp_connection, tc0,
			    sizeof (t0->tcp_connection));
	}
    }

  errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
						 my_thread_index);
  tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
static uword
tcp4_syn_sent (vlib_main_t * vm, vlib_node_runtime_t * node,
	       vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_syn_sent_rcv (vlib_main_t * vm, vlib_node_runtime_t * node,
		   vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
{
  .function = tcp4_syn_sent,
  .name = "tcp4-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_syn_sent_node, tcp4_syn_sent);
VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
{
  .function = tcp6_syn_sent_rcv,
  .name = "tcp6-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv);
vlib_node_registration_t tcp4_rcv_process_node;
vlib_node_registration_t tcp6_rcv_process_node;
/**
 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
 * as per RFC793 p. 64
 */
always_inline uword
tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			  vlib_frame_t * from_frame, int is_ip4)
{
  u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  u32 n_left_from, *from, max_dequeue;

  from = first_buffer = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_NONE;
      tcp_header_t *tcp0 = 0;
      tcp_connection_t *tc0;
      vlib_buffer_t *b0;
      u8 is_fin0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
				thread_index);
      if (PREDICT_FALSE (tc0 == 0))
	{
	  error0 = TCP_ERROR_INVALID_CONNECTION;
	  goto drop;
	}

      tcp0 = tcp_buffer_hdr (b0);
      is_fin0 = tcp_is_fin (tcp0);
      if (CLIB_DEBUG)
	{
	  tcp_connection_t *tmp;
	  tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
				       is_ip4);
	  if (tmp->state != tc0->state)
	    {
	      if (tc0->state != TCP_STATE_CLOSED)
		clib_warning ("state changed");
	      goto drop;
	    }
	}
      /*
       * Special treatment for CLOSED
       */
      if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
	{
	  error0 = TCP_ERROR_CONNECTION_CLOSED;
	  goto drop;
	}

      /*
       * For all other states (except LISTEN)
       */

      /* 1-4: check SEQ, RST, SYN */
      if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, tcp0, &error0)))
	goto drop;

      /* 5: check the ACK field */
      switch (tc0->state)
	{
	case TCP_STATE_SYN_RCVD:
	  /*
	   * If the segment acknowledgment is not acceptable, form a
	   * reset segment,
	   *  <SEQ=SEG.ACK><CTL=RST>
	   * and send it.
	   */
	  if (!tcp_rcv_ack_is_acceptable (tc0, b0))
	    {
	      tcp_connection_reset (tc0);
	      error0 = TCP_ERROR_ACK_INVALID;
	      goto drop;
	    }

	  /* Make sure the ack is exactly right */
	  if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
	    {
	      tcp_connection_reset (tc0);
	      error0 = TCP_ERROR_SEGMENT_INVALID;
	      goto drop;
	    }

	  /* Update rtt and rto */
	  tcp_estimate_initial_rtt (tc0);

	  /* Switch state to ESTABLISHED */
	  tc0->state = TCP_STATE_ESTABLISHED;
	  TCP_EVT_DBG (TCP_EVT_STATE_CHANGE, tc0);

	  /* Initialize session variables */
	  tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
	  tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
	    << tc0->rcv_opts.wscale;
	  tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
	  tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

	  /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
	  tcp_retransmit_timer_reset (tc0);
	  tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH);
	  if (stream_session_accept_notify (&tc0->connection))
	    {
	      error0 = TCP_ERROR_MSG_QUEUE_FULL;
	      tcp_connection_reset (tc0);
	      goto drop;
	    }
	  error0 = TCP_ERROR_ACK_OK;
	  break;
	case TCP_STATE_ESTABLISHED:
	  /* We can get packets in established state here because they
	   * were enqueued before state change */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;
	  break;
	case TCP_STATE_FIN_WAIT_1:
	  /* In addition to the processing for the ESTABLISHED state, if
	   * our FIN is now acknowledged then enter FIN-WAIT-2 and
	   * continue processing in that state. */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;

	  /* Still have to send the FIN */
	  if (tc0->flags & TCP_CONN_FINPNDG)
	    {
	      /* TX fifo finally drained */
	      max_dequeue = session_tx_fifo_max_dequeue (&tc0->connection);
	      if (max_dequeue <= tc0->burst_acked)
		tcp_send_fin (tc0);
	    }
	  /* If FIN is ACKed */
	  else if (tc0->snd_una == tc0->snd_una_max)
	    {
	      tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);

	      /* Stop all retransmit timers because we have nothing more
	       * to send. Enable waitclose though because we're willing to
	       * wait for peer's FIN but not indefinitely. */
	      tcp_connection_timers_reset (tc0);
	      tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);

	      /* Don't try to deq the FIN acked */
	      if (tc0->burst_acked > 1)
		stream_session_dequeue_drop (&tc0->connection,
					     tc0->burst_acked - 1);
	      tc0->burst_acked = 0;
	    }
	  break;
	case TCP_STATE_FIN_WAIT_2:
	  /* In addition to the processing for the ESTABLISHED state, if
	   * the retransmission queue is empty, the user's CLOSE can be
	   * acknowledged ("ok") but do not delete the TCB. */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;
	  tc0->burst_acked = 0;
	  break;
	case TCP_STATE_CLOSE_WAIT:
	  /* Do the same processing as for the ESTABLISHED state. */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;

	  if (tc0->flags & TCP_CONN_FINPNDG)
	    {
	      /* TX fifo finally drained */
	      if (!session_tx_fifo_max_dequeue (&tc0->connection))
		{
		  tcp_send_fin (tc0);
		  tcp_connection_timers_reset (tc0);
		  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
		  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
		}
	    }
	  break;
	case TCP_STATE_CLOSING:
	  /* In addition to the processing for the ESTABLISHED state, if
	   * the ACK acknowledges our FIN then enter the TIME-WAIT state,
	   * otherwise ignore the segment. */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;

	  tcp_connection_timers_reset (tc0);
	  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
	  goto drop;
	case TCP_STATE_LAST_ACK:
	  /* The only thing that [should] arrive in this state is an
	   * acknowledgment of our FIN. If our FIN is now acknowledged,
	   * delete the TCB, enter the CLOSED state, and return. */
	  if (!tcp_rcv_ack_is_acceptable (tc0, b0))
	    {
	      error0 = TCP_ERROR_ACK_INVALID;
	      goto drop;
	    }
	  error0 = TCP_ERROR_ACK_OK;
	  tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
	  /* Apparently our ACK for the peer's FIN was lost */
	  if (is_fin0 && tc0->snd_una != tc0->snd_una_max)
	    {
	      tcp_send_fin (tc0);
	      goto drop;
	    }

	  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);

	  /* Don't free the connection from the data path since
	   * we can't ensure that we have no packets already enqueued
	   * to output. Rely instead on the waitclose timer */
	  tcp_connection_timers_reset (tc0);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);
	  goto drop;
	case TCP_STATE_TIME_WAIT:
	  /* The only thing that can arrive in this state is a
	   * retransmission of the remote FIN. Acknowledge it, and restart
	   * the 2 MSL timeout. */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;

	  tcp_program_ack (wrk, tc0);
	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
	  goto drop;
	}
      /* 6: check the URG bit TODO */

      /* 7: process the segment text */
      switch (tc0->state)
	{
	case TCP_STATE_ESTABLISHED:
	case TCP_STATE_FIN_WAIT_1:
	case TCP_STATE_FIN_WAIT_2:
	  if (vnet_buffer (b0)->tcp.data_len)
	    error0 = tcp_segment_rcv (wrk, tc0, b0);
	  break;
	case TCP_STATE_CLOSE_WAIT:
	case TCP_STATE_CLOSING:
	case TCP_STATE_LAST_ACK:
	case TCP_STATE_TIME_WAIT:
	  /* This should not occur, since a FIN has been received from the
	   * remote side. Ignore the segment text. */
	  break;
	}
      /* 8: check the FIN bit */
      if (!is_fin0)
	goto drop;

      TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);

      switch (tc0->state)
	{
	case TCP_STATE_ESTABLISHED:
	  /* Account for the FIN and send ack */
	  tc0->rcv_nxt += 1;
	  tcp_program_ack (wrk, tc0);
	  tcp_connection_set_state (tc0, TCP_STATE_CLOSE_WAIT);
	  tcp_program_disconnect (wrk, tc0);
	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
	  break;
	case TCP_STATE_SYN_RCVD:
	  /* Send FIN-ACK, enter LAST-ACK and because the app was not
	   * notified yet, set a cleanup timer instead of relying on
	   * disconnect notify and the implicit close call. */
	  tcp_connection_timers_reset (tc0);
	  tc0->rcv_nxt += 1;
	  tcp_send_fin (tc0);
	  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
	  break;
	case TCP_STATE_CLOSE_WAIT:
	case TCP_STATE_CLOSING:
	case TCP_STATE_LAST_ACK:
	  /* move along .. */
	  break;
	case TCP_STATE_FIN_WAIT_1:
	  tc0->rcv_nxt += 1;
	  tcp_connection_set_state (tc0, TCP_STATE_CLOSING);
	  tcp_program_ack (wrk, tc0);
	  /* Wait for ACK but not forever */
	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
	  break;
	case TCP_STATE_FIN_WAIT_2:
	  /* Got FIN, send ACK! Be more aggressive with resource cleanup */
	  tc0->rcv_nxt += 1;
	  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
	  tcp_connection_timers_reset (tc0);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
	  tcp_program_ack (wrk, tc0);
	  break;
	case TCP_STATE_TIME_WAIT:
	  /* Remain in the TIME-WAIT state. Restart the time-wait
	   * timeout. */
	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_TIMEWAIT_TIME);
	  break;
	}
      error0 = TCP_ERROR_FIN_RCVD;
    drop:
      tcp_inc_counter (rcv_process, error0, 1);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	  tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
	}
    }

  errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_TCP,
						 thread_index);
  tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
  tcp_handle_postponed_dequeues (wrk);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
static uword
tcp4_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
{
  .function = tcp4_rcv_process,
  .name = "tcp4-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_rcv_process_node, tcp4_rcv_process);
VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
{
  .function = tcp6_rcv_process,
  .name = "tcp6-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_rcv_process_node, tcp6_rcv_process);
vlib_node_registration_t tcp4_listen_node;
vlib_node_registration_t tcp6_listen_node;
/**
 * LISTEN state processing as per RFC 793 p. 65
 */
always_inline uword
tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, *from, n_syns = 0, *first_buffer;
  u32 my_thread_index = vm->thread_index;

  from = first_buffer = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_NONE;
      tcp_connection_t *lc0, *child0;
      ip4_header_t *ip40;
      ip6_header_t *ip60;
      tcp_header_t *th0 = 0;
      tcp_rx_trace_t *t0;
      vlib_buffer_t *b0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);

      if (is_ip4)
	{
	  ip40 = vlib_buffer_get_current (b0);
	  th0 = ip4_next_header (ip40);
	}
      else
	{
	  ip60 = vlib_buffer_get_current (b0);
	  th0 = ip6_next_header (ip60);
	}
      /* Create child session. For syn-flood protection use filter */

      /* 1. first check for an RST: handled in dispatch */
      /* if (tcp_rst (th0))
         goto drop;
       */

      /* 2. second check for an ACK: handled in dispatch */
      /* if (tcp_ack (th0))
         {
         tcp_send_reset (b0, is_ip4);
         goto drop;
         }
       */

      /* 3. check for a SYN (did that already) */
      /* Make sure connection wasn't just created */
      child0 = tcp_lookup_connection (lc0->c_fib_index, b0, my_thread_index,
				      is_ip4);
      if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
	{
	  error0 = TCP_ERROR_CREATE_EXISTS;
	  goto drop;
	}
      /* Create child session and send SYN-ACK */
      child0 = tcp_connection_alloc (my_thread_index);
      child0->c_lcl_port = th0->dst_port;
      child0->c_rmt_port = th0->src_port;
      child0->c_is_ip4 = is_ip4;
      child0->state = TCP_STATE_SYN_RCVD;
      child0->c_fib_index = lc0->c_fib_index;

      if (is_ip4)
	{
	  child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
	  child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
	}
      else
	{
	  clib_memcpy_fast (&child0->c_lcl_ip6, &ip60->dst_address,
			    sizeof (ip6_address_t));
	  clib_memcpy_fast (&child0->c_rmt_ip6, &ip60->src_address,
			    sizeof (ip6_address_t));
	}
      if (tcp_options_parse (th0, &child0->rcv_opts, 1))
	{
	  error0 = TCP_ERROR_OPTIONS;
	  tcp_connection_free (child0);
	  goto drop;
	}

      child0->irs = vnet_buffer (b0)->tcp.seq_number;
      child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
      child0->rcv_las = child0->rcv_nxt;
      child0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

      /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
       * segments are used to initialize PAWS. */
      if (tcp_opts_tstamp (&child0->rcv_opts))
	{
	  child0->tsval_recent = child0->rcv_opts.tsval;
	  child0->tsval_recent_age = tcp_time_now ();
	}

      if (tcp_opts_wscale (&child0->rcv_opts))
	child0->snd_wscale = child0->rcv_opts.wscale;

      child0->snd_wnd = clib_net_to_host_u16 (th0->window)
	<< child0->snd_wscale;
      child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
      child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

      tcp_connection_init_vars (child0);
      child0->rto = TCP_RTO_MIN;
      TCP_EVT_DBG (TCP_EVT_SYN_RCVD, child0, 1);

      if (stream_session_accept (&child0->connection, lc0->c_s_index,
				 0 /* notify */ ))
	{
	  tcp_connection_cleanup (child0);
	  error0 = TCP_ERROR_CREATE_SESSION_FAIL;
	  goto drop;
	}

      child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
      tcp_send_synack (child0);
      tcp_timer_set (child0, TCP_TIMER_ESTABLISH, TCP_SYN_RCVD_TIME);
    drop:

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
	  clib_memcpy_fast (&t0->tcp_connection, lc0,
			    sizeof (t0->tcp_connection));
	}

      n_syns += (error0 == TCP_ERROR_NONE);
    }

  tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
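/* Note on the accounting above (illustrative): n_syns only counts buffers
 * that actually produced a child connection (error0 == TCP_ERROR_NONE), so
 * the SYNS_RCVD counter is bumped once per frame with the aggregate rather
 * than once per packet. */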
static uword
tcp4_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
	     vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
	     vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_listen_node) =
{
  .function = tcp4_listen,
  .name = "tcp4-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_listen_node, tcp4_listen);
VLIB_REGISTER_NODE (tcp6_listen_node) =
{
  .function = tcp6_listen,
  .name = "tcp6-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_listen_node, tcp6_listen);
vlib_node_registration_t tcp4_input_node;
vlib_node_registration_t tcp6_input_node;

typedef enum _tcp_input_next
{
  TCP_INPUT_NEXT_DROP,
  TCP_INPUT_NEXT_LISTEN,
  TCP_INPUT_NEXT_RCV_PROCESS,
  TCP_INPUT_NEXT_SYN_SENT,
  TCP_INPUT_NEXT_ESTABLISHED,
  TCP_INPUT_NEXT_RESET,
  TCP_INPUT_NEXT_PUNT,
  TCP_INPUT_N_NEXT
} tcp_input_next_t;
#define foreach_tcp4_input_next                 \
  _ (DROP, "ip4-drop")                          \
  _ (LISTEN, "tcp4-listen")                     \
  _ (RCV_PROCESS, "tcp4-rcv-process")           \
  _ (SYN_SENT, "tcp4-syn-sent")                 \
  _ (ESTABLISHED, "tcp4-established")           \
  _ (RESET, "tcp4-reset")                       \
  _ (PUNT, "ip4-punt")

#define foreach_tcp6_input_next                 \
  _ (DROP, "ip6-drop")                          \
  _ (LISTEN, "tcp6-listen")                     \
  _ (RCV_PROCESS, "tcp6-rcv-process")           \
  _ (SYN_SENT, "tcp6-syn-sent")                 \
  _ (ESTABLISHED, "tcp6-established")           \
  _ (RESET, "tcp6-reset")                       \
  _ (PUNT, "ip6-punt")
#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
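/* Dispatch only looks at the flags above; e.g. a segment arriving with
 * ACK|PSH set is masked down to ACK and uses the same dispatch_table entry
 * as a plain ACK, so PSH/URG/ECE/CWR never influence next-node selection
 * (an illustrative consequence of the mask, not an extra check). */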
static void
tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
{
  tcp_connection_t *tc;
  tcp_header_t *tcp;
  tcp_rx_trace_t *t;
  int i;

  for (i = 0; i < n_bufs; i++)
    {
      if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
	  tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
				   vm->thread_index);
	  tcp = vlib_buffer_get_current (bs[i]);
	  tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
	}
    }
}
static void
tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
{
  if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
    {
      *next = TCP_INPUT_NEXT_DROP;
    }
  else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
    {
      *next = TCP_INPUT_NEXT_PUNT;
      *error = TCP_ERROR_PUNT;
    }
  else
    {
      *next = TCP_INPUT_NEXT_RESET;
      *error = TCP_ERROR_NO_LISTENER;
    }
}
static inline tcp_connection_t *
tcp_input_lookup_buffer (vlib_buffer_t * b, u8 thread_index, u32 * error,
			 int is_ip4)
{
  u32 fib_index = vnet_buffer (b)->ip.fib_index;
  int n_advance_bytes, n_data_bytes;
  transport_connection_t *tc;
  tcp_header_t *tcp;
  u8 result = 0;

  if (is_ip4)
    {
      ip4_header_t *ip4 = vlib_buffer_get_current (b);
      int ip_hdr_bytes = ip4_header_bytes (ip4);
      if (PREDICT_FALSE (b->current_length < ip_hdr_bytes + sizeof (*tcp)))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}
      tcp = ip4_next_header (ip4);
      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
      n_advance_bytes = (ip_hdr_bytes + tcp_header_bytes (tcp));
      n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;

      /* Length check. Checksum computed by ipx_local no need to compute
       * again */
      if (PREDICT_FALSE (n_data_bytes < 0))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}

      tc = session_lookup_connection_wt4 (fib_index, &ip4->dst_address,
					  &ip4->src_address, tcp->dst_port,
					  tcp->src_port, TRANSPORT_PROTO_TCP,
					  thread_index, &result);
    }
  else
    {
      ip6_header_t *ip6 = vlib_buffer_get_current (b);
      if (PREDICT_FALSE (b->current_length < sizeof (*ip6) + sizeof (*tcp)))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}
      tcp = ip6_next_header (ip6);
      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
      n_advance_bytes = tcp_header_bytes (tcp);
      n_data_bytes = clib_net_to_host_u16 (ip6->payload_length)
	- n_advance_bytes;
      n_advance_bytes += sizeof (ip6[0]);

      if (PREDICT_FALSE (n_data_bytes < 0))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}

      tc = session_lookup_connection_wt6 (fib_index, &ip6->dst_address,
					  &ip6->src_address, tcp->dst_port,
					  tcp->src_port, TRANSPORT_PROTO_TCP,
					  thread_index, &result);
    }

  vnet_buffer (b)->tcp.seq_number = clib_net_to_host_u32 (tcp->seq_number);
  vnet_buffer (b)->tcp.ack_number = clib_net_to_host_u32 (tcp->ack_number);
  vnet_buffer (b)->tcp.data_offset = n_advance_bytes;
  vnet_buffer (b)->tcp.data_len = n_data_bytes;
  vnet_buffer (b)->tcp.seq_end = vnet_buffer (b)->tcp.seq_number
    + tcp_is_syn (tcp) + tcp_is_fin (tcp) + n_data_bytes;
  vnet_buffer (b)->tcp.flags = 0;

  *error = result ? TCP_ERROR_NONE + result : *error;

  return tcp_get_connection_from_transport (tc);
}
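/* Worked example (illustrative): an IPv4 packet with a 20-byte IP header,
 * a 20-byte TCP header and ip4->length == 140 gives n_advance_bytes = 40
 * and n_data_bytes = 100; with SYN and FIN clear, tcp.seq_end is then
 * tcp.seq_number + 100. */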
static inline void
tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
			   vlib_buffer_t * b, u16 * next, u32 * error)
{
  tcp_header_t *tcp;
  u8 flags;

  tcp = tcp_buffer_hdr (b);
  flags = tcp->flags & filter_flags;
  *next = tm->dispatch_table[tc->state][flags].next;
  *error = tm->dispatch_table[tc->state][flags].error;

  if (PREDICT_FALSE (*error == TCP_ERROR_DISPATCH
		     || *next == TCP_INPUT_NEXT_RESET))
    {
      /* Overload tcp flags to store state */
      tcp_state_t state = tc->state;
      vnet_buffer (b)->tcp.flags = tc->state;

      if (*error == TCP_ERROR_DISPATCH)
	clib_warning ("tcp conn %u disp error state %U flags %U",
		      tc->c_c_index, format_tcp_state, state,
		      format_tcp_flags, (int) flags);
    }
}
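/* Example (illustrative): a connection in ESTABLISHED receiving a segment
 * whose masked flags equal TCP_FLAG_ACK resolves through
 * tm->dispatch_table[TCP_STATE_ESTABLISHED][TCP_FLAG_ACK], i.e.
 * *next = TCP_INPUT_NEXT_ESTABLISHED and *error = TCP_ERROR_NONE, as
 * initialized by tcp_dispatch_table_init() below. */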
always_inline uword
tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, int is_ip4)
{
  u32 n_left_from, *from, thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  tcp_set_time_now (tcp_get_worker (thread_index));

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  b = bufs;
  next = nexts;

  while (n_left_from >= 4)
    {
      u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
      tcp_connection_t *tc0, *tc1;

      {
	vlib_prefetch_buffer_header (b[2], STORE);
	CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);

	vlib_prefetch_buffer_header (b[3], STORE);
	CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      }

      next[0] = next[1] = TCP_INPUT_NEXT_DROP;

      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4);
      tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4);

      if (PREDICT_TRUE (!tc0 + !tc1 == 0))
	{
	  ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
	  ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));

	  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
	  vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;

	  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
	  tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
	}
      else
	{
	  if (PREDICT_TRUE (tc0 != 0))
	    {
	      ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
	      vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
	      tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
	    }
	  else
	    tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);

	  if (PREDICT_TRUE (tc1 != 0))
	    {
	      ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
	      vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
	      tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
	    }
	  else
	    tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
	}

      b += 2;
      next += 2;
      n_left_from -= 2;
    }
  while (n_left_from > 0)
    {
      tcp_connection_t *tc0;
      u32 error0 = TCP_ERROR_NO_LISTENER;

      if (n_left_from > 1)
	{
	  vlib_prefetch_buffer_header (b[1], STORE);
	  CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	}

      next[0] = TCP_INPUT_NEXT_DROP;
      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4);
      if (PREDICT_TRUE (tc0 != 0))
	{
	  ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
	  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
	  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
	}
      else
	tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
static uword
tcp4_input (vlib_main_t * vm, vlib_node_runtime_t * node,
	    vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

static uword
tcp6_input (vlib_main_t * vm, vlib_node_runtime_t * node,
	    vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  .function = tcp4_input,
  .name = "tcp4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_input_node, tcp4_input);
VLIB_REGISTER_NODE (tcp6_input_node) =
{
  .function = tcp6_input,
  .name = "tcp6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input);
static void
tcp_dispatch_table_init (tcp_main_t * tm)
{
  int i, j;
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
	tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
	tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)						\
do {								\
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);		\
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);		\
} while (0)
  /* RFC 793: In LISTEN if RST drop and if ACK return RST */
  _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
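  /* Net effect of the LISTEN rows above (summary): only a pure SYN advances
   * to tcp-listen; segments carrying an ACK or a bare FIN are answered with
   * a reset, and anything carrying an RST is dropped. */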
  /* ACK for a SYN-ACK -> tcp-rcv-process. */
  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* SYN-ACK for a SYN */
  _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  /* ACK for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* FIN for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* ACK or FIN-ACK to our FIN */
  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  /* FIN in reply to our FIN from the other side */
  _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* FIN confirming that the peer (app) has closed */
  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
   * incoming segment not containing a RST causes a RST to be sent in
   * response. */
  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_NONE);
#undef _
}
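/* Sanity example (illustrative): once tcp_dispatch_table_init() has run,
 * tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].next is
 * TCP_INPUT_NEXT_LISTEN, while any flag combination not listed above keeps
 * the defaults TCP_INPUT_NEXT_DROP / TCP_ERROR_DISPATCH set by the init
 * loop. */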
static clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  if ((error = vlib_call_init_function (vm, tcp_init)))
    return error;

  /* Initialize dispatch table. */
  tcp_dispatch_table_init (tm);

  return error;
}

VLIB_INIT_FUNCTION (tcp_input_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */