/*
 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/sparse_vec.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>
static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};
/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next                                  \
  _ (DROP4, "ip4-drop")                                         \
  _ (DROP6, "ip6-drop")                                         \
  _ (TCP4_OUTPUT, "tcp4-output")                                \
  _ (TCP6_OUTPUT, "tcp6-output")
typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;
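
/* For reference, the X-macro above expands this enum to:
 *   TCP_ESTABLISHED_NEXT_DROP4, TCP_ESTABLISHED_NEXT_DROP6,
 *   TCP_ESTABLISHED_NEXT_TCP4_OUTPUT, TCP_ESTABLISHED_NEXT_TCP6_OUTPUT,
 *   TCP_ESTABLISHED_N_NEXT
 * The per-state enums below follow the same pattern. */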
typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;
typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_SYN_SENT_N_NEXT,
} tcp_syn_sent_next_t;
typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_LISTEN_N_NEXT,
} tcp_listen_next_t;
/* Generic, state independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
  TCP_N_NEXT,
} tcp_state_next_t;
#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT         \
                                        : TCP_NEXT_TCP6_OUTPUT)

#define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4                 \
                                      : TCP_NEXT_DROP6)
/**
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 *      Length  Window
 *      ------- -------  -------------------------------------------
 *      0       0       SEG.SEQ = RCV.NXT
 *      0       >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *      >0      0       Not acceptable
 *      >0      >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                      or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately consists in checking if the segment falls within the
 * window. The one important difference compared to RFC793 is that we use
 * rcv_las, i.e., the rcv_nxt at the last ack sent, instead of rcv_nxt,
 * since that's the peer's reference when computing our receive window.
 *
 * This:
 *  seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * use rcv_nxt in the right edge window test instead of rcv_las.
 */
static u8
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
{
  return (seq_geq (end_seq, tc->rcv_las)
	  && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
}
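
/* Worked example (illustrative): with rcv_las = 1000, rcv_nxt = 1300 and
 * rcv_wnd = 500, a retransmitted segment [900, 1100) is accepted since its
 * end (1100) is not below the left edge (1000) and its start (900) is not
 * beyond the right edge (1300 + 500). */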
/**
 * Parse TCP header options.
 *
 * @param th TCP header
 * @param to TCP options data structure to be populated
 * @param is_syn set if packet is a syn
 * @return -1 if parsing failed
 */
125 tcp_options_parse (tcp_header_t * th, tcp_options_t * to, u8 is_syn)
128 u8 opt_len, opts_len, kind;
132 opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
133 data = (const u8 *) (th + 1);
  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
		| TCP_OPTS_FLAG_TSTAMP | TCP_OPTS_FLAG_MSS);
139 for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
143 /* Get options length */
144 if (kind == TCP_OPTION_EOL)
146 else if (kind == TCP_OPTION_NOOP)
	  /* weird option length */
	  if (opt_len < 2 || opt_len > opts_len)
	    return -1;
169 if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
171 to->flags |= TCP_OPTS_FLAG_MSS;
172 to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
175 case TCP_OPTION_WINDOW_SCALE:
178 if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
180 to->flags |= TCP_OPTS_FLAG_WSCALE;
181 to->wscale = data[2];
182 if (to->wscale > TCP_MAX_WND_SCALE)
183 to->wscale = TCP_MAX_WND_SCALE;
	case TCP_OPTION_TIMESTAMP:
	  if (is_syn)
	    to->flags |= TCP_OPTS_FLAG_TSTAMP;
	  if ((to->flags & TCP_OPTS_FLAG_TSTAMP)
	      && opt_len == TCP_OPTION_LEN_TIMESTAMP)
192 to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
193 to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
196 case TCP_OPTION_SACK_PERMITTED:
199 if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
200 to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
202 case TCP_OPTION_SACK_BLOCK:
203 /* If SACK permitted was not advertised or a SYN, break */
204 if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
207 /* If too short or not correctly formatted, break */
208 if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
211 to->flags |= TCP_OPTS_FLAG_SACK;
212 to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
213 vec_reset_length (to->sacks);
214 for (j = 0; j < to->n_sack_blocks; j++)
216 b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
217 b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
218 vec_add1 (to->sacks, b);
222 /* Nothing to see here */
/**
 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have a
 * timestamp to echo and it's less than tsval_recent, drop the segment,
 * but still send an ACK in order to retain TCP's mechanism for detecting
 * and recovering from half-open connections.
 *
 * Or at least that's what the theory says. It seems that this might not work
 * very well with packet reordering and fast retransmit. XXX
 */
always_inline int
tcp_segment_check_paws (tcp_connection_t * tc)
{
  return tcp_opts_tstamp (&tc->rcv_opts)
    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
}
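
/* Illustrative example: if tsval_recent is 5000 and a non-RST segment
 * echoes tsval 4990, PAWS fails and the segment is dropped (after an ack
 * is scheduled by the caller); timestamp_lt() handles 32-bit wrap. */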
/**
 * Update tsval_recent
 */
static void
249 tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
252 * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
253 * of an incoming segment:
254 * SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
255 * then the TSval from the segment is copied to TS.Recent;
256 * otherwise, the TSval is ignored.
258 if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
259 && seq_leq (tc->rcv_las, seq_end))
261 ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
262 tc->tsval_recent = tc->rcv_opts.tsval;
263 tc->tsval_recent_age = tcp_time_now_w_thread (tc->c_thread_index);
/**
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if the segment has a wrapped sequence number (PAWS) and
 * then does the processing associated with the first four steps (ignoring
 * security and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
 */
static int
277 tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
278 vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
280 /* We could get a burst of RSTs interleaved with acks */
281 if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
283 tcp_send_reset (tc0);
284 *error0 = TCP_ERROR_CONNECTION_CLOSED;
288 if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
290 *error0 = TCP_ERROR_SEGMENT_INVALID;
294 if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
296 *error0 = TCP_ERROR_OPTIONS;
300 if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
302 *error0 = TCP_ERROR_PAWS;
303 TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
304 vnet_buffer (b0)->tcp.seq_end);
	  /* If tsval_recent is over 24 days old, it is stale, so invalidate
	   * it and accept the segment's tsval (RFC7323 outdated timestamps). */
308 if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
309 tcp_time_now_w_thread (tc0->c_thread_index)))
311 tc0->tsval_recent = tc0->rcv_opts.tsval;
312 clib_warning ("paws failed: 24-day old segment");
	  /* Drop after ack if not rst. Resets can fail paws check as per
	   * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
	   * be subjected to the PAWS check by verifying an acceptable value in
	   * SEG.TSval */
	  else if (!tcp_rst (th0))
320 tcp_program_ack (tc0);
321 TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
326 /* 1st: check sequence number */
327 if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
328 vnet_buffer (b0)->tcp.seq_end))
330 /* SYN/SYN-ACK retransmit */
332 && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
334 tcp_options_parse (th0, &tc0->rcv_opts, 1);
335 if (tc0->state == TCP_STATE_SYN_RCVD)
337 tcp_send_synack (tc0);
338 TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
339 *error0 = TCP_ERROR_SYNS_RCVD;
343 tcp_program_ack (tc0);
344 TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
345 *error0 = TCP_ERROR_SYN_ACKS_RCVD;
350 /* If our window is 0 and the packet is in sequence, let it pass
351 * through for ack processing. It should be dropped later. */
352 if (tc0->rcv_wnd < tc0->snd_mss
353 && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
356 /* If we entered recovery and peer did so as well, there's a chance that
357 * dup acks won't be acceptable on either end because seq_end may be less
358 * than rcv_las. This can happen if acks are lost in both directions. */
359 if (tcp_in_recovery (tc0)
360 && seq_geq (vnet_buffer (b0)->tcp.seq_number,
361 tc0->rcv_las - tc0->rcv_wnd)
362 && seq_leq (vnet_buffer (b0)->tcp.seq_end,
363 tc0->rcv_nxt + tc0->rcv_wnd))
366 *error0 = TCP_ERROR_RCV_WND;
368 /* If we advertised a zero rcv_wnd and the segment is in the past or the
369 * next one that we expect, it is probably a window probe */
370 if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
371 && seq_lt (vnet_buffer (b0)->tcp.seq_end,
372 tc0->rcv_las + tc0->rcv_opts.mss))
373 *error0 = TCP_ERROR_ZERO_RWND;
375 tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
378 /* If not RST, send dup ack */
381 tcp_program_dupack (tc0);
382 TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
390 /* 2nd: check the RST bit */
391 if (PREDICT_FALSE (tcp_rst (th0)))
393 tcp_connection_reset (tc0);
394 *error0 = TCP_ERROR_RST_RCVD;
398 /* 3rd: check security and precedence (skip) */
400 /* 4th: check the SYN bit (in window) */
401 if (PREDICT_FALSE (tcp_syn (th0)))
403 /* As per RFC5961 send challenge ack instead of reset */
404 tcp_program_ack (tc0);
405 *error0 = TCP_ERROR_SPURIOUS_SYN;
409 /* If segment in window, save timestamp */
410 tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
411 vnet_buffer (b0)->tcp.seq_end);
419 tcp_rcv_ack_no_cc (tcp_connection_t * tc, vlib_buffer_t * b, u32 * error)
421 /* SND.UNA =< SEG.ACK =< SND.NXT */
422 if (!(seq_leq (tc->snd_una, vnet_buffer (b)->tcp.ack_number)
423 && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
425 if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
426 && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
428 tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
431 *error = TCP_ERROR_ACK_INVALID;
436 tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
437 tc->snd_una = vnet_buffer (b)->tcp.ack_number;
438 *error = TCP_ERROR_ACK_OK;
/**
 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't scale them. Instead, we
 * rely on better precision time measurements.
 *
 * TODO support us rtt resolution
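 *
 * On each valid measurement (RFC6298):
 *   err     = mrtt - srtt
 *   srtt   += err / 8
 *   rttvar += (|err| - rttvar) / 4
 * and tcp_update_rto() below arms the timer with
 * rto = clamp (srtt + 4 * rttvar, TCP_RTO_MIN, TCP_RTO_MAX).
 */
static void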
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
{
  int err, diff;
  if (tc->srtt != 0)
    {
      err = mrtt - tc->srtt;
      /* XXX A drop in RTT results in a RTTVAR increase and bigger RTO.
       * The increase should be bounded */
      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
    }
  else
    {
      /* First measurement. */
      tc->srtt = mrtt;
      tc->rttvar = mrtt >> 1;
    }
}
474 #ifndef CLIB_MARCH_VARIANT
476 tcp_update_rto (tcp_connection_t * tc)
478 tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
479 tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
481 #endif /* CLIB_MARCH_VARIANT */
484 * Update RTT estimate and RTO timer
486 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
487 * timing. Middle boxes are known to fiddle with TCP options so we
488 * should give higher priority to ACK timing.
490 * This should be called only if previously sent bytes have been acked.
 * @return 1 if valid rtt, 0 otherwise
 */
static int
495 tcp_update_rtt (tcp_connection_t * tc, u32 ack)
499 /* Karn's rule, part 1. Don't use retransmitted segments to estimate
500 * RTT because they're ambiguous. */
501 if (tcp_in_cong_recovery (tc) || tc->sack_sb.sacked_bytes)
503 if (tcp_in_recovery (tc))
508 if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
510 f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
511 tc->mrtt_us = tc->mrtt_us + (sample - tc->mrtt_us) * 0.125;
512 mrtt = clib_max ((u32) (sample * THZ), 1);
513 /* Allow measuring of a new RTT */
516 /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
517 * snd_una, i.e., the left side of the send window:
518 * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
519 else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
521 u32 now = tcp_tstamp (tc);
522 mrtt = clib_max (now - tc->rcv_opts.tsecr, 1);
525 /* Ignore dubious measurements */
526 if (mrtt == 0 || mrtt > TCP_RTT_MAX)
529 tcp_estimate_rtt (tc, mrtt);
  /* If we got here something must've been ACKed so make sure boff is 0,
   * even if mrtt is not valid, since we update the rto below */
542 tcp_estimate_initial_rtt (tcp_connection_t * tc)
544 u8 thread_index = vlib_num_workers ()? 1 : 0;
549 tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
550 tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
551 mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
556 mrtt = tcp_time_now_w_thread (thread_index) - tc->rcv_opts.tsecr;
557 mrtt = clib_max (mrtt, 1);
      /* Due to retransmits we don't know the initial mrtt */
      if (tc->rto_boff && mrtt > 1 * THZ)
	mrtt = 0;
      tc->mrtt_us = (f64) mrtt * TCP_TICK;
564 if (mrtt > 0 && mrtt < TCP_RTT_MAX)
565 tcp_estimate_rtt (tc, mrtt);
/**
 * Dequeue bytes for connections that have received acks in the last burst
 */
static void
573 tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
575 u32 thread_index = wrk->vm->thread_index;
576 u32 *pending_deq_acked;
577 tcp_connection_t *tc;
  if (!vec_len (wrk->pending_deq_acked))
    return;
583 pending_deq_acked = wrk->pending_deq_acked;
584 for (i = 0; i < vec_len (pending_deq_acked); i++)
586 tc = tcp_connection_get (pending_deq_acked[i], thread_index);
587 tc->flags &= ~TCP_CONN_DEQ_PENDING;
      if (PREDICT_FALSE (!tc->burst_acked))
	continue;
592 /* Dequeue the newly ACKed bytes */
593 session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
595 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
597 if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
599 if (seq_leq (tc->psh_seq, tc->snd_una))
600 tc->flags &= ~TCP_CONN_PSH_PENDING;
603 /* If everything has been acked, stop retransmit timer
604 * otherwise update. */
605 tcp_retransmit_timer_update (tc);
      /* If not congested, update pacer based on our new
       * cwnd estimate */
609 if (!tcp_in_fastrecovery (tc))
610 tcp_connection_tx_pacer_update (tc);
612 _vec_len (wrk->pending_deq_acked) = 0;
616 tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
618 if (!(tc->flags & TCP_CONN_DEQ_PENDING))
620 vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
621 tc->flags |= TCP_CONN_DEQ_PENDING;
623 tc->burst_acked += tc->bytes_acked + tc->sack_sb.snd_una_adv;
/**
 * Check if ack is a duplicate as per RFC5681 Sec. 2
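 *
 * An ACK is a duplicate when it acks nothing new (ack == snd_una), there
 * is still unacked data in flight, the segment carries no payload and the
 * advertised window is unchanged -- the four conditions tested below.
 */
static u8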
tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
		   u32 prev_snd_una)
{
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
	  && seq_gt (tc->snd_nxt, tc->snd_una)
	  && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
	  && (prev_snd_wnd == tc->snd_wnd));
}
640 * Checks if ack is a congestion control event.
643 tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
644 u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
646 /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
647 * defined to be 'duplicate' */
648 *is_dack = tc->sack_sb.last_sacked_bytes
649 || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
651 return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc));
654 #ifndef CLIB_MARCH_VARIANT
656 scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
658 ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes));
659 return hole - sb->holes;
663 scoreboard_hole_bytes (sack_scoreboard_hole_t * hole)
665 return hole->end - hole->start;
668 sack_scoreboard_hole_t *
669 scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
671 if (index != TCP_INVALID_SACK_HOLE_INDEX)
672 return pool_elt_at_index (sb->holes, index);
676 sack_scoreboard_hole_t *
677 scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
679 if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
680 return pool_elt_at_index (sb->holes, hole->next);
684 sack_scoreboard_hole_t *
685 scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
687 if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
688 return pool_elt_at_index (sb->holes, hole->prev);
692 sack_scoreboard_hole_t *
693 scoreboard_first_hole (sack_scoreboard_t * sb)
695 if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
696 return pool_elt_at_index (sb->holes, sb->head);
700 sack_scoreboard_hole_t *
701 scoreboard_last_hole (sack_scoreboard_t * sb)
703 if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX)
704 return pool_elt_at_index (sb->holes, sb->tail);
709 scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
711 sack_scoreboard_hole_t *next, *prev;
713 if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
715 next = pool_elt_at_index (sb->holes, hole->next);
716 next->prev = hole->prev;
720 sb->tail = hole->prev;
723 if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
725 prev = pool_elt_at_index (sb->holes, hole->prev);
726 prev->next = hole->next;
730 sb->head = hole->next;
733 if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
734 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
  /* Poison the entry */
  if (CLIB_DEBUG > 0)
    clib_memset (hole, 0xfe, sizeof (*hole));
740 pool_put (sb->holes, hole);
743 static sack_scoreboard_hole_t *
744 scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
747 sack_scoreboard_hole_t *hole, *next, *prev;
750 pool_get (sb->holes, hole);
751 clib_memset (hole, 0, sizeof (*hole));
755 hole_index = scoreboard_hole_index (sb, hole);
757 prev = scoreboard_get_hole (sb, prev_index);
760 hole->prev = prev_index;
761 hole->next = prev->next;
763 if ((next = scoreboard_next_hole (sb, hole)))
764 next->prev = hole_index;
766 sb->tail = hole_index;
768 prev->next = hole_index;
772 sb->head = hole_index;
773 hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
774 hole->next = TCP_INVALID_SACK_HOLE_INDEX;
779 #endif /* CLIB_MARCH_VARIANT */
781 #ifndef CLIB_MARCH_VARIANT
783 scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
785 sack_scoreboard_hole_t *left, *right;
786 u32 bytes = 0, blks = 0;
788 sb->last_lost_bytes = 0;
790 sb->sacked_bytes = 0;
791 left = scoreboard_last_hole (sb);
795 if (seq_gt (sb->high_sacked, left->end))
797 bytes = sb->high_sacked - left->end;
801 while ((right = left)
802 && bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
803 && blks < TCP_DUPACK_THRESHOLD
804 /* left not updated if above conditions fail */
805 && (left = scoreboard_prev_hole (sb, right)))
807 bytes += right->start - left->end;
811 /* left is first lost */
816 sb->lost_bytes += scoreboard_hole_bytes (right);
817 sb->last_lost_bytes += left->is_lost ? 0 : left->end - left->start;
819 left = scoreboard_prev_hole (sb, right);
821 bytes += right->start - left->end;
823 while ((right = left));
826 sb->sacked_bytes = bytes;
830 * Figure out the next hole to retransmit
832 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
834 sack_scoreboard_hole_t *
835 scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
836 sack_scoreboard_hole_t * start,
837 u8 have_unsent, u8 * can_rescue, u8 * snd_limited)
839 sack_scoreboard_hole_t *hole = 0;
841 hole = start ? start : scoreboard_first_hole (sb);
842 while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
843 hole = scoreboard_next_hole (sb, hole);
845 /* Nothing, return */
848 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
852 /* Rule (1): if higher than rxt, less than high_sacked and lost */
853 if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
855 sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
859 /* Rule (2): available unsent data */
862 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
865 /* Rule (3): if hole not lost */
866 else if (seq_lt (hole->start, sb->high_sacked))
869 sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
871 /* Rule (4): if hole beyond high_sacked */
874 ASSERT (seq_geq (hole->start, sb->high_sacked));
877 /* HighRxt MUST NOT be updated */
882 if (hole && seq_lt (sb->high_rxt, hole->start))
883 sb->high_rxt = hole->start;
887 #endif /* CLIB_MARCH_VARIANT */
890 scoreboard_init_high_rxt (sack_scoreboard_t * sb, u32 snd_una)
892 sack_scoreboard_hole_t *hole;
893 hole = scoreboard_first_hole (sb);
896 snd_una = seq_gt (snd_una, hole->start) ? snd_una : hole->start;
897 sb->cur_rxt_hole = sb->head;
899 sb->high_rxt = snd_una;
900 sb->rescue_rxt = snd_una - 1;
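
/* Note: rescue_rxt starts just below snd_una so that the rescue
 * retransmit logic in scoreboard_next_rxt_hole() can trigger at most once
 * per recovery episode. */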
903 #ifndef CLIB_MARCH_VARIANT
905 scoreboard_init (sack_scoreboard_t * sb)
907 sb->head = TCP_INVALID_SACK_HOLE_INDEX;
908 sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
909 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
913 scoreboard_clear (sack_scoreboard_t * sb)
915 sack_scoreboard_hole_t *hole;
916 while ((hole = scoreboard_first_hole (sb)))
918 scoreboard_remove_hole (sb, hole);
920 ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX);
921 ASSERT (pool_elts (sb->holes) == 0);
922 sb->sacked_bytes = 0;
923 sb->last_sacked_bytes = 0;
924 sb->last_bytes_delivered = 0;
929 sb->last_lost_bytes = 0;
930 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
932 #endif /* CLIB_MARCH_VARIANT */
/**
 * Test that scoreboard is sane after recovery
 *
 * Returns 1 if scoreboard is empty or if first hole beyond
 * snd_una.
 */
static u8
941 tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
943 sack_scoreboard_hole_t *hole;
944 hole = scoreboard_first_hole (&tc->sack_sb);
945 return (!hole || (seq_geq (hole->start, tc->snd_una)
946 && seq_lt (hole->end, tc->snd_nxt)));
949 #ifndef CLIB_MARCH_VARIANT
952 tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
954 sack_scoreboard_hole_t *hole, *next_hole, *last_hole;
955 u32 blk_index = 0, old_sacked_bytes, hole_index;
956 sack_scoreboard_t *sb = &tc->sack_sb;
957 sack_block_t *blk, tmp;
960 sb->last_sacked_bytes = 0;
961 sb->last_bytes_delivered = 0;
964 if (!tcp_opts_sack (&tc->rcv_opts)
965 && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
968 old_sacked_bytes = sb->sacked_bytes;
970 /* Remove invalid blocks */
971 blk = tc->rcv_opts.sacks;
972 while (blk < vec_end (tc->rcv_opts.sacks))
974 if (seq_lt (blk->start, blk->end)
975 && seq_gt (blk->start, tc->snd_una)
976 && seq_gt (blk->start, ack)
977 && seq_lt (blk->start, tc->snd_nxt)
978 && seq_leq (blk->end, tc->snd_nxt))
983 vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
986 /* Add block for cumulative ack */
987 if (seq_gt (ack, tc->snd_una))
989 tmp.start = tc->snd_una;
991 vec_add1 (tc->rcv_opts.sacks, tmp);
994 if (vec_len (tc->rcv_opts.sacks) == 0)
997 tcp_scoreboard_trace_add (tc, ack);
999 /* Make sure blocks are ordered */
1000 for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
1001 for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
1002 if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
1004 tmp = tc->rcv_opts.sacks[i];
1005 tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
1006 tc->rcv_opts.sacks[j] = tmp;
1009 if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
1011 /* If no holes, insert the first that covers all outstanding bytes */
1012 last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
1013 tc->snd_una, tc->snd_nxt);
1014 sb->tail = scoreboard_hole_index (sb, last_hole);
1015 tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
1016 sb->high_sacked = tmp.end;
      /* If we have holes but snd_nxt is beyond the last hole, update
       * last hole end */
1022 tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
1023 last_hole = scoreboard_last_hole (sb);
1024 if (seq_gt (tc->snd_nxt, last_hole->end))
1026 if (seq_geq (last_hole->start, sb->high_sacked))
1028 last_hole->end = tc->snd_nxt;
1030 /* New hole after high sacked block */
1031 else if (seq_lt (sb->high_sacked, tc->snd_nxt))
1033 scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
	  /* Keep track of max byte sacked for when the last hole
	   * is removed */
1039 if (seq_gt (tmp.end, sb->high_sacked))
1040 sb->high_sacked = tmp.end;
1043 /* Walk the holes with the SACK blocks */
1044 hole = pool_elt_at_index (sb->holes, sb->head);
1045 while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
1047 blk = &tc->rcv_opts.sacks[blk_index];
1048 if (seq_leq (blk->start, hole->start))
1050 /* Block covers hole. Remove hole */
1051 if (seq_geq (blk->end, hole->end))
1053 next_hole = scoreboard_next_hole (sb, hole);
1055 /* Byte accounting: snd_una needs to be advanced */
1056 if (blk->end == ack)
1060 if (seq_lt (ack, next_hole->start))
1061 sb->snd_una_adv = next_hole->start - ack;
1062 sb->last_bytes_delivered +=
1063 next_hole->start - hole->end;
1067 ASSERT (seq_geq (sb->high_sacked, ack));
1068 sb->snd_una_adv = sb->high_sacked - ack;
1069 sb->last_bytes_delivered += sb->high_sacked - hole->end;
1072 scoreboard_remove_hole (sb, hole);
1075 /* Partial 'head' overlap */
1078 if (seq_gt (blk->end, hole->start))
1080 hole->start = blk->end;
1087 /* Hole must be split */
1088 if (seq_lt (blk->end, hole->end))
1090 hole_index = scoreboard_hole_index (sb, hole);
1091 next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
1094 /* Pool might've moved */
1095 hole = scoreboard_get_hole (sb, hole_index);
1096 hole->end = blk->start;
1098 ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
1100 else if (seq_lt (blk->start, hole->end))
1102 hole->end = blk->start;
1104 hole = scoreboard_next_hole (sb, hole);
1108 if (pool_elts (sb->holes) == 1)
1110 hole = scoreboard_first_hole (sb);
1111 if (hole->start == ack + sb->snd_una_adv && hole->end == tc->snd_nxt)
1112 scoreboard_remove_hole (sb, hole);
1115 scoreboard_update_bytes (tc, sb);
1116 sb->last_sacked_bytes = sb->sacked_bytes
1117 - (old_sacked_bytes - sb->last_bytes_delivered);
1119 ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
1120 ASSERT (sb->sacked_bytes == 0 || tcp_in_recovery (tc)
1121 || sb->sacked_bytes < tc->snd_nxt - seq_max (tc->snd_una, ack));
1122 ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_nxt
1123 - seq_max (tc->snd_una, ack) || tcp_in_recovery (tc));
1124 ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
1125 || sb->holes[sb->head].start == ack + sb->snd_una_adv);
1126 ASSERT (sb->last_lost_bytes <= sb->lost_bytes);
1128 TCP_EVT (TCP_EVT_CC_SCOREBOARD, tc);
1130 #endif /* CLIB_MARCH_VARIANT */
/**
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If successful, and new window is 'effectively' 0, activate persist
 * timer.
 */
static void
1139 tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
1141 /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
1142 * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
1143 if (seq_lt (tc->snd_wl1, seq)
1144 || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
1146 tc->snd_wnd = snd_wnd;
1149 TCP_EVT (TCP_EVT_SND_WND, tc);
1151 if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
1153 /* Set persist timer if not set and we just got 0 wnd */
1154 if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
1155 && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
1156 tcp_persist_timer_set (tc);
1160 tcp_persist_timer_reset (tc);
1161 if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
1164 tcp_update_rto (tc);
1170 #ifndef CLIB_MARCH_VARIANT
1172 * Init loss recovery/fast recovery.
1174 * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
1175 * updated in @ref tcp_cc_handle_event after fast retransmit
1178 tcp_cc_init_congestion (tcp_connection_t * tc)
1180 tcp_fastrecovery_on (tc);
1181 tc->snd_congestion = tc->snd_nxt;
1182 tc->cwnd_acc_bytes = 0;
1183 tc->snd_rxt_bytes = 0;
1184 tc->prev_ssthresh = tc->ssthresh;
1185 tc->prev_cwnd = tc->cwnd;
1186 tc->cc_algo->congestion (tc);
1187 tc->fr_occurences += 1;
1188 TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
1190 #endif /* CLIB_MARCH_VARIANT */
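
/* The prev_ssthresh/prev_cwnd snapshots taken in tcp_cc_init_congestion()
 * above are what tcp_cc_congestion_undo() below restores if the
 * retransmit later proves spurious. */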
1193 tcp_cc_recovery_exit (tcp_connection_t * tc)
1196 tcp_update_rto (tc);
1199 tcp_recovery_off (tc);
1200 TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
1203 #ifndef CLIB_MARCH_VARIANT
1205 tcp_cc_fastrecovery_clear (tcp_connection_t * tc)
1207 tc->snd_rxt_bytes = 0;
1208 tc->rcv_dupacks = 0;
1211 tcp_fastrecovery_off (tc);
1212 tcp_fastrecovery_first_off (tc);
1213 tc->flags &= ~TCP_CONN_FRXT_PENDING;
1215 TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
1217 #endif /* CLIB_MARCH_VARIANT */
1220 tcp_cc_congestion_undo (tcp_connection_t * tc)
1222 tc->cwnd = tc->prev_cwnd;
1223 tc->ssthresh = tc->prev_ssthresh;
1224 tc->rcv_dupacks = 0;
1225 if (tcp_in_recovery (tc))
1227 tcp_cc_recovery_exit (tc);
1228 tc->snd_nxt = seq_max (tc->snd_nxt, tc->snd_congestion);
1230 else if (tcp_in_fastrecovery (tc))
1232 tcp_cc_fastrecovery_clear (tc);
1234 tcp_cc_undo_recovery (tc);
1235 ASSERT (tc->rto_boff == 0);
1236 TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
1240 tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
1242 return (tcp_in_recovery (tc) && tc->rto_boff == 1
1244 && tcp_opts_tstamp (&tc->rcv_opts)
1245 && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
1249 tcp_cc_is_spurious_fast_rxt (tcp_connection_t * tc)
1251 return (tcp_in_fastrecovery (tc)
1252 && tc->cwnd > tc->ssthresh + 3 * tc->snd_mss);
1256 tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
1258 return (tcp_cc_is_spurious_timeout_rxt (tc)
1259 || tcp_cc_is_spurious_fast_rxt (tc));
1263 tcp_cc_recover (tcp_connection_t * tc)
1265 ASSERT (tcp_in_cong_recovery (tc));
1266 if (tcp_cc_is_spurious_retransmit (tc))
1268 tcp_cc_congestion_undo (tc);
1272 if (tcp_in_recovery (tc))
1273 tcp_cc_recovery_exit (tc);
1274 else if (tcp_in_fastrecovery (tc))
1276 tcp_cc_recovered (tc);
1277 tcp_cc_fastrecovery_clear (tc);
1280 ASSERT (tc->rto_boff == 0);
1281 ASSERT (!tcp_in_cong_recovery (tc));
1282 ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
1287 tcp_cc_update (tcp_connection_t * tc, tcp_rate_sample_t * rs)
1289 ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));
1291 /* Congestion avoidance */
1292 tcp_cc_rcv_ack (tc, rs);
1294 /* If a cumulative ack, make sure dupacks is 0 */
1295 tc->rcv_dupacks = 0;
  /* When dupacks hit the threshold we only enter fast retransmit if
   * cumulative ack covers more than snd_congestion. Should snd_una
   * wrap this test may fail under otherwise valid circumstances.
   * Therefore, proactively update snd_congestion when a wrap is detected. */
  if (PREDICT_FALSE
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;
1308 tcp_should_fastrecover_sack (tcp_connection_t * tc)
1310 return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
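
/* More than (TCP_DUPACK_THRESHOLD - 1) * snd_mss sacked bytes is roughly
 * the SACK equivalent of TCP_DUPACK_THRESHOLD dupacks, i.e., enough
 * out-of-order segments were reported to trigger fast retransmit. */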
1314 tcp_should_fastrecover (tcp_connection_t * tc)
1316 return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
1317 || tcp_should_fastrecover_sack (tc));
1321 * One function to rule them all ... and in the darkness bind them
1324 tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
1329 if (tcp_in_fastrecovery (tc) && tcp_opts_sack_permitted (&tc->rcv_opts))
1331 if (tc->bytes_acked)
1333 tcp_program_fastretransmit (tc);
   * Duplicate ACK. Check if we should enter fast recovery, or, if already
   * in it, account for the bytes that left the network.
1340 else if (is_dack && !tcp_in_recovery (tc))
1342 TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
1343 ASSERT (tc->snd_una != tc->snd_nxt || tc->sack_sb.last_sacked_bytes);
1347 /* Pure duplicate ack. If some data got acked, it's handled lower */
1348 if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
1350 ASSERT (tcp_in_fastrecovery (tc));
1351 tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
1354 else if (tcp_should_fastrecover (tc))
1358 ASSERT (!tcp_in_fastrecovery (tc));
1360 /* Heuristic to catch potential late dupacks
1361 * after fast retransmit exits */
1362 if (is_dack && tc->snd_una == tc->snd_congestion
1363 && timestamp_leq (tc->rcv_opts.tsecr, tc->tsecr_last_ack))
1365 tc->rcv_dupacks = 0;
1369 tcp_cc_init_congestion (tc);
1370 tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
1372 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1374 tc->cwnd = tc->ssthresh;
1375 scoreboard_init_high_rxt (&tc->sack_sb, tc->snd_una);
1379 /* Post retransmit update cwnd to ssthresh and account for the
1380 * three segments that have left the network and should've been
1381 * buffered at the receiver XXX */
1382 tc->cwnd = tc->ssthresh + 3 * tc->snd_mss;
1385 /* Constrain rate until we get a partial ack */
1386 pacer_wnd = clib_max (0.1 * tc->cwnd, 2 * tc->snd_mss);
1387 tcp_connection_tx_pacer_reset (tc, pacer_wnd,
1388 0 /* start bucket */ );
1389 tcp_program_fastretransmit (tc);
1392 else if (!tc->bytes_acked
1393 || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
1395 tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
1401 /* Don't allow entry in fast recovery if still in recovery, for now */
1402 else if (0 && is_dack && tcp_in_recovery (tc))
      /* If one of the two conditions below holds, reset dupacks because
       * we're probably after timeout (RFC6582 heuristics).
       * If cumulative ack does not cover more than congestion threshold,
       * and:
       * 1) The following doesn't hold: The congestion window is greater
       *    than SMSS bytes and the difference between highest_ack
       *    and prev_highest_ack is at most 4*SMSS bytes
       * 2) Echoed timestamp in the last non-dup ack does not equal the
       *    stored timestamp
       */
1414 if (seq_leq (tc->snd_una, tc->snd_congestion)
1415 && ((!(tc->cwnd > tc->snd_mss
1416 && tc->bytes_acked <= 4 * tc->snd_mss))
1417 || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
1419 tc->rcv_dupacks = 0;
1424 if (!tc->bytes_acked)
1428 TCP_EVT (TCP_EVT_CC_PACK, tc);
1431 * Legitimate ACK. 1) See if we can exit recovery
1434 /* Update the pacing rate. For the first partial ack we move from
1435 * the artificially constrained rate to the one after congestion */
1436 tcp_connection_tx_pacer_update (tc);
1438 if (seq_geq (tc->snd_una, tc->snd_congestion))
1440 tcp_retransmit_timer_update (tc);
1442 /* If spurious return, we've already updated everything */
1443 if (tcp_cc_recover (tc))
1445 tc->tsecr_last_ack = tc->rcv_opts.tsecr;
1449 /* Treat as congestion avoidance ack */
1450 tcp_cc_rcv_ack (tc, rs);
1455 * Legitimate ACK. 2) If PARTIAL ACK try to retransmit
1458 /* XXX limit this only to first partial ack? */
1459 tcp_retransmit_timer_update (tc);
1461 /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
1462 * reset dupacks to 0. Also needed if in congestion recovery */
1463 tc->rcv_dupacks = 0;
1465 /* Post RTO timeout don't try anything fancy */
1466 if (tcp_in_recovery (tc))
1468 tcp_cc_rcv_ack (tc, rs);
1469 transport_add_tx_event (&tc->connection);
1473 /* Remove retransmitted bytes that have been delivered */
1474 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1476 ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv
1477 >= tc->sack_sb.last_bytes_delivered
1478 || (tc->flags & TCP_CONN_FINSNT));
1480 /* If we have sacks and we haven't gotten an ack beyond high_rxt,
1481 * remove sacked bytes delivered */
1482 if (seq_lt (tc->snd_una, tc->sack_sb.high_rxt))
1484 rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv
1485 - tc->sack_sb.last_bytes_delivered;
1486 ASSERT (tc->snd_rxt_bytes >= rxt_delivered);
1487 tc->snd_rxt_bytes -= rxt_delivered;
1491 /* Apparently all retransmitted holes have been acked */
1492 tc->snd_rxt_bytes = 0;
1493 tc->sack_sb.high_rxt = tc->snd_una;
1498 tcp_fastrecovery_first_on (tc);
1499 if (tc->snd_rxt_bytes > tc->bytes_acked)
1500 tc->snd_rxt_bytes -= tc->bytes_acked;
1502 tc->snd_rxt_bytes = 0;
1505 tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
1508 * Since this was a partial ack, try to retransmit some more data
1510 tcp_program_fastretransmit (tc);
1514 * Process incoming ACK
1517 tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
1518 tcp_header_t * th, u32 * error)
1520 u32 prev_snd_wnd, prev_snd_una;
1521 tcp_rate_sample_t rs = { 0 };
1524 TCP_EVT (TCP_EVT_CC_STAT, tc);
1526 /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
1527 if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
1529 /* We've probably entered recovery and the peer still has some
1530 * of the data we've sent. Update snd_nxt and accept the ack */
1531 if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
1532 && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
1534 tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
1538 tc->errors.above_ack_wnd += 1;
1539 *error = TCP_ERROR_ACK_FUTURE;
1540 TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
1544 /* If old ACK, probably it's an old dupack */
1545 if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
1547 tc->errors.below_ack_wnd += 1;
1548 *error = TCP_ERROR_ACK_OLD;
1549 TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
1550 if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
1551 tcp_cc_handle_event (tc, 0, 1);
1552 /* Don't drop yet */
1559 * Looks okay, process feedback
1562 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1563 tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
1565 prev_snd_wnd = tc->snd_wnd;
1566 prev_snd_una = tc->snd_una;
1567 tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
1568 vnet_buffer (b)->tcp.ack_number,
1569 clib_net_to_host_u16 (th->window) << tc->snd_wscale);
1570 tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
1571 tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
1572 tcp_validate_txf_size (tc, tc->bytes_acked);
1574 if (tc->bytes_acked)
1576 tcp_program_dequeue (wrk, tc);
1577 tcp_update_rtt (tc, vnet_buffer (b)->tcp.ack_number);
1580 if (tc->flags & TCP_CONN_RATE_SAMPLE)
1581 tcp_bt_sample_delivery_rate (tc, &rs);
1583 TCP_EVT (TCP_EVT_ACK_RCVD, tc);
   * Check if we have a congestion event
1589 if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
1591 tcp_cc_handle_event (tc, &rs, is_dack);
1592 tc->dupacks_in += is_dack;
1593 if (!tcp_in_cong_recovery (tc))
1595 *error = TCP_ERROR_ACK_OK;
1598 *error = TCP_ERROR_ACK_DUP;
1599 if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
1605 * Update congestion control (slow start/congestion avoidance)
1607 tcp_cc_update (tc, &rs);
1608 *error = TCP_ERROR_ACK_OK;
1613 tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1615 if (!tcp_disconnect_pending (tc))
1617 vec_add1 (wrk->pending_disconnects, tc->c_c_index);
1618 tcp_disconnect_pending_on (tc);
1623 tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
1625 u32 thread_index, *pending_disconnects;
1626 tcp_connection_t *tc;
1629 if (!vec_len (wrk->pending_disconnects))
1632 thread_index = wrk->vm->thread_index;
1633 pending_disconnects = wrk->pending_disconnects;
1634 for (i = 0; i < vec_len (pending_disconnects); i++)
1636 tc = tcp_connection_get (pending_disconnects[i], thread_index);
1637 tcp_disconnect_pending_off (tc);
1638 session_transport_closing_notify (&tc->connection);
1640 _vec_len (wrk->pending_disconnects) = 0;
1644 tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
1647 /* Reject out-of-order fins */
  if (vnet_buffer (b)->tcp.seq_end != tc->rcv_nxt)
    return;

  /* Account for the FIN and send ack */
  tc->rcv_nxt += 1;
  tcp_program_ack (tc);
1654 /* Enter CLOSE-WAIT and notify session. To avoid lingering
1655 * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
1656 tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
1657 tcp_program_disconnect (wrk, tc);
1658 tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
1659 TCP_EVT (TCP_EVT_FIN_RCVD, tc);
1660 *error = TCP_ERROR_FIN_RCVD;
1663 #ifndef CLIB_MARCH_VARIANT
1665 tcp_sack_vector_is_sane (sack_block_t * sacks)
1668 for (i = 1; i < vec_len (sacks); i++)
1670 if (sacks[i - 1].end == sacks[i].start)
1677 * Build SACK list as per RFC2018.
1679 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the ones most recently reported in SACK
 * blocks.
 *
1683 * @param tc TCP connection for which the SACK list is updated
1684 * @param start Start sequence number of the newest SACK block
1685 * @param end End sequence of the newest SACK block
1688 tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
1690 sack_block_t *new_list = tc->snd_sacks_fl, *block = 0;
1693 /* If the first segment is ooo add it to the list. Last write might've moved
1694 * rcv_nxt over the first segment. */
1695 if (seq_lt (tc->rcv_nxt, start))
1697 vec_add2 (new_list, block, 1);
1698 block->start = start;
1702 /* Find the blocks still worth keeping. */
1703 for (i = 0; i < vec_len (tc->snd_sacks); i++)
1705 /* Discard if rcv_nxt advanced beyond current block */
1706 if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
1709 /* Merge or drop if segment overlapped by the new segment */
1710 if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
1711 && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
1713 if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
1714 new_list[0].start = tc->snd_sacks[i].start;
1715 if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
1716 new_list[0].end = tc->snd_sacks[i].end;
1720 /* Save to new SACK list if we have space. */
1721 if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
1722 vec_add1 (new_list, tc->snd_sacks[i]);
1725 ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);
1727 /* Replace old vector with new one */
1728 vec_reset_length (tc->snd_sacks);
1729 tc->snd_sacks_fl = tc->snd_sacks;
1730 tc->snd_sacks = new_list;
1732 /* Segments should not 'touch' */
1733 ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
1737 tcp_sack_list_bytes (tcp_connection_t * tc)
1740 for (i = 0; i < vec_len (tc->snd_sacks); i++)
1741 bytes += tc->snd_sacks[i].end - tc->snd_sacks[i].start;
1744 #endif /* CLIB_MARCH_VARIANT */
1746 /** Enqueue data for delivery to application */
1748 tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
1751 int written, error = TCP_ERROR_ENQUEUED;
1753 ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1755 written = session_enqueue_stream_connection (&tc->connection, b, 0,
1756 1 /* queue event */ , 1);
1757 tc->bytes_in += written;
1759 TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);
1761 /* Update rcv_nxt */
1762 if (PREDICT_TRUE (written == data_len))
1764 tc->rcv_nxt += written;
1766 /* If more data written than expected, account for out-of-order bytes. */
1767 else if (written > data_len)
1769 tc->rcv_nxt += written;
1770 TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
1772 else if (written > 0)
1774 /* We've written something but FIFO is probably full now */
1775 tc->rcv_nxt += written;
1776 error = TCP_ERROR_PARTIALLY_ENQUEUED;
1780 return TCP_ERROR_FIFO_FULL;
1783 /* Update SACK list if need be */
1784 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1786 /* Remove SACK blocks that have been delivered */
1787 tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
1793 /** Enqueue out-of-order data */
1795 tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
1801 ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1804 /* Enqueue out-of-order data with relative offset */
1805 rv = session_enqueue_stream_connection (&tc->connection, b,
1806 vnet_buffer (b)->tcp.seq_number -
1807 tc->rcv_nxt, 0 /* queue event */ ,
1810 /* Nothing written */
1813 TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
1814 return TCP_ERROR_FIFO_FULL;
1817 TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
1818 tc->bytes_in += data_len;
1820 /* Update SACK list if in use */
1821 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1823 ooo_segment_t *newest;
1826 s0 = session_get (tc->c_s_index, tc->c_thread_index);
1828 /* Get the newest segment from the fifo */
1829 newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
1832 offset = ooo_segment_offset_prod (s0->rx_fifo, newest);
1833 ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
1834 start = tc->rcv_nxt + offset;
1835 end = start + ooo_segment_length (s0->rx_fifo, newest);
1836 tcp_update_sack_list (tc, start, end);
1837 svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
1838 TCP_EVT (TCP_EVT_CC_SACKS, tc);
1842 return TCP_ERROR_ENQUEUED_OOO;
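
/* The OOO path above is what feeds tcp_update_sack_list(): the rx fifo
 * tracks out-of-order segments and the newest one is reported back to the
 * peer as the first SACK block, per RFC2018. */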
/**
 * Check if ACK could be delayed. If ack can be delayed, it should return
 * true for a full frame. If we're always acking, return 0.
 */
always_inline u8
tcp_can_delack (tcp_connection_t * tc)
{
  /* Send ack if ... */
  if (TCP_ALWAYS_ACK
      /* just sent a rcv wnd 0
         || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0 */
      /* constrained to send ack */
      || (tc->flags & TCP_CONN_SNDACK) != 0
      /* we're almost out of tx wnd */
      || tcp_available_cc_snd_space (tc) < 4 * tc->snd_mss)
    return 0;

  return 1;
}
1866 tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
1868 u32 discard, first = b->current_length;
1869 vlib_main_t *vm = vlib_get_main ();
1871 /* Handle multi-buffer segments */
1872 if (n_bytes_to_drop > b->current_length)
1874 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1878 discard = clib_min (n_bytes_to_drop, b->current_length);
1879 vlib_buffer_advance (b, discard);
1880 b = vlib_get_buffer (vm, b->next_buffer);
1881 n_bytes_to_drop -= discard;
1883 while (n_bytes_to_drop);
1884 if (n_bytes_to_drop > first)
1885 b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
1888 vlib_buffer_advance (b, n_bytes_to_drop);
1889 vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
/**
 * Receive buffer for connection and handle acks
 *
 * It handles both in order and out of order data.
 */
static int
1899 tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1902 u32 error, n_bytes_to_drop, n_data_bytes;
1904 vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
1905 n_data_bytes = vnet_buffer (b)->tcp.data_len;
1906 ASSERT (n_data_bytes);
1907 tc->data_segs_in += 1;
1909 /* Handle out-of-order data */
1910 if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
      /* Old sequence numbers allowed through because they overlapped
       * the rx window */
1914 if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
1916 /* Completely in the past (possible retransmit). Ack
1917 * retransmissions since we may not have any data to send */
1918 if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
1920 tcp_program_ack (tc);
1921 error = TCP_ERROR_SEGMENT_OLD;
1925 /* Chop off the bytes in the past and see if what is left
1926 * can be enqueued in order */
1927 n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
1928 n_data_bytes -= n_bytes_to_drop;
1929 vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
1930 if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
1932 error = TCP_ERROR_SEGMENT_OLD;
1938 /* RFC2581: Enqueue and send DUPACK for fast retransmit */
1939 error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
1940 tcp_program_dupack (tc);
1941 TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
1942 tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
1943 tc->rcv_las + tc->rcv_wnd);
1949 /* In order data, enqueue. Fifo figures out by itself if any out-of-order
1950 * segments can be enqueued after fifo tail offset changes. */
1951 error = tcp_session_enqueue_data (tc, b, n_data_bytes);
1952 if (tcp_can_delack (tc))
1954 if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
1955 tcp_timer_set (tc, TCP_TIMER_DELACK, tcp_cfg.delack_time);
1959 tcp_program_ack (tc);
typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_rx_trace_t;
1972 format_tcp_rx_trace (u8 * s, va_list * args)
1974 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1975 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1976 tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1977 u32 indent = format_get_indent (s);
1979 s = format (s, "%U\n%U%U",
1980 format_tcp_header, &t->tcp_header, 128,
1981 format_white_space, indent,
1982 format_tcp_connection, &t->tcp_connection, 1);
1988 format_tcp_rx_trace_short (u8 * s, va_list * args)
1990 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1991 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1992 tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
1994 s = format (s, "%d -> %d (%U)",
1995 clib_net_to_host_u16 (t->tcp_header.dst_port),
1996 clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
1997 t->tcp_connection.state);
2003 tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
2004 tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
2008 clib_memcpy_fast (&t0->tcp_connection, tc0,
2009 sizeof (t0->tcp_connection));
2013 th0 = tcp_buffer_hdr (b0);
2015 clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
2019 tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2020 vlib_frame_t * frame, u8 is_ip4)
2024 n_left = frame->n_vectors;
2025 from = vlib_frame_vector_args (frame);
2029 tcp_connection_t *tc0;
2036 b0 = vlib_get_buffer (vm, bi0);
2038 if (b0->flags & VLIB_BUFFER_IS_TRACED)
2040 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2041 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2043 th0 = tcp_buffer_hdr (b0);
2044 tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
static void
tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
			u8 is_ip4, u32 evt, u32 val)
{
  if (is_ip4)
    vlib_node_increment_counter (vm, tcp4_node, evt, val);
  else
    vlib_node_increment_counter (vm, tcp6_node, evt, val);
}
#define tcp_maybe_inc_counter(node_id, err, count)			\
{									\
  if (next0 != tcp_next_drop (is_ip4))					\
    tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,		\
			    tcp6_##node_id##_node.index, is_ip4, err,	\
			    1);						\
}
#define tcp_inc_counter(node_id, err, count)				\
  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,		\
			  tcp6_##node_id##_node.index, is_ip4,		\
			  err, count)
#define tcp_maybe_inc_err_counter(cnts, err)				\
{									\
  cnts[err] += (next0 != tcp_next_drop (is_ip4));			\
}
#define tcp_inc_err_counter(cnts, err, val)				\
{									\
  cnts[err] += val;							\
}
#define tcp_store_err_counters(node_id, cnts)				\
{									\
  int i;								\
  for (i = 0; i < TCP_N_ERROR; i++)					\
    tcp_inc_counter(node_id, i, cnts[i]);				\
}
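
/* Errors are first tallied per-frame in a small u16 array
 * (tcp_inc_err_counter) and flushed into the per-node counters in one pass
 * at the end of the frame (tcp_store_err_counters), avoiding a counter
 * update per packet. */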
2091 tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2092 vlib_frame_t * frame, int is_ip4)
2094 u32 thread_index = vm->thread_index, errors = 0;
2095 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2096 u32 n_left_from, *from, *first_buffer;
2097 u16 err_counters[TCP_N_ERROR] = { 0 };
2099 if (node->flags & VLIB_NODE_FLAG_TRACE)
2100 tcp_established_trace_frame (vm, node, frame, is_ip4);
2102 first_buffer = from = vlib_frame_vector_args (frame);
2103 n_left_from = frame->n_vectors;
2105 while (n_left_from > 0)
2107 u32 bi0, error0 = TCP_ERROR_ACK_OK;
2110 tcp_connection_t *tc0;
2112 if (n_left_from > 1)
2115 pb = vlib_get_buffer (vm, from[1]);
2116 vlib_prefetch_buffer_header (pb, LOAD);
2117 CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2124 b0 = vlib_get_buffer (vm, bi0);
2125 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2128 if (PREDICT_FALSE (tc0 == 0))
2130 error0 = TCP_ERROR_INVALID_CONNECTION;
2134 th0 = tcp_buffer_hdr (b0);
2136 /* TODO header prediction fast path */
2138 /* 1-4: check SEQ, RST, SYN */
2139 if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
2141 TCP_EVT (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
2145 /* 5: check the ACK field */
2146 if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc0, b0, th0, &error0)))
2149 /* 6: check the URG bit TODO */
2151 /* 7: process the segment text */
2152 if (vnet_buffer (b0)->tcp.data_len)
2153 error0 = tcp_segment_rcv (wrk, tc0, b0);
2155 /* 8: check the FIN bit */
2156 if (PREDICT_FALSE (tcp_is_fin (th0)))
2157 tcp_rcv_fin (wrk, tc0, b0, &error0);
2160 tcp_inc_err_counter (err_counters, error0, 1);
  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
					      thread_index);
2165 err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
2166 tcp_store_err_counters (established, err_counters);
2167 tcp_handle_postponed_dequeues (wrk);
2168 tcp_handle_disconnects (wrk);
2169 vlib_buffer_free (vm, first_buffer, frame->n_vectors);
2171 return frame->n_vectors;
2174 VLIB_NODE_FN (tcp4_established_node) (vlib_main_t * vm,
2175 vlib_node_runtime_t * node,
2176 vlib_frame_t * from_frame)
2178 return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2181 VLIB_NODE_FN (tcp6_established_node) (vlib_main_t * vm,
2182 vlib_node_runtime_t * node,
2183 vlib_frame_t * from_frame)
2185 return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
VLIB_REGISTER_NODE (tcp4_established_node) =
{
  .name = "tcp4-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
VLIB_REGISTER_NODE (tcp6_established_node) =
{
  .name = "tcp6-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
2228 tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
2230 transport_connection_t *tmp = 0;
2237 if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
2240 u8 is_valid = (tc->c_lcl_port == hdr->dst_port
2241 && (tc->state == TCP_STATE_LISTEN
2242 || tc->c_rmt_port == hdr->src_port));
2246 handle = session_lookup_half_open_handle (&tc->connection);
2247 tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
2248 tc->c_proto, tc->c_is_ip4);
2252 if (tmp->lcl_port == hdr->dst_port
2253 && tmp->rmt_port == hdr->src_port)
2255 TCP_DBG ("half-open is valid!");
2263 * Lookup transport connection
2265 static tcp_connection_t *
tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
		       u8 is_ip4)
2270 transport_connection_t *tconn;
2271 tcp_connection_t *tc;
2276 ip4 = vlib_buffer_get_current (b);
2277 tcp = ip4_next_header (ip4);
2278 tconn = session_lookup_connection_wt4 (fib_index,
2283 TRANSPORT_PROTO_TCP,
2284 thread_index, &is_filtered);
2285 tc = tcp_get_connection_from_transport (tconn);
2286 ASSERT (tcp_lookup_is_valid (tc, tcp));
2291 ip6 = vlib_buffer_get_current (b);
2292 tcp = ip6_next_header (ip6);
2293 tconn = session_lookup_connection_wt6 (fib_index,
2298 TRANSPORT_PROTO_TCP,
2299 thread_index, &is_filtered);
2300 tc = tcp_get_connection_from_transport (tconn);
2301 ASSERT (tcp_lookup_is_valid (tc, tcp));
2307 tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2308 vlib_frame_t * from_frame, int is_ip4)
2310 u32 n_left_from, *from, *first_buffer, errors = 0;
2311 u32 my_thread_index = vm->thread_index;
2312 tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);
2314 from = first_buffer = vlib_frame_vector_args (from_frame);
2315 n_left_from = from_frame->n_vectors;
2317 while (n_left_from > 0)
2319 u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
2320 tcp_connection_t *tc0, *new_tc0;
2321 tcp_header_t *tcp0 = 0;
2329 b0 = vlib_get_buffer (vm, bi0);
      tc0 =
	tcp_half_open_connection_get (vnet_buffer (b0)->tcp.connection_index);
2332 if (PREDICT_FALSE (tc0 == 0))
2334 error0 = TCP_ERROR_INVALID_CONNECTION;
      /* Half-open completed recently but the connection wasn't removed
       * yet by the owning thread */
2340 if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
2342 /* Make sure the connection actually exists */
2343 ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
2344 my_thread_index, is_ip4));
2345 error0 = TCP_ERROR_SPURIOUS_SYN_ACK;
2349 ack0 = vnet_buffer (b0)->tcp.ack_number;
2350 seq0 = vnet_buffer (b0)->tcp.seq_number;
2351 tcp0 = tcp_buffer_hdr (b0);
2353 /* Crude check to see if the connection handle does not match
2354 * the packet. Probably connection just switched to established */
2355 if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
2356 || tcp0->src_port != tc0->c_rmt_port))
2358 error0 = TCP_ERROR_INVALID_CONNECTION;
2362 if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0)
2363 && !tcp_syn (tcp0)))
2365 error0 = TCP_ERROR_SEGMENT_INVALID;
2369 /* SYNs consume sequence numbers */
2370 vnet_buffer (b0)->tcp.seq_end += tcp_is_syn (tcp0);
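
      /* Worked example (illustrative only): a SYN with seq_number 100 and
       * no payload occupies sequence number 100, so seq_end becomes 101
       * and the eventual ACK must carry 101; tcp_is_syn () evaluates to 1
       * for SYN segments, hence the increment above. */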
      /*
       *  1. check the ACK bit
       */

      /*
       *   If the ACK bit is set
       *     If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
       *     the RST bit is set, if so drop the segment and return)
       *       <SEQ=SEG.ACK><CTL=RST>
       *     and discard the segment.  Return.
       *     If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
       */
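
      /* A sketch of how the checks below realize the RFC test, using the
       * wraparound-safe sequence comparators used throughout this file:
       *
       *   seq_leq (ack0, iss) || seq_gt (ack0, snd_nxt) -> send RST, drop
       *   seq_gt (snd_una, ack0)                        -> old ACK, drop
       */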
      if (tcp_ack (tcp0))
        {
          if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
            {
              if (!tcp_rst (tcp0))
                tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
              error0 = TCP_ERROR_RCV_WND;
              goto drop;
            }

          /* Make sure ACK is valid */
          if (seq_gt (tc0->snd_una, ack0))
            {
              error0 = TCP_ERROR_ACK_INVALID;
              goto drop;
            }
        }

      /*
       * 2. check the RST bit
       */
      if (tcp_rst (tcp0))
        {
          /* If ACK is acceptable, signal client that peer is not
           * willing to accept the connection and drop the connection */
          if (tcp_ack (tcp0))
            tcp_connection_reset (tc0);
          error0 = TCP_ERROR_RST_RCVD;
          goto drop;
        }

      /*
       * 3. check the security and precedence (skipped)
       */

      /*
       * 4. check the SYN bit
       */

      /* No SYN flag. Drop. */
      if (!tcp_syn (tcp0))
        {
          error0 = TCP_ERROR_SEGMENT_INVALID;
          goto drop;
        }

      /* Parse options */
      if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
        {
          error0 = TCP_ERROR_OPTIONS;
          goto drop;
        }

      /* Valid SYN or SYN-ACK. Move connection from half-open pool to
       * current thread pool. */
      new_tc0 = tcp_connection_alloc_w_base (my_thread_index, tc0);
      new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
      new_tc0->irs = seq0;
      new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
      new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

      /* If this is not the owning thread, wait for syn retransmit to
       * expire and cleanup then */
      if (tcp_half_open_connection_cleanup (tc0))
        tc0->flags |= TCP_CONN_HALF_OPEN_DONE;

      if (tcp_opts_tstamp (&new_tc0->rcv_opts))
        {
          new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
          new_tc0->tsval_recent_age = tcp_time_now ();
        }

      if (tcp_opts_wscale (&new_tc0->rcv_opts))
        new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
      else
        new_tc0->rcv_wscale = 0;

      new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
        << new_tc0->snd_wscale;
      new_tc0->snd_wl1 = seq0;
      new_tc0->snd_wl2 = ack0;
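
      /* Worked example of the scaling above: with the peer advertising
       * window = 65535 and wscale = 7, the effective send window is
       * 65535 << 7 = 8388480 bytes. snd_wl1/snd_wl2 remember the seq/ack
       * of the segment that last updated the window. */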
      tcp_connection_init_vars (new_tc0);

      /* SYN-ACK: See if we can switch to ESTABLISHED state */
      if (PREDICT_TRUE (tcp_ack (tcp0)))
        {
          /* Our SYN is ACKed: we have iss < ack = snd_una */

          /* TODO Dequeue acknowledged segments if we support Fast Open */
          new_tc0->snd_una = ack0;
          new_tc0->state = TCP_STATE_ESTABLISHED;

          /* Make sure las is initialized for the wnd computation */
          new_tc0->rcv_las = new_tc0->rcv_nxt;

          /* Notify app that we have connection. If session layer can't
           * allocate session send reset */
          if (session_stream_connect_notify (&new_tc0->connection, 0))
            {
              tcp_send_reset_w_pkt (new_tc0, b0, my_thread_index, is_ip4);
              tcp_connection_cleanup (new_tc0);
              error0 = TCP_ERROR_CREATE_SESSION_FAIL;
              goto drop;
            }

          new_tc0->tx_fifo_size =
            transport_tx_fifo_size (&new_tc0->connection);
          /* Update rtt with the syn-ack sample */
          tcp_estimate_initial_rtt (new_tc0);
          TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
          error0 = TCP_ERROR_SYN_ACKS_RCVD;
        }
      /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
      else
        {
          new_tc0->state = TCP_STATE_SYN_RCVD;

          /* Notify app that we have connection */
          if (session_stream_connect_notify (&new_tc0->connection, 0))
            {
              tcp_connection_cleanup (new_tc0);
              tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
              TCP_EVT (TCP_EVT_RST_SENT, tc0);
              error0 = TCP_ERROR_CREATE_SESSION_FAIL;
              goto drop;
            }

          new_tc0->tx_fifo_size =
            transport_tx_fifo_size (&new_tc0->connection);
          new_tc0->rtt_ts = 0;
          tcp_init_snd_vars (new_tc0);
          tcp_send_synack (new_tc0);
          error0 = TCP_ERROR_SYNS_RCVD;
          goto drop;
        }

      /* Read data, if any */
      if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
        {
          clib_warning ("rcvd data in syn-sent");
          error0 = tcp_segment_rcv (wrk, new_tc0, b0);
          if (error0 == TCP_ERROR_ACK_OK)
            error0 = TCP_ERROR_SYN_ACKS_RCVD;
        }
      else
        {
          tcp_program_ack (new_tc0);
        }

    drop:

      tcp_inc_counter (syn_sent, error0, 1);
      if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
          clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
          clib_memcpy_fast (&t0->tcp_connection, tc0,
                            sizeof (t0->tcp_connection));
        }
    }

  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
                                              my_thread_index);
  tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (tcp4_syn_sent_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_syn_sent_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
{
  .name = "tcp4-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
{
  .name = "tcp6-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/**
 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
 * as per RFC793 p. 64
 */
always_inline uword
tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip4)
{
  u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  u32 n_left_from, *from, max_dequeue;

  from = first_buffer = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_NONE;
      tcp_header_t *tcp0 = 0;
      tcp_connection_t *tc0;
      vlib_buffer_t *b0;
      u8 is_fin0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                thread_index);
      if (PREDICT_FALSE (tc0 == 0))
        {
          error0 = TCP_ERROR_INVALID_CONNECTION;
          goto drop;
        }

      tcp0 = tcp_buffer_hdr (b0);
      is_fin0 = tcp_is_fin (tcp0);

      if (CLIB_DEBUG)
        {
          tcp_connection_t *tmp;
          tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
                                       is_ip4);
          if (tmp->state != tc0->state)
            {
              if (tc0->state != TCP_STATE_CLOSED)
                clib_warning ("state changed");
              goto drop;
            }
        }

      /*
       * Special treatment for CLOSED
       */
      if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
        {
          error0 = TCP_ERROR_CONNECTION_CLOSED;
          goto drop;
        }

      /*
       * For all other states (except LISTEN)
       */

      /* 1-4: check SEQ, RST, SYN */
      if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, tcp0, &error0)))
        goto drop;

      /* 5: check the ACK field */
      switch (tc0->state)
        {
        case TCP_STATE_SYN_RCVD:

          /* Make sure the segment is exactly right */
          if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
            {
              tcp_connection_reset (tc0);
              error0 = TCP_ERROR_SEGMENT_INVALID;
              goto drop;
            }

          /*
           * If the segment acknowledgment is not acceptable, form a
           * reset segment,
           *  <SEQ=SEG.ACK><CTL=RST>
           * and send it.
           */
          if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
            {
              tcp_connection_reset (tc0);
              goto drop;
            }

          /* Update rtt and rto */
          tcp_estimate_initial_rtt (tc0);

          /* Switch state to ESTABLISHED */
          tc0->state = TCP_STATE_ESTABLISHED;
          TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);

          /* Initialize session variables */
          tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
          tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
            << tc0->rcv_opts.wscale;
          tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
          tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

          /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
          tcp_retransmit_timer_reset (tc0);
          if (session_stream_accept_notify (&tc0->connection))
            {
              error0 = TCP_ERROR_MSG_QUEUE_FULL;
              tcp_connection_reset (tc0);
              goto drop;
            }
          error0 = TCP_ERROR_ACK_OK;
          break;
        case TCP_STATE_ESTABLISHED:
          /* We can get packets in established state here because they
           * were enqueued before state change */
          if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
            goto drop;
          break;
        case TCP_STATE_FIN_WAIT_1:
          /* In addition to the processing for the ESTABLISHED state, if
           * our FIN is now acknowledged then enter FIN-WAIT-2 and
           * continue processing in that state. */
          if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
            goto drop;

          /* Still have to send the FIN */
          if (tc0->flags & TCP_CONN_FINPNDG)
            {
              /* TX fifo finally drained */
              max_dequeue = transport_max_tx_dequeue (&tc0->connection);
              if (max_dequeue <= tc0->burst_acked)
                tcp_send_fin (tc0);
              /* If a fin was received and data was acked extend wait */
              else if ((tc0->flags & TCP_CONN_FINRCVD) && tc0->bytes_acked)
                tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
                                  tcp_cfg.closewait_time);
            }
          /* If FIN is ACKed */
          else if (tc0->snd_una == tc0->snd_nxt)
            {
              /* Stop all retransmit timers because we have nothing more
               * to send. */
              tcp_connection_timers_reset (tc0);

              /* We already have a FIN but didn't transition to CLOSING
               * because of outstanding tx data. Close the connection. */
              if (tc0->flags & TCP_CONN_FINRCVD)
                {
                  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
                  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE,
                                 tcp_cfg.cleanup_time);
                  session_transport_closed_notify (&tc0->connection);
                  break;
                }

              tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);
              /* Enable waitclose because we're willing to wait for peer's
               * FIN but not indefinitely. */
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.finwait2_time);

              /* Don't try to deq the FIN acked */
              if (tc0->burst_acked > 1)
                session_tx_fifo_dequeue_drop (&tc0->connection,
                                              tc0->burst_acked - 1);
              tc0->burst_acked = 0;
            }
          break;
        case TCP_STATE_FIN_WAIT_2:
          /* In addition to the processing for the ESTABLISHED state, if
           * the retransmission queue is empty, the user's CLOSE can be
           * acknowledged ("ok") but do not delete the TCB. */
          if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
            goto drop;
          tc0->burst_acked = 0;
          break;
        case TCP_STATE_CLOSE_WAIT:
          /* Do the same processing as for the ESTABLISHED state. */
          if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
            goto drop;

          if (!(tc0->flags & TCP_CONN_FINPNDG))
            break;

          /* Still have outstanding tx data */
          max_dequeue = transport_max_tx_dequeue (&tc0->connection);
          if (max_dequeue > tc0->burst_acked)
            break;

          tcp_send_fin (tc0);
          tcp_connection_timers_reset (tc0);
          tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
          tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
          break;
        case TCP_STATE_CLOSING:
          /* In addition to the processing for the ESTABLISHED state, if
           * the ACK acknowledges our FIN then enter the TIME-WAIT state,
           * otherwise ignore the segment. */
          if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
            goto drop;

          if (tc0->snd_una != tc0->snd_nxt)
            goto drop;

          tcp_connection_timers_reset (tc0);
          tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
          tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
          session_transport_closed_notify (&tc0->connection);
          goto drop;

          break;
        case TCP_STATE_LAST_ACK:
          /* The only thing that [should] arrive in this state is an
           * acknowledgment of our FIN. If our FIN is now acknowledged,
           * delete the TCB, enter the CLOSED state, and return. */

          if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
            goto drop;

          /* Apparently our ACK for the peer's FIN was lost */
          if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
            {
              tcp_send_fin (tc0);
              goto drop;
            }

          tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
          session_transport_closed_notify (&tc0->connection);

          /* Don't free the connection from the data path since
           * we can't ensure that we have no packets already enqueued
           * to output. Rely instead on the waitclose timer */
          tcp_connection_timers_reset (tc0);
          tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);

          goto drop;

          break;
        case TCP_STATE_TIME_WAIT:
          /* The only thing that can arrive in this state is a
           * retransmission of the remote FIN. Acknowledge it, and restart
           * the 2 MSL timeout. */

          if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
            goto drop;

          if (!is_fin0)
            goto drop;

          tcp_program_ack (tc0);
          tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
          goto drop;

          break;
        default:
          ASSERT (0);
        }

      /* 6: check the URG bit TODO */

      /* 7: process the segment text */
      switch (tc0->state)
        {
        case TCP_STATE_ESTABLISHED:
        case TCP_STATE_FIN_WAIT_1:
        case TCP_STATE_FIN_WAIT_2:
          if (vnet_buffer (b0)->tcp.data_len)
            error0 = tcp_segment_rcv (wrk, tc0, b0);
          break;
        case TCP_STATE_CLOSE_WAIT:
        case TCP_STATE_CLOSING:
        case TCP_STATE_LAST_ACK:
        case TCP_STATE_TIME_WAIT:
          /* This should not occur, since a FIN has been received from the
           * remote side. Ignore the segment text. */
          break;
        }

      /* 8: check the FIN bit */
      if (!is_fin0)
        goto drop;

      TCP_EVT (TCP_EVT_FIN_RCVD, tc0);

      switch (tc0->state)
        {
        case TCP_STATE_ESTABLISHED:
          /* Account for the FIN and send ack */
          tc0->rcv_nxt += 1;
          tcp_program_ack (tc0);
          tcp_connection_set_state (tc0, TCP_STATE_CLOSE_WAIT);
          tcp_program_disconnect (wrk, tc0);
          tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
          break;
        case TCP_STATE_SYN_RCVD:
          /* Send FIN-ACK, enter LAST-ACK and because the app was not
           * notified yet, set a cleanup timer instead of relying on
           * disconnect notify and the implicit close call. */
          tcp_connection_timers_reset (tc0);
          tc0->rcv_nxt += 1;
          tcp_send_fin (tc0);
          tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
          tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
          break;
        case TCP_STATE_CLOSE_WAIT:
        case TCP_STATE_CLOSING:
        case TCP_STATE_LAST_ACK:
          /* move along .. */
          break;
        case TCP_STATE_FIN_WAIT_1:
          tc0->rcv_nxt += 1;

          if (tc0->flags & TCP_CONN_FINPNDG)
            {
              /* If data is outstanding, stay in FIN_WAIT_1 and try to
               * finish sending it. Since we already received a FIN, do not
               * wait for too long. */
              tc0->flags |= TCP_CONN_FINRCVD;
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
                                tcp_cfg.closewait_time);
            }
          else
            {
              tcp_connection_set_state (tc0, TCP_STATE_CLOSING);
              tcp_program_ack (tc0);
              /* Wait for ACK for our FIN but not forever */
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
                                tcp_cfg.closing_time);
            }
          break;
        case TCP_STATE_FIN_WAIT_2:
          /* Got FIN, send ACK! Be more aggressive with resource cleanup */
          tc0->rcv_nxt += 1;
          tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
          tcp_connection_timers_reset (tc0);
          tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
          tcp_program_ack (tc0);
          session_transport_closed_notify (&tc0->connection);
          break;
        case TCP_STATE_TIME_WAIT:
          /* Remain in the TIME-WAIT state. Restart the time-wait
           * timeout. */
          tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
          break;
        }
      error0 = TCP_ERROR_FIN_RCVD;

    drop:

      tcp_inc_counter (rcv_process, error0, 1);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
          tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
        }
    }

  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
                                              thread_index);
  tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
  tcp_handle_postponed_dequeues (wrk);
  tcp_handle_disconnects (wrk);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (tcp4_rcv_process_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_rcv_process_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
{
  .name = "tcp4-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
{
  .name = "tcp6-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/**
 * LISTEN state processing as per RFC 793 p. 65
 */
always_inline uword
tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, *from, n_syns = 0, *first_buffer;
  u32 my_thread_index = vm->thread_index;

  from = first_buffer = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      tcp_rx_trace_t *t0;
      tcp_header_t *th0 = 0;
      tcp_connection_t *lc0;
      ip4_header_t *ip40;
      ip6_header_t *ip60;
      tcp_connection_t *child0;
      u32 error0 = TCP_ERROR_NONE;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);

      if (is_ip4)
        {
          ip40 = vlib_buffer_get_current (b0);
          th0 = ip4_next_header (ip40);
        }
      else
        {
          ip60 = vlib_buffer_get_current (b0);
          th0 = ip6_next_header (ip60);
        }

      /* Create child session. For syn-flood protection use filter */

      /* 1. first check for an RST: handled in dispatch */
      /* if (tcp_rst (th0))
         goto drop;
       */

      /* 2. second check for an ACK: handled in dispatch */
      /* if (tcp_ack (th0))
         {
           tcp_send_reset (b0, is_ip4);
           goto drop;
         }
       */

      /* 3. check for a SYN (did that already) */

      /* Make sure connection wasn't just created */
      child0 = tcp_lookup_connection (lc0->c_fib_index, b0, my_thread_index,
                                      is_ip4);
      if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
        {
          error0 = TCP_ERROR_CREATE_EXISTS;
          goto drop;
        }

      /* Create child session and send SYN-ACK */
      child0 = tcp_connection_alloc (my_thread_index);
      child0->c_lcl_port = th0->dst_port;
      child0->c_rmt_port = th0->src_port;
      child0->c_is_ip4 = is_ip4;
      child0->state = TCP_STATE_SYN_RCVD;
      child0->c_fib_index = lc0->c_fib_index;
      child0->cc_algo = lc0->cc_algo;

      if (is_ip4)
        {
          child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
          child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
        }
      else
        {
          clib_memcpy_fast (&child0->c_lcl_ip6, &ip60->dst_address,
                            sizeof (ip6_address_t));
          clib_memcpy_fast (&child0->c_rmt_ip6, &ip60->src_address,
                            sizeof (ip6_address_t));
        }

      if (tcp_options_parse (th0, &child0->rcv_opts, 1))
        {
          error0 = TCP_ERROR_OPTIONS;
          tcp_connection_free (child0);
          goto drop;
        }

      child0->irs = vnet_buffer (b0)->tcp.seq_number;
      child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
      child0->rcv_las = child0->rcv_nxt;
      child0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

      /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
       * segments are used to initialize PAWS. */
      if (tcp_opts_tstamp (&child0->rcv_opts))
        {
          child0->tsval_recent = child0->rcv_opts.tsval;
          child0->tsval_recent_age = tcp_time_now ();
        }
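
      /* For illustration: tsval_recent seeds the PAWS check (RFC 7323)
       * applied to later segments. A sketch of that test, assuming the
       * validation path mirrors the RFC:
       *
       *   if (tcp_opts_tstamp (&tc->rcv_opts)
       *       && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent))
       *     ; // reject as a duplicate from an old sequence-space wrap
       */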
      if (tcp_opts_wscale (&child0->rcv_opts))
        child0->snd_wscale = child0->rcv_opts.wscale;

      child0->snd_wnd = clib_net_to_host_u16 (th0->window)
        << child0->snd_wscale;
      child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
      child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
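
      /* A sketch (not the exact code used elsewhere) of the RFC 793 guard
       * snd_wl1/snd_wl2 enable: a later segment with sequence seq and ack
       * ack may update snd_wnd only if
       *
       *   seq_lt (tc->snd_wl1, seq)
       *   || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack))
       *
       * which keeps stale, reordered segments from rewriting the window. */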
      tcp_connection_init_vars (child0);
      child0->rto = TCP_RTO_MIN;

      if (session_stream_accept (&child0->connection, lc0->c_s_index,
                                 lc0->c_thread_index, 0 /* notify */ ))
        {
          tcp_connection_cleanup (child0);
          error0 = TCP_ERROR_CREATE_SESSION_FAIL;
          goto drop;
        }

      TCP_EVT (TCP_EVT_SYN_RCVD, child0, 1);
      child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
      tcp_send_synack (child0);

    drop:

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
          clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
          clib_memcpy_fast (&t0->tcp_connection, lc0,
                            sizeof (t0->tcp_connection));
        }

      n_syns += (error0 == TCP_ERROR_NONE);
    }

  tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (tcp4_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_listen_node) =
{
  .name = "tcp4-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
VLIB_REGISTER_NODE (tcp6_listen_node) =
{
  .name = "tcp6-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
typedef enum _tcp_input_next
{
  TCP_INPUT_NEXT_DROP,
  TCP_INPUT_NEXT_LISTEN,
  TCP_INPUT_NEXT_RCV_PROCESS,
  TCP_INPUT_NEXT_SYN_SENT,
  TCP_INPUT_NEXT_ESTABLISHED,
  TCP_INPUT_NEXT_RESET,
  TCP_INPUT_NEXT_PUNT,
  TCP_INPUT_N_NEXT
} tcp_input_next_t;
#define foreach_tcp4_input_next                 \
  _ (DROP, "ip4-drop")                          \
  _ (LISTEN, "tcp4-listen")                     \
  _ (RCV_PROCESS, "tcp4-rcv-process")           \
  _ (SYN_SENT, "tcp4-syn-sent")                 \
  _ (ESTABLISHED, "tcp4-established")           \
  _ (RESET, "tcp4-reset")                       \
  _ (PUNT, "ip4-punt")

#define foreach_tcp6_input_next                 \
  _ (DROP, "ip6-drop")                          \
  _ (LISTEN, "tcp6-listen")                     \
  _ (RCV_PROCESS, "tcp6-rcv-process")           \
  _ (SYN_SENT, "tcp6-syn-sent")                 \
  _ (ESTABLISHED, "tcp6-established")           \
  _ (RESET, "tcp6-reset")                       \
  _ (PUNT, "ip6-punt")

#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
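
/* Only these four flags index the dispatch table. For example (a sketch of
 * the mechanics in tcp_input_dispatch_buffer () below), a pure SYN arriving
 * on a listening connection resolves as:
 *
 *   flags = tcp->flags & filter_flags;                  // TCP_FLAG_SYN
 *   next = tm->dispatch_table[TCP_STATE_LISTEN][flags].next;
 *                                                       // tcp4/6-listen
 */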
static void
tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
{
  tcp_connection_t *tc;
  tcp_header_t *tcp;
  tcp_rx_trace_t *t;
  int i;

  for (i = 0; i < n_bufs; i++)
    {
      if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
        {
          t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
          tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
                                   vm->thread_index);
          tcp = vlib_buffer_get_current (bs[i]);
          tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
        }
    }
}
static void
tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
{
  if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
    {
      *next = TCP_INPUT_NEXT_DROP;
    }
  else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
    {
      *next = TCP_INPUT_NEXT_PUNT;
      *error = TCP_ERROR_PUNT;
    }
  else
    {
      *next = TCP_INPUT_NEXT_RESET;
      *error = TCP_ERROR_NO_LISTENER;
    }
}
always_inline tcp_connection_t *
tcp_input_lookup_buffer (vlib_buffer_t * b, u8 thread_index, u32 * error,
                         u8 is_ip4, u8 is_nolookup)
{
  u32 fib_index = vnet_buffer (b)->ip.fib_index;
  int n_advance_bytes, n_data_bytes;
  transport_connection_t *tc;
  tcp_header_t *tcp;
  u8 result = 0;

  if (is_ip4)
    {
      ip4_header_t *ip4 = vlib_buffer_get_current (b);
      int ip_hdr_bytes = ip4_header_bytes (ip4);
      if (PREDICT_FALSE (b->current_length < ip_hdr_bytes + sizeof (*tcp)))
        {
          *error = TCP_ERROR_LENGTH;
          return 0;
        }
      tcp = ip4_next_header (ip4);
      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
      n_advance_bytes = (ip_hdr_bytes + tcp_header_bytes (tcp));
      n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;

      /* Length check. Checksum already computed by ip4-local; no need to
       * compute it again. */
      if (PREDICT_FALSE (n_data_bytes < 0))
        {
          *error = TCP_ERROR_LENGTH;
          return 0;
        }

      if (!is_nolookup)
        tc = session_lookup_connection_wt4 (fib_index, &ip4->dst_address,
                                            &ip4->src_address, tcp->dst_port,
                                            tcp->src_port,
                                            TRANSPORT_PROTO_TCP,
                                            thread_index, &result);
    }
  else
    {
      ip6_header_t *ip6 = vlib_buffer_get_current (b);
      if (PREDICT_FALSE (b->current_length < sizeof (*ip6) + sizeof (*tcp)))
        {
          *error = TCP_ERROR_LENGTH;
          return 0;
        }
      tcp = ip6_next_header (ip6);
      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
      n_advance_bytes = tcp_header_bytes (tcp);
      n_data_bytes = clib_net_to_host_u16 (ip6->payload_length)
        - n_advance_bytes;
      n_advance_bytes += sizeof (ip6[0]);

      if (PREDICT_FALSE (n_data_bytes < 0))
        {
          *error = TCP_ERROR_LENGTH;
          return 0;
        }

      if (!is_nolookup)
        {
          if (PREDICT_FALSE
              (ip6_address_is_link_local_unicast (&ip6->dst_address)))
            {
              ip4_main_t *im = &ip4_main;
              fib_index = vec_elt (im->fib_index_by_sw_if_index,
                                   vnet_buffer (b)->sw_if_index[VLIB_RX]);
            }

          tc = session_lookup_connection_wt6 (fib_index, &ip6->dst_address,
                                              &ip6->src_address,
                                              tcp->dst_port, tcp->src_port,
                                              TRANSPORT_PROTO_TCP,
                                              thread_index, &result);
        }
    }

  if (is_nolookup)
    tc =
      (transport_connection_t *) tcp_connection_get (vnet_buffer (b)->
                                                     tcp.connection_index,
                                                     thread_index);

  vnet_buffer (b)->tcp.seq_number = clib_net_to_host_u32 (tcp->seq_number);
  vnet_buffer (b)->tcp.ack_number = clib_net_to_host_u32 (tcp->ack_number);
  vnet_buffer (b)->tcp.data_offset = n_advance_bytes;
  vnet_buffer (b)->tcp.data_len = n_data_bytes;
  vnet_buffer (b)->tcp.seq_end = vnet_buffer (b)->tcp.seq_number
    + n_data_bytes;
  vnet_buffer (b)->tcp.flags = 0;

  *error = result ? TCP_ERROR_NONE + result : *error;
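
  /* Illustrative note: the line above folds a non-zero lookup result into
   * the tcp error space; the assumption here is that the session layer's
   * result codes map onto errors such as TCP_ERROR_FILTERED /
   * TCP_ERROR_WRONG_THREAD, which tcp_input_set_error_next () above turns
   * into drops. */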

  return tcp_get_connection_from_transport (tc);
}
static inline void
tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
                           vlib_buffer_t * b, u16 * next, u32 * error)
{
  tcp_header_t *tcp;
  u8 flags;

  tcp = tcp_buffer_hdr (b);
  flags = tcp->flags & filter_flags;
  *next = tm->dispatch_table[tc->state][flags].next;
  *error = tm->dispatch_table[tc->state][flags].error;

  if (PREDICT_FALSE (*error == TCP_ERROR_DISPATCH
                     || *next == TCP_INPUT_NEXT_RESET))
    {
      /* Overload tcp flags to store state */
      tcp_state_t state = tc->state;
      vnet_buffer (b)->tcp.flags = tc->state;

      if (*error == TCP_ERROR_DISPATCH)
        clib_warning ("tcp conn %u disp error state %U flags %U",
                      tc->c_c_index, format_tcp_state, state,
                      format_tcp_flags, (int) flags);
    }
}
always_inline uword
tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * frame, int is_ip4, u8 is_nolookup)
{
  u32 n_left_from, *from, thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  tcp_set_time_now (tcp_get_worker (thread_index));

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  b = bufs;
  next = nexts;

  while (n_left_from >= 4)
    {
      u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
      tcp_connection_t *tc0, *tc1;

      {
        vlib_prefetch_buffer_header (b[2], STORE);
        CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);

        vlib_prefetch_buffer_header (b[3], STORE);
        CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      }
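
      /* The prefetches above warm buffer headers and packet data two
       * packets ahead; each iteration of this loop consumes two packets,
       * so b[2]/b[3] are the next pair to be processed. */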
      next[0] = next[1] = TCP_INPUT_NEXT_DROP;

      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
                                     is_nolookup);
      tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4,
                                     is_nolookup);

      if (PREDICT_TRUE (!tc0 + !tc1 == 0))
        {
          ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
          ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));

          vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
          vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;

          tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
          tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
        }
      else
        {
          if (PREDICT_TRUE (tc0 != 0))
            {
              ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
              vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
              tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
            }
          else
            tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);

          if (PREDICT_TRUE (tc1 != 0))
            {
              ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
              vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
              tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
            }
          else
            tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
        }

      b += 2;
      next += 2;
      n_left_from -= 2;
    }
  while (n_left_from > 0)
    {
      tcp_connection_t *tc0;
      u32 error0 = TCP_ERROR_NO_LISTENER;

      if (n_left_from > 1)
        {
          vlib_prefetch_buffer_header (b[1], STORE);
          CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
        }

      next[0] = TCP_INPUT_NEXT_DROP;
      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
                                     is_nolookup);
      if (PREDICT_TRUE (tc0 != 0))
        {
          ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
          vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
          tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
        }
      else
        tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (tcp4_input_nolookup_node) (vlib_main_t * vm,
                                         vlib_node_runtime_t * node,
                                         vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
                             1 /* is_nolookup */ );
}

VLIB_NODE_FN (tcp6_input_nolookup_node) (vlib_main_t * vm,
                                         vlib_node_runtime_t * node,
                                         vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
                             1 /* is_nolookup */ );
}
VLIB_REGISTER_NODE (tcp4_input_nolookup_node) =
{
  .name = "tcp4-input-nolookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
VLIB_REGISTER_NODE (tcp6_input_nolookup_node) =
{
  .name = "tcp6-input-nolookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
VLIB_NODE_FN (tcp4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
                             0 /* is_nolookup */ );
}

VLIB_NODE_FN (tcp6_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
                             0 /* is_nolookup */ );
}
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  .name = "tcp4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
VLIB_REGISTER_NODE (tcp6_input_node) =
{
  .name = "tcp6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
#ifndef CLIB_MARCH_VARIANT
static void
tcp_dispatch_table_init (tcp_main_t * tm)
{
  int i, j;
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
        tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
        tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)                                              \
do {                                                            \
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);            \
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);           \
} while (0)
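
  /* For illustration, _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN,
   * TCP_ERROR_NONE) expands to:
   *
   *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].next =
   *     TCP_INPUT_NEXT_LISTEN;
   *   tm->dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN].error =
   *     TCP_ERROR_NONE;
   */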
  /* RFC 793: In LISTEN if RST drop and if ACK return RST */
  _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* ACK for a SYN-ACK -> tcp-rcv-process. */
  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* SYN-ACK for a SYN */
  _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_FIN, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  /* ACK for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* FIN for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* ACK or FIN-ACK to our FIN */
  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  /* FIN in reply to our FIN from the other side */
  _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* FIN confirming that the peer (app) has closed */
  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
   * incoming segment not containing a RST causes a RST to be sent in
   * response. */
  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_NONE);
#undef _
}
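
/* Illustrative note: any state/flag combination not set explicitly in
 * tcp_dispatch_table_init () keeps the defaults from its init loops,
 * next = TCP_INPUT_NEXT_DROP and error = TCP_ERROR_DISPATCH, which
 * tcp_input_dispatch_buffer () reports with a clib_warning. */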
static clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  if ((error = vlib_call_init_function (vm, tcp_init)))
    return error;

  /* Initialize dispatch table. */
  tcp_dispatch_table_init (tm);

  return error;
}

VLIB_INIT_FUNCTION (tcp_input_init);

#endif /* CLIB_MARCH_VARIANT */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */