/*
 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/sparse_vec.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>
static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};
/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next                                  \
  _ (DROP4, "ip4-drop")                                         \
  _ (DROP6, "ip6-drop")                                         \
  _ (TCP4_OUTPUT, "tcp4-output")                                \
  _ (TCP6_OUTPUT, "tcp6-output")
typedef enum _tcp_established_next
{
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;

typedef enum _tcp_rcv_process_next
{
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;

typedef enum _tcp_syn_sent_next
{
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_SYN_SENT_N_NEXT,
} tcp_syn_sent_next_t;

typedef enum _tcp_listen_next
{
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_LISTEN_N_NEXT,
} tcp_listen_next_t;

/* Generic, state independent indices */
typedef enum _tcp_state_next
{
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next
#undef _
    TCP_STATE_N_NEXT,
} tcp_state_next_t;
#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT          \
                                        : TCP_NEXT_TCP6_OUTPUT)

#define tcp_next_drop(is_ip4)   (is_ip4 ? TCP_NEXT_DROP4                \
                                        : TCP_NEXT_DROP6)
/**
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 *      Length  Window
 *      ------- -------  -------------------------------------------
 *      0       0       SEG.SEQ = RCV.NXT
 *      0       >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *      >0      0       Not acceptable
 *      >0      >0      RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                      or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately amounts to checking whether the segment falls within the
 * window. The one important difference compared to RFC793 is that we use
 * rcv_las, i.e., the rcv_nxt at the last ack sent, instead of rcv_nxt, since
 * that is the peer's reference when computing our receive window.
 *
 * This:
 *  seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * use rcv_nxt in the right edge window test instead of rcv_las.
 */
static u8
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
{
  return (seq_geq (end_seq, tc->rcv_las)
          && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
}
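
/*
 * Illustrative note (added; not part of the original source): sequence
 * numbers are modulo 2^32, so the test above relies on the seq_* helpers,
 * which compare through signed wrap-around arithmetic, roughly
 * seq_geq (a, b) := ((i32) ((a) - (b)) >= 0). For example, with
 * tc->rcv_las = 0xffffff00 and end_seq = 0x00000040 the window has wrapped
 * past zero; seq_geq (0x00000040, 0xffffff00) evaluates (i32) 0x140 >= 0
 * and accepts the segment, whereas a plain unsigned comparison would
 * reject it.
 */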
/**
 * Parse TCP header options.
 *
 * @param th TCP header
 * @param to TCP options data structure to be populated
 * @param is_syn set if packet is syn
 * @return -1 if parsing failed
 */
static int
tcp_options_parse (tcp_header_t * th, tcp_options_t * to, u8 is_syn)
{
  const u8 *data;
  u8 opt_len, opts_len, kind;
  int j;
  sack_block_t b;

  opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
  data = (const u8 *) (th + 1);

  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
                | TCP_OPTS_FLAG_TSTAMP | TCP_OPTS_FLAG_MSS);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
    {
      kind = data[0];

      /* Get options length */
      if (kind == TCP_OPTION_EOL)
        break;
      else if (kind == TCP_OPTION_NOOP)
        {
          opt_len = 1;
          continue;
        }
      else
        {
          /* broken options */
          if (opts_len < 2)
            return -1;
          opt_len = data[1];
          /* weird option length */
          if (opt_len < 2 || opt_len > opts_len)
            return -1;
        }

      /* Parse options */
      switch (kind)
        {
        case TCP_OPTION_MSS:
          if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_MSS;
              to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
            }
          break;
        case TCP_OPTION_WINDOW_SCALE:
          if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
            {
              to->flags |= TCP_OPTS_FLAG_WSCALE;
              to->wscale = data[2];
              if (to->wscale > TCP_MAX_WND_SCALE)
                to->wscale = TCP_MAX_WND_SCALE;
            }
          break;
        case TCP_OPTION_TIMESTAMP:
          if (is_syn)
            to->flags |= TCP_OPTS_FLAG_TSTAMP;
          if ((to->flags & TCP_OPTS_FLAG_TSTAMP)
              && opt_len == TCP_OPTION_LEN_TIMESTAMP)
            {
              to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
              to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
            }
          break;
        case TCP_OPTION_SACK_PERMITTED:
          if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
            to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
          break;
        case TCP_OPTION_SACK_BLOCK:
          /* If SACK permitted was not advertised or a SYN, break */
          if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
            break;
          /* If too short or not correctly formatted, break */
          if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
            break;

          to->flags |= TCP_OPTS_FLAG_SACK;
          to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
          vec_reset_length (to->sacks);
          for (j = 0; j < to->n_sack_blocks; j++)
            {
              b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
              b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
              vec_add1 (to->sacks, b);
            }
          break;
        default:
          /* Nothing to see here */
          continue;
        }
    }
  return 0;
}
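
/*
 * Worked example (added for exposition; not from the original source): an
 * MSS option occupies four bytes on the wire, kind = 2, length = 4, then
 * the value in network byte order. For a SYN carrying the bytes
 * 02 04 05 b4, the loop above reads kind = 2 and opt_len = 4, and stores
 * mss = 0x05b4 = 1460 in host byte order.
 */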
/**
 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have
 * timestamp to echo and it's less than tsval_recent, drop segment
 * but still send an ACK in order to retain TCP's mechanism for detecting
 * and recovering from half-open connections.
 *
 * Or at least that's what the theory says. It seems that this might not work
 * very well with packet reordering and fast retransmit. XXX
 */
static int
tcp_segment_check_paws (tcp_connection_t * tc)
{
  return tcp_opts_tstamp (&tc->rcv_opts)
    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
}
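
/*
 * Example (added): if tsval_recent is 200 and a delayed segment arrives
 * echoing tsval = 100, timestamp_lt (100, 200) holds and the segment fails
 * PAWS; per the comment above it is then dropped after an ack is
 * programmed, unless it carries an RST.
 */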
/**
 * Update tsval recent
 */
static void
tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
{
  /*
   * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
   * of an incoming segment:
   *    SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
   * then the TSval from the segment is copied to TS.Recent;
   * otherwise, the TSval is ignored.
   */
  if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
      && seq_leq (tc->rcv_las, seq_end))
    {
      ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
      tc->tsval_recent = tc->rcv_opts.tsval;
      tc->tsval_recent_age = tcp_time_now_w_thread (tc->c_thread_index);
    }
}
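
/*
 * Example (added): with rcv_las = 1000, a segment covering [980, 1020)
 * satisfies seq <= rcv_las <= seq_end, so its TSval becomes the new
 * tsval_recent. A purely out-of-order segment covering [1100, 1200) leaves
 * tsval_recent untouched, even if its TSval is newer.
 */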
/**
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if segment has a wrapped sequence number (PAWS) and then
 * does the processing associated with the first four steps (ignoring security
 * and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
 */
static int
tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
                      vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
{
  /* We could get a burst of RSTs interleaved with acks */
  if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
    {
      tcp_send_reset (tc0);
      *error0 = TCP_ERROR_CONNECTION_CLOSED;
      goto error;
    }

  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
    {
      *error0 = TCP_ERROR_SEGMENT_INVALID;
      goto error;
    }

  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
    {
      *error0 = TCP_ERROR_OPTIONS;
      goto error;
    }

  if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
    {
      *error0 = TCP_ERROR_PAWS;
      TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
               vnet_buffer (b0)->tcp.seq_end);

      /* If it just so happens that a segment updates tsval_recent for a
       * segment over 24 days old, invalidate tsval_recent. */
      if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
                        tcp_time_now_w_thread (tc0->c_thread_index)))
        {
          tc0->tsval_recent = tc0->rcv_opts.tsval;
          clib_warning ("paws failed: 24-day old segment");
        }
      /* Drop after ack if not rst. Resets can fail paws check as per
       * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
       * be subjected to the PAWS check by verifying an acceptable value in
       * SEG.TSval */
      else if (!tcp_rst (th0))
        {
          tcp_program_ack (tc0);
          TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
          goto error;
        }
    }

  /* 1st: check sequence number */
  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
                               vnet_buffer (b0)->tcp.seq_end))
    {
      /* SYN/SYN-ACK retransmit */
      if (tcp_syn (th0)
          && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
        {
          tcp_options_parse (th0, &tc0->rcv_opts, 1);
          if (tc0->state == TCP_STATE_SYN_RCVD)
            {
              tcp_send_synack (tc0);
              TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
              *error0 = TCP_ERROR_SYNS_RCVD;
            }
          else
            {
              tcp_program_ack (tc0);
              TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
              *error0 = TCP_ERROR_SYN_ACKS_RCVD;
            }
          goto error;
        }

      /* If our window is 0 and the packet is in sequence, let it pass
       * through for ack processing. It should be dropped later. */
      if (tc0->rcv_wnd < tc0->snd_mss
          && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
        goto check_reset;

      /* If we entered recovery and peer did so as well, there's a chance that
       * dup acks won't be acceptable on either end because seq_end may be less
       * than rcv_las. This can happen if acks are lost in both directions. */
      if (tcp_in_recovery (tc0)
          && seq_geq (vnet_buffer (b0)->tcp.seq_number,
                      tc0->rcv_las - tc0->rcv_wnd)
          && seq_leq (vnet_buffer (b0)->tcp.seq_end,
                      tc0->rcv_nxt + tc0->rcv_wnd))
        goto check_reset;

      *error0 = TCP_ERROR_RCV_WND;

      /* If we advertised a zero rcv_wnd and the segment is in the past or the
       * next one that we expect, it is probably a window probe */
      if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
          && seq_lt (vnet_buffer (b0)->tcp.seq_end,
                     tc0->rcv_las + tc0->rcv_opts.mss))
        *error0 = TCP_ERROR_ZERO_RWND;

      tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
                                            tc0->rcv_las);

      /* If not RST, send dup ack */
      if (!tcp_rst (th0))
        {
          tcp_program_dupack (tc0);
          TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
        }
      goto error;

    check_reset:
      ;
    }

  /* 2nd: check the RST bit */
  if (PREDICT_FALSE (tcp_rst (th0)))
    {
      tcp_connection_reset (tc0);
      *error0 = TCP_ERROR_RST_RCVD;
      goto error;
    }

  /* 3rd: check security and precedence (skip) */

  /* 4th: check the SYN bit (in window) */
  if (PREDICT_FALSE (tcp_syn (th0)))
    {
      /* As per RFC5961 send challenge ack instead of reset */
      tcp_program_ack (tc0);
      *error0 = TCP_ERROR_SPURIOUS_SYN;
      goto error;
    }

  /* If segment in window, save timestamp */
  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
                        vnet_buffer (b0)->tcp.seq_end);
  return 0;

error:
  return -1;
}
always_inline int
tcp_rcv_ack_no_cc (tcp_connection_t * tc, vlib_buffer_t * b, u32 * error)
{
  /* SND.UNA =< SEG.ACK =< SND.NXT */
  if (!(seq_leq (tc->snd_una, vnet_buffer (b)->tcp.ack_number)
        && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
    {
      if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
          && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
        {
          tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
          goto acceptable;
        }
      *error = TCP_ERROR_ACK_INVALID;
      return -1;
    }

acceptable:
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
  *error = TCP_ERROR_ACK_OK;
  return 0;
}
/**
 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't. Instead, we rely on
 * better precision time measurements.
 *
 * TODO support us rtt resolution
 */
static void
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
{
  int err, diff;

  if (tc->srtt != 0)
    {
      err = mrtt - tc->srtt;

      /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
       * The increase should be bounded */
      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
    }
  else
    {
      /* First measurement. */
      tc->srtt = mrtt;
      tc->rttvar = mrtt >> 1;
    }
}
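
/*
 * Worked example (added; not from the original source): starting from
 * srtt = 100 and rttvar = 50 ticks, a measurement mrtt = 180 gives
 * err = 80, so srtt becomes 100 + (80 >> 3) = 110 and, since
 * |err| - rttvar = 30, rttvar becomes 50 + (30 >> 2) = 57. This is the
 * RFC6298 update srtt += err/8, rttvar += (|err| - rttvar)/4 in integer
 * arithmetic.
 */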
#ifndef CLIB_MARCH_VARIANT
void
tcp_update_rto (tcp_connection_t * tc)
{
  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
  tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
}
#endif /* CLIB_MARCH_VARIANT */
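
/*
 * Worked example (added): continuing the numbers above, srtt = 110 and
 * rttvar = 57 yield
 *
 *   rto = clib_min (110 + (57 << 2), TCP_RTO_MAX);   (338 ticks)
 *   rto = clib_max (rto, TCP_RTO_MIN);
 *
 * i.e., the RFC6298 rule RTO = SRTT + 4 * RTTVAR, clamped to the
 * configured bounds.
 */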
/**
 * Update RTT estimate and RTO timer
 *
 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
 * timing. Middle boxes are known to fiddle with TCP options so we
 * should give higher priority to ACK timing.
 *
 * This should be called only if previously sent bytes have been acked.
 *
 * @return 1 if valid rtt, 0 otherwise
 */
static int
tcp_update_rtt (tcp_connection_t * tc, tcp_rate_sample_t * rs, u32 ack)
{
  u32 mrtt = 0;

  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
   * RTT because they're ambiguous. */
  if (tcp_in_cong_recovery (tc))
    {
      /* Accept rtt estimates for samples that have not been retransmitted */
      if ((tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
          && !(rs->flags & TCP_BTS_IS_RXT))
        mrtt = rs->rtt_time * THZ;
      goto estimate_rtt;
    }

  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
    {
      f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
      tc->mrtt_us = tc->mrtt_us + (sample - tc->mrtt_us) * 0.125;
      mrtt = clib_max ((u32) (sample * THZ), 1);
      /* Allow measuring of a new RTT */
      tc->rtt_ts = 0;
    }
  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
   * snd_una, i.e., the left side of the send window:
   * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
    {
      u32 now = tcp_tstamp (tc);
      mrtt = clib_max (now - tc->rcv_opts.tsecr, 1);
    }

estimate_rtt:

  /* Ignore dubious measurements */
  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
    goto done;

  tcp_estimate_rtt (tc, mrtt);

done:

  /* If we got here something must've been ACKed so make sure boff is 0,
   * even if mrtt is not valid since we update the rto below */
  tc->rto_boff = 0;
  tcp_update_rto (tc);

  return 0;
}
static void
tcp_estimate_initial_rtt (tcp_connection_t * tc)
{
  u8 thread_index = vlib_num_workers () ? 1 : 0;
  int mrtt;

  if (tc->rtt_ts)
    {
      tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
      tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
      mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
      tc->rtt_ts = 0;
    }
  else
    {
      mrtt = tcp_time_now_w_thread (thread_index) - tc->rcv_opts.tsecr;
      mrtt = clib_max (mrtt, 1);
      /* Due to retransmits we don't know the initial mrtt */
      if (tc->rto_boff && mrtt > 1 * THZ)
        mrtt = 1 * THZ;
      tc->mrtt_us = (f64) mrtt * TCP_TICK;
    }

  if (mrtt > 0 && mrtt < TCP_RTT_MAX)
    tcp_estimate_rtt (tc, mrtt);
}
static u8
tcp_recovery_no_snd_space (tcp_connection_t * tc)
{
  return (tcp_in_fastrecovery (tc)
          && tcp_fastrecovery_prr_snd_space (tc) < tc->snd_mss)
    || (tcp_in_recovery (tc)
        && tcp_available_output_snd_space (tc) < tc->snd_mss);
}
/**
 * Dequeue bytes for connections that have received acks in last burst
 */
static void
tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
{
  u32 thread_index = wrk->vm->thread_index;
  u32 *pending_deq_acked;
  tcp_connection_t *tc;
  int i;

  if (!vec_len (wrk->pending_deq_acked))
    return;

  pending_deq_acked = wrk->pending_deq_acked;
  for (i = 0; i < vec_len (pending_deq_acked); i++)
    {
      tc = tcp_connection_get (pending_deq_acked[i], thread_index);
      tc->flags &= ~TCP_CONN_DEQ_PENDING;

      if (PREDICT_FALSE (!tc->burst_acked))
        continue;

      /* Dequeue the newly ACKed bytes */
      session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
      tc->burst_acked = 0;
      tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);

      if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
        {
          if (seq_leq (tc->psh_seq, tc->snd_una))
            tc->flags &= ~TCP_CONN_PSH_PENDING;
        }

      /* If everything has been acked, stop retransmit timer
       * otherwise update. */
      tcp_retransmit_timer_update (tc);

      /* Update pacer based on our new cwnd estimate */
      tcp_connection_tx_pacer_update (tc);

      /* Reset the pacer if we've been idle, i.e., no data sent or if
       * we're in recovery and snd space constrained */
      if (tc->data_segs_out == tc->prev_dsegs_out
          || tcp_recovery_no_snd_space (tc))
        transport_connection_tx_pacer_reset_bucket (&tc->connection,
                                                    wrk->vm->clib_time.
                                                    last_cpu_time);
      tc->prev_dsegs_out = tc->data_segs_out;
    }
  _vec_len (wrk->pending_deq_acked) = 0;
}
static void
tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_DEQ_PENDING))
    {
      vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
      tc->flags |= TCP_CONN_DEQ_PENDING;
    }
  tc->burst_acked += tc->bytes_acked;
}
#ifndef CLIB_MARCH_VARIANT
static u32
scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes));
  return hole - sb->holes;
}

static u32
scoreboard_hole_bytes (sack_scoreboard_hole_t * hole)
{
  return hole->end - hole->start;
}

sack_scoreboard_hole_t *
scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
{
  if (index != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, index);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->next);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, hole->prev);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_first_hole (sack_scoreboard_t * sb)
{
  if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->head);
  return 0;
}

sack_scoreboard_hole_t *
scoreboard_last_hole (sack_scoreboard_t * sb)
{
  if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX)
    return pool_elt_at_index (sb->holes, sb->tail);
  return 0;
}
static void
scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
{
  sack_scoreboard_hole_t *next, *prev;

  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
    {
      next = pool_elt_at_index (sb->holes, hole->next);
      next->prev = hole->prev;
    }
  else
    {
      sb->tail = hole->prev;
    }

  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
    {
      prev = pool_elt_at_index (sb->holes, hole->prev);
      prev->next = hole->next;
    }
  else
    {
      sb->head = hole->next;
    }

  if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
    sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;

  /* Poison the entry */
  if (CLIB_DEBUG > 0)
    clib_memset (hole, 0xfe, sizeof (*hole));

  pool_put (sb->holes, hole);
}
static sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
                        u32 start, u32 end)
{
  sack_scoreboard_hole_t *hole, *next, *prev;
  u32 hole_index;

  pool_get (sb->holes, hole);
  clib_memset (hole, 0, sizeof (*hole));

  hole->start = start;
  hole->end = end;
  hole_index = scoreboard_hole_index (sb, hole);

  prev = scoreboard_get_hole (sb, prev_index);
  if (prev)
    {
      hole->prev = prev_index;
      hole->next = prev->next;

      if ((next = scoreboard_next_hole (sb, hole)))
        next->prev = hole_index;
      else
        sb->tail = hole_index;

      prev->next = hole_index;
    }
  else
    {
      sb->head = hole_index;
      hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
      hole->next = TCP_INVALID_SACK_HOLE_INDEX;
    }

  return hole;
}
always_inline void
scoreboard_update_sacked_rxt (sack_scoreboard_t * sb, u32 start, u32 end,
                              u8 has_rxt)
{
  if (!has_rxt || seq_geq (start, sb->high_rxt))
    return;

  sb->rxt_sacked +=
    seq_lt (end, sb->high_rxt) ? (end - start) : (sb->high_rxt - start);
}
always_inline void
scoreboard_update_bytes (sack_scoreboard_t * sb, u32 ack, u32 snd_mss)
{
  sack_scoreboard_hole_t *left, *right;
  u32 sacked = 0, blks = 0, old_sacked;

  old_sacked = sb->sacked_bytes;

  sb->last_lost_bytes = 0;
  sb->lost_bytes = 0;
  sb->sacked_bytes = 0;

  right = scoreboard_last_hole (sb);
  if (!right)
    {
      sb->sacked_bytes = sb->high_sacked - ack;
      sb->last_sacked_bytes = sb->sacked_bytes
        - (old_sacked - sb->last_bytes_delivered);
      return;
    }

  if (seq_gt (sb->high_sacked, right->end))
    {
      sacked = sb->high_sacked - right->end;
      blks = 1;
    }

  while (sacked < (TCP_DUPACK_THRESHOLD - 1) * snd_mss
         && blks < TCP_DUPACK_THRESHOLD)
    {
      if (right->is_lost)
        sb->lost_bytes += scoreboard_hole_bytes (right);

      left = scoreboard_prev_hole (sb, right);
      if (!left)
        {
          ASSERT (right->start == ack || sb->is_reneging);
          sacked += right->start - ack;
          right = 0;
          break;
        }

      sacked += right->start - left->end;
      blks++;
      right = left;
    }

  /* right is first lost */
  while (right)
    {
      sb->lost_bytes += scoreboard_hole_bytes (right);
      sb->last_lost_bytes += right->is_lost ? 0 : (right->end - right->start);
      right->is_lost = 1;
      left = scoreboard_prev_hole (sb, right);
      if (!left)
        {
          ASSERT (right->start == ack || sb->is_reneging);
          sacked += right->start - ack;
          break;
        }
      sacked += right->start - left->end;
      right = left;
    }

  sb->sacked_bytes = sacked;
  sb->last_sacked_bytes = sacked - (old_sacked - sb->last_bytes_delivered);
}
/**
 * Figure out the next hole to retransmit
 *
 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
 */
sack_scoreboard_hole_t *
scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
                          sack_scoreboard_hole_t * start,
                          u8 have_unsent, u8 * can_rescue, u8 * snd_limited)
{
  sack_scoreboard_hole_t *hole = 0;

  hole = start ? start : scoreboard_first_hole (sb);
  while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
    hole = scoreboard_next_hole (sb, hole);

  /* Nothing, return */
  if (!hole)
    {
      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
      return 0;
    }

  /* Rule (1): if higher than rxt, less than high_sacked and lost */
  if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
    {
      sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
    }
  else
    {
      /* Rule (2): available unsent data */
      if (have_unsent)
        {
          sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
          return 0;
        }
      /* Rule (3): if hole not lost */
      else if (seq_lt (hole->start, sb->high_sacked))
        {
          /* And we didn't already retransmit it */
          if (seq_leq (hole->end, sb->high_rxt))
            {
              sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
              return 0;
            }
          *snd_limited = 0;
          sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
        }
      /* Rule (4): if hole beyond high_sacked */
      else
        {
          ASSERT (seq_geq (hole->start, sb->high_sacked));
          *snd_limited = 1;
          *can_rescue = 1;
          /* HighRxt MUST NOT be updated */
          return 0;
        }
    }

  if (hole && seq_lt (sb->high_rxt, hole->start))
    sb->high_rxt = hole->start;

  return hole;
}
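
/*
 * Example (added): consider holes [1000, 2000) and [3000, 4000) with
 * high_sacked = 5000 and high_rxt = 1000. The first hole starts below
 * high_sacked, so rule (1) applies if it is marked lost and it is
 * retransmitted immediately; otherwise rule (3) retransmits it once,
 * after which high_rxt advances to its start and the walk moves on. A
 * hole at or beyond high_sacked only triggers rule (4), which flags that
 * a rescue retransmit is possible without updating high_rxt.
 */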
void
scoreboard_init_rxt (sack_scoreboard_t * sb, u32 snd_una)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (sb);
  if (hole)
    {
      snd_una = seq_gt (snd_una, hole->start) ? snd_una : hole->start;
      sb->cur_rxt_hole = sb->head;
    }
  sb->high_rxt = snd_una;
  sb->rescue_rxt = snd_una - 1;
}
void
scoreboard_init (sack_scoreboard_t * sb)
{
  sb->head = TCP_INVALID_SACK_HOLE_INDEX;
  sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
}

void
scoreboard_clear (sack_scoreboard_t * sb)
{
  sack_scoreboard_hole_t *hole;
  while ((hole = scoreboard_first_hole (sb)))
    {
      scoreboard_remove_hole (sb, hole);
    }
  ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX);
  ASSERT (pool_elts (sb->holes) == 0);
  sb->sacked_bytes = 0;
  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->lost_bytes = 0;
  sb->last_lost_bytes = 0;
  sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
}
void
scoreboard_clear_reneging (sack_scoreboard_t * sb, u32 start, u32 end)
{
  sack_scoreboard_hole_t *last_hole;

  clib_warning ("sack reneging");

  scoreboard_clear (sb);
  last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
                                      start, end);
  last_hole->is_lost = 1;
  sb->tail = scoreboard_hole_index (sb, last_hole);
  sb->high_sacked = start;
  scoreboard_init_rxt (sb, start);
}
#endif /* CLIB_MARCH_VARIANT */
/**
 * Test that scoreboard is sane after recovery
 *
 * Returns 1 if scoreboard is empty or if first hole beyond
 * snd_una.
 */
static u8
tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
{
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (&tc->sack_sb);
  return (!hole || (seq_geq (hole->start, tc->snd_una)
                    && seq_lt (hole->end, tc->snd_nxt)));
}
#ifndef CLIB_MARCH_VARIANT

void
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
{
  sack_scoreboard_hole_t *hole, *next_hole;
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_block_t *blk, *rcv_sacks;
  u32 blk_index = 0, i, j;
  u8 has_rxt;

  sb->last_sacked_bytes = 0;
  sb->last_bytes_delivered = 0;
  sb->rxt_sacked = 0;

  if (!tcp_opts_sack (&tc->rcv_opts)
      && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    return;

  has_rxt = tcp_in_cong_recovery (tc);

  /* Remove invalid blocks */
  blk = tc->rcv_opts.sacks;
  while (blk < vec_end (tc->rcv_opts.sacks))
    {
      if (seq_lt (blk->start, blk->end)
          && seq_gt (blk->start, tc->snd_una)
          && seq_gt (blk->start, ack)
          && seq_lt (blk->start, tc->snd_nxt)
          && seq_leq (blk->end, tc->snd_nxt))
        {
          blk++;
          continue;
        }
      vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
    }

  /* Add block for cumulative ack */
  if (seq_gt (ack, tc->snd_una))
    {
      vec_add2 (tc->rcv_opts.sacks, blk, 1);
      blk->start = tc->snd_una;
      blk->end = ack;
    }

  if (vec_len (tc->rcv_opts.sacks) == 0)
    return;

  tcp_scoreboard_trace_add (tc, ack);

  /* Make sure blocks are ordered */
  rcv_sacks = tc->rcv_opts.sacks;
  for (i = 0; i < vec_len (rcv_sacks); i++)
    for (j = i + 1; j < vec_len (rcv_sacks); j++)
      if (seq_lt (rcv_sacks[j].start, rcv_sacks[i].start))
        {
          sack_block_t tmp = rcv_sacks[i];
          rcv_sacks[i] = rcv_sacks[j];
          rcv_sacks[j] = tmp;
        }

  if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
    {
      /* Handle reneging as a special case */
      if (PREDICT_FALSE (sb->is_reneging))
        {
          /* No holes, only sacked bytes */
          if (seq_leq (tc->snd_nxt, sb->high_sacked))
            {
              /* No progress made so return */
              if (seq_leq (ack, tc->snd_una))
                return;

              /* Update sacked bytes delivered and return */
              sb->last_bytes_delivered = ack - tc->snd_una;
              sb->sacked_bytes -= sb->last_bytes_delivered;
              sb->is_reneging = seq_lt (ack, sb->high_sacked);
              return;
            }

          /* New hole above high sacked. Add it and process normally */
          hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
                                         sb->high_sacked, tc->snd_nxt);
          sb->tail = scoreboard_hole_index (sb, hole);
        }
      /* Not reneging and no holes. Insert the first that covers all
       * outstanding bytes */
      else
        {
          hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
                                         tc->snd_una, tc->snd_nxt);
          sb->tail = scoreboard_hole_index (sb, hole);
        }
      sb->high_sacked = rcv_sacks[vec_len (rcv_sacks) - 1].end;
    }
  else
    {
      /* If we have holes but snd_nxt is beyond the last hole, update
       * last hole end or add new hole after high sacked */
      hole = scoreboard_last_hole (sb);
      if (seq_gt (tc->snd_nxt, hole->end))
        {
          if (seq_geq (hole->start, sb->high_sacked))
            {
              hole->end = tc->snd_nxt;
            }
          /* New hole after high sacked block */
          else if (seq_lt (sb->high_sacked, tc->snd_nxt))
            {
              scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
                                      tc->snd_nxt);
            }
        }

      /* Keep track of max byte sacked for when the last hole
       * is acked */
      sb->high_sacked = seq_max (rcv_sacks[vec_len (rcv_sacks) - 1].end,
                                 sb->high_sacked);
    }

  /* Walk the holes with the SACK blocks */
  hole = pool_elt_at_index (sb->holes, sb->head);

  if (PREDICT_FALSE (sb->is_reneging))
    sb->last_bytes_delivered += hole->start - tc->snd_una;

  while (hole && blk_index < vec_len (rcv_sacks))
    {
      blk = &rcv_sacks[blk_index];
      if (seq_leq (blk->start, hole->start))
        {
          /* Block covers hole. Remove hole */
          if (seq_geq (blk->end, hole->end))
            {
              next_hole = scoreboard_next_hole (sb, hole);

              /* If covered by ack, compute delivered bytes */
              if (blk->end == ack)
                {
                  u32 sacked = next_hole ? next_hole->start : sb->high_sacked;
                  if (PREDICT_FALSE (seq_lt (ack, sacked)))
                    {
                      sb->last_bytes_delivered += ack - hole->end;
                      sb->is_reneging = 1;
                    }
                  else
                    {
                      sb->last_bytes_delivered += sacked - hole->end;
                      sb->is_reneging = 0;
                    }
                }
              scoreboard_update_sacked_rxt (sb, hole->start, hole->end,
                                            has_rxt);
              scoreboard_remove_hole (sb, hole);
              hole = next_hole;
            }
          /* Partial 'head' overlap */
          else
            {
              if (seq_gt (blk->end, hole->start))
                {
                  scoreboard_update_sacked_rxt (sb, hole->start, blk->end,
                                                has_rxt);
                  hole->start = blk->end;
                }
              blk_index++;
            }
        }
      else
        {
          /* Hole must be split */
          if (seq_lt (blk->end, hole->end))
            {
              u32 hole_index = scoreboard_hole_index (sb, hole);
              next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
                                                  hole->end);
              /* Pool might've moved */
              hole = scoreboard_get_hole (sb, hole_index);
              hole->end = blk->start;

              scoreboard_update_sacked_rxt (sb, blk->start, blk->end,
                                            has_rxt);

              blk_index++;
              ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
            }
          else if (seq_lt (blk->start, hole->end))
            {
              scoreboard_update_sacked_rxt (sb, blk->start, hole->end,
                                            has_rxt);
              hole->end = blk->start;
            }
          hole = scoreboard_next_hole (sb, hole);
        }
    }

  scoreboard_update_bytes (sb, ack, tc->snd_mss);

  ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
  ASSERT (sb->sacked_bytes == 0 || tcp_in_recovery (tc)
          || sb->sacked_bytes <= tc->snd_nxt - seq_max (tc->snd_una, ack));
  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_nxt
          - seq_max (tc->snd_una, ack) || tcp_in_recovery (tc));
  ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
          || sb->is_reneging || sb->holes[sb->head].start == ack);
  ASSERT (sb->last_lost_bytes <= sb->lost_bytes);
  ASSERT ((ack - tc->snd_una) + sb->last_sacked_bytes
          - sb->last_bytes_delivered >= sb->rxt_sacked);
  ASSERT ((ack - tc->snd_una) >= tc->sack_sb.last_bytes_delivered
          || (tc->flags & TCP_CONN_FINSNT));

  TCP_EVT (TCP_EVT_CC_SCOREBOARD, tc);
}
#endif /* CLIB_MARCH_VARIANT */
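
/*
 * Worked example (added; not from the original source): with
 * snd_una = 1000, snd_nxt = 5000 and a first ack at 1000 carrying the
 * SACK block [2000, 3000], the initial hole [1000, 5000) is split into
 * [1000, 2000) and [3000, 5000), high_sacked becomes 3000 and
 * sacked_bytes 1000. A later cumulative ack of 3000 removes the
 * [1000, 2000) hole and credits the 1000 previously sacked bytes as
 * last_bytes_delivered; an ack of only 2000 at that point would instead
 * flag reneging, since a receiver still holding [2000, 3000) should have
 * acked 3000.
 */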
/**
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If successful, and new window is 'effectively' 0, activate persist
 * timer.
 */
static void
tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
{
  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
   * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
    {
      tc->snd_wnd = snd_wnd;
      tc->snd_wl1 = seq;
      tc->snd_wl2 = ack;
      TCP_EVT (TCP_EVT_SND_WND, tc);

      if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
        {
          /* Set persist timer if not set and we just got 0 wnd */
          if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
              && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
            tcp_persist_timer_set (tc);
        }
      else
        {
          tcp_persist_timer_reset (tc);
          if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
            {
              tc->rto_boff = 0;
              tcp_update_rto (tc);
            }
        }
    }
}
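
/*
 * Example (added): the SND.WL1/SND.WL2 bookkeeping prevents stale window
 * updates. If the last update came from a segment with seq = 100 and
 * ack = 500 (snd_wl1 = 100, snd_wl2 = 500), a reordered older segment
 * with seq = 90 fails seq_lt (snd_wl1, seq) and its stale window field is
 * ignored, while a newer segment with seq = 100 and ack = 600 is accepted
 * through the second clause.
 */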
/**
 * Init loss recovery/fast recovery.
 *
 * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
 * updated in @ref tcp_cc_handle_event after fast retransmit
 */
static void
tcp_cc_init_congestion (tcp_connection_t * tc)
{
  tcp_fastrecovery_on (tc);
  tc->snd_congestion = tc->snd_nxt;
  tc->cwnd_acc_bytes = 0;
  tc->snd_rxt_bytes = 0;
  tc->rxt_delivered = 0;
  tc->prr_delivered = 0;
  tc->prr_start = tc->snd_una;
  tc->prev_ssthresh = tc->ssthresh;
  tc->prev_cwnd = tc->cwnd;

  tc->snd_rxt_ts = tcp_tstamp (tc);
  tcp_cc_congestion (tc);

  /* Post retransmit update cwnd to ssthresh and account for the
   * three segments that have left the network and should've been
   * buffered at the receiver XXX */
  if (!tcp_opts_sack_permitted (&tc->rcv_opts))
    tc->cwnd += 3 * tc->snd_mss;

  tc->fr_occurences += 1;
  TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
}
static void
tcp_cc_congestion_undo (tcp_connection_t * tc)
{
  tc->cwnd = tc->prev_cwnd;
  tc->ssthresh = tc->prev_ssthresh;
  tcp_cc_undo_recovery (tc);
  ASSERT (tc->rto_boff == 0);
  TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
}

static inline u8
tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
{
  return (tcp_in_recovery (tc) && tc->rto_boff == 1
          && tc->snd_rxt_ts
          && tcp_opts_tstamp (&tc->rcv_opts)
          && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
}

static inline u8
tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
{
  return (tcp_cc_is_spurious_timeout_rxt (tc));
}

static inline u8
tcp_should_fastrecover_sack (tcp_connection_t * tc)
{
  return (tc->sack_sb.lost_bytes
          || ((TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
              < tc->sack_sb.sacked_bytes));
}
static inline u8
tcp_should_fastrecover (tcp_connection_t * tc, u8 has_sack)
{
  /*
   * If either of the two conditions below holds, reset dupacks because
   * we're probably after timeout (RFC6582 heuristics).
   * If cumulative ack does not cover more than congestion threshold,
   * and:
   * 1) The following doesn't hold: The congestion window is greater
   *    than SMSS bytes and the difference between highest_ack
   *    and prev_highest_ack is at most 4*SMSS bytes
   * 2) Echoed timestamp in the last non-dup ack does not equal the
   *    stored timestamp
   */
  if (seq_leq (tc->snd_una, tc->snd_congestion)
      && ((!(tc->cwnd > tc->snd_mss
             && tc->bytes_acked <= 4 * tc->snd_mss))
          || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
    {
      tc->rcv_dupacks = 0;
      return 0;
    }

  return ((tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
          || tcp_should_fastrecover_sack (tc));
}
static int
tcp_cc_recover (tcp_connection_t * tc)
{
  sack_scoreboard_hole_t *hole;
  u8 is_spurious = 0;

  ASSERT (tcp_in_cong_recovery (tc));

  if (tcp_cc_is_spurious_retransmit (tc))
    {
      tcp_cc_congestion_undo (tc);
      is_spurious = 1;
    }

  tc->rcv_dupacks = 0;
  tc->prr_delivered = 0;
  tc->rxt_delivered = 0;
  tc->snd_rxt_bytes = 0;
  tc->flags &= ~TCP_CONN_RXT_PENDING;

  tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );

  /* Previous recovery left us congested. Continue sending as part
   * of the current recovery event with an updated snd_congestion */
  if (tc->sack_sb.sacked_bytes)
    {
      tc->snd_congestion = tc->snd_nxt;
      tc->snd_rxt_ts = tcp_tstamp (tc);
      tc->prr_start = tc->snd_una;
      scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);
      tcp_program_retransmit (tc);
      return is_spurious;
    }

  hole = scoreboard_first_hole (&tc->sack_sb);
  if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
    scoreboard_clear (&tc->sack_sb);

  if (!tcp_in_recovery (tc) && !is_spurious)
    tcp_cc_recovered (tc);

  tcp_fastrecovery_off (tc);
  tcp_fastrecovery_first_off (tc);
  tcp_recovery_off (tc);
  TCP_EVT (TCP_EVT_CC_EVT, tc, 3);

  ASSERT (tc->rto_boff == 0);
  ASSERT (!tcp_in_cong_recovery (tc));
  ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));

  return is_spurious;
}
static void
tcp_cc_update (tcp_connection_t * tc, tcp_rate_sample_t * rs)
{
  ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));

  /* Congestion avoidance */
  tcp_cc_rcv_ack (tc, rs);

  /* If a cumulative ack, make sure dupacks is 0 */
  tc->rcv_dupacks = 0;

  /* When dupacks hits the threshold we only enter fast retransmit if
   * cumulative ack covers more than snd_congestion. Should snd_una
   * wrap this test may fail under otherwise valid circumstances.
   * Therefore, proactively update snd_congestion when wrap detected. */
  if (PREDICT_FALSE
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;
}
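
/*
 * Note (added): snd_congestion normally trails snd_una once recovery
 * completes. If snd_una advances about 2^31 past a stale snd_congestion,
 * the signed wrap-around comparisons invert and the old value suddenly
 * compares as "in the future": greater than snd_una yet not greater than
 * the pre-ack snd_una. The test above detects exactly that inversion and
 * pulls snd_congestion back to snd_una - 1.
 */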
/**
 * One function to rule them all ... and in the darkness bind them
 */
static void
tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
                     u32 is_dack)
{
  u8 has_sack = tcp_opts_sack_permitted (&tc->rcv_opts);

  /*
   * If not in recovery, figure out if we should enter
   */
  if (!tcp_in_cong_recovery (tc))
    {
      tc->rcv_dupacks++;
      TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
      tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);

      if (tcp_should_fastrecover (tc, has_sack))
        {
          tcp_cc_init_congestion (tc);

          if (has_sack)
            scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);

          tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
          tcp_program_retransmit (tc);
        }

      return;
    }

  /*
   * Already in recovery. See if we can exit and stop retransmitting
   */
  if (seq_geq (tc->snd_una, tc->snd_congestion))
    {
      /* If spurious return, we've already updated everything */
      if (tcp_cc_recover (tc))
        {
          tc->tsecr_last_ack = tc->rcv_opts.tsecr;
          return;
        }

      /* Treat as congestion avoidance ack */
      tcp_cc_rcv_ack (tc, rs);
      return;
    }

  /*
   * Process (re)transmit feedback. Output path uses this to decide how much
   * more data to release into the network
   */
  if (has_sack)
    {
      tc->rxt_delivered += tc->sack_sb.rxt_sacked;
      tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
        - tc->sack_sb.last_bytes_delivered;

      tcp_program_retransmit (tc);
    }
  else
    {
      if (is_dack)
        {
          tc->rcv_dupacks += 1;
          TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
        }
      tc->rxt_delivered = clib_max (tc->rxt_delivered + tc->bytes_acked,
                                    tc->snd_rxt_bytes);
      if (is_dack)
        tc->prr_delivered += 1;
      else
        tc->prr_delivered += tc->bytes_acked - tc->snd_mss * tc->rcv_dupacks;

      /* If partial ack, assume that the first un-acked segment was lost */
      if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
        tcp_fastrecovery_first_on (tc);

      tcp_program_retransmit (tc);
    }

  /*
   * Notify cc of the event
   */
  if (!tc->bytes_acked)
    {
      tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
      return;
    }

  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
   * reset dupacks to 0. Also needed if in congestion recovery */
  tc->rcv_dupacks = 0;

  if (tcp_in_recovery (tc))
    tcp_cc_rcv_ack (tc, rs);
  else
    tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
}
/**
 * Check if duplicate ack as per RFC5681 Sec. 2
 */
static u8
tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
                   u32 prev_snd_una)
{
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
          && seq_gt (tc->snd_nxt, tc->snd_una)
          && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
          && (prev_snd_wnd == tc->snd_wnd));
}
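
/*
 * Note (added): per RFC5681, all four conditions above must hold for a
 * "duplicate" ack: the ack number repeats snd_una, there is outstanding
 * unacknowledged data, the segment carries no payload (seq_end equals
 * seq_number), and the advertised window is unchanged. An empty ack that
 * only updates the window is a window update, not a dupack.
 */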
/**
 * Checks if ack is a congestion control event.
 */
static u8
tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
                     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
{
  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
   * defined to be 'duplicate' as well */
  *is_dack = tc->sack_sb.last_sacked_bytes
    || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);

  /* If reneging, wait for timer based retransmits */
  if (PREDICT_FALSE (tcp_is_lost_fin (tc) || tc->sack_sb.is_reneging))
    return 0;

  return (*is_dack || tcp_in_cong_recovery (tc));
}
/**
 * Process incoming ACK
 */
static int
tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
             tcp_header_t * th, u32 * error)
{
  u32 prev_snd_wnd, prev_snd_una;
  tcp_rate_sample_t rs = { 0 };
  u8 is_dack;

  TCP_EVT (TCP_EVT_CC_STAT, tc);

  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
    {
      /* We've probably entered recovery and the peer still has some
       * of the data we've sent. Update snd_nxt and accept the ack */
      if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
          && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
        {
          tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
          goto process_ack;
        }

      tc->errors.above_ack_wnd += 1;
      *error = TCP_ERROR_ACK_FUTURE;
      TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
      return -1;
    }

  /* If old ACK, probably it's an old dupack */
  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
    {
      tc->errors.below_ack_wnd += 1;
      *error = TCP_ERROR_ACK_OLD;
      TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
      if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
        tcp_cc_handle_event (tc, 0, 1);
      /* Don't drop yet */
      return 0;
    }

process_ack:

  /*
   * Looks okay, process feedback
   */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);

  prev_snd_wnd = tc->snd_wnd;
  prev_snd_una = tc->snd_una;
  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
                      vnet_buffer (b)->tcp.ack_number,
                      clib_net_to_host_u16 (th->window) << tc->snd_wscale);
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number;
  tcp_validate_txf_size (tc, tc->bytes_acked);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_sample_delivery_rate (tc, &rs);

  tcp_program_dequeue (wrk, tc);

  if (tc->bytes_acked)
    tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);

  TCP_EVT (TCP_EVT_ACK_RCVD, tc);

  /*
   * Check if we have congestion event
   */
  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
    {
      tcp_cc_handle_event (tc, &rs, is_dack);
      tc->dupacks_in += is_dack;
      if (!tcp_in_cong_recovery (tc))
        {
          *error = TCP_ERROR_ACK_OK;
          return 0;
        }
      *error = TCP_ERROR_ACK_DUP;
      if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
        return 0;
      return -1;
    }

  /*
   * Update congestion control (slow start/congestion avoidance)
   */
  tcp_cc_update (tc, &rs);
  *error = TCP_ERROR_ACK_OK;
  return 0;
}
static void
tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  if (!tcp_disconnect_pending (tc))
    {
      vec_add1 (wrk->pending_disconnects, tc->c_c_index);
      tcp_disconnect_pending_on (tc);
    }
}

static void
tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
{
  u32 thread_index, *pending_disconnects;
  tcp_connection_t *tc;
  int i;

  if (!vec_len (wrk->pending_disconnects))
    return;

  thread_index = wrk->vm->thread_index;
  pending_disconnects = wrk->pending_disconnects;
  for (i = 0; i < vec_len (pending_disconnects); i++)
    {
      tc = tcp_connection_get (pending_disconnects[i], thread_index);
      tcp_disconnect_pending_off (tc);
      session_transport_closing_notify (&tc->connection);
    }
  _vec_len (wrk->pending_disconnects) = 0;
}
static void
tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
             u32 * error)
{
  /* Reject out-of-order fins */
  if (vnet_buffer (b)->tcp.seq_end != tc->rcv_nxt)
    return;

  /* Account for the FIN and send ack */
  tc->rcv_nxt += 1;
  tc->flags |= TCP_CONN_FINRCVD;
  tcp_program_ack (tc);
  /* Enter CLOSE-WAIT and notify session. To avoid lingering
   * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
  tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
  tcp_program_disconnect (wrk, tc);
  tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
  TCP_EVT (TCP_EVT_FIN_RCVD, tc);
  *error = TCP_ERROR_FIN_RCVD;
}
#ifndef CLIB_MARCH_VARIANT
static u8
tcp_sack_vector_is_sane (sack_block_t * sacks)
{
  int i;
  for (i = 1; i < vec_len (sacks); i++)
    {
      if (sacks[i - 1].end == sacks[i].start)
        return 0;
    }
  return 1;
}
/**
 * Build SACK list as per RFC2018.
 *
 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the ones most recently reported in SACK
 * blocks.
 *
 * @param tc TCP connection for which the SACK list is updated
 * @param start Start sequence number of the newest SACK block
 * @param end End sequence of the newest SACK block
 */
void
tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
{
  sack_block_t *new_list = tc->snd_sacks_fl, *block = 0;
  int i;

  /* If the first segment is ooo add it to the list. Last write might've moved
   * rcv_nxt over the first segment. */
  if (seq_lt (tc->rcv_nxt, start))
    {
      vec_add2 (new_list, block, 1);
      block->start = start;
      block->end = end;
    }

  /* Find the blocks still worth keeping. */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    {
      /* Discard if rcv_nxt advanced beyond current block */
      if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
        continue;

      /* Merge or drop if segment overlapped by the new segment */
      if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
                    && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
        {
          if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
            new_list[0].start = tc->snd_sacks[i].start;
          if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
            new_list[0].end = tc->snd_sacks[i].end;
          continue;
        }

      /* Save to new SACK list if we have space. */
      if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
        vec_add1 (new_list, tc->snd_sacks[i]);
    }

  ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);

  /* Replace old vector with new one */
  vec_reset_length (tc->snd_sacks);
  tc->snd_sacks_fl = tc->snd_sacks;
  tc->snd_sacks = new_list;

  /* Segments should not 'touch' */
  ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
}
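
/*
 * Worked example (added; not from the original source): with
 * rcv_nxt = 1000 and an existing list [[1100, 1200]], an out-of-order
 * segment [1200, 1300) first becomes the head block; the old [1100, 1200]
 * block then overlaps it and is merged, yielding [[1100, 1300]] with the
 * newest data reported first, as RFC2018 requires.
 */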
u32
tcp_sack_list_bytes (tcp_connection_t * tc)
{
  u32 bytes = 0, i;
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
    bytes += tc->snd_sacks[i].end - tc->snd_sacks[i].start;
  return bytes;
}
#endif /* CLIB_MARCH_VARIANT */
/** Enqueue data for delivery to application */
static int
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
                          u16 data_len)
{
  int written, error = TCP_ERROR_ENQUEUED;

  ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));

  written = session_enqueue_stream_connection (&tc->connection, b, 0,
                                               1 /* queue event */ , 1);
  tc->bytes_in += written;

  TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);

  /* Update rcv_nxt */
  if (PREDICT_TRUE (written == data_len))
    {
      tc->rcv_nxt += written;
    }
  /* If more data written than expected, account for out-of-order bytes. */
  else if (written > data_len)
    {
      tc->rcv_nxt += written;
      TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
    }
  else if (written > 0)
    {
      /* We've written something but FIFO is probably full now */
      tc->rcv_nxt += written;
      error = TCP_ERROR_PARTIALLY_ENQUEUED;
    }
  else
    {
      return TCP_ERROR_FIFO_FULL;
    }

  /* Update SACK list if need be */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      /* Remove SACK blocks that have been delivered */
      tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
    }

  return error;
}
/** Enqueue out-of-order data */
static int
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
                         u16 data_len)
{
  session_t *s0;
  int rv, offset;

  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
  ASSERT (data_len);

  /* Enqueue out-of-order data with relative offset */
  rv = session_enqueue_stream_connection (&tc->connection, b,
                                          vnet_buffer (b)->tcp.seq_number -
                                          tc->rcv_nxt, 0 /* queue event */ ,
                                          0);

  /* Nothing written */
  if (rv)
    {
      TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
      return TCP_ERROR_FIFO_FULL;
    }

  TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
  tc->bytes_in += data_len;

  /* Update SACK list if in use */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      ooo_segment_t *newest;
      u32 start, end;

      s0 = session_get (tc->c_s_index, tc->c_thread_index);

      /* Get the newest segment from the fifo */
      newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
      if (newest)
        {
          offset = ooo_segment_offset_prod (s0->rx_fifo, newest);
          ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
          start = tc->rcv_nxt + offset;
          end = start + ooo_segment_length (s0->rx_fifo, newest);
          tcp_update_sack_list (tc, start, end);
          svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
          TCP_EVT (TCP_EVT_CC_SACKS, tc);
        }
    }

  return TCP_ERROR_ENQUEUED_OOO;
}
/**
 * Check if ACK could be delayed. If ack can be delayed, it should return
 * true for a full frame. If we're always acking return 0.
 */
static int
tcp_can_delack (tcp_connection_t * tc)
{
  /* Send ack if ... */
  if (TCP_ALWAYS_ACK
      /* just sent a rcv wnd 0
         || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0 */
      /* constrained to send ack */
      || (tc->flags & TCP_CONN_SNDACK) != 0
      /* we're almost out of tx wnd */
      || tcp_available_cc_snd_space (tc) < 4 * tc->snd_mss)
    return 0;

  return 1;
}
static int
tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
{
  u32 discard, first = b->current_length;
  vlib_main_t *vm = vlib_get_main ();

  /* Handle multi-buffer segments */
  if (n_bytes_to_drop > b->current_length)
    {
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        return -1;
      do
        {
          discard = clib_min (n_bytes_to_drop, b->current_length);
          vlib_buffer_advance (b, discard);
          b = vlib_get_buffer (vm, b->next_buffer);
          n_bytes_to_drop -= discard;
        }
      while (n_bytes_to_drop);
      if (n_bytes_to_drop > first)
        b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
    }
  else
    vlib_buffer_advance (b, n_bytes_to_drop);
  vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
  return 0;
}
/**
 * Receive buffer for connection and handle acks
 *
 * It handles both in order and out-of-order data.
 */
static int
tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                 vlib_buffer_t * b)
{
  u32 error, n_bytes_to_drop, n_data_bytes;

  vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
  n_data_bytes = vnet_buffer (b)->tcp.data_len;
  ASSERT (n_data_bytes);
  tc->data_segs_in += 1;

  /* Handle out-of-order data */
  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
    {
      /* Old sequence numbers allowed through because they overlapped
       * the rx window */
      if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
        {
          /* Completely in the past (possible retransmit). Ack
           * retransmissions since we may not have any data to send */
          if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
            {
              tcp_program_ack (tc);
              error = TCP_ERROR_SEGMENT_OLD;
              goto done;
            }

          /* Chop off the bytes in the past and see if what is left
           * can be enqueued in order */
          n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
          n_data_bytes -= n_bytes_to_drop;
          vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
          if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
            {
              error = TCP_ERROR_SEGMENT_OLD;
              goto done;
            }
          goto in_order;
        }

      /* RFC2581: Enqueue and send DUPACK for fast retransmit */
      error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
      tcp_program_dupack (tc);
      TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
      tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
                                           tc->rcv_las + tc->rcv_wnd);
      goto done;
    }

in_order:

  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
   * segments can be enqueued after fifo tail offset changes. */
  error = tcp_session_enqueue_data (tc, b, n_data_bytes);
  if (tcp_can_delack (tc))
    {
      if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
        tcp_timer_set (tc, TCP_TIMER_DELACK, tcp_cfg.delack_time);
      goto done;
    }

  tcp_program_ack (tc);

done:
  return error;
}
typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_rx_trace_t;

static u8 *
format_tcp_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "%U\n%U%U",
              format_tcp_header, &t->tcp_header, 128,
              format_white_space, indent,
              format_tcp_connection, &t->tcp_connection, 1);

  return s;
}

static u8 *
format_tcp_rx_trace_short (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);

  s = format (s, "%d -> %d (%U)",
              clib_net_to_host_u16 (t->tcp_header.dst_port),
              clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
              t->tcp_connection.state);

  return s;
}

static void
tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
                       tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
{
  if (tc0)
    {
      clib_memcpy_fast (&t0->tcp_connection, tc0,
                        sizeof (t0->tcp_connection));
    }
  else
    {
      th0 = tcp_buffer_hdr (b0);
    }
  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
}
static void
tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame, u8 is_ip4)
{
  u32 *from, n_left;

  n_left = frame->n_vectors;
  from = vlib_frame_vector_args (frame);

  while (n_left >= 1)
    {
      tcp_connection_t *tc0;
      tcp_rx_trace_t *t0;
      tcp_header_t *th0;
      vlib_buffer_t *b0;
      u32 bi0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);

      if (b0->flags & VLIB_BUFFER_IS_TRACED)
        {
          t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                    vm->thread_index);
          th0 = tcp_buffer_hdr (b0);
          tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
        }

      from += 1;
      n_left -= 1;
    }
}
always_inline void
tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
                        u8 is_ip4, u32 evt, u32 val)
{
  if (is_ip4)
    vlib_node_increment_counter (vm, tcp4_node, evt, val);
  else
    vlib_node_increment_counter (vm, tcp6_node, evt, val);
}

#define tcp_maybe_inc_counter(node_id, err, count)                      \
{                                                                       \
  if (next0 != tcp_next_drop (is_ip4))                                  \
    tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,            \
                            tcp6_##node_id##_node.index, is_ip4, err,   \
                            1);                                         \
}
#define tcp_inc_counter(node_id, err, count)                            \
  tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index,              \
                          tcp6_##node_id##_node.index, is_ip4,          \
                          err, count)
#define tcp_maybe_inc_err_counter(cnts, err)                            \
{                                                                       \
  cnts[err] += (next0 != tcp_next_drop (is_ip4));                       \
}
#define tcp_inc_err_counter(cnts, err, val)                             \
{                                                                       \
  cnts[err] += val;                                                     \
}
#define tcp_store_err_counters(node_id, cnts)                           \
{                                                                       \
  int i;                                                                \
  for (i = 0; i < TCP_N_ERROR; i++)                                     \
    if (cnts[i])                                                        \
      tcp_inc_counter(node_id, i, cnts[i]);                             \
}
always_inline uword
tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * frame, int is_ip4)
{
  u32 thread_index = vm->thread_index, errors = 0;
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  u32 n_left_from, *from, *first_buffer;
  u16 err_counters[TCP_N_ERROR] = { 0 };

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    tcp_established_trace_frame (vm, node, frame, is_ip4);

  first_buffer = from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_ACK_OK;
      vlib_buffer_t *b0;
      tcp_header_t *th0;
      tcp_connection_t *tc0;

      if (n_left_from > 1)
        {
          vlib_buffer_t *pb;
          pb = vlib_get_buffer (vm, from[1]);
          vlib_prefetch_buffer_header (pb, LOAD);
          CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
        }

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
                                thread_index);

      if (PREDICT_FALSE (tc0 == 0))
        {
          error0 = TCP_ERROR_INVALID_CONNECTION;
          goto done;
        }

      th0 = tcp_buffer_hdr (b0);

      /* TODO header prediction fast path */

      /* 1-4: check SEQ, RST, SYN */
      if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
        {
          TCP_EVT (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
          goto done;
        }

      /* 5: check the ACK field */
      if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc0, b0, th0, &error0)))
        goto done;

      /* 6: check the URG bit TODO */

      /* 7: process the segment text */
      if (vnet_buffer (b0)->tcp.data_len)
        error0 = tcp_segment_rcv (wrk, tc0, b0);

      /* 8: check the FIN bit */
      if (PREDICT_FALSE (tcp_is_fin (th0)))
        tcp_rcv_fin (wrk, tc0, b0, &error0);

    done:
      tcp_inc_err_counter (err_counters, error0, 1);
    }

  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
                                              thread_index);
  err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
  tcp_store_err_counters (established, err_counters);
  tcp_handle_postponed_dequeues (wrk);
  tcp_handle_disconnects (wrk);
  vlib_buffer_free (vm, first_buffer, frame->n_vectors);

  return frame->n_vectors;
}
VLIB_NODE_FN (tcp4_established_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_established_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_established_node) =
{
  .name = "tcp4-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_REGISTER_NODE (tcp6_established_node) =
{
  .name = "tcp6-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
static u8
tcp_lookup_is_valid (tcp_connection_t * tc, tcp_header_t * hdr)
{
  transport_connection_t *tmp = 0;
  u64 handle;

  if (!tc)
    return 1;

  /* Proxy case */
  if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
    return 1;

  u8 is_valid = (tc->c_lcl_port == hdr->dst_port
                 && (tc->state == TCP_STATE_LISTEN
                     || tc->c_rmt_port == hdr->src_port));

  if (!is_valid)
    {
      handle = session_lookup_half_open_handle (&tc->connection);
      tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
                                                 tc->c_proto, tc->c_is_ip4);

      if (tmp)
        {
          if (tmp->lcl_port == hdr->dst_port
              && tmp->rmt_port == hdr->src_port)
            {
              TCP_DBG ("half-open is valid!");
              return 1;
            }
        }
    }
  return 0;
}
/**
 * Lookup transport connection
 */
static tcp_connection_t *
tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
                       u8 is_ip4)
{
  tcp_header_t *tcp;
  transport_connection_t *tconn;
  tcp_connection_t *tc;
  u8 is_filtered = 0;
  if (is_ip4)
    {
      ip4_header_t *ip4;
      ip4 = vlib_buffer_get_current (b);
      tcp = ip4_next_header (ip4);
      tconn = session_lookup_connection_wt4 (fib_index,
                                             &ip4->dst_address,
                                             &ip4->src_address,
                                             tcp->dst_port,
                                             tcp->src_port,
                                             TRANSPORT_PROTO_TCP,
                                             thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  else
    {
      ip6_header_t *ip6;
      ip6 = vlib_buffer_get_current (b);
      tcp = ip6_next_header (ip6);
      tconn = session_lookup_connection_wt6 (fib_index,
                                             &ip6->dst_address,
                                             &ip6->src_address,
                                             tcp->dst_port,
                                             tcp->src_port,
                                             TRANSPORT_PROTO_TCP,
                                             thread_index, &is_filtered);
      tc = tcp_get_connection_from_transport (tconn);
      ASSERT (tcp_lookup_is_valid (tc, tcp));
    }
  return tc;
}
static void
tcp_check_tx_offload (tcp_connection_t * tc, int is_ipv4)
{
  vnet_main_t *vnm = vnet_get_main ();
  const dpo_id_t *dpo;
  const load_balance_t *lb;
  vnet_hw_interface_t *hw_if;
  u32 sw_if_idx, lb_idx;

  if (is_ipv4)
    {
      ip4_address_t *dst_addr = &(tc->c_rmt_ip.ip4);
      lb_idx = ip4_fib_forwarding_lookup (tc->c_fib_index, dst_addr);
    }
  else
    {
      ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
      lb_idx = ip6_fib_table_fwding_lookup (tc->c_fib_index, dst_addr);
    }

  lb = load_balance_get (lb_idx);
  dpo = load_balance_get_bucket_i (lb, 0);

  sw_if_idx = dpo->dpoi_index;
  hw_if = vnet_get_sup_hw_interface (vnm, sw_if_idx);

  if (hw_if->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
    tc->cfg_flags |= TCP_CFG_F_TSO;
}
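
/* TSO is enabled per connection by walking the forwarding plane: the FIB
 * lookup on the peer address yields a load-balance object, the DPO in its
 * first bucket identifies the egress sw_if_index, and TSO is switched on
 * only if the backing hardware interface advertises GSO support. Only
 * bucket 0 is inspected, so multipath with mixed GSO capabilities is not
 * accounted for here. */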
always_inline uword
tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, *from, *first_buffer, errors = 0;
  u32 my_thread_index = vm->thread_index;
  tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);

  from = first_buffer = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
      tcp_connection_t *tc0, *new_tc0;
      tcp_header_t *tcp0 = 0;
      tcp_rx_trace_t *t0;
      vlib_buffer_t *b0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      tc0 =
	tcp_half_open_connection_get (vnet_buffer (b0)->tcp.connection_index);
      if (PREDICT_FALSE (tc0 == 0))
	{
	  error0 = TCP_ERROR_INVALID_CONNECTION;
	  goto drop;
	}

      /* Half-open completed recently but the connection wasn't removed
       * yet by the owning thread */
      if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
	{
	  /* Make sure the connection actually exists */
	  ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
					 my_thread_index, is_ip4));
	  error0 = TCP_ERROR_SPURIOUS_SYN_ACK;
	  goto drop;
	}

      ack0 = vnet_buffer (b0)->tcp.ack_number;
      seq0 = vnet_buffer (b0)->tcp.seq_number;
      tcp0 = tcp_buffer_hdr (b0);

      /* Crude check to see if the connection handle does not match
       * the packet. Probably the connection just switched to established */
      if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
			 || tcp0->src_port != tc0->c_rmt_port))
	{
	  error0 = TCP_ERROR_INVALID_CONNECTION;
	  goto drop;
	}

      if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0)
			 && !tcp_syn (tcp0)))
	{
	  error0 = TCP_ERROR_SEGMENT_INVALID;
	  goto drop;
	}
      /* SYNs consume sequence numbers */
      vnet_buffer (b0)->tcp.seq_end += tcp_is_syn (tcp0);

      /*
       *  1. check the ACK bit
       */

      /*
       *   If the ACK bit is set
       *     If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
       *     the RST bit is set, if so drop the segment and return)
       *       <SEQ=SEG.ACK><CTL=RST>
       *     and discard the segment. Return.
       *     If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
       */
      if (tcp_ack (tcp0))
	{
	  if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
	    {
	      if (!tcp_rst (tcp0))
		tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
	      error0 = TCP_ERROR_RCV_WND;
	      goto drop;
	    }

	  /* Make sure ACK is valid */
	  if (seq_gt (tc0->snd_una, ack0))
	    {
	      error0 = TCP_ERROR_ACK_INVALID;
	      goto drop;
	    }
	}
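
      /* For illustration: with iss = 1000 and a single SYN in flight
       * (snd_nxt = 1001), only ack0 == 1001 passes both tests above; any
       * ack0 <= 1000 or ack0 > 1001 takes the reset/drop path. The
       * seq_leq/seq_gt helpers compare in serial-number arithmetic, so
       * 32-bit sequence wraparound is handled. */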
      /*
       * 2. check the RST bit
       */
      if (tcp_rst (tcp0))
	{
	  /* If ACK is acceptable, signal client that peer is not
	   * willing to accept connection and drop connection*/
	  if (tcp_ack (tcp0))
	    tcp_connection_reset (tc0);
	  error0 = TCP_ERROR_RST_RCVD;
	  goto drop;
	}

      /*
       * 3. check the security and precedence (skipped)
       */

      /*
       * 4. check the SYN bit
       */

      /* No SYN flag. Drop. */
      if (!tcp_syn (tcp0))
	{
	  error0 = TCP_ERROR_SEGMENT_INVALID;
	  goto drop;
	}

      /* Parse options */
      if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
	{
	  error0 = TCP_ERROR_OPTIONS;
	  goto drop;
	}

      /* Valid SYN or SYN-ACK. Move connection from half-open pool to
       * current thread pool. */
      new_tc0 = tcp_connection_alloc_w_base (my_thread_index, tc0);
      new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
      new_tc0->irs = seq0;
      new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
      new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

      /* If this is not the owning thread, wait for syn retransmit to
       * expire and cleanup then */
      if (tcp_half_open_connection_cleanup (tc0))
	tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
      if (tcp_opts_tstamp (&new_tc0->rcv_opts))
	{
	  new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
	  new_tc0->tsval_recent_age = tcp_time_now ();
	}

      if (tcp_opts_wscale (&new_tc0->rcv_opts))
	new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
      else
	new_tc0->rcv_wscale = 0;

      new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
	<< new_tc0->snd_wscale;
      new_tc0->snd_wl1 = seq0;
      new_tc0->snd_wl2 = ack0;
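
      /* Window scaling applies only if the peer advertised the option on
       * its SYN/SYN-ACK (RFC 7323). For example, a raw window field of
       * 0xffff with snd_wscale 7 yields an effective send window of
       * 65535 << 7, roughly 8 MB; without the option the window is capped
       * at 64 KB. */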
      tcp_connection_init_vars (new_tc0);

      /* SYN-ACK: See if we can switch to ESTABLISHED state */
      if (PREDICT_TRUE (tcp_ack (tcp0)))
	{
	  /* Our SYN is ACKed: we have iss < ack = snd_una */

	  /* TODO Dequeue acknowledged segments if we support Fast Open */
	  new_tc0->snd_una = ack0;
	  new_tc0->state = TCP_STATE_ESTABLISHED;

	  /* Make sure las is initialized for the wnd computation */
	  new_tc0->rcv_las = new_tc0->rcv_nxt;

	  /* Notify app that we have connection. If session layer can't
	   * allocate session send reset */
	  if (session_stream_connect_notify (&new_tc0->connection, 0))
	    {
	      tcp_send_reset_w_pkt (new_tc0, b0, my_thread_index, is_ip4);
	      tcp_connection_cleanup (new_tc0);
	      error0 = TCP_ERROR_CREATE_SESSION_FAIL;
	      goto drop;
	    }

	  new_tc0->tx_fifo_size =
	    transport_tx_fifo_size (&new_tc0->connection);
	  /* Update rtt with the syn-ack sample */
	  tcp_estimate_initial_rtt (new_tc0);
	  TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
	  error0 = TCP_ERROR_SYN_ACKS_RCVD;
	}
      /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
      else
	{
	  new_tc0->state = TCP_STATE_SYN_RCVD;

	  /* Notify app that we have connection */
	  if (session_stream_connect_notify (&new_tc0->connection, 0))
	    {
	      tcp_connection_cleanup (new_tc0);
	      tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
	      TCP_EVT (TCP_EVT_RST_SENT, tc0);
	      error0 = TCP_ERROR_CREATE_SESSION_FAIL;
	      goto drop;
	    }

	  new_tc0->tx_fifo_size =
	    transport_tx_fifo_size (&new_tc0->connection);
	  new_tc0->rtt_ts = 0;
	  tcp_init_snd_vars (new_tc0);
	  tcp_send_synack (new_tc0);
	  error0 = TCP_ERROR_SYNS_RCVD;
	  goto drop;
	}

      if (!(new_tc0->cfg_flags & TCP_CFG_F_NO_TSO))
	tcp_check_tx_offload (new_tc0, is_ip4);

      /* Read data, if any */
      if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
	{
	  clib_warning ("rcvd data in syn-sent");
	  error0 = tcp_segment_rcv (wrk, new_tc0, b0);
	  if (error0 == TCP_ERROR_ACK_OK)
	    error0 = TCP_ERROR_SYN_ACKS_RCVD;
	}
      else
	{
	  /* Send ack now instead of programming it because connection was
	   * just established and it's not optional. */
	  tcp_send_ack (new_tc0);
	}
    drop:

      tcp_inc_counter (syn_sent, error0, 1);
      if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
	{
	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	  clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
	  clib_memcpy_fast (&t0->tcp_connection, tc0,
			    sizeof (t0->tcp_connection));
	}
    }

  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
					      my_thread_index);
  tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (tcp4_syn_sent_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_syn_sent_node) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * from_frame)
{
  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
{
  .name = "tcp4-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
{
  .name = "tcp6-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/**
 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
 * as per RFC793 p. 64
 */
always_inline uword
tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			  vlib_frame_t * from_frame, int is_ip4)
{
  u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  u32 n_left_from, *from, max_dequeue;

  from = first_buffer = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0, error0 = TCP_ERROR_NONE;
      tcp_header_t *tcp0 = 0;
      tcp_connection_t *tc0;
      vlib_buffer_t *b0;
      u8 is_fin0;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
				thread_index);
      if (PREDICT_FALSE (tc0 == 0))
	{
	  error0 = TCP_ERROR_INVALID_CONNECTION;
	  goto drop;
	}

      tcp0 = tcp_buffer_hdr (b0);
      is_fin0 = tcp_is_fin (tcp0);

      if (CLIB_DEBUG)
	{
	  tcp_connection_t *tmp;
	  tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
				       is_ip4);
	  if (tmp->state != tc0->state)
	    {
	      if (tc0->state != TCP_STATE_CLOSED)
		clib_warning ("state changed");
	      goto drop;
	    }
	}

      /*
       * Special treatment for CLOSED
       */
      if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
	{
	  error0 = TCP_ERROR_CONNECTION_CLOSED;
	  goto drop;
	}

      /*
       * For all other states (except LISTEN)
       */

      /* 1-4: check SEQ, RST, SYN */
      if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, tcp0, &error0)))
	goto drop;
      /* 5: check the ACK field */
      switch (tc0->state)
	{
	case TCP_STATE_SYN_RCVD:

	  /* Make sure the segment is exactly right */
	  if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
	    {
	      tcp_connection_reset (tc0);
	      error0 = TCP_ERROR_SEGMENT_INVALID;
	      goto drop;
	    }

	  /*
	   * If the segment acknowledgment is not acceptable, form a
	   * reset segment,
	   *  <SEQ=SEG.ACK><CTL=RST>
	   * and send it.
	   */
	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
	    {
	      tcp_connection_reset (tc0);
	      goto drop;
	    }

	  /* Update rtt and rto */
	  tcp_estimate_initial_rtt (tc0);
	  tcp_connection_tx_pacer_update (tc0);

	  /* Switch state to ESTABLISHED */
	  tc0->state = TCP_STATE_ESTABLISHED;
	  TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);

	  if (!(tc0->cfg_flags & TCP_CFG_F_NO_TSO))
	    tcp_check_tx_offload (tc0, is_ip4);

	  /* Initialize session variables */
	  tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
	  tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
	    << tc0->rcv_opts.wscale;
	  tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
	  tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

	  /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
	  tcp_retransmit_timer_reset (tc0);
	  if (session_stream_accept_notify (&tc0->connection))
	    {
	      error0 = TCP_ERROR_MSG_QUEUE_FULL;
	      tcp_connection_reset (tc0);
	      goto drop;
	    }
	  error0 = TCP_ERROR_ACK_OK;
	  break;
	case TCP_STATE_ESTABLISHED:
	  /* We can get packets in established state here because they
	   * were enqueued before state change */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;

	  break;
	case TCP_STATE_FIN_WAIT_1:
	  /* In addition to the processing for the ESTABLISHED state, if
	   * our FIN is now acknowledged then enter FIN-WAIT-2 and
	   * continue processing in that state. */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;

	  /* Still have to send the FIN */
	  if (tc0->flags & TCP_CONN_FINPNDG)
	    {
	      /* TX fifo finally drained */
	      max_dequeue = transport_max_tx_dequeue (&tc0->connection);
	      if (max_dequeue <= tc0->burst_acked)
		tcp_send_fin (tc0);
	      /* If a fin was received and data was acked extend wait */
	      else if ((tc0->flags & TCP_CONN_FINRCVD) && tc0->bytes_acked)
		tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
				  tcp_cfg.closewait_time);
	    }
	  /* If FIN is ACKed */
	  else if (tc0->snd_una == tc0->snd_nxt)
	    {
	      /* Stop all retransmit timers because we have nothing more
	       * to send. */
	      tcp_connection_timers_reset (tc0);

	      /* We already have a FIN but didn't transition to CLOSING
	       * because of outstanding tx data. Close the connection. */
	      if (tc0->flags & TCP_CONN_FINRCVD)
		{
		  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
		  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE,
				 tcp_cfg.cleanup_time);
		  session_transport_closed_notify (&tc0->connection);
		  goto drop;
		}

	      tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);
	      /* Enable waitclose because we're willing to wait for peer's
	       * FIN but not indefinitely. */
	      tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.finwait2_time);

	      /* Don't try to deq the FIN acked */
	      if (tc0->burst_acked > 1)
		session_tx_fifo_dequeue_drop (&tc0->connection,
					      tc0->burst_acked - 1);
	      tc0->burst_acked = 0;
	    }
	  break;
	case TCP_STATE_FIN_WAIT_2:
	  /* In addition to the processing for the ESTABLISHED state, if
	   * the retransmission queue is empty, the user's CLOSE can be
	   * acknowledged ("ok") but do not delete the TCB. */
	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
	    goto drop;
	  tc0->burst_acked = 0;
	  break;
	case TCP_STATE_CLOSE_WAIT:
	  /* Do the same processing as for the ESTABLISHED state. */
	  if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
	    goto drop;

	  if (!(tc0->flags & TCP_CONN_FINPNDG))
	    break;

	  /* Still have outstanding tx data */
	  max_dequeue = transport_max_tx_dequeue (&tc0->connection);
	  if (max_dequeue > tc0->burst_acked)
	    break;

	  tcp_send_fin (tc0);
	  tcp_connection_timers_reset (tc0);
	  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
	  break;
	case TCP_STATE_CLOSING:
	  /* In addition to the processing for the ESTABLISHED state, if
	   * the ACK acknowledges our FIN then enter the TIME-WAIT state,
	   * otherwise ignore the segment. */
	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
	    goto drop;

	  if (tc0->snd_una != tc0->snd_nxt)
	    goto drop;

	  tcp_connection_timers_reset (tc0);
	  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
	  session_transport_closed_notify (&tc0->connection);
	  goto drop;

	  break;
	case TCP_STATE_LAST_ACK:
	  /* The only thing that [should] arrive in this state is an
	   * acknowledgment of our FIN. If our FIN is now acknowledged,
	   * delete the TCB, enter the CLOSED state, and return. */

	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
	    goto drop;

	  /* Apparently our ACK for the peer's FIN was lost */
	  if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
	    {
	      tcp_send_fin (tc0);
	      goto drop;
	    }

	  tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
	  session_transport_closed_notify (&tc0->connection);

	  /* Don't free the connection from the data path since
	   * we can't ensure that we have no packets already enqueued
	   * to output. Rely instead on the waitclose timer */
	  tcp_connection_timers_reset (tc0);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);

	  goto drop;

	  break;
	case TCP_STATE_TIME_WAIT:
	  /* The only thing that can arrive in this state is a
	   * retransmission of the remote FIN. Acknowledge it, and restart
	   * the 2 MSL timeout. */

	  if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
	    goto drop;

	  if (!is_fin0)
	    goto drop;

	  tcp_program_ack (tc0);
	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
	  goto drop;

	  break;
	default:
	  ASSERT (0);
	}
      /* 6: check the URG bit TODO */

      /* 7: process the segment text */
      switch (tc0->state)
	{
	case TCP_STATE_ESTABLISHED:
	case TCP_STATE_FIN_WAIT_1:
	case TCP_STATE_FIN_WAIT_2:
	  if (vnet_buffer (b0)->tcp.data_len)
	    error0 = tcp_segment_rcv (wrk, tc0, b0);
	  break;
	case TCP_STATE_CLOSE_WAIT:
	case TCP_STATE_CLOSING:
	case TCP_STATE_LAST_ACK:
	case TCP_STATE_TIME_WAIT:
	  /* This should not occur, since a FIN has been received from the
	   * remote side. Ignore the segment text. */
	  break;
	}
      /* 8: check the FIN bit */
      if (!is_fin0)
	goto drop;

      TCP_EVT (TCP_EVT_FIN_RCVD, tc0);

      switch (tc0->state)
	{
	case TCP_STATE_ESTABLISHED:
	  /* Account for the FIN and send ack */
	  tc0->rcv_nxt += 1;
	  tcp_program_ack (tc0);
	  tcp_connection_set_state (tc0, TCP_STATE_CLOSE_WAIT);
	  tcp_program_disconnect (wrk, tc0);
	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
	  break;
	case TCP_STATE_SYN_RCVD:
	  /* Send FIN-ACK, enter LAST-ACK and because the app was not
	   * notified yet, set a cleanup timer instead of relying on
	   * disconnect notify and the implicit close call. */
	  tcp_connection_timers_reset (tc0);
	  tc0->rcv_nxt += 1;
	  tcp_send_fin (tc0);
	  tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
	  break;
	case TCP_STATE_CLOSE_WAIT:
	case TCP_STATE_CLOSING:
	case TCP_STATE_LAST_ACK:
	  /* move along .. */
	  break;
	case TCP_STATE_FIN_WAIT_1:
	  tc0->rcv_nxt += 1;

	  if (tc0->flags & TCP_CONN_FINPNDG)
	    {
	      /* If data is outstanding, stay in FIN_WAIT_1 and try to finish
	       * sending it. Since we already received a fin, do not wait
	       * for too long. */
	      tc0->flags |= TCP_CONN_FINRCVD;
	      tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
				tcp_cfg.closewait_time);
	    }
	  else
	    {
	      tcp_connection_set_state (tc0, TCP_STATE_CLOSING);
	      tcp_program_ack (tc0);
	      /* Wait for ACK for our FIN but not forever */
	      tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
				tcp_cfg.closing_time);
	    }
	  break;
	case TCP_STATE_FIN_WAIT_2:
	  /* Got FIN, send ACK! Be more aggressive with resource cleanup */
	  tc0->rcv_nxt += 1;
	  tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
	  tcp_connection_timers_reset (tc0);
	  tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
	  tcp_program_ack (tc0);
	  session_transport_closed_notify (&tc0->connection);
	  break;
	case TCP_STATE_TIME_WAIT:
	  /* Remain in the TIME-WAIT state. Restart the time-wait
	   * timeout. */
	  tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
	  break;
	}
      error0 = TCP_ERROR_FIN_RCVD;
    drop:

      tcp_inc_counter (rcv_process, error0, 1);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	  tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
	}
    }

  errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
					      thread_index);
  tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
  tcp_handle_postponed_dequeues (wrk);
  tcp_handle_disconnects (wrk);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
VLIB_NODE_FN (tcp4_rcv_process_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_rcv_process_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
{
  .name = "tcp4-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
{
  .name = "tcp6-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
/**
 * LISTEN state processing as per RFC 793 p. 65
 */
always_inline uword
tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, *from, n_syns = 0, *first_buffer;
  u32 my_thread_index = vm->thread_index;

  from = first_buffer = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 bi0;
      vlib_buffer_t *b0;
      tcp_rx_trace_t *t0;
      tcp_header_t *th0 = 0;
      tcp_connection_t *lc0;
      ip4_header_t *ip40;
      ip6_header_t *ip60;
      tcp_connection_t *child0;
      u32 error0 = TCP_ERROR_NONE;

      bi0 = from[0];
      from += 1;
      n_left_from -= 1;

      b0 = vlib_get_buffer (vm, bi0);
      lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);

      if (is_ip4)
	{
	  ip40 = vlib_buffer_get_current (b0);
	  th0 = ip4_next_header (ip40);
	}
      else
	{
	  ip60 = vlib_buffer_get_current (b0);
	  th0 = ip6_next_header (ip60);
	}

      /* Create child session. For syn-flood protection use filter */

      /* 1. first check for an RST: handled in dispatch */
      /* if (tcp_rst (th0))
         goto drop;
       */

      /* 2. second check for an ACK: handled in dispatch */
      /* if (tcp_ack (th0))
         {
           tcp_send_reset (b0, is_ip4);
           goto drop;
         }
       */

      /* 3. check for a SYN (did that already) */

      /* Make sure connection wasn't just created */
      child0 = tcp_lookup_connection (lc0->c_fib_index, b0, my_thread_index,
				      is_ip4);
      if (PREDICT_FALSE (child0->state != TCP_STATE_LISTEN))
	{
	  error0 = TCP_ERROR_CREATE_EXISTS;
	  goto drop;
	}

      /* Create child session and send SYN-ACK */
      child0 = tcp_connection_alloc (my_thread_index);
      child0->c_lcl_port = th0->dst_port;
      child0->c_rmt_port = th0->src_port;
      child0->c_is_ip4 = is_ip4;
      child0->state = TCP_STATE_SYN_RCVD;
      child0->c_fib_index = lc0->c_fib_index;
      child0->cc_algo = lc0->cc_algo;

      if (is_ip4)
	{
	  child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
	  child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
	}
      else
	{
	  clib_memcpy_fast (&child0->c_lcl_ip6, &ip60->dst_address,
			    sizeof (ip6_address_t));
	  clib_memcpy_fast (&child0->c_rmt_ip6, &ip60->src_address,
			    sizeof (ip6_address_t));
	}

      if (tcp_options_parse (th0, &child0->rcv_opts, 1))
	{
	  error0 = TCP_ERROR_OPTIONS;
	  tcp_connection_free (child0);
	  goto drop;
	}

      child0->irs = vnet_buffer (b0)->tcp.seq_number;
      child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
      child0->rcv_las = child0->rcv_nxt;
      child0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];

      /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
       * segments are used to initialize PAWS. */
      if (tcp_opts_tstamp (&child0->rcv_opts))
	{
	  child0->tsval_recent = child0->rcv_opts.tsval;
	  child0->tsval_recent_age = tcp_time_now ();
	}

      if (tcp_opts_wscale (&child0->rcv_opts))
	child0->snd_wscale = child0->rcv_opts.wscale;

      child0->snd_wnd = clib_net_to_host_u16 (th0->window)
	<< child0->snd_wscale;
      child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
      child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

      tcp_connection_init_vars (child0);
      child0->rto = TCP_RTO_MIN;

      if (session_stream_accept (&child0->connection, lc0->c_s_index,
				 lc0->c_thread_index, 0 /* notify */ ))
	{
	  tcp_connection_cleanup (child0);
	  error0 = TCP_ERROR_CREATE_SESSION_FAIL;
	  goto drop;
	}

      TCP_EVT (TCP_EVT_SYN_RCVD, child0, 1);
      child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
      tcp_send_synack (child0);

    drop:

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	  clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
	  clib_memcpy_fast (&t0->tcp_connection, lc0,
			    sizeof (t0->tcp_connection));
	}

      n_syns += (error0 == TCP_ERROR_NONE);
    }

  tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
  vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);

  return from_frame->n_vectors;
}
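
/* Passive open in short: the dispatch table only sends SYNs to this node,
 * so after guarding against a duplicate child, the 4-tuple and options are
 * copied into a freshly allocated connection, irs/rcv_nxt/snd_wnd are
 * seeded from the SYN, and a SYN-ACK is sent. The child starts in SYN-RCVD
 * and the handshake completes in tcp4/6-rcv-process when the peer's ACK
 * arrives. */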
VLIB_NODE_FN (tcp4_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_listen_node) =
{
  .name = "tcp4-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_REGISTER_NODE (tcp6_listen_node) =
{
  .name = "tcp6-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};
typedef enum _tcp_input_next
{
  TCP_INPUT_NEXT_DROP,
  TCP_INPUT_NEXT_LISTEN,
  TCP_INPUT_NEXT_RCV_PROCESS,
  TCP_INPUT_NEXT_SYN_SENT,
  TCP_INPUT_NEXT_ESTABLISHED,
  TCP_INPUT_NEXT_RESET,
  TCP_INPUT_NEXT_PUNT,
  TCP_INPUT_N_NEXT
} tcp_input_next_t;

#define foreach_tcp4_input_next                 \
  _ (DROP, "ip4-drop")                          \
  _ (LISTEN, "tcp4-listen")                     \
  _ (RCV_PROCESS, "tcp4-rcv-process")           \
  _ (SYN_SENT, "tcp4-syn-sent")                 \
  _ (ESTABLISHED, "tcp4-established")           \
  _ (RESET, "tcp4-reset")                       \
  _ (PUNT, "ip4-punt")

#define foreach_tcp6_input_next                 \
  _ (DROP, "ip6-drop")                          \
  _ (LISTEN, "tcp6-listen")                     \
  _ (RCV_PROCESS, "tcp6-rcv-process")           \
  _ (SYN_SENT, "tcp6-syn-sent")                 \
  _ (ESTABLISHED, "tcp6-established")           \
  _ (RESET, "tcp6-reset")                       \
  _ (PUNT, "ip6-punt")
#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
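
/* Only the SYN, ACK, RST and FIN flags are significant for dispatch; the
 * header flags are masked down to these bits and, together with the
 * connection state, index the dispatch table. E.g. a pure ACK on an
 * ESTABLISHED connection resolves to
 * dispatch_table[TCP_STATE_ESTABLISHED][TCP_FLAG_ACK], i.e. next node
 * tcp4/6-established with TCP_ERROR_NONE. */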
static void
tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
		       vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
{
  tcp_connection_t *tc;
  tcp_header_t *tcp;
  tcp_rx_trace_t *t;
  int i;

  for (i = 0; i < n_bufs; i++)
    {
      if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
	{
	  t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
	  tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
				   vm->thread_index);
	  tcp = vlib_buffer_get_current (bs[i]);
	  tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
	}
    }
}
static inline void
tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
{
  if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
    {
      *next = TCP_INPUT_NEXT_DROP;
    }
  else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
    {
      *next = TCP_INPUT_NEXT_PUNT;
      *error = TCP_ERROR_PUNT;
    }
  else
    {
      *next = TCP_INPUT_NEXT_RESET;
      *error = TCP_ERROR_NO_LISTENER;
    }
}
always_inline tcp_connection_t *
tcp_input_lookup_buffer (vlib_buffer_t * b, u8 thread_index, u32 * error,
			 u8 is_ip4, u8 is_nolookup)
{
  u32 fib_index = vnet_buffer (b)->ip.fib_index;
  int n_advance_bytes, n_data_bytes;
  transport_connection_t *tc;
  tcp_header_t *tcp;
  u8 result = 0;

  if (is_ip4)
    {
      ip4_header_t *ip4 = vlib_buffer_get_current (b);
      int ip_hdr_bytes = ip4_header_bytes (ip4);
      if (PREDICT_FALSE (b->current_length < ip_hdr_bytes + sizeof (*tcp)))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}
      tcp = ip4_next_header (ip4);
      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
      n_advance_bytes = (ip_hdr_bytes + tcp_header_bytes (tcp));
      n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;

      /* Length check. Checksum computed by ipx_local; no need to compute
       * it again */
      if (PREDICT_FALSE (n_data_bytes < 0))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}

      if (!is_nolookup)
	tc = session_lookup_connection_wt4 (fib_index, &ip4->dst_address,
					    &ip4->src_address, tcp->dst_port,
					    tcp->src_port,
					    TRANSPORT_PROTO_TCP, thread_index,
					    &result);
    }
  else
    {
      ip6_header_t *ip6 = vlib_buffer_get_current (b);
      if (PREDICT_FALSE (b->current_length < sizeof (*ip6) + sizeof (*tcp)))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}
      tcp = ip6_next_header (ip6);
      vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
      n_advance_bytes = tcp_header_bytes (tcp);
      n_data_bytes = clib_net_to_host_u16 (ip6->payload_length)
	- n_advance_bytes;
      n_advance_bytes += sizeof (ip6[0]);

      if (PREDICT_FALSE (n_data_bytes < 0))
	{
	  *error = TCP_ERROR_LENGTH;
	  return 0;
	}

      if (PREDICT_FALSE
	  (ip6_address_is_link_local_unicast (&ip6->dst_address)))
	{
	  ip4_main_t *im = &ip4_main;
	  fib_index = vec_elt (im->fib_index_by_sw_if_index,
			       vnet_buffer (b)->sw_if_index[VLIB_RX]);
	}

      if (!is_nolookup)
	tc = session_lookup_connection_wt6 (fib_index, &ip6->dst_address,
					    &ip6->src_address,
					    tcp->dst_port, tcp->src_port,
					    TRANSPORT_PROTO_TCP,
					    thread_index, &result);
    }

  if (is_nolookup)
    tc =
      (transport_connection_t *) tcp_connection_get (vnet_buffer (b)->
						     tcp.connection_index,
						     thread_index);

  vnet_buffer (b)->tcp.seq_number = clib_net_to_host_u32 (tcp->seq_number);
  vnet_buffer (b)->tcp.ack_number = clib_net_to_host_u32 (tcp->ack_number);
  vnet_buffer (b)->tcp.data_offset = n_advance_bytes;
  vnet_buffer (b)->tcp.data_len = n_data_bytes;
  vnet_buffer (b)->tcp.seq_end = vnet_buffer (b)->tcp.seq_number
    + n_data_bytes;
  vnet_buffer (b)->tcp.flags = 0;

  *error = result ? TCP_ERROR_NONE + result : *error;

  return tcp_get_connection_from_transport (tc);
}
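
/* Sizing example for the checks above (IPv4, no options): ip4->length of
 * 60 with a 20-byte IP header and a 20-byte TCP header gives
 * n_advance_bytes = 40 and n_data_bytes = 20, which become the buffer's
 * tcp.data_offset and tcp.data_len. A segment shorter than its headers
 * makes n_data_bytes negative and is rejected with TCP_ERROR_LENGTH before
 * any connection lookup. */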
static inline void
tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
			   vlib_buffer_t * b, u16 * next, u32 * error)
{
  tcp_header_t *tcp;
  u8 flags;

  tcp = tcp_buffer_hdr (b);
  flags = tcp->flags & filter_flags;
  *next = tm->dispatch_table[tc->state][flags].next;
  *error = tm->dispatch_table[tc->state][flags].error;

  if (PREDICT_FALSE (*error == TCP_ERROR_DISPATCH
		     || *next == TCP_INPUT_NEXT_RESET))
    {
      /* Overload tcp flags to store state */
      tcp_state_t state = tc->state;
      vnet_buffer (b)->tcp.flags = tc->state;

      if (*error == TCP_ERROR_DISPATCH)
	clib_warning ("tcp conn %u disp error state %U flags %U",
		      tc->c_c_index, format_tcp_state, state,
		      format_tcp_flags, (int) flags);
    }
}
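
/* The dispatch table reduces per-state flag handling to one lookup:
 * (state, masked flags) -> (next node, error). Any combination not
 * explicitly programmed in tcp_dispatch_table_init below keeps the default
 * (drop, TCP_ERROR_DISPATCH), which is what the warning above reports.
 * Stashing the state in vnet_buffer tcp.flags keeps it available to the
 * downstream reset/drop handling after this node is done with the buffer. */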
always_inline uword
tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, int is_ip4, u8 is_nolookup)
{
  u32 n_left_from, *from, thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  tcp_set_time_now (tcp_get_worker (thread_index));

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  b = bufs;
  next = nexts;
  while (n_left_from >= 4)
    {
      u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
      tcp_connection_t *tc0, *tc1;

      {
	vlib_prefetch_buffer_header (b[2], STORE);
	CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);

	vlib_prefetch_buffer_header (b[3], STORE);
	CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
      }
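
      /* While b[0] and b[1] are processed, the headers and the first two
       * cache lines of data for b[2] and b[3] are prefetched, hiding
       * memory latency one iteration ahead of this dual-buffer loop. */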
      next[0] = next[1] = TCP_INPUT_NEXT_DROP;

      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
				     is_nolookup);
      tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4,
				     is_nolookup);

      if (PREDICT_TRUE (!tc0 + !tc1 == 0))
	{
	  ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
	  ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));

	  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
	  vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;

	  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
	  tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
	}
      else
	{
	  if (PREDICT_TRUE (tc0 != 0))
	    {
	      ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
	      vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
	      tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
	    }
	  else
	    tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);

	  if (PREDICT_TRUE (tc1 != 0))
	    {
	      ASSERT (tcp_lookup_is_valid (tc1, tcp_buffer_hdr (b[1])));
	      vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
	      tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], &error1);
	    }
	  else
	    tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
	}

      b += 2;
      next += 2;
      n_left_from -= 2;
    }
  while (n_left_from > 0)
    {
      tcp_connection_t *tc0;
      u32 error0 = TCP_ERROR_NO_LISTENER;

      if (n_left_from > 1)
	{
	  vlib_prefetch_buffer_header (b[1], STORE);
	  CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	}

      next[0] = TCP_INPUT_NEXT_DROP;
      tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
				     is_nolookup);
      if (PREDICT_TRUE (tc0 != 0))
	{
	  ASSERT (tcp_lookup_is_valid (tc0, tcp_buffer_hdr (b[0])));
	  vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
	  tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], &error0);
	}
      else
	tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (tcp4_input_nolookup_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
			     1 /* is_nolookup */ );
}

VLIB_NODE_FN (tcp6_input_nolookup_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
			     1 /* is_nolookup */ );
}
VLIB_REGISTER_NODE (tcp4_input_nolookup_node) =
{
  .name = "tcp4-input-nolookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_REGISTER_NODE (tcp6_input_nolookup_node) =
{
  .name = "tcp6-input-nolookup",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
VLIB_NODE_FN (tcp4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
			     0 /* is_nolookup */ );
}

VLIB_NODE_FN (tcp6_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
			     0 /* is_nolookup */ );
}
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  .name = "tcp4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_REGISTER_NODE (tcp6_input_node) =
{
  .name = "tcp6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};
#ifndef CLIB_MARCH_VARIANT
static void
tcp_dispatch_table_init (tcp_main_t * tm)
{
  int i, j;
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
	tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
	tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)						\
do {								\
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);		\
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);		\
} while (0)
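
  /* Each _(t,f,n,e) entry below expands to the two assignments above. E.g.
   * _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE) programs
   * dispatch_table[TCP_STATE_LISTEN][TCP_FLAG_SYN] so a SYN on a listener
   * is handed to tcp4/6-listen; combinations left untouched keep the
   * (drop, TCP_ERROR_DISPATCH) defaults from the loop above. */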
  /* RFC 793: In LISTEN if RST drop and if ACK return RST */
  _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_ACK_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_INVALID_CONNECTION);
  _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_SEGMENT_INVALID);
  _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* ACK for a SYN-ACK -> tcp-rcv-process. */
  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* SYN-ACK for a SYN */
  _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_FIN, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  /* ACK for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* FIN for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  /* ACK or FIN-ACK to our FIN */
  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  /* FIN in reply to our FIN from the other side */
  _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* FIN confirming that the peer (app) has closed */
  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
    TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
   * incoming segment not containing a RST causes a RST to be sent in
   * response. */
  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
    TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
    TCP_ERROR_NONE);
#undef _
}
static clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  if ((error = vlib_call_init_function (vm, tcp_init)))
    return error;

  /* Initialize dispatch table. */
  tcp_dispatch_table_init (tm);

  return error;
}

VLIB_INIT_FUNCTION (tcp_input_init);

#endif /* CLIB_MARCH_VARIANT */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */