1 /*
2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
16 #include <vppinfra/sparse_vec.h>
17 #include <vnet/fib/ip4_fib.h>
18 #include <vnet/fib/ip6_fib.h>
19 #include <vnet/tcp/tcp_packet.h>
20 #include <vnet/tcp/tcp.h>
21 #include <vnet/session/session.h>
24 static char *tcp_error_strings[] = {
25 #define tcp_error(n,s) s,
26 #include <vnet/tcp/tcp_error.def>
30 /* All TCP nodes have the same outgoing arcs */
31 #define foreach_tcp_state_next \
32 _ (DROP4, "ip4-drop") \
33 _ (DROP6, "ip6-drop") \
34 _ (TCP4_OUTPUT, "tcp4-output") \
35 _ (TCP6_OUTPUT, "tcp6-output")
37 typedef enum _tcp_established_next
39 #define _(s,n) TCP_ESTABLISHED_NEXT_##s,
40 foreach_tcp_state_next
42 TCP_ESTABLISHED_N_NEXT,
43 } tcp_established_next_t;
45 typedef enum _tcp_rcv_process_next
47 #define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
48 foreach_tcp_state_next
50 TCP_RCV_PROCESS_N_NEXT,
51 } tcp_rcv_process_next_t;
53 typedef enum _tcp_syn_sent_next
55 #define _(s,n) TCP_SYN_SENT_NEXT_##s,
56 foreach_tcp_state_next
59 } tcp_syn_sent_next_t;
61 typedef enum _tcp_listen_next
63 #define _(s,n) TCP_LISTEN_NEXT_##s,
64 foreach_tcp_state_next
69 /* Generic, state independent indices */
70 typedef enum _tcp_state_next
72 #define _(s,n) TCP_NEXT_##s,
73 foreach_tcp_state_next
78 #define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \
79 : TCP_NEXT_TCP6_OUTPUT)
81 #define tcp_next_drop(is_ip4) (is_ip4 ? TCP_NEXT_DROP4 \
82 : TCP_NEXT_DROP6)
85 * Validate segment sequence number. As per RFC793:
87 * Segment          Receive  Test
88 * Length           Window
89 * -------          -------  -------------------------------------------
90 * 0                0        SEG.SEQ = RCV.NXT
91 * 0                >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
92 * >0               0        Not acceptable
93 * >0               >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
94 *                           or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
95 *
96 * This ultimately consists of checking whether the segment falls within the window.
97 * The one important difference compared to RFC793 is that we use rcv_las,
98 * or the rcv_nxt at last ack sent instead of rcv_nxt since that's the
99 * peer's reference when computing our receive window.
102 * seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
103 * however, is too strict when we have retransmits. Instead we just check that
104 * the seq is not beyond the right edge and that the end of the segment is not
105 * less than the left edge.
107 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
108 * use rcv_nxt in the right edge window test instead of rcv_las.
112 tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
114 return (seq_geq (end_seq, tc->rcv_las)
115 && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
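/* For illustration (values assumed): with rcv_las = rcv_nxt = 1000 and
 * rcv_wnd = 5000, the acceptable range is roughly [1000, 6000). A segment
 * with seq = 2000, end_seq = 3000 passes both tests above; one entirely in
 * the past (seq = 200, end_seq = 900) fails the left-edge test, and one
 * starting beyond the right edge (seq = 7000) fails the right-edge test. */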
119 * Parse TCP header options.
121 * @param th TCP header
122 * @param to TCP options data structure to be populated
123 * @param is_syn set if packet is syn
124 * @return -1 if parsing failed
127 tcp_options_parse (tcp_header_t * th, tcp_options_t * to, u8 is_syn)
130 u8 opt_len, opts_len, kind;
134 opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
135 data = (const u8 *) (th + 1);
137 /* Zero out all flags but those set in SYN */
138 to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE
139 | TCP_OPTS_FLAG_TSTAMP | TCP_OPTS_FLAG_MSS);
141 for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
145 /* Get options length */
146 if (kind == TCP_OPTION_EOL)
148 else if (kind == TCP_OPTION_NOOP)
160 /* weird option length */
161 if (opt_len < 2 || opt_len > opts_len)
171 if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
173 to->flags |= TCP_OPTS_FLAG_MSS;
174 to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
177 case TCP_OPTION_WINDOW_SCALE:
180 if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
182 to->flags |= TCP_OPTS_FLAG_WSCALE;
183 to->wscale = data[2];
184 if (to->wscale > TCP_MAX_WND_SCALE)
185 to->wscale = TCP_MAX_WND_SCALE;
188 case TCP_OPTION_TIMESTAMP:
190 to->flags |= TCP_OPTS_FLAG_TSTAMP;
191 if ((to->flags & TCP_OPTS_FLAG_TSTAMP)
192 && opt_len == TCP_OPTION_LEN_TIMESTAMP)
194 to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
195 to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
198 case TCP_OPTION_SACK_PERMITTED:
201 if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
202 to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
204 case TCP_OPTION_SACK_BLOCK:
205 /* If SACK permitted was not advertised or a SYN, break */
206 if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
209 /* If too short or not correctly formatted, break */
210 if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
213 to->flags |= TCP_OPTS_FLAG_SACK;
214 to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
215 vec_reset_length (to->sacks);
216 for (j = 0; j < to->n_sack_blocks; j++)
218 b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 8 * j));
219 b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 8 * j));
220 vec_add1 (to->sacks, b);
224 /* Nothing to see here */
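/* For illustration (values assumed): a SYN carrying MSS 1460, window scale 7
 * and SACK-permitted would leave the parsed options roughly as
 *   to->flags = TCP_OPTS_FLAG_MSS | TCP_OPTS_FLAG_WSCALE
 *               | TCP_OPTS_FLAG_SACK_PERMITTED;
 *   to->mss = 1460;
 *   to->wscale = 7;
 * On non-SYN segments the MSS, window scale and SACK-permitted branches are
 * skipped, so only timestamps and SACK blocks can update the options. */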
232 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have
233 * timestamp to echo and it's less than tsval_recent, drop segment
234 * but still send an ACK in order to retain TCP's mechanism for detecting
235 * and recovering from half-open connections
237 * Or at least that's what the theory says. It seems that this might not work
238 * very well with packet reordering and fast retransmit. XXX
241 tcp_segment_check_paws (tcp_connection_t * tc)
243 return tcp_opts_tstamp (&tc->rcv_opts)
244 && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
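/* For illustration (values assumed): if tsval_recent is 5000 and a segment
 * arrives echoing tsval 4990, the check above reports a PAWS failure and the
 * segment is dropped after an ack is programmed; tsval 5007 passes.
 * timestamp_lt() uses wrap-safe (serial number) arithmetic, so the comparison
 * stays valid when the 32-bit timestamp clock wraps. */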
248 * Update tsval recent
251 tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
254 * RFC1323: If Last.ACK.sent falls within the range of sequence numbers
255 * of an incoming segment:
256 * SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
257 * then the TSval from the segment is copied to TS.Recent;
258 * otherwise, the TSval is ignored.
260 if (tcp_opts_tstamp (&tc->rcv_opts) && seq_leq (seq, tc->rcv_las)
261 && seq_leq (tc->rcv_las, seq_end))
263 ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval));
264 tc->tsval_recent = tc->rcv_opts.tsval;
265 tc->tsval_recent_age = tcp_time_now_w_thread (tc->c_thread_index);
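/* For illustration (values assumed): with rcv_las = 1000, a segment covering
 * [900, 1100) includes the last ack sent, so its tsval becomes the new
 * tsval_recent; a segment covering [1200, 1300) does not include rcv_las and
 * leaves tsval_recent untouched, as required by RFC 1323/7323. */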
270 tcp_handle_rst (tcp_connection_t * tc)
272 switch (tc->rst_state)
274 case TCP_STATE_SYN_RCVD:
275 /* Cleanup everything. App wasn't notified yet */
276 session_transport_delete_notify (&tc->connection);
277 tcp_connection_cleanup (tc);
279 case TCP_STATE_SYN_SENT:
280 session_stream_connect_notify (&tc->connection, 1 /* fail */ );
281 tcp_connection_cleanup (tc);
283 case TCP_STATE_ESTABLISHED:
284 session_transport_reset_notify (&tc->connection);
285 session_transport_closed_notify (&tc->connection);
287 case TCP_STATE_CLOSE_WAIT:
288 case TCP_STATE_FIN_WAIT_1:
289 case TCP_STATE_FIN_WAIT_2:
290 case TCP_STATE_CLOSING:
291 case TCP_STATE_LAST_ACK:
292 session_transport_closed_notify (&tc->connection);
294 case TCP_STATE_CLOSED:
295 case TCP_STATE_TIME_WAIT:
298 TCP_DBG ("reset state: %u", tc->state);
303 tcp_program_reset_ntf (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
305 if (!tcp_disconnect_pending (tc))
307 tc->rst_state = tc->state;
308 vec_add1 (wrk->pending_resets, tc->c_c_index);
309 tcp_disconnect_pending_on (tc);
314 * Handle reset packet
316 * Programs disconnect/reset notification that should be sent
317 * later by calling @ref tcp_handle_disconnects
320 tcp_rcv_rst (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
322 TCP_EVT (TCP_EVT_RST_RCVD, tc);
325 case TCP_STATE_SYN_RCVD:
326 tcp_program_reset_ntf (wrk, tc);
327 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
329 case TCP_STATE_SYN_SENT:
330 /* Do not program ntf because the connection is half-open */
333 case TCP_STATE_ESTABLISHED:
334 tcp_connection_timers_reset (tc);
335 tcp_cong_recovery_off (tc);
336 tcp_program_reset_ntf (wrk, tc);
337 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
338 tcp_program_cleanup (wrk, tc);
340 case TCP_STATE_CLOSE_WAIT:
341 case TCP_STATE_FIN_WAIT_1:
342 case TCP_STATE_FIN_WAIT_2:
343 case TCP_STATE_CLOSING:
344 case TCP_STATE_LAST_ACK:
345 tcp_connection_timers_reset (tc);
346 tcp_cong_recovery_off (tc);
347 tcp_program_reset_ntf (wrk, tc);
348 /* Make sure we mark the session as closed. In some states we may
349 * still be trying to send data */
350 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
351 tcp_program_cleanup (wrk, tc);
353 case TCP_STATE_CLOSED:
354 case TCP_STATE_TIME_WAIT:
357 TCP_DBG ("reset state: %u", tc->state);
362 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
364 * It first verifies whether the segment has a wrapped sequence number (PAWS) and
365 * then does the processing associated with the first four steps (ignoring
366 * security and precedence): sequence number, rst bit and syn bit checks.
368 * @return 0 if the segment passes validation.
371 tcp_segment_validate (tcp_worker_ctx_t * wrk, tcp_connection_t * tc0,
372 vlib_buffer_t * b0, tcp_header_t * th0, u32 * error0)
374 /* We could get a burst of RSTs interleaved with acks */
375 if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
377 tcp_send_reset (tc0);
378 *error0 = TCP_ERROR_CONNECTION_CLOSED;
382 if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
384 *error0 = TCP_ERROR_SEGMENT_INVALID;
388 if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts, 0)))
390 *error0 = TCP_ERROR_OPTIONS;
394 if (PREDICT_FALSE (tcp_segment_check_paws (tc0)))
396 *error0 = TCP_ERROR_PAWS;
397 TCP_EVT (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
398 vnet_buffer (b0)->tcp.seq_end);
400 /* If it just so happens that a segment updates tsval_recent for a
401 * segment over 24 days old, invalidate tsval_recent. */
402 if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
403 tcp_time_now_w_thread (tc0->c_thread_index)))
405 tc0->tsval_recent = tc0->rcv_opts.tsval;
406 clib_warning ("paws failed: 24-day old segment");
408 /* Drop after ack if not rst. Resets can fail paws check as per
409 * RFC 7323 sec. 5.2: When an <RST> segment is received, it MUST NOT
410 * be subjected to the PAWS check by verifying an acceptable value in
411 * SEG.TSval */
412 else if (!tcp_rst (th0))
414 tcp_program_ack (tc0);
415 TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
420 /* 1st: check sequence number */
421 if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
422 vnet_buffer (b0)->tcp.seq_end))
424 /* SYN/SYN-ACK retransmit */
426 && vnet_buffer (b0)->tcp.seq_number == tc0->rcv_nxt - 1)
428 tcp_options_parse (th0, &tc0->rcv_opts, 1);
429 if (tc0->state == TCP_STATE_SYN_RCVD)
431 tcp_send_synack (tc0);
432 TCP_EVT (TCP_EVT_SYN_RCVD, tc0, 0);
433 *error0 = TCP_ERROR_SYNS_RCVD;
437 tcp_program_ack (tc0);
438 TCP_EVT (TCP_EVT_SYNACK_RCVD, tc0);
439 *error0 = TCP_ERROR_SYN_ACKS_RCVD;
444 /* If our window is 0 and the packet is in sequence, let it pass
445 * through for ack processing. It should be dropped later. */
446 if (tc0->rcv_wnd < tc0->snd_mss
447 && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
450 /* If we entered recovery and peer did so as well, there's a chance that
451 * dup acks won't be acceptable on either end because seq_end may be less
452 * than rcv_las. This can happen if acks are lost in both directions. */
453 if (tcp_in_recovery (tc0)
454 && seq_geq (vnet_buffer (b0)->tcp.seq_number,
455 tc0->rcv_las - tc0->rcv_wnd)
456 && seq_leq (vnet_buffer (b0)->tcp.seq_end,
457 tc0->rcv_nxt + tc0->rcv_wnd))
460 *error0 = TCP_ERROR_RCV_WND;
462 /* If we advertised a zero rcv_wnd and the segment is in the past or the
463 * next one that we expect, it is probably a window probe */
464 if ((tc0->flags & TCP_CONN_ZERO_RWND_SENT)
465 && seq_lt (vnet_buffer (b0)->tcp.seq_end,
466 tc0->rcv_las + tc0->rcv_opts.mss))
467 *error0 = TCP_ERROR_ZERO_RWND;
469 tc0->errors.below_data_wnd += seq_lt (vnet_buffer (b0)->tcp.seq_end,
472 /* If not RST, send dup ack */
475 tcp_program_dupack (tc0);
476 TCP_EVT (TCP_EVT_DUPACK_SENT, tc0, vnet_buffer (b0)->tcp);
484 /* 2nd: check the RST bit */
485 if (PREDICT_FALSE (tcp_rst (th0)))
487 tcp_rcv_rst (wrk, tc0);
488 *error0 = TCP_ERROR_RST_RCVD;
492 /* 3rd: check security and precedence (skip) */
494 /* 4th: check the SYN bit (in window) */
495 if (PREDICT_FALSE (tcp_syn (th0)))
497 /* As per RFC5961 send challenge ack instead of reset */
498 tcp_program_ack (tc0);
499 *error0 = TCP_ERROR_SPURIOUS_SYN;
503 /* If segment in window, save timestamp */
504 tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
505 vnet_buffer (b0)->tcp.seq_end);
513 tcp_rcv_ack_no_cc (tcp_connection_t * tc, vlib_buffer_t * b, u32 * error)
515 /* SND.UNA =< SEG.ACK =< SND.NXT */
516 if (!(seq_leq (tc->snd_una, vnet_buffer (b)->tcp.ack_number)
517 && seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
519 if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
520 && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
522 tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
525 *error = TCP_ERROR_ACK_INVALID;
530 tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
531 tc->snd_una = vnet_buffer (b)->tcp.ack_number;
532 *error = TCP_ERROR_ACK_OK;
537 * Compute smoothed RTT as per VJ's '88 SIGCOMM and RFC6298
539 * Note that although in the original article srtt and rttvar are scaled
540 * to minimize round-off errors, here we don't. Instead, we rely on
541 * better precision time measurements.
543 * TODO support us rtt resolution
546 tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
552 err = mrtt - tc->srtt;
554 /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
555 * The increase should be bound */
556 tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
557 diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
558 tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);
562 /* First measurement. */
564 tc->rttvar = mrtt >> 1;
568 #ifndef CLIB_MARCH_VARIANT
570 tcp_update_rto (tcp_connection_t * tc)
572 tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
573 tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
575 #endif /* CLIB_MARCH_VARIANT */
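/* Worked example (values assumed): with srtt = 100 and rttvar = 20 ticks, a
 * measurement mrtt = 140 gives err = 40, so srtt becomes 100 + (40 >> 3) = 105
 * and rttvar becomes 20 + ((40 - 20) >> 2) = 25. tcp_update_rto() then clamps
 * srtt + 4 * rttvar = 205 ticks to the [TCP_RTO_MIN, TCP_RTO_MAX] range. */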
578 * Update RTT estimate and RTO timer
580 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
581 * timing. Middle boxes are known to fiddle with TCP options so we
582 * should give higher priority to ACK timing.
584 * This should be called only if previously sent bytes have been acked.
586 * return 1 if valid rtt 0 otherwise
589 tcp_update_rtt (tcp_connection_t * tc, tcp_rate_sample_t * rs, u32 ack)
593 /* Karn's rule, part 1. Don't use retransmitted segments to estimate
594 * RTT because they're ambiguous. */
595 if (tcp_in_cong_recovery (tc))
597 /* Accept rtt estimates for samples that have not been retransmitted */
598 if ((tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
599 && !(rs->flags & TCP_BTS_IS_RXT))
601 mrtt = rs->rtt_time * THZ;
607 if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq))
609 f64 sample = tcp_time_now_us (tc->c_thread_index) - tc->rtt_ts;
610 tc->mrtt_us = tc->mrtt_us + (sample - tc->mrtt_us) * 0.125;
611 mrtt = clib_max ((u32) (sample * THZ), 1);
612 /* Allow measuring of a new RTT */
615 /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
616 * snd_una, i.e., the left side of the send window:
617 * seq_lt (tc->snd_una, ack). This is a condition for calling update_rtt */
618 else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr)
620 u32 now = tcp_tstamp (tc);
621 mrtt = clib_max (now - tc->rcv_opts.tsecr, 1);
626 /* Ignore dubious measurements */
627 if (mrtt == 0 || mrtt > TCP_RTT_MAX)
630 tcp_estimate_rtt (tc, mrtt);
634 /* If we got here something must've been ACKed so make sure boff is 0,
635 * even if mrtt is not valid, since we update the rto below */
643 tcp_estimate_initial_rtt (tcp_connection_t * tc)
645 u8 thread_index = vlib_num_workers ()? 1 : 0;
650 tc->mrtt_us = tcp_time_now_us (thread_index) - tc->rtt_ts;
651 tc->mrtt_us = clib_max (tc->mrtt_us, 0.0001);
652 mrtt = clib_max ((u32) (tc->mrtt_us * THZ), 1);
657 mrtt = tcp_time_now_w_thread (thread_index) - tc->rcv_opts.tsecr;
658 mrtt = clib_max (mrtt, 1);
659 /* Due to retransmits we don't know the initial mrtt */
660 if (tc->rto_boff && mrtt > 1 * THZ)
662 tc->mrtt_us = (f64) mrtt *TCP_TICK;
665 if (mrtt > 0 && mrtt < TCP_RTT_MAX)
666 tcp_estimate_rtt (tc, mrtt);
671 * Dequeue bytes for connections that have received acks in last burst
674 tcp_handle_postponed_dequeues (tcp_worker_ctx_t * wrk)
676 u32 thread_index = wrk->vm->thread_index;
677 u32 *pending_deq_acked;
678 tcp_connection_t *tc;
681 if (!vec_len (wrk->pending_deq_acked))
684 pending_deq_acked = wrk->pending_deq_acked;
685 for (i = 0; i < vec_len (pending_deq_acked); i++)
687 tc = tcp_connection_get (pending_deq_acked[i], thread_index);
688 tc->flags &= ~TCP_CONN_DEQ_PENDING;
690 if (PREDICT_FALSE (!tc->burst_acked))
693 /* Dequeue the newly ACKed bytes */
694 session_tx_fifo_dequeue_drop (&tc->connection, tc->burst_acked);
695 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
697 if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
699 if (seq_leq (tc->psh_seq, tc->snd_una))
700 tc->flags &= ~TCP_CONN_PSH_PENDING;
703 if (tcp_is_descheduled (tc))
706 /* If everything has been acked, stop retransmit timer
707 * otherwise update. */
708 tcp_retransmit_timer_update (tc);
710 /* Update pacer based on our new cwnd estimate */
711 tcp_connection_tx_pacer_update (tc);
715 _vec_len (wrk->pending_deq_acked) = 0;
719 tcp_program_dequeue (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
721 if (!(tc->flags & TCP_CONN_DEQ_PENDING))
723 vec_add1 (wrk->pending_deq_acked, tc->c_c_index);
724 tc->flags |= TCP_CONN_DEQ_PENDING;
726 tc->burst_acked += tc->bytes_acked;
729 #ifndef CLIB_MARCH_VARIANT
731 scoreboard_hole_index (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
733 ASSERT (!pool_is_free_index (sb->holes, hole - sb->holes));
734 return hole - sb->holes;
738 scoreboard_hole_bytes (sack_scoreboard_hole_t * hole)
740 return hole->end - hole->start;
743 sack_scoreboard_hole_t *
744 scoreboard_get_hole (sack_scoreboard_t * sb, u32 index)
746 if (index != TCP_INVALID_SACK_HOLE_INDEX)
747 return pool_elt_at_index (sb->holes, index);
751 sack_scoreboard_hole_t *
752 scoreboard_next_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
754 if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
755 return pool_elt_at_index (sb->holes, hole->next);
759 sack_scoreboard_hole_t *
760 scoreboard_prev_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
762 if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
763 return pool_elt_at_index (sb->holes, hole->prev);
767 sack_scoreboard_hole_t *
768 scoreboard_first_hole (sack_scoreboard_t * sb)
770 if (sb->head != TCP_INVALID_SACK_HOLE_INDEX)
771 return pool_elt_at_index (sb->holes, sb->head);
775 sack_scoreboard_hole_t *
776 scoreboard_last_hole (sack_scoreboard_t * sb)
778 if (sb->tail != TCP_INVALID_SACK_HOLE_INDEX)
779 return pool_elt_at_index (sb->holes, sb->tail);
784 scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
786 sack_scoreboard_hole_t *next, *prev;
788 if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
790 next = pool_elt_at_index (sb->holes, hole->next);
791 next->prev = hole->prev;
795 sb->tail = hole->prev;
798 if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
800 prev = pool_elt_at_index (sb->holes, hole->prev);
801 prev->next = hole->next;
805 sb->head = hole->next;
808 if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
809 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
811 /* Poison the entry */
813 clib_memset (hole, 0xfe, sizeof (*hole));
815 pool_put (sb->holes, hole);
818 static sack_scoreboard_hole_t *
819 scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
822 sack_scoreboard_hole_t *hole, *next, *prev;
825 pool_get (sb->holes, hole);
826 clib_memset (hole, 0, sizeof (*hole));
830 hole_index = scoreboard_hole_index (sb, hole);
832 prev = scoreboard_get_hole (sb, prev_index);
835 hole->prev = prev_index;
836 hole->next = prev->next;
838 if ((next = scoreboard_next_hole (sb, hole)))
839 next->prev = hole_index;
841 sb->tail = hole_index;
843 prev->next = hole_index;
847 sb->head = hole_index;
848 hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
849 hole->next = TCP_INVALID_SACK_HOLE_INDEX;
856 scoreboard_update_sacked_rxt (sack_scoreboard_t * sb, u32 start, u32 end,
859 if (!has_rxt || seq_geq (start, sb->high_rxt))
860 return;
862 sb->rxt_sacked +=
863 seq_lt (end, sb->high_rxt) ? (end - start) : (sb->high_rxt - start);
867 scoreboard_update_bytes (sack_scoreboard_t * sb, u32 ack, u32 snd_mss)
869 sack_scoreboard_hole_t *left, *right;
870 u32 sacked = 0, blks = 0, old_sacked;
872 old_sacked = sb->sacked_bytes;
874 sb->last_lost_bytes = 0;
876 sb->sacked_bytes = 0;
878 right = scoreboard_last_hole (sb);
881 sb->sacked_bytes = sb->high_sacked - ack;
882 sb->last_sacked_bytes = sb->sacked_bytes
883 - (old_sacked - sb->last_bytes_delivered);
887 if (seq_gt (sb->high_sacked, right->end))
889 sacked = sb->high_sacked - right->end;
893 while (sacked < (TCP_DUPACK_THRESHOLD - 1) * snd_mss
894 && blks < TCP_DUPACK_THRESHOLD)
897 sb->lost_bytes += scoreboard_hole_bytes (right);
899 left = scoreboard_prev_hole (sb, right);
902 ASSERT (right->start == ack || sb->is_reneging);
903 sacked += right->start - ack;
908 sacked += right->start - left->end;
913 /* right is first lost */
916 sb->lost_bytes += scoreboard_hole_bytes (right);
917 sb->last_lost_bytes += right->is_lost ? 0 : (right->end - right->start);
919 left = scoreboard_prev_hole (sb, right);
922 ASSERT (right->start == ack || sb->is_reneging);
923 sacked += right->start - ack;
926 sacked += right->start - left->end;
930 sb->sacked_bytes = sacked;
931 sb->last_sacked_bytes = sacked - (old_sacked - sb->last_bytes_delivered);
935 * Figure out the next hole to retransmit
937 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()
939 sack_scoreboard_hole_t *
940 scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
941 sack_scoreboard_hole_t * start,
942 u8 have_unsent, u8 * can_rescue, u8 * snd_limited)
944 sack_scoreboard_hole_t *hole = 0;
946 hole = start ? start : scoreboard_first_hole (sb);
947 while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
948 hole = scoreboard_next_hole (sb, hole);
950 /* Nothing, return */
953 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
957 /* Rule (1): if higher than rxt, less than high_sacked and lost */
958 if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
960 sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
964 /* Rule (2): available unsent data */
967 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
970 /* Rule (3): if hole not lost */
971 else if (seq_lt (hole->start, sb->high_sacked))
973 /* And we didn't already retransmit it */
974 if (seq_leq (hole->end, sb->high_rxt))
976 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
980 sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
982 /* Rule (4): if hole beyond high_sacked */
985 ASSERT (seq_geq (hole->start, sb->high_sacked));
988 /* HighRxt MUST NOT be updated */
993 if (hole && seq_lt (sb->high_rxt, hole->start))
994 sb->high_rxt = hole->start;
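/* For illustration (scoreboard assumed): with holes [1000, 2000) and
 * [3000, 4000), high_sacked = 5000 and high_rxt = 1000, rule (1) picks the
 * first hole once it is marked lost; with unsent data available and no lost
 * hole, rule (2) defers to new transmissions; otherwise rule (3) retransmits
 * the first hole below high_sacked that has not been retransmitted yet,
 * mirroring NextSeg() from RFC 6675 sec. 4. */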
1000 scoreboard_init_rxt (sack_scoreboard_t * sb, u32 snd_una)
1002 sack_scoreboard_hole_t *hole;
1003 hole = scoreboard_first_hole (sb);
1006 snd_una = seq_gt (snd_una, hole->start) ? snd_una : hole->start;
1007 sb->cur_rxt_hole = sb->head;
1009 sb->high_rxt = snd_una;
1010 sb->rescue_rxt = snd_una - 1;
1014 scoreboard_init (sack_scoreboard_t * sb)
1016 sb->head = TCP_INVALID_SACK_HOLE_INDEX;
1017 sb->tail = TCP_INVALID_SACK_HOLE_INDEX;
1018 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
1022 scoreboard_clear (sack_scoreboard_t * sb)
1024 sack_scoreboard_hole_t *hole;
1025 while ((hole = scoreboard_first_hole (sb)))
1027 scoreboard_remove_hole (sb, hole);
1029 ASSERT (sb->head == sb->tail && sb->head == TCP_INVALID_SACK_HOLE_INDEX);
1030 ASSERT (pool_elts (sb->holes) == 0);
1031 sb->sacked_bytes = 0;
1032 sb->last_sacked_bytes = 0;
1033 sb->last_bytes_delivered = 0;
1035 sb->last_lost_bytes = 0;
1036 sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
1037 sb->is_reneging = 0;
1041 scoreboard_clear_reneging (sack_scoreboard_t * sb, u32 start, u32 end)
1043 sack_scoreboard_hole_t *last_hole;
1045 clib_warning ("sack reneging");
1047 scoreboard_clear (sb);
1048 last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
1050 last_hole->is_lost = 1;
1051 sb->tail = scoreboard_hole_index (sb, last_hole);
1052 sb->high_sacked = start;
1053 scoreboard_init_rxt (sb, start);
1056 #endif /* CLIB_MARCH_VARIANT */
1059 * Test that scoreboard is sane after recovery
1061 * Returns 1 if scoreboard is empty or if first hole is beyond
1062 * snd_una.
1065 tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
1067 sack_scoreboard_hole_t *hole;
1068 hole = scoreboard_first_hole (&tc->sack_sb);
1069 return (!hole || (seq_geq (hole->start, tc->snd_una)
1070 && seq_lt (hole->end, tc->snd_nxt)));
1073 #ifndef CLIB_MARCH_VARIANT
1076 tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
1078 sack_scoreboard_hole_t *hole, *next_hole;
1079 sack_scoreboard_t *sb = &tc->sack_sb;
1080 sack_block_t *blk, *rcv_sacks;
1081 u32 blk_index = 0, i, j;
1084 sb->last_sacked_bytes = 0;
1085 sb->last_bytes_delivered = 0;
1088 if (!tcp_opts_sack (&tc->rcv_opts) && !sb->sacked_bytes
1089 && sb->head == TCP_INVALID_SACK_HOLE_INDEX)
1092 has_rxt = tcp_in_cong_recovery (tc);
1094 /* Remove invalid blocks */
1095 blk = tc->rcv_opts.sacks;
1096 while (blk < vec_end (tc->rcv_opts.sacks))
1098 if (seq_lt (blk->start, blk->end)
1099 && seq_gt (blk->start, tc->snd_una)
1100 && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_nxt))
1105 vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);
1108 /* Add block for cumulative ack */
1109 if (seq_gt (ack, tc->snd_una))
1111 vec_add2 (tc->rcv_opts.sacks, blk, 1);
1112 blk->start = tc->snd_una;
1116 if (vec_len (tc->rcv_opts.sacks) == 0)
1119 tcp_scoreboard_trace_add (tc, ack);
1121 /* Make sure blocks are ordered */
1122 rcv_sacks = tc->rcv_opts.sacks;
1123 for (i = 0; i < vec_len (rcv_sacks); i++)
1124 for (j = i + 1; j < vec_len (rcv_sacks); j++)
1125 if (seq_lt (rcv_sacks[j].start, rcv_sacks[i].start))
1127 sack_block_t tmp = rcv_sacks[i];
1128 rcv_sacks[i] = rcv_sacks[j];
1132 if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
1134 /* Handle reneging as a special case */
1135 if (PREDICT_FALSE (sb->is_reneging))
1137 /* No holes, only sacked bytes */
1138 if (seq_leq (tc->snd_nxt, sb->high_sacked))
1140 /* No progress made so return */
1141 if (seq_leq (ack, tc->snd_una))
1144 /* Update sacked bytes delivered and return */
1145 sb->last_bytes_delivered = ack - tc->snd_una;
1146 sb->sacked_bytes -= sb->last_bytes_delivered;
1147 sb->is_reneging = seq_lt (ack, sb->high_sacked);
1151 /* New hole above high sacked. Add it and process normally */
1152 hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
1153 sb->high_sacked, tc->snd_nxt);
1154 sb->tail = scoreboard_hole_index (sb, hole);
1156 /* Not reneging and no holes. Insert the first that covers all
1157 * outstanding bytes */
1160 hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
1161 tc->snd_una, tc->snd_nxt);
1162 sb->tail = scoreboard_hole_index (sb, hole);
1164 sb->high_sacked = rcv_sacks[vec_len (rcv_sacks) - 1].end;
1168 /* If we have holes but snd_nxt is beyond the last hole, update
1169 * last hole end or add new hole after high sacked */
1170 hole = scoreboard_last_hole (sb);
1171 if (seq_gt (tc->snd_nxt, hole->end))
1173 if (seq_geq (hole->start, sb->high_sacked))
1175 hole->end = tc->snd_nxt;
1177 /* New hole after high sacked block */
1178 else if (seq_lt (sb->high_sacked, tc->snd_nxt))
1180 scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
1185 /* Keep track of max byte sacked for when the last hole
1186 * is acked */
1187 sb->high_sacked = seq_max (rcv_sacks[vec_len (rcv_sacks) - 1].end,
1191 /* Walk the holes with the SACK blocks */
1192 hole = pool_elt_at_index (sb->holes, sb->head);
1194 if (PREDICT_FALSE (sb->is_reneging))
1196 sb->last_bytes_delivered += clib_min (hole->start - tc->snd_una,
1198 sb->is_reneging = seq_lt (ack, hole->start);
1201 while (hole && blk_index < vec_len (rcv_sacks))
1203 blk = &rcv_sacks[blk_index];
1204 if (seq_leq (blk->start, hole->start))
1206 /* Block covers hole. Remove hole */
1207 if (seq_geq (blk->end, hole->end))
1209 next_hole = scoreboard_next_hole (sb, hole);
1211 /* If covered by ack, compute delivered bytes */
1212 if (blk->end == ack)
1214 u32 sacked = next_hole ? next_hole->start : sb->high_sacked;
1215 if (PREDICT_FALSE (seq_lt (ack, sacked)))
1217 sb->last_bytes_delivered += ack - hole->end;
1218 sb->is_reneging = 1;
1222 sb->last_bytes_delivered += sacked - hole->end;
1223 sb->is_reneging = 0;
1226 scoreboard_update_sacked_rxt (sb, hole->start, hole->end,
1228 scoreboard_remove_hole (sb, hole);
1231 /* Partial 'head' overlap */
1234 if (seq_gt (blk->end, hole->start))
1236 scoreboard_update_sacked_rxt (sb, hole->start, blk->end,
1238 hole->start = blk->end;
1245 /* Hole must be split */
1246 if (seq_lt (blk->end, hole->end))
1248 u32 hole_index = scoreboard_hole_index (sb, hole);
1249 next_hole = scoreboard_insert_hole (sb, hole_index, blk->end,
1251 /* Pool might've moved */
1252 hole = scoreboard_get_hole (sb, hole_index);
1253 hole->end = blk->start;
1255 scoreboard_update_sacked_rxt (sb, blk->start, blk->end,
1259 ASSERT (hole->next == scoreboard_hole_index (sb, next_hole));
1261 else if (seq_lt (blk->start, hole->end))
1263 scoreboard_update_sacked_rxt (sb, blk->start, hole->end,
1265 hole->end = blk->start;
1267 hole = scoreboard_next_hole (sb, hole);
1271 scoreboard_update_bytes (sb, ack, tc->snd_mss);
1273 ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
1274 ASSERT (sb->sacked_bytes == 0 || tcp_in_recovery (tc)
1275 || sb->sacked_bytes <= tc->snd_nxt - seq_max (tc->snd_una, ack));
1276 ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_nxt
1277 - seq_max (tc->snd_una, ack) || tcp_in_recovery (tc));
1278 ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
1279 || sb->is_reneging || sb->holes[sb->head].start == ack);
1280 ASSERT (sb->last_lost_bytes <= sb->lost_bytes);
1281 ASSERT ((ack - tc->snd_una) + sb->last_sacked_bytes
1282 - sb->last_bytes_delivered >= sb->rxt_sacked);
1283 ASSERT ((ack - tc->snd_una) >= tc->sack_sb.last_bytes_delivered
1284 || (tc->flags & TCP_CONN_FINSNT));
1286 TCP_EVT (TCP_EVT_CC_SCOREBOARD, tc);
1288 #endif /* CLIB_MARCH_VARIANT */
1291 * Try to update snd_wnd based on feedback received from peer.
1293 * If successful, and new window is 'effectively' 0, activate persist
1297 tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
1299 /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
1300 * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
1301 if (seq_lt (tc->snd_wl1, seq)
1302 || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
1304 tc->snd_wnd = snd_wnd;
1307 TCP_EVT (TCP_EVT_SND_WND, tc);
1309 if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss))
1311 /* Set persist timer if not set and we just got 0 wnd */
1312 if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
1313 && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
1314 tcp_persist_timer_set (tc);
1318 if (PREDICT_FALSE (tcp_timer_is_active (tc, TCP_TIMER_PERSIST)))
1319 tcp_persist_timer_reset (tc);
1321 if (PREDICT_FALSE (tcp_is_descheduled (tc)))
1322 tcp_reschedule (tc);
1324 if (PREDICT_FALSE (!tcp_in_recovery (tc) && tc->rto_boff > 0))
1327 tcp_update_rto (tc);
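/* For illustration (values assumed): with snd_wl1 = 100 and snd_wl2 = 500, a
 * segment with seq = 150 and ack = 500 passes the test above, so snd_wnd takes
 * the advertised value, e.g. a 16-bit window of 1000 shifted by snd_wscale = 7
 * becomes 128000 bytes. If the new window drops below snd_mss, the persist
 * timer is armed so zero-window probes keep the connection alive. */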
1334 * Init loss recovery/fast recovery.
1336 * Triggered by dup acks as opposed to timer timeout. Note that cwnd is
1337 * updated in @ref tcp_cc_handle_event after fast retransmit
1340 tcp_cc_init_congestion (tcp_connection_t * tc)
1342 tcp_fastrecovery_on (tc);
1343 tc->snd_congestion = tc->snd_nxt;
1344 tc->cwnd_acc_bytes = 0;
1345 tc->snd_rxt_bytes = 0;
1346 tc->rxt_delivered = 0;
1347 tc->prr_delivered = 0;
1348 tc->prr_start = tc->snd_una;
1349 tc->prev_ssthresh = tc->ssthresh;
1350 tc->prev_cwnd = tc->cwnd;
1352 tc->snd_rxt_ts = tcp_tstamp (tc);
1353 tcp_cc_congestion (tc);
1355 /* Post retransmit update cwnd to ssthresh and account for the
1356 * three segments that have left the network and should've been
1357 * buffered at the receiver XXX */
1358 if (!tcp_opts_sack_permitted (&tc->rcv_opts))
1359 tc->cwnd += 3 * tc->snd_mss;
1361 tc->fr_occurences += 1;
1362 TCP_EVT (TCP_EVT_CC_EVT, tc, 4);
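/* For illustration (values assumed, NewReno-like cc algorithm that sets cwnd
 * to ssthresh in tcp_cc_congestion()): with snd_mss = 1460 and ssthresh cut to
 * 14600, the non-SACK adjustment above leaves cwnd at about
 * 14600 + 3 * 1460 = 18980 bytes, crediting the three duplicate-acked segments
 * that already left the network. */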
1366 tcp_cc_congestion_undo (tcp_connection_t * tc)
1368 tc->cwnd = tc->prev_cwnd;
1369 tc->ssthresh = tc->prev_ssthresh;
1370 tcp_cc_undo_recovery (tc);
1371 ASSERT (tc->rto_boff == 0);
1372 TCP_EVT (TCP_EVT_CC_EVT, tc, 5);
1376 tcp_cc_is_spurious_timeout_rxt (tcp_connection_t * tc)
1378 return (tcp_in_recovery (tc) && tc->rto_boff == 1
1380 && tcp_opts_tstamp (&tc->rcv_opts)
1381 && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));
1385 tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
1387 return (tcp_cc_is_spurious_timeout_rxt (tc));
1391 tcp_should_fastrecover_sack (tcp_connection_t * tc)
1393 return (tc->sack_sb.lost_bytes
1394 || ((TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
1395 < tc->sack_sb.sacked_bytes));
1399 tcp_should_fastrecover (tcp_connection_t * tc, u8 has_sack)
1403 /* If one of the two conditions below holds, reset dupacks because
1404 * we're probably after a timeout (RFC6582 heuristics).
1405 * If the cumulative ack does not cover more than the congestion threshold,
1407 * 1) The following doesn't hold: The congestion window is greater
1408 * than SMSS bytes and the difference between highest_ack
1409 * and prev_highest_ack is at most 4*SMSS bytes
1410 * 2) Echoed timestamp in the last non-dup ack does not equal the
1413 if (seq_leq (tc->snd_una, tc->snd_congestion)
1414 && ((!(tc->cwnd > tc->snd_mss
1415 && tc->bytes_acked <= 4 * tc->snd_mss))
1416 || (tc->rcv_opts.tsecr != tc->tsecr_last_ack)))
1418 tc->rcv_dupacks = 0;
1422 return ((tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
1423 || tcp_should_fastrecover_sack (tc));
1427 tcp_cc_recover (tcp_connection_t * tc)
1429 sack_scoreboard_hole_t *hole;
1432 ASSERT (tcp_in_cong_recovery (tc));
1434 if (tcp_cc_is_spurious_retransmit (tc))
1436 tcp_cc_congestion_undo (tc);
1440 tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
1441 tc->rcv_dupacks = 0;
1443 /* Previous recovery left us congested. Continue sending as part
1444 * of the current recovery event with an updated snd_congestion */
1445 if (tc->sack_sb.sacked_bytes)
1447 tc->snd_congestion = tc->snd_nxt;
1448 tcp_program_retransmit (tc);
1452 tc->rxt_delivered = 0;
1453 tc->snd_rxt_bytes = 0;
1455 tc->prr_delivered = 0;
1457 tc->flags &= ~TCP_CONN_RXT_PENDING;
1459 hole = scoreboard_first_hole (&tc->sack_sb);
1460 if (hole && hole->start == tc->snd_una && hole->end == tc->snd_nxt)
1461 scoreboard_clear (&tc->sack_sb);
1463 if (!tcp_in_recovery (tc) && !is_spurious)
1464 tcp_cc_recovered (tc);
1466 tcp_fastrecovery_off (tc);
1467 tcp_fastrecovery_first_off (tc);
1468 tcp_recovery_off (tc);
1469 TCP_EVT (TCP_EVT_CC_EVT, tc, 3);
1471 ASSERT (tc->rto_boff == 0);
1472 ASSERT (!tcp_in_cong_recovery (tc));
1473 ASSERT (tcp_scoreboard_is_sane_post_recovery (tc));
1479 tcp_cc_update (tcp_connection_t * tc, tcp_rate_sample_t * rs)
1481 ASSERT (!tcp_in_cong_recovery (tc) || tcp_is_lost_fin (tc));
1483 /* Congestion avoidance */
1484 tcp_cc_rcv_ack (tc, rs);
1486 /* If a cumulative ack, make sure dupacks is 0 */
1487 tc->rcv_dupacks = 0;
1489 /* When dupacks hits the threshold we only enter fast retransmit if
1490 * cumulative ack covers more than snd_congestion. Should snd_una
1491 * wrap this test may fail under otherwise valid circumstances.
1492 * Therefore, proactively update snd_congestion when wrap detected. */
1494 (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
1495 && seq_gt (tc->snd_congestion, tc->snd_una)))
1496 tc->snd_congestion = tc->snd_una - 1;
1500 * One function to rule them all ... and in the darkness bind them
1503 tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
1506 u8 has_sack = tcp_opts_sack_permitted (&tc->rcv_opts);
1508 /* If reneging, wait for timer based retransmits */
1509 if (PREDICT_FALSE (tcp_is_lost_fin (tc) || tc->sack_sb.is_reneging))
1513 * If not in recovery, figure out if we should enter
1515 if (!tcp_in_cong_recovery (tc))
1520 TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
1521 tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
1523 if (tcp_should_fastrecover (tc, has_sack))
1525 tcp_cc_init_congestion (tc);
1528 scoreboard_init_rxt (&tc->sack_sb, tc->snd_una);
1530 tcp_connection_tx_pacer_reset (tc, tc->cwnd, 0 /* start bucket */ );
1531 tcp_program_retransmit (tc);
1538 * Already in recovery
1542 * Process (re)transmit feedback. Output path uses this to decide how much
1543 * more data to release into the network
1547 if (!tc->bytes_acked && tc->sack_sb.rxt_sacked)
1548 tcp_fastrecovery_first_on (tc);
1550 tc->rxt_delivered += tc->sack_sb.rxt_sacked;
1551 tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
1552 - tc->sack_sb.last_bytes_delivered;
1558 tc->rcv_dupacks += 1;
1559 TCP_EVT (TCP_EVT_DUPACK_RCVD, tc, 1);
1561 tc->rxt_delivered = clib_min (tc->rxt_delivered + tc->bytes_acked,
1564 tc->prr_delivered += clib_min (tc->snd_mss,
1565 tc->snd_nxt - tc->snd_una);
1567 tc->prr_delivered += tc->bytes_acked - clib_min (tc->bytes_acked,
1571 /* If partial ack, assume that the first un-acked segment was lost */
1572 if (tc->bytes_acked || tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
1573 tcp_fastrecovery_first_on (tc);
1577 * See if we can exit and stop retransmitting
1579 if (seq_geq (tc->snd_una, tc->snd_congestion))
1581 /* If spurious return, we've already updated everything */
1582 if (tcp_cc_recover (tc))
1584 tc->tsecr_last_ack = tc->rcv_opts.tsecr;
1588 /* Treat as congestion avoidance ack */
1589 tcp_cc_rcv_ack (tc, rs);
1593 tcp_program_retransmit (tc);
1596 * Notify cc of the event
1599 if (!tc->bytes_acked)
1601 tcp_cc_rcv_cong_ack (tc, TCP_CC_DUPACK, rs);
1605 /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
1606 * reset dupacks to 0. Also needed if in congestion recovery */
1607 tc->rcv_dupacks = 0;
1609 if (tcp_in_recovery (tc))
1610 tcp_cc_rcv_ack (tc, rs);
1612 tcp_cc_rcv_cong_ack (tc, TCP_CC_PARTIALACK, rs);
1616 tcp_handle_old_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
1618 if (!tcp_in_cong_recovery (tc))
1621 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1622 tcp_rcv_sacks (tc, tc->snd_una);
1624 tc->bytes_acked = 0;
1626 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1627 tcp_bt_sample_delivery_rate (tc, rs);
1629 tcp_cc_handle_event (tc, rs, 1);
1633 * Check if duplicate ack as per RFC5681 Sec. 2
1636 tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
1639 return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
1640 && seq_gt (tc->snd_nxt, tc->snd_una)
1641 && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
1642 && (prev_snd_wnd == tc->snd_wnd));
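/* For illustration (values assumed): with snd_una = 1000, snd_nxt = 3000 and
 * snd_wnd = 20000, a pure ack (no payload, seq_end == seq_number) that acks
 * 1000 again and advertises the same window counts as a duplicate per the four
 * tests above; an ack that carries data, changes the window or acks new data
 * does not. */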
1646 * Checks if ack is a congestion control event.
1649 tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
1650 u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
1652 /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
1653 * defined to be 'duplicate' as well */
1654 *is_dack = tc->sack_sb.last_sacked_bytes
1655 || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);
1657 return (*is_dack || tcp_in_cong_recovery (tc));
1661 * Process incoming ACK
1664 tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
1665 tcp_header_t * th, u32 * error)
1667 u32 prev_snd_wnd, prev_snd_una;
1668 tcp_rate_sample_t rs = { 0 };
1671 TCP_EVT (TCP_EVT_CC_STAT, tc);
1673 /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
1674 if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
1676 /* We've probably entered recovery and the peer still has some
1677 * of the data we've sent. Update snd_nxt and accept the ack */
1678 if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max)
1679 && seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una))
1681 tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
1685 tc->errors.above_ack_wnd += 1;
1686 *error = TCP_ERROR_ACK_FUTURE;
1687 TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 0, vnet_buffer (b)->tcp.ack_number);
1691 /* If old ACK, probably it's an old dupack */
1692 if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
1694 tc->errors.below_ack_wnd += 1;
1695 *error = TCP_ERROR_ACK_OLD;
1696 TCP_EVT (TCP_EVT_ACK_RCV_ERR, tc, 1, vnet_buffer (b)->tcp.ack_number);
1698 if (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una - tc->rcv_wnd))
1701 tcp_handle_old_ack (tc, &rs);
1703 /* Don't drop yet */
1710 * Looks okay, process feedback
1713 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1714 tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
1716 prev_snd_wnd = tc->snd_wnd;
1717 prev_snd_una = tc->snd_una;
1718 tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
1719 vnet_buffer (b)->tcp.ack_number,
1720 clib_net_to_host_u16 (th->window) << tc->snd_wscale);
1721 tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
1722 tc->snd_una = vnet_buffer (b)->tcp.ack_number;
1723 tcp_validate_txf_size (tc, tc->bytes_acked);
1725 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1726 tcp_bt_sample_delivery_rate (tc, &rs);
1728 if (tc->bytes_acked)
1730 tcp_program_dequeue (wrk, tc);
1731 tcp_update_rtt (tc, &rs, vnet_buffer (b)->tcp.ack_number);
1734 TCP_EVT (TCP_EVT_ACK_RCVD, tc);
1737 * Check if we have congestion event
1740 if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
1742 tcp_cc_handle_event (tc, &rs, is_dack);
1743 tc->dupacks_in += is_dack;
1744 if (!tcp_in_cong_recovery (tc))
1746 *error = TCP_ERROR_ACK_OK;
1749 *error = TCP_ERROR_ACK_DUP;
1750 if (vnet_buffer (b)->tcp.data_len || tcp_is_fin (th))
1756 * Update congestion control (slow start/congestion avoidance)
1758 tcp_cc_update (tc, &rs);
1759 *error = TCP_ERROR_ACK_OK;
1764 tcp_program_disconnect (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1766 if (!tcp_disconnect_pending (tc))
1768 vec_add1 (wrk->pending_disconnects, tc->c_c_index);
1769 tcp_disconnect_pending_on (tc);
1774 tcp_handle_disconnects (tcp_worker_ctx_t * wrk)
1776 u32 thread_index, *pending_disconnects, *pending_resets;
1777 tcp_connection_t *tc;
1780 if (vec_len (wrk->pending_disconnects))
1782 thread_index = wrk->vm->thread_index;
1783 pending_disconnects = wrk->pending_disconnects;
1784 for (i = 0; i < vec_len (pending_disconnects); i++)
1786 tc = tcp_connection_get (pending_disconnects[i], thread_index);
1787 tcp_disconnect_pending_off (tc);
1788 session_transport_closing_notify (&tc->connection);
1790 _vec_len (wrk->pending_disconnects) = 0;
1793 if (vec_len (wrk->pending_resets))
1795 thread_index = wrk->vm->thread_index;
1796 pending_resets = wrk->pending_resets;
1797 for (i = 0; i < vec_len (pending_resets); i++)
1799 tc = tcp_connection_get (pending_resets[i], thread_index);
1800 tcp_disconnect_pending_off (tc);
1801 tcp_handle_rst (tc);
1803 _vec_len (wrk->pending_resets) = 0;
1808 tcp_rcv_fin (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
1811 /* Reject out-of-order fins */
1812 if (vnet_buffer (b)->tcp.seq_end != tc->rcv_nxt)
1815 /* Account for the FIN and send ack */
1817 tc->flags |= TCP_CONN_FINRCVD;
1818 tcp_program_ack (tc);
1819 /* Enter CLOSE-WAIT and notify session. To avoid lingering
1820 * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
1821 tcp_connection_set_state (tc, TCP_STATE_CLOSE_WAIT);
1822 tcp_program_disconnect (wrk, tc);
1823 tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
1824 TCP_EVT (TCP_EVT_FIN_RCVD, tc);
1825 *error = TCP_ERROR_FIN_RCVD;
1828 #ifndef CLIB_MARCH_VARIANT
1830 tcp_sack_vector_is_sane (sack_block_t * sacks)
1833 for (i = 1; i < vec_len (sacks); i++)
1835 if (sacks[i - 1].end == sacks[i].start)
1842 * Build SACK list as per RFC2018.
1844 * Makes sure the first block contains the segment that generated the current
1845 * ACK and the following ones are the ones most recently reported in SACK
1846 * blocks.
1848 * @param tc TCP connection for which the SACK list is updated
1849 * @param start Start sequence number of the newest SACK block
1850 * @param end End sequence of the newest SACK block
1853 tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
1855 sack_block_t *new_list = tc->snd_sacks_fl, *block = 0;
1858 /* If the first segment is ooo add it to the list. Last write might've moved
1859 * rcv_nxt over the first segment. */
1860 if (seq_lt (tc->rcv_nxt, start))
1862 vec_add2 (new_list, block, 1);
1863 block->start = start;
1867 /* Find the blocks still worth keeping. */
1868 for (i = 0; i < vec_len (tc->snd_sacks); i++)
1870 /* Discard if rcv_nxt advanced beyond current block */
1871 if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))
1874 /* Merge or drop if segment overlapped by the new segment */
1875 if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
1876 && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
1878 if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
1879 new_list[0].start = tc->snd_sacks[i].start;
1880 if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
1881 new_list[0].end = tc->snd_sacks[i].end;
1885 /* Save to new SACK list if we have space. */
1886 if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
1887 vec_add1 (new_list, tc->snd_sacks[i]);
1890 ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);
1892 /* Replace old vector with new one */
1893 vec_reset_length (tc->snd_sacks);
1894 tc->snd_sacks_fl = tc->snd_sacks;
1895 tc->snd_sacks = new_list;
1897 /* Segments should not 'touch' */
1898 ASSERT (tcp_sack_vector_is_sane (tc->snd_sacks));
1902 tcp_sack_list_bytes (tcp_connection_t * tc)
1905 for (i = 0; i < vec_len (tc->snd_sacks); i++)
1906 bytes += tc->snd_sacks[i].end - tc->snd_sacks[i].start;
1909 #endif /* CLIB_MARCH_VARIANT */
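/* For illustration (sequence numbers assumed): with rcv_nxt = 1000, an
 * out-of-order segment covering [2000, 3000) makes tcp_update_sack_list()
 * report the block {2000, 3000} first. If [4000, 5000) arrives next, the
 * newest block is prepended and the list becomes {4000, 5000}, {2000, 3000},
 * matching RFC 2018's most-recently-reported-first ordering. Blocks below
 * rcv_nxt are dropped as the in-order data is consumed. */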
1911 /** Enqueue data for delivery to application */
1913 tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
1916 int written, error = TCP_ERROR_ENQUEUED;
1918 ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1920 written = session_enqueue_stream_connection (&tc->connection, b, 0,
1921 1 /* queue event */ , 1);
1922 tc->bytes_in += written;
1924 TCP_EVT (TCP_EVT_INPUT, tc, 0, data_len, written);
1926 /* Update rcv_nxt */
1927 if (PREDICT_TRUE (written == data_len))
1929 tc->rcv_nxt += written;
1931 /* If more data written than expected, account for out-of-order bytes. */
1932 else if (written > data_len)
1934 tc->rcv_nxt += written;
1935 TCP_EVT (TCP_EVT_CC_INPUT, tc, data_len, written);
1937 else if (written > 0)
1939 /* We've written something but FIFO is probably full now */
1940 tc->rcv_nxt += written;
1941 error = TCP_ERROR_PARTIALLY_ENQUEUED;
1945 /* Packet made it through for ack processing */
1946 if (tc->rcv_wnd < tc->snd_mss)
1947 return TCP_ERROR_ZERO_RWND;
1949 return TCP_ERROR_FIFO_FULL;
1952 /* Update SACK list if need be */
1953 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1955 /* Remove SACK blocks that have been delivered */
1956 tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);
1962 /** Enqueue out-of-order data */
1964 tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
1970 ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));
1973 /* Enqueue out-of-order data with relative offset */
1974 rv = session_enqueue_stream_connection (&tc->connection, b,
1975 vnet_buffer (b)->tcp.seq_number -
1976 tc->rcv_nxt, 0 /* queue event */ ,
1979 /* Nothing written */
1982 TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, 0);
1983 return TCP_ERROR_FIFO_FULL;
1986 TCP_EVT (TCP_EVT_INPUT, tc, 1, data_len, data_len);
1987 tc->bytes_in += data_len;
1989 /* Update SACK list if in use */
1990 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1992 ooo_segment_t *newest;
1995 s0 = session_get (tc->c_s_index, tc->c_thread_index);
1997 /* Get the newest segment from the fifo */
1998 newest = svm_fifo_newest_ooo_segment (s0->rx_fifo);
2001 offset = ooo_segment_offset_prod (s0->rx_fifo, newest);
2002 ASSERT (offset <= vnet_buffer (b)->tcp.seq_number - tc->rcv_nxt);
2003 start = tc->rcv_nxt + offset;
2004 end = start + ooo_segment_length (s0->rx_fifo, newest);
2005 tcp_update_sack_list (tc, start, end);
2006 svm_fifo_newest_ooo_segment_reset (s0->rx_fifo);
2007 TCP_EVT (TCP_EVT_CC_SACKS, tc);
2011 return TCP_ERROR_ENQUEUED_OOO;
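/* For illustration (values assumed): the fifo stores out-of-order data at a
 * relative offset, so a segment with seq_number = rcv_nxt + 500 lands 500
 * bytes past the current tail. The newest ooo segment reported by the fifo is
 * converted back to absolute sequence numbers (start = rcv_nxt + offset)
 * before being advertised through the SACK list above. */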
2015 * Check if ACK could be delayed. If ack can be delayed, it should return
2016 * true for a full frame. If we're always acking return 0.
2019 tcp_can_delack (tcp_connection_t * tc)
2021 /* Send ack if ... */
2023 /* just sent a rcv wnd 0
2024 || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0 */
2025 /* constrained to send ack */
2026 || (tc->flags & TCP_CONN_SNDACK) != 0
2027 /* we're almost out of tx wnd */
2028 || tcp_available_cc_snd_space (tc) < 4 * tc->snd_mss)
2035 tcp_buffer_discard_bytes (vlib_buffer_t * b, u32 n_bytes_to_drop)
2037 u32 discard, first = b->current_length;
2038 vlib_main_t *vm = vlib_get_main ();
2040 /* Handle multi-buffer segments */
2041 if (n_bytes_to_drop > b->current_length)
2043 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
2047 discard = clib_min (n_bytes_to_drop, b->current_length);
2048 vlib_buffer_advance (b, discard);
2049 b = vlib_get_buffer (vm, b->next_buffer);
2050 n_bytes_to_drop -= discard;
2052 while (n_bytes_to_drop);
2053 if (n_bytes_to_drop > first)
2054 b->total_length_not_including_first_buffer -= n_bytes_to_drop - first;
2057 vlib_buffer_advance (b, n_bytes_to_drop);
2058 vnet_buffer (b)->tcp.data_len -= n_bytes_to_drop;
2063 * Receive buffer for connection and handle acks
2065 * It handles both in-order and out-of-order data.
2068 tcp_segment_rcv (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
2071 u32 error, n_bytes_to_drop, n_data_bytes;
2073 vlib_buffer_advance (b, vnet_buffer (b)->tcp.data_offset);
2074 n_data_bytes = vnet_buffer (b)->tcp.data_len;
2075 ASSERT (n_data_bytes);
2076 tc->data_segs_in += 1;
2078 /* Handle out-of-order data */
2079 if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
2081 /* Old sequence numbers allowed through because they overlapped
2082 * the rx window */
2083 if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
2085 /* Completely in the past (possible retransmit). Ack
2086 * retransmissions since we may not have any data to send */
2087 if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))
2089 tcp_program_dupack (tc);
2090 tc->errors.below_data_wnd++;
2091 error = TCP_ERROR_SEGMENT_OLD;
2095 /* Chop off the bytes in the past and see if what is left
2096 * can be enqueued in order */
2097 n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
2098 n_data_bytes -= n_bytes_to_drop;
2099 vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt;
2100 if (tcp_buffer_discard_bytes (b, n_bytes_to_drop))
2102 error = TCP_ERROR_SEGMENT_OLD;
2108 /* RFC2581: Enqueue and send DUPACK for fast retransmit */
2109 error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
2110 tcp_program_dupack (tc);
2111 TCP_EVT (TCP_EVT_DUPACK_SENT, tc, vnet_buffer (b)->tcp);
2112 tc->errors.above_data_wnd += seq_gt (vnet_buffer (b)->tcp.seq_end,
2113 tc->rcv_las + tc->rcv_wnd);
2119 /* In order data, enqueue. Fifo figures out by itself if any out-of-order
2120 * segments can be enqueued after fifo tail offset changes. */
2121 error = tcp_session_enqueue_data (tc, b, n_data_bytes);
2122 if (tcp_can_delack (tc))
2124 if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
2125 tcp_timer_set (tc, TCP_TIMER_DELACK, tcp_cfg.delack_time);
2129 tcp_program_ack (tc);
2137 tcp_header_t tcp_header;
2138 tcp_connection_t tcp_connection;
2142 format_tcp_rx_trace (u8 * s, va_list * args)
2144 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
2145 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
2146 tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
2147 tcp_connection_t *tc = &t->tcp_connection;
2148 u32 indent = format_get_indent (s);
2150 s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
2151 format_tcp_state, tc->state, format_white_space, indent,
2152 format_tcp_header, &t->tcp_header, 128);
2158 format_tcp_rx_trace_short (u8 * s, va_list * args)
2160 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
2161 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
2162 tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
2164 s = format (s, "%d -> %d (%U)",
2165 clib_net_to_host_u16 (t->tcp_header.dst_port),
2166 clib_net_to_host_u16 (t->tcp_header.src_port), format_tcp_state,
2167 t->tcp_connection.state);
2173 tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
2174 tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
2178 clib_memcpy_fast (&t0->tcp_connection, tc0,
2179 sizeof (t0->tcp_connection));
2183 th0 = tcp_buffer_hdr (b0);
2185 clib_memcpy_fast (&t0->tcp_header, th0, sizeof (t0->tcp_header));
2189 tcp_established_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2190 vlib_frame_t * frame, u8 is_ip4)
2194 n_left = frame->n_vectors;
2195 from = vlib_frame_vector_args (frame);
2199 tcp_connection_t *tc0;
2206 b0 = vlib_get_buffer (vm, bi0);
2208 if (b0->flags & VLIB_BUFFER_IS_TRACED)
2210 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2211 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2213 th0 = tcp_buffer_hdr (b0);
2214 tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);
2223 tcp_node_inc_counter_i (vlib_main_t * vm, u32 tcp4_node, u32 tcp6_node,
2224 u8 is_ip4, u32 evt, u32 val)
2227 vlib_node_increment_counter (vm, tcp4_node, evt, val);
2229 vlib_node_increment_counter (vm, tcp6_node, evt, val);
2232 #define tcp_maybe_inc_counter(node_id, err, count) \
2234 if (next0 != tcp_next_drop (is_ip4)) \
2235 tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index, \
2236 tcp6_##node_id##_node.index, is_ip4, err, \
2239 #define tcp_inc_counter(node_id, err, count) \
2240 tcp_node_inc_counter_i (vm, tcp4_##node_id##_node.index, \
2241 tcp6_##node_id##_node.index, is_ip4, \
2243 #define tcp_maybe_inc_err_counter(cnts, err) \
2245 cnts[err] += (next0 != tcp_next_drop (is_ip4)); \
2247 #define tcp_inc_err_counter(cnts, err, val) \
2248 { \
2249 cnts[err] += val; \
2250 }
2251 #define tcp_store_err_counters(node_id, cnts) \
2254 for (i = 0; i < TCP_N_ERROR; i++) \
2256 tcp_inc_counter(node_id, i, cnts[i]); \
2261 tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2262 vlib_frame_t * frame, int is_ip4)
2264 u32 thread_index = vm->thread_index, errors = 0;
2265 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2266 u32 n_left_from, *from, *first_buffer;
2267 u16 err_counters[TCP_N_ERROR] = { 0 };
2269 if (node->flags & VLIB_NODE_FLAG_TRACE)
2270 tcp_established_trace_frame (vm, node, frame, is_ip4);
2272 first_buffer = from = vlib_frame_vector_args (frame);
2273 n_left_from = frame->n_vectors;
2275 while (n_left_from > 0)
2277 u32 bi0, error0 = TCP_ERROR_ACK_OK;
2280 tcp_connection_t *tc0;
2282 if (n_left_from > 1)
2285 pb = vlib_get_buffer (vm, from[1]);
2286 vlib_prefetch_buffer_header (pb, LOAD);
2287 CLIB_PREFETCH (pb->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
2294 b0 = vlib_get_buffer (vm, bi0);
2295 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2298 if (PREDICT_FALSE (tc0 == 0))
2300 error0 = TCP_ERROR_INVALID_CONNECTION;
2304 th0 = tcp_buffer_hdr (b0);
2306 /* TODO header prediction fast path */
2308 /* 1-4: check SEQ, RST, SYN */
2309 if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, th0, &error0)))
2311 TCP_EVT (TCP_EVT_SEG_INVALID, tc0, vnet_buffer (b0)->tcp);
2315 /* 5: check the ACK field */
2316 if (PREDICT_FALSE (tcp_rcv_ack (wrk, tc0, b0, th0, &error0)))
2319 /* 6: check the URG bit TODO */
2321 /* 7: process the segment text */
2322 if (vnet_buffer (b0)->tcp.data_len)
2323 error0 = tcp_segment_rcv (wrk, tc0, b0);
2325 /* 8: check the FIN bit */
2326 if (PREDICT_FALSE (tcp_is_fin (th0)))
2327 tcp_rcv_fin (wrk, tc0, b0, &error0);
2330 tcp_inc_err_counter (err_counters, error0, 1);
2333 errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2335 err_counters[TCP_ERROR_MSG_QUEUE_FULL] = errors;
2336 tcp_store_err_counters (established, err_counters);
2337 tcp_handle_postponed_dequeues (wrk);
2338 tcp_handle_disconnects (wrk);
2339 vlib_buffer_free (vm, first_buffer, frame->n_vectors);
2341 return frame->n_vectors;
2344 VLIB_NODE_FN (tcp4_established_node) (vlib_main_t * vm,
2345 vlib_node_runtime_t * node,
2346 vlib_frame_t * from_frame)
2348 return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2351 VLIB_NODE_FN (tcp6_established_node) (vlib_main_t * vm,
2352 vlib_node_runtime_t * node,
2353 vlib_frame_t * from_frame)
2355 return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2359 VLIB_REGISTER_NODE (tcp4_established_node) =
2361 .name = "tcp4-established",
2362 /* Takes a vector of packets. */
2363 .vector_size = sizeof (u32),
2364 .n_errors = TCP_N_ERROR,
2365 .error_strings = tcp_error_strings,
2366 .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
2369 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
2370 foreach_tcp_state_next
2373 .format_trace = format_tcp_rx_trace_short,
2378 VLIB_REGISTER_NODE (tcp6_established_node) =
2380 .name = "tcp6-established",
2381 /* Takes a vector of packets. */
2382 .vector_size = sizeof (u32),
2383 .n_errors = TCP_N_ERROR,
2384 .error_strings = tcp_error_strings,
2385 .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
2388 #define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
2389 foreach_tcp_state_next
2392 .format_trace = format_tcp_rx_trace_short,
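/**
 * Sanity check, used in asserts below, that the connection returned by a
 * lookup actually matches the buffer's addresses and ports.
 */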
2398 tcp_lookup_is_valid (tcp_connection_t * tc, vlib_buffer_t * b,
2401 transport_connection_t *tmp = 0;
2408 if (tc->c_lcl_port == 0 && tc->state == TCP_STATE_LISTEN)
2411 u8 is_ip_valid = 0, val_l, val_r;
2413 if (tc->connection.is_ip4)
2415 ip4_header_t *ip4_hdr = (ip4_header_t *) vlib_buffer_get_current (b);
2417 val_l = !ip4_address_compare (&ip4_hdr->dst_address,
2418 &tc->connection.lcl_ip.ip4);
2419 val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 1);
2420 val_r = !ip4_address_compare (&ip4_hdr->src_address,
2421 &tc->connection.rmt_ip.ip4);
2422 val_r = val_r || tc->state == TCP_STATE_LISTEN;
2423 is_ip_valid = val_l && val_r;
2427 ip6_header_t *ip6_hdr = (ip6_header_t *) vlib_buffer_get_current (b);
2429 val_l = !ip6_address_compare (&ip6_hdr->dst_address,
2430 &tc->connection.lcl_ip.ip6);
2431 val_l = val_l || ip_is_zero (&tc->connection.lcl_ip, 0);
2432 val_r = !ip6_address_compare (&ip6_hdr->src_address,
2433 &tc->connection.rmt_ip.ip6);
2434 val_r = val_r || tc->state == TCP_STATE_LISTEN;
2435 is_ip_valid = val_l && val_r;
2438 u8 is_valid = (tc->c_lcl_port == hdr->dst_port
2439 && (tc->state == TCP_STATE_LISTEN
2440 || tc->c_rmt_port == hdr->src_port) && is_ip_valid);
2444 handle = session_lookup_half_open_handle (&tc->connection);
2445 tmp = session_lookup_half_open_connection (handle & 0xFFFFFFFF,
2446 tc->c_proto, tc->c_is_ip4);
2450 if (tmp->lcl_port == hdr->dst_port
2451 && tmp->rmt_port == hdr->src_port)
2453 TCP_DBG ("half-open is valid!");
2462 * Lookup transport connection
2464 static tcp_connection_t *
2465 tcp_lookup_connection (u32 fib_index, vlib_buffer_t * b, u8 thread_index,
2469 transport_connection_t *tconn;
2470 tcp_connection_t *tc;
2475 ip4 = vlib_buffer_get_current (b);
2476 tcp = ip4_next_header (ip4);
2477 tconn = session_lookup_connection_wt4 (fib_index,
2482 TRANSPORT_PROTO_TCP,
2483 thread_index, &is_filtered);
2484 tc = tcp_get_connection_from_transport (tconn);
2485 ASSERT (tcp_lookup_is_valid (tc, b, tcp));
2490 ip6 = vlib_buffer_get_current (b);
2491 tcp = ip6_next_header (ip6);
2492 tconn = session_lookup_connection_wt6 (fib_index,
2497 TRANSPORT_PROTO_TCP,
2498 thread_index, &is_filtered);
2499 tc = tcp_get_connection_from_transport (tconn);
2500 ASSERT (tcp_lookup_is_valid (tc, b, tcp));
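/**
 * Find the listening connection, if any, for the segment's fib, destination
 * address and destination port.
 */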
2505 static tcp_connection_t *
2506 tcp_lookup_listener (vlib_buffer_t * b, u32 fib_index, int is_ip4)
2512 ip4_header_t *ip4 = vlib_buffer_get_current (b);
2513 tcp_header_t *tcp = tcp_buffer_hdr (b);
2514 s = session_lookup_listener4 (fib_index,
2516 tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
2520 ip6_header_t *ip6 = vlib_buffer_get_current (b);
2521 tcp_header_t *tcp = tcp_buffer_hdr (b);
2522 s = session_lookup_listener6 (fib_index,
2524 tcp->dst_port, TRANSPORT_PROTO_TCP, 1);
2527 if (PREDICT_TRUE (s != 0))
2528 return tcp_get_connection_from_transport (transport_get_listener
2529 (TRANSPORT_PROTO_TCP,
2530 s->connection_index));
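/**
 * Enable TSO for the connection if the output interface towards the peer
 * advertises GSO support.
 */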
2536 tcp_check_tx_offload (tcp_connection_t * tc, int is_ipv4)
2538 vnet_main_t *vnm = vnet_get_main ();
2539 const dpo_id_t *dpo;
2540 const load_balance_t *lb;
2541 vnet_hw_interface_t *hw_if;
2542 u32 sw_if_idx, lb_idx;
2546 ip4_address_t *dst_addr = &(tc->c_rmt_ip.ip4);
2547 lb_idx = ip4_fib_forwarding_lookup (tc->c_fib_index, dst_addr);
2551 ip6_address_t *dst_addr = &(tc->c_rmt_ip.ip6);
2552 lb_idx = ip6_fib_table_fwding_lookup (tc->c_fib_index, dst_addr);
2555 lb = load_balance_get (lb_idx);
2556 if (PREDICT_FALSE (lb->lb_n_buckets > 1))
2558 dpo = load_balance_get_bucket_i (lb, 0);
2560 sw_if_idx = dpo_get_urpf (dpo);
2561 if (PREDICT_FALSE (sw_if_idx == ~0))
2564 hw_if = vnet_get_sup_hw_interface (vnm, sw_if_idx);
2565 if (hw_if->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
2566 tc->cfg_flags |= TCP_CFG_F_TSO;
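/**
 * SYN-SENT state processing: handle the SYN-ACK (or a simultaneous SYN) for
 * half-open connections and move them to the current thread's pool.
 */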
2570 tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2571 vlib_frame_t * from_frame, int is_ip4)
2573 u32 n_left_from, *from, *first_buffer, errors = 0;
2574 u32 my_thread_index = vm->thread_index;
2575 tcp_worker_ctx_t *wrk = tcp_get_worker (my_thread_index);
2577 from = first_buffer = vlib_frame_vector_args (from_frame);
2578 n_left_from = from_frame->n_vectors;
2580 while (n_left_from > 0)
2582 u32 bi0, ack0, seq0, error0 = TCP_ERROR_NONE;
2583 tcp_connection_t *tc0, *new_tc0;
2584 tcp_header_t *tcp0 = 0;
2592 b0 = vlib_get_buffer (vm, bi0);
2594 tcp_half_open_connection_get (vnet_buffer (b0)->tcp.connection_index);
2595 if (PREDICT_FALSE (tc0 == 0))
2597 error0 = TCP_ERROR_INVALID_CONNECTION;
2601 /* Half-open completed recently but the connection wasn't removed
2602 * yet by the owning thread */
2603 if (PREDICT_FALSE (tc0->flags & TCP_CONN_HALF_OPEN_DONE))
2605 /* Make sure the connection actually exists */
2606 ASSERT (tcp_lookup_connection (tc0->c_fib_index, b0,
2607 my_thread_index, is_ip4));
2608 error0 = TCP_ERROR_SPURIOUS_SYN_ACK;
2612 ack0 = vnet_buffer (b0)->tcp.ack_number;
2613 seq0 = vnet_buffer (b0)->tcp.seq_number;
2614 tcp0 = tcp_buffer_hdr (b0);
2616 /* Crude check to see if the connection handle does not match
2617 * the packet. The connection probably just switched to established */
2618 if (PREDICT_FALSE (tcp0->dst_port != tc0->c_lcl_port
2619 || tcp0->src_port != tc0->c_rmt_port))
2621 error0 = TCP_ERROR_INVALID_CONNECTION;
2625 if (PREDICT_FALSE (!tcp_ack (tcp0) && !tcp_rst (tcp0)
2626 && !tcp_syn (tcp0)))
2628 error0 = TCP_ERROR_SEGMENT_INVALID;
2632 /* SYNs consume sequence numbers */
2633 vnet_buffer (b0)->tcp.seq_end += tcp_is_syn (tcp0);
2636 * 1. check the ACK bit
2640 * If the ACK bit is set
2641 * If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
2642 * the RST bit is set, if so drop the segment and return)
2643 * <SEQ=SEG.ACK><CTL=RST>
2644 * and discard the segment. Return.
2645 * If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.
2649 if (seq_leq (ack0, tc0->iss) || seq_gt (ack0, tc0->snd_nxt))
2651 if (!tcp_rst (tcp0))
2652 tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
2653 error0 = TCP_ERROR_RCV_WND;
2657 /* Make sure ACK is valid */
2658 if (seq_gt (tc0->snd_una, ack0))
2660 error0 = TCP_ERROR_ACK_INVALID;
2666 * 2. check the RST bit
2671 /* If ACK is acceptable, signal the client that the peer is not
2672 * willing to accept the connection and drop the connection */
2674 tcp_rcv_rst (wrk, tc0);
2675 error0 = TCP_ERROR_RST_RCVD;
2680 * 3. check the security and precedence (skipped)
2684 * 4. check the SYN bit
2687 /* No SYN flag. Drop. */
2688 if (!tcp_syn (tcp0))
2690 error0 = TCP_ERROR_SEGMENT_INVALID;
2695 if (tcp_options_parse (tcp0, &tc0->rcv_opts, 1))
2697 error0 = TCP_ERROR_OPTIONS;
2701 /* Valid SYN or SYN-ACK. Move connection from half-open pool to
2702 * current thread pool. */
2703 new_tc0 = tcp_connection_alloc_w_base (my_thread_index, tc0);
2704 new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
2705 new_tc0->irs = seq0;
2706 new_tc0->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
2707 new_tc0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
2709 /* If this is not the owning thread, wait for syn retransmit to
2710 * expire and clean up then */
2711 if (tcp_half_open_connection_cleanup (tc0))
2712 tc0->flags |= TCP_CONN_HALF_OPEN_DONE;
2714 if (tcp_opts_tstamp (&new_tc0->rcv_opts))
2716 new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
2717 new_tc0->tsval_recent_age = tcp_time_now ();
2720 if (tcp_opts_wscale (&new_tc0->rcv_opts))
2721 new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;
2723 new_tc0->rcv_wscale = 0;
2725 new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2726 << new_tc0->snd_wscale;
2727 new_tc0->snd_wl1 = seq0;
2728 new_tc0->snd_wl2 = ack0;
2730 tcp_connection_init_vars (new_tc0);
2732 /* SYN-ACK: See if we can switch to ESTABLISHED state */
2733 if (PREDICT_TRUE (tcp_ack (tcp0)))
2735 /* Our SYN is ACKed: we have iss < ack = snd_una */
2737 /* TODO Dequeue acknowledged segments if we support Fast Open */
2738 new_tc0->snd_una = ack0;
2739 new_tc0->state = TCP_STATE_ESTABLISHED;
2741 /* Make sure las is initialized for the wnd computation */
2742 new_tc0->rcv_las = new_tc0->rcv_nxt;
2744 /* Notify app that we have connection. If session layer can't
2745 * allocate session send reset */
2746 if (session_stream_connect_notify (&new_tc0->connection, 0))
2748 tcp_send_reset_w_pkt (new_tc0, b0, my_thread_index, is_ip4);
2749 tcp_connection_cleanup (new_tc0);
2750 error0 = TCP_ERROR_CREATE_SESSION_FAIL;
2754 new_tc0->tx_fifo_size =
2755 transport_tx_fifo_size (&new_tc0->connection);
2756 /* Update rtt with the syn-ack sample */
2757 tcp_estimate_initial_rtt (new_tc0);
2758 TCP_EVT (TCP_EVT_SYNACK_RCVD, new_tc0);
2759 error0 = TCP_ERROR_SYN_ACKS_RCVD;
2761 /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
2764 new_tc0->state = TCP_STATE_SYN_RCVD;
2766 /* Notify app that we have connection */
2767 if (session_stream_connect_notify (&new_tc0->connection, 0))
2769 tcp_connection_cleanup (new_tc0);
2770 tcp_send_reset_w_pkt (tc0, b0, my_thread_index, is_ip4);
2771 TCP_EVT (TCP_EVT_RST_SENT, tc0);
2772 error0 = TCP_ERROR_CREATE_SESSION_FAIL;
2776 new_tc0->tx_fifo_size =
2777 transport_tx_fifo_size (&new_tc0->connection);
2778 new_tc0->rtt_ts = 0;
2779 tcp_init_snd_vars (new_tc0);
2780 tcp_send_synack (new_tc0);
2781 error0 = TCP_ERROR_SYNS_RCVD;
2785 if (!(new_tc0->cfg_flags & TCP_CFG_F_NO_TSO))
2786 tcp_check_tx_offload (new_tc0, is_ip4);
2788 /* Read data, if any */
2789 if (PREDICT_FALSE (vnet_buffer (b0)->tcp.data_len))
2791 clib_warning ("rcvd data in syn-sent");
2792 error0 = tcp_segment_rcv (wrk, new_tc0, b0);
2793 if (error0 == TCP_ERROR_ACK_OK)
2794 error0 = TCP_ERROR_SYN_ACKS_RCVD;
2798 /* Send ack now instead of programming it because the connection was
2799 * just established and it's not optional. */
2800 tcp_send_ack (new_tc0);
2805 tcp_inc_counter (syn_sent, error0, 1);
2806 if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
2808 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2809 clib_memcpy_fast (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
2810 clib_memcpy_fast (&t0->tcp_connection, tc0,
2811 sizeof (t0->tcp_connection));
2815 errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
2817 tcp_inc_counter (syn_sent, TCP_ERROR_MSG_QUEUE_FULL, errors);
2818 vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
2819 tcp_handle_disconnects (wrk);
2821 return from_frame->n_vectors;
2824 VLIB_NODE_FN (tcp4_syn_sent_node) (vlib_main_t * vm,
2825 vlib_node_runtime_t * node,
2826 vlib_frame_t * from_frame)
2828 return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2831 VLIB_NODE_FN (tcp6_syn_sent_node) (vlib_main_t * vm,
2832 vlib_node_runtime_t * node,
2833 vlib_frame_t * from_frame)
2835 return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2839 VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
2841 .name = "tcp4-syn-sent",
2842 /* Takes a vector of packets. */
2843 .vector_size = sizeof (u32),
2844 .n_errors = TCP_N_ERROR,
2845 .error_strings = tcp_error_strings,
2846 .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2849 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2850 foreach_tcp_state_next
2853 .format_trace = format_tcp_rx_trace_short,
2858 VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
2860 .name = "tcp6-syn-sent",
2861 /* Takes a vector of packets. */
2862 .vector_size = sizeof (u32),
2863 .n_errors = TCP_N_ERROR,
2864 .error_strings = tcp_error_strings,
2865 .n_next_nodes = TCP_SYN_SENT_N_NEXT,
2868 #define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
2869 foreach_tcp_state_next
2872 .format_trace = format_tcp_rx_trace_short,
2877 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
2878 * as per RFC793 p. 64
2881 tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2882 vlib_frame_t * from_frame, int is_ip4)
2884 u32 thread_index = vm->thread_index, errors = 0, *first_buffer;
2885 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
2886 u32 n_left_from, *from, max_dequeue;
2888 from = first_buffer = vlib_frame_vector_args (from_frame);
2889 n_left_from = from_frame->n_vectors;
2891 while (n_left_from > 0)
2893 u32 bi0, error0 = TCP_ERROR_NONE;
2894 tcp_header_t *tcp0 = 0;
2895 tcp_connection_t *tc0;
2903 b0 = vlib_get_buffer (vm, bi0);
2904 tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
2906 if (PREDICT_FALSE (tc0 == 0))
2908 error0 = TCP_ERROR_INVALID_CONNECTION;
2912 tcp0 = tcp_buffer_hdr (b0);
2913 is_fin0 = tcp_is_fin (tcp0);
2917 if (!(tc0->connection.flags & TRANSPORT_CONNECTION_F_NO_LOOKUP))
2919 tcp_connection_t *tmp;
2920 tmp = tcp_lookup_connection (tc0->c_fib_index, b0, thread_index,
2922 if (tmp->state != tc0->state)
2924 if (tc0->state != TCP_STATE_CLOSED)
2925 clib_warning ("state changed");
2932 * Special treatment for CLOSED
2934 if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
2936 error0 = TCP_ERROR_CONNECTION_CLOSED;
2941 * For all other states (except LISTEN)
2944 /* 1-4: check SEQ, RST, SYN */
2945 if (PREDICT_FALSE (tcp_segment_validate (wrk, tc0, b0, tcp0, &error0)))
2948 /* 5: check the ACK field */
2951 case TCP_STATE_SYN_RCVD:
2953 /* Make sure the segment is exactly right */
2954 if (tc0->rcv_nxt != vnet_buffer (b0)->tcp.seq_number || is_fin0)
2956 tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
2957 error0 = TCP_ERROR_SEGMENT_INVALID;
2962 * If the segment acknowledgment is not acceptable, form a
2964 * <SEQ=SEG.ACK><CTL=RST>
2967 if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
2969 tcp_send_reset_w_pkt (tc0, b0, thread_index, is_ip4);
2970 error0 = TCP_ERROR_SEGMENT_INVALID;
2974 /* Update rtt and rto */
2975 tcp_estimate_initial_rtt (tc0);
2976 tcp_connection_tx_pacer_update (tc0);
2978 /* Switch state to ESTABLISHED */
2979 tc0->state = TCP_STATE_ESTABLISHED;
2980 TCP_EVT (TCP_EVT_STATE_CHANGE, tc0);
2982 if (!(tc0->cfg_flags & TCP_CFG_F_NO_TSO))
2983 tcp_check_tx_offload (tc0, is_ip4);
2985 /* Initialize session variables */
2986 tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
2987 tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
2988 << tc0->rcv_opts.wscale;
2989 tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
2990 tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
2992 /* Reset SYN-ACK retransmit and SYN_RCV establish timers */
2993 tcp_retransmit_timer_reset (tc0);
2994 if (session_stream_accept_notify (&tc0->connection))
2996 error0 = TCP_ERROR_MSG_QUEUE_FULL;
2997 tcp_send_reset (tc0);
2998 session_transport_delete_notify (&tc0->connection);
2999 tcp_connection_cleanup (tc0);
3002 error0 = TCP_ERROR_ACK_OK;
3004 case TCP_STATE_ESTABLISHED:
3005 /* We can get packets in established state here because they
3006 * were enqueued before the state change */
3007 if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
3011 case TCP_STATE_FIN_WAIT_1:
3012 /* In addition to the processing for the ESTABLISHED state, if
3013 * our FIN is now acknowledged then enter FIN-WAIT-2 and
3014 * continue processing in that state. */
3015 if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
3018 /* Still have to send the FIN */
3019 if (tc0->flags & TCP_CONN_FINPNDG)
3021 /* TX fifo finally drained */
3022 max_dequeue = transport_max_tx_dequeue (&tc0->connection);
3023 if (max_dequeue <= tc0->burst_acked)
3025 /* If a FIN was received and data was acked, extend the wait */
3026 else if ((tc0->flags & TCP_CONN_FINRCVD) && tc0->bytes_acked)
3027 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
3028 tcp_cfg.closewait_time);
3030 /* If FIN is ACKed */
3031 else if (tc0->snd_una == tc0->snd_nxt)
3033 /* Stop all retransmit timers because we have nothing more
3035 tcp_connection_timers_reset (tc0);
3037 /* We already have a FIN but didn't transition to CLOSING
3038 * because of outstanding tx data. Close the connection. */
3039 if (tc0->flags & TCP_CONN_FINRCVD)
3041 tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
3042 session_transport_closed_notify (&tc0->connection);
3043 tcp_program_cleanup (wrk, tc0);
3047 tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);
3048 /* Enable waitclose because we're willing to wait for peer's
3049 * FIN but not indefinitely. */
3050 tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.finwait2_time);
3052 /* Don't try to dequeue the acked FIN */
3053 if (tc0->burst_acked > 1)
3054 session_tx_fifo_dequeue_drop (&tc0->connection,
3055 tc0->burst_acked - 1);
3056 tc0->burst_acked = 0;
3059 case TCP_STATE_FIN_WAIT_2:
3060 /* In addition to the processing for the ESTABLISHED state, if
3061 * the retransmission queue is empty, the user's CLOSE can be
3062 * acknowledged ("ok") but do not delete the TCB. */
3063 if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
3065 tc0->burst_acked = 0;
3067 case TCP_STATE_CLOSE_WAIT:
3068 /* Do the same processing as for the ESTABLISHED state. */
3069 if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
3072 if (!(tc0->flags & TCP_CONN_FINPNDG))
3075 /* Still have outstanding tx data */
3076 max_dequeue = transport_max_tx_dequeue (&tc0->connection);
3077 if (max_dequeue > tc0->burst_acked)
3081 tcp_connection_timers_reset (tc0);
3082 tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
3083 tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
3085 case TCP_STATE_CLOSING:
3086 /* In addition to the processing for the ESTABLISHED state, if
3087 * the ACK acknowledges our FIN then enter the TIME-WAIT state,
3088 * otherwise ignore the segment. */
3089 if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
3092 if (tc0->snd_una != tc0->snd_nxt)
3095 tcp_connection_timers_reset (tc0);
3096 tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
3097 tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
3098 session_transport_closed_notify (&tc0->connection);
3102 case TCP_STATE_LAST_ACK:
3103 /* The only thing that [should] arrive in this state is an
3104 * acknowledgment of our FIN. If our FIN is now acknowledged,
3105 * delete the TCB, enter the CLOSED state, and return. */
3107 if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
3110 /* Apparently our ACK for the peer's FIN was lost */
3111 if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
3117 tcp_connection_set_state (tc0, TCP_STATE_CLOSED);
3118 session_transport_closed_notify (&tc0->connection);
3120 /* Don't free the connection from the data path since
3121 * we can't ensure that we have no packets already enqueued
3122 * to output. Rely instead on the waitclose timer */
3123 tcp_connection_timers_reset (tc0);
3124 tcp_program_cleanup (tcp_get_worker (tc0->c_thread_index), tc0);
3129 case TCP_STATE_TIME_WAIT:
3130 /* The only thing that can arrive in this state is a
3131 * retransmission of the remote FIN. Acknowledge it, and restart
3132 * the 2 MSL timeout. */
3134 if (tcp_rcv_ack_no_cc (tc0, b0, &error0))
3140 tcp_program_ack (tc0);
3141 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
3149 /* 6: check the URG bit TODO */
3151 /* 7: process the segment text */
3154 case TCP_STATE_ESTABLISHED:
3155 case TCP_STATE_FIN_WAIT_1:
3156 case TCP_STATE_FIN_WAIT_2:
3157 if (vnet_buffer (b0)->tcp.data_len)
3158 error0 = tcp_segment_rcv (wrk, tc0, b0);
3160 case TCP_STATE_CLOSE_WAIT:
3161 case TCP_STATE_CLOSING:
3162 case TCP_STATE_LAST_ACK:
3163 case TCP_STATE_TIME_WAIT:
3164 /* This should not occur, since a FIN has been received from the
3165 * remote side. Ignore the segment text. */
3169 /* 8: check the FIN bit */
3173 TCP_EVT (TCP_EVT_FIN_RCVD, tc0);
3177 case TCP_STATE_ESTABLISHED:
3178 /* Account for the FIN and send ack */
3180 tcp_program_ack (tc0);
3181 tcp_connection_set_state (tc0, TCP_STATE_CLOSE_WAIT);
3182 tcp_program_disconnect (wrk, tc0);
3183 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
3185 case TCP_STATE_SYN_RCVD:
3186 /* Send FIN-ACK, enter LAST-ACK and, because the app was not
3187 * notified yet, set a cleanup timer instead of relying on
3188 * disconnect notify and the implicit close call. */
3189 tcp_connection_timers_reset (tc0);
3192 tcp_connection_set_state (tc0, TCP_STATE_LAST_ACK);
3193 tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.lastack_time);
3195 case TCP_STATE_CLOSE_WAIT:
3196 case TCP_STATE_CLOSING:
3197 case TCP_STATE_LAST_ACK:
3200 case TCP_STATE_FIN_WAIT_1:
3203 if (tc0->flags & TCP_CONN_FINPNDG)
3205 /* If data is outstanding, stay in FIN_WAIT_1 and try to finish
3206 * sending it. Since we already received a FIN, do not wait
3208 tc0->flags |= TCP_CONN_FINRCVD;
3209 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
3210 tcp_cfg.closewait_time);
3214 tcp_connection_set_state (tc0, TCP_STATE_CLOSING);
3215 tcp_program_ack (tc0);
3216 /* Wait for ACK for our FIN but not forever */
3217 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE,
3218 tcp_cfg.closing_time);
3221 case TCP_STATE_FIN_WAIT_2:
3222 /* Got FIN, send ACK! Be more aggressive with resource cleanup */
3224 tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
3225 tcp_connection_timers_reset (tc0);
3226 tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
3227 tcp_program_ack (tc0);
3228 session_transport_closed_notify (&tc0->connection);
3230 case TCP_STATE_TIME_WAIT:
3231 /* Remain in the TIME-WAIT state. Restart the time-wait
3234 tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, tcp_cfg.timewait_time);
3237 error0 = TCP_ERROR_FIN_RCVD;
3241 tcp_inc_counter (rcv_process, error0, 1);
3242 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
3244 tcp_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
3245 tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
3249 errors = session_main_flush_enqueue_events (TRANSPORT_PROTO_TCP,
3251 tcp_inc_counter (rcv_process, TCP_ERROR_MSG_QUEUE_FULL, errors);
3252 tcp_handle_postponed_dequeues (wrk);
3253 tcp_handle_disconnects (wrk);
3254 vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
3256 return from_frame->n_vectors;
3259 VLIB_NODE_FN (tcp4_rcv_process_node) (vlib_main_t * vm,
3260 vlib_node_runtime_t * node,
3261 vlib_frame_t * from_frame)
3263 return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
3266 VLIB_NODE_FN (tcp6_rcv_process_node) (vlib_main_t * vm,
3267 vlib_node_runtime_t * node,
3268 vlib_frame_t * from_frame)
3270 return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
3274 VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
3276 .name = "tcp4-rcv-process",
3277 /* Takes a vector of packets. */
3278 .vector_size = sizeof (u32),
3279 .n_errors = TCP_N_ERROR,
3280 .error_strings = tcp_error_strings,
3281 .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
3284 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
3285 foreach_tcp_state_next
3288 .format_trace = format_tcp_rx_trace_short,
3293 VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
3295 .name = "tcp6-rcv-process",
3296 /* Takes a vector of packets. */
3297 .vector_size = sizeof (u32),
3298 .n_errors = TCP_N_ERROR,
3299 .error_strings = tcp_error_strings,
3300 .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
3303 #define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
3304 foreach_tcp_state_next
3307 .format_trace = format_tcp_rx_trace_short,
3312 * LISTEN state processing as per RFC 793 p. 65
3315 tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
3316 vlib_frame_t * from_frame, int is_ip4)
3318 u32 n_left_from, *from, n_syns = 0, *first_buffer;
3319 u32 thread_index = vm->thread_index;
3321 from = first_buffer = vlib_frame_vector_args (from_frame);
3322 n_left_from = from_frame->n_vectors;
3324 while (n_left_from > 0)
3326 u32 bi, error = TCP_ERROR_NONE;
3327 tcp_connection_t *lc, *child;
3334 b = vlib_get_buffer (vm, bi);
3336 lc = tcp_listener_get (vnet_buffer (b)->tcp.connection_index);
3337 if (PREDICT_FALSE (lc == 0))
3339 tcp_connection_t *tc;
3340 tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
3342 if (tc->state != TCP_STATE_TIME_WAIT)
3344 error = TCP_ERROR_CREATE_EXISTS;
3347 lc = tcp_lookup_listener (b, tc->c_fib_index, is_ip4);
3348 /* clean up the old session */
3349 tcp_connection_del (tc);
3352 /* Make sure connection wasn't just created */
3353 child = tcp_lookup_connection (lc->c_fib_index, b, thread_index,
3355 if (PREDICT_FALSE (child->state != TCP_STATE_LISTEN))
3357 error = TCP_ERROR_CREATE_EXISTS;
3361 /* Create child session. For syn-flood protection use filter */
3363 /* 1. first check for an RST: handled in dispatch */
3364 /* if (tcp_rst (th0))
3368 /* 2. second check for an ACK: handled in dispatch */
3369 /* if (tcp_ack (th0))
3371 tcp_send_reset (b0, is_ip4);
3376 /* 3. check for a SYN (did that already) */
3378 /* Create child session and send SYN-ACK */
3379 child = tcp_connection_alloc (thread_index);
3381 if (tcp_options_parse (tcp_buffer_hdr (b), &child->rcv_opts, 1))
3383 error = TCP_ERROR_OPTIONS;
3384 tcp_connection_free (child);
3388 tcp_init_w_buffer (child, b, is_ip4);
3390 child->state = TCP_STATE_SYN_RCVD;
3391 child->c_fib_index = lc->c_fib_index;
3392 child->cc_algo = lc->cc_algo;
3393 tcp_connection_init_vars (child);
3394 child->rto = TCP_RTO_MIN;
3396 if (session_stream_accept (&child->connection, lc->c_s_index,
3397 lc->c_thread_index, 0 /* notify */ ))
3399 tcp_connection_cleanup (child);
3400 error = TCP_ERROR_CREATE_SESSION_FAIL;
3404 child->tx_fifo_size = transport_tx_fifo_size (&child->connection);
3406 tcp_send_synack (child);
3408 TCP_EVT (TCP_EVT_SYN_RCVD, child, 1);
3412 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
3415 t = vlib_add_trace (vm, node, b, sizeof (*t));
3416 clib_memcpy_fast (&t->tcp_header, tcp_buffer_hdr (b),
3417 sizeof (t->tcp_header));
3418 clib_memcpy_fast (&t->tcp_connection, lc,
3419 sizeof (t->tcp_connection));
3422 n_syns += (error == TCP_ERROR_NONE);
3425 tcp_inc_counter (listen, TCP_ERROR_SYNS_RCVD, n_syns);
3426 vlib_buffer_free (vm, first_buffer, from_frame->n_vectors);
3428 return from_frame->n_vectors;
3431 VLIB_NODE_FN (tcp4_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3432 vlib_frame_t * from_frame)
3434 return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
3437 VLIB_NODE_FN (tcp6_listen_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3438 vlib_frame_t * from_frame)
3440 return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
3444 VLIB_REGISTER_NODE (tcp4_listen_node) =
3446 .name = "tcp4-listen",
3447 /* Takes a vector of packets. */
3448 .vector_size = sizeof (u32),
3449 .n_errors = TCP_N_ERROR,
3450 .error_strings = tcp_error_strings,
3451 .n_next_nodes = TCP_LISTEN_N_NEXT,
3454 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
3455 foreach_tcp_state_next
3458 .format_trace = format_tcp_rx_trace_short,
3463 VLIB_REGISTER_NODE (tcp6_listen_node) =
3465 .name = "tcp6-listen",
3466 /* Takes a vector of packets. */
3467 .vector_size = sizeof (u32),
3468 .n_errors = TCP_N_ERROR,
3469 .error_strings = tcp_error_strings,
3470 .n_next_nodes = TCP_LISTEN_N_NEXT,
3473 #define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
3474 foreach_tcp_state_next
3477 .format_trace = format_tcp_rx_trace_short,
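/* Next-node indices used by the tcp[46]-input nodes when dispatching
 * segments to the per-state nodes above. */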
3481 typedef enum _tcp_input_next
3483 TCP_INPUT_NEXT_DROP,
3484 TCP_INPUT_NEXT_LISTEN,
3485 TCP_INPUT_NEXT_RCV_PROCESS,
3486 TCP_INPUT_NEXT_SYN_SENT,
3487 TCP_INPUT_NEXT_ESTABLISHED,
3488 TCP_INPUT_NEXT_RESET,
3489 TCP_INPUT_NEXT_PUNT,
3493 #define foreach_tcp4_input_next \
3494 _ (DROP, "ip4-drop") \
3495 _ (LISTEN, "tcp4-listen") \
3496 _ (RCV_PROCESS, "tcp4-rcv-process") \
3497 _ (SYN_SENT, "tcp4-syn-sent") \
3498 _ (ESTABLISHED, "tcp4-established") \
3499 _ (RESET, "tcp4-reset") \
3500 _ (PUNT, "ip4-punt")
3502 #define foreach_tcp6_input_next \
3503 _ (DROP, "ip6-drop") \
3504 _ (LISTEN, "tcp6-listen") \
3505 _ (RCV_PROCESS, "tcp6-rcv-process") \
3506 _ (SYN_SENT, "tcp6-syn-sent") \
3507 _ (ESTABLISHED, "tcp6-established") \
3508 _ (RESET, "tcp6-reset") \
3509 _ (PUNT, "ip6-punt")
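/* Only these TCP flag bits index the per-state dispatch table; the lookup is
 * essentially (sketch, mirroring tcp_input_dispatch_buffer below):
 *
 *   u8 flags = tcp->flags & filter_flags;
 *   next = tm->dispatch_table[tc->state][flags].next;
 */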
3511 #define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
3514 tcp_input_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
3515 vlib_buffer_t ** bs, u32 n_bufs, u8 is_ip4)
3517 tcp_connection_t *tc;
3522 for (i = 0; i < n_bufs; i++)
3524 if (bs[i]->flags & VLIB_BUFFER_IS_TRACED)
3526 t = vlib_add_trace (vm, node, bs[i], sizeof (*t));
3527 tc = tcp_connection_get (vnet_buffer (bs[i])->tcp.connection_index,
3529 tcp = vlib_buffer_get_current (bs[i]);
3530 tcp_set_rx_trace_data (t, tc, tcp, bs[i], is_ip4);
3536 tcp_input_set_error_next (tcp_main_t * tm, u16 * next, u32 * error, u8 is_ip4)
3538 if (*error == TCP_ERROR_FILTERED || *error == TCP_ERROR_WRONG_THREAD)
3540 *next = TCP_INPUT_NEXT_DROP;
3542 else if ((is_ip4 && tm->punt_unknown4) || (!is_ip4 && tm->punt_unknown6))
3544 *next = TCP_INPUT_NEXT_PUNT;
3545 *error = TCP_ERROR_PUNT;
3549 *next = TCP_INPUT_NEXT_RESET;
3550 *error = TCP_ERROR_NO_LISTENER;
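/**
 * Validate header lengths, look up the transport connection for the buffer
 * (or use the connection index already stored in the buffer when is_nolookup
 * is set) and fill in the vnet_buffer tcp metadata used by the per-state
 * nodes.
 */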
3554 always_inline tcp_connection_t *
3555 tcp_input_lookup_buffer (vlib_buffer_t * b, u8 thread_index, u32 * error,
3556 u8 is_ip4, u8 is_nolookup)
3558 u32 fib_index = vnet_buffer (b)->ip.fib_index;
3559 int n_advance_bytes, n_data_bytes;
3560 transport_connection_t *tc;
3566 ip4_header_t *ip4 = vlib_buffer_get_current (b);
3567 int ip_hdr_bytes = ip4_header_bytes (ip4);
3568 if (PREDICT_FALSE (b->current_length < ip_hdr_bytes + sizeof (*tcp)))
3570 *error = TCP_ERROR_LENGTH;
3573 tcp = ip4_next_header (ip4);
3574 vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip4;
3575 n_advance_bytes = (ip_hdr_bytes + tcp_header_bytes (tcp));
3576 n_data_bytes = clib_net_to_host_u16 (ip4->length) - n_advance_bytes;
3578 /* Length check. Checksum already computed by ipx_local, no need to compute it again */
3579 if (PREDICT_FALSE (n_data_bytes < 0))
3581 *error = TCP_ERROR_LENGTH;
3586 tc = session_lookup_connection_wt4 (fib_index, &ip4->dst_address,
3587 &ip4->src_address, tcp->dst_port,
3589 TRANSPORT_PROTO_TCP, thread_index,
3594 ip6_header_t *ip6 = vlib_buffer_get_current (b);
3595 if (PREDICT_FALSE (b->current_length < sizeof (*ip6) + sizeof (*tcp)))
3597 *error = TCP_ERROR_LENGTH;
3600 tcp = ip6_next_header (ip6);
3601 vnet_buffer (b)->tcp.hdr_offset = (u8 *) tcp - (u8 *) ip6;
3602 n_advance_bytes = tcp_header_bytes (tcp);
3603 n_data_bytes = clib_net_to_host_u16 (ip6->payload_length)
3605 n_advance_bytes += sizeof (ip6[0]);
3607 if (PREDICT_FALSE (n_data_bytes < 0))
3609 *error = TCP_ERROR_LENGTH;
3616 (ip6_address_is_link_local_unicast (&ip6->dst_address)))
3618 ip4_main_t *im = &ip4_main;
3619 fib_index = vec_elt (im->fib_index_by_sw_if_index,
3620 vnet_buffer (b)->sw_if_index[VLIB_RX]);
3623 tc = session_lookup_connection_wt6 (fib_index, &ip6->dst_address,
3625 tcp->dst_port, tcp->src_port,
3626 TRANSPORT_PROTO_TCP,
3627 thread_index, &result);
3633 (transport_connection_t *) tcp_connection_get (vnet_buffer (b)->
3634 tcp.connection_index,
3637 vnet_buffer (b)->tcp.seq_number = clib_net_to_host_u32 (tcp->seq_number);
3638 vnet_buffer (b)->tcp.ack_number = clib_net_to_host_u32 (tcp->ack_number);
3639 vnet_buffer (b)->tcp.data_offset = n_advance_bytes;
3640 vnet_buffer (b)->tcp.data_len = n_data_bytes;
3641 vnet_buffer (b)->tcp.seq_end = vnet_buffer (b)->tcp.seq_number
3643 vnet_buffer (b)->tcp.flags = 0;
3645 *error = result ? TCP_ERROR_NONE + result : *error;
3647 return tcp_get_connection_from_transport (tc);
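/**
 * Pick the next node and error for this segment from the state/flags
 * dispatch table and flag unexpected combinations.
 */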
3651 tcp_input_dispatch_buffer (tcp_main_t * tm, tcp_connection_t * tc,
3652 vlib_buffer_t * b, u16 * next,
3653 vlib_node_runtime_t * error_node)
3659 tcp = tcp_buffer_hdr (b);
3660 flags = tcp->flags & filter_flags;
3661 *next = tm->dispatch_table[tc->state][flags].next;
3662 error = tm->dispatch_table[tc->state][flags].error;
3665 if (PREDICT_FALSE (error != TCP_ERROR_NONE))
3667 b->error = error_node->errors[error];
3668 if (error == TCP_ERROR_DISPATCH)
3669 clib_warning ("tcp conn %u disp error state %U flags %U",
3670 tc->c_c_index, format_tcp_state, tc->state,
3671 format_tcp_flags, (int) flags);
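/**
 * Shared tcp[46]-input worker: look up the connection for each buffer, two at
 * a time when possible, and dispatch it to the per-state node selected by the
 * dispatch table.
 */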
3676 tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
3677 vlib_frame_t * frame, int is_ip4, u8 is_nolookup)
3679 u32 n_left_from, *from, thread_index = vm->thread_index;
3680 tcp_main_t *tm = vnet_get_tcp_main ();
3681 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
3682 u16 nexts[VLIB_FRAME_SIZE], *next;
3683 vlib_node_runtime_t *error_node;
3685 tcp_set_time_now (tcp_get_worker (thread_index));
3687 error_node = vlib_node_get_runtime (vm, tcp_node_index (input, is_ip4));
3688 from = vlib_frame_vector_args (frame);
3689 n_left_from = frame->n_vectors;
3690 vlib_get_buffers (vm, from, bufs, n_left_from);
3695 while (n_left_from >= 4)
3697 u32 error0 = TCP_ERROR_NO_LISTENER, error1 = TCP_ERROR_NO_LISTENER;
3698 tcp_connection_t *tc0, *tc1;
3701 vlib_prefetch_buffer_header (b[2], STORE);
3702 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3704 vlib_prefetch_buffer_header (b[3], STORE);
3705 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3708 next[0] = next[1] = TCP_INPUT_NEXT_DROP;
3710 tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
3712 tc1 = tcp_input_lookup_buffer (b[1], thread_index, &error1, is_ip4,
3715 if (PREDICT_TRUE (!tc0 + !tc1 == 0))
3717 ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
3718 ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
3720 vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3721 vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
3723 tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], error_node);
3724 tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], error_node);
3728 if (PREDICT_TRUE (tc0 != 0))
3730 ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
3731 vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3732 tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], error_node);
3736 tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
3737 b[0]->error = error_node->errors[error0];
3740 if (PREDICT_TRUE (tc1 != 0))
3742 ASSERT (tcp_lookup_is_valid (tc1, b[1], tcp_buffer_hdr (b[1])));
3743 vnet_buffer (b[1])->tcp.connection_index = tc1->c_c_index;
3744 tcp_input_dispatch_buffer (tm, tc1, b[1], &next[1], error_node);
3748 tcp_input_set_error_next (tm, &next[1], &error1, is_ip4);
3749 b[1]->error = error_node->errors[error1];
3757 while (n_left_from > 0)
3759 tcp_connection_t *tc0;
3760 u32 error0 = TCP_ERROR_NO_LISTENER;
3762 if (n_left_from > 1)
3764 vlib_prefetch_buffer_header (b[1], STORE);
3765 CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
3768 next[0] = TCP_INPUT_NEXT_DROP;
3769 tc0 = tcp_input_lookup_buffer (b[0], thread_index, &error0, is_ip4,
3771 if (PREDICT_TRUE (tc0 != 0))
3773 ASSERT (tcp_lookup_is_valid (tc0, b[0], tcp_buffer_hdr (b[0])));
3774 vnet_buffer (b[0])->tcp.connection_index = tc0->c_c_index;
3775 tcp_input_dispatch_buffer (tm, tc0, b[0], &next[0], error_node);
3779 tcp_input_set_error_next (tm, &next[0], &error0, is_ip4);
3780 b[0]->error = error_node->errors[error0];
3788 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
3789 tcp_input_trace_frame (vm, node, bufs, frame->n_vectors, is_ip4);
3791 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
3792 return frame->n_vectors;
3795 VLIB_NODE_FN (tcp4_input_nolookup_node) (vlib_main_t * vm,
3796 vlib_node_runtime_t * node,
3797 vlib_frame_t * from_frame)
3799 return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
3800 1 /* is_nolookup */ );
3803 VLIB_NODE_FN (tcp6_input_nolookup_node) (vlib_main_t * vm,
3804 vlib_node_runtime_t * node,
3805 vlib_frame_t * from_frame)
3807 return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
3808 1 /* is_nolookup */ );
3812 VLIB_REGISTER_NODE (tcp4_input_nolookup_node) =
3814 .name = "tcp4-input-nolookup",
3815 /* Takes a vector of packets. */
3816 .vector_size = sizeof (u32),
3817 .n_errors = TCP_N_ERROR,
3818 .error_strings = tcp_error_strings,
3819 .n_next_nodes = TCP_INPUT_N_NEXT,
3822 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3823 foreach_tcp4_input_next
3826 .format_buffer = format_tcp_header,
3827 .format_trace = format_tcp_rx_trace,
3832 VLIB_REGISTER_NODE (tcp6_input_nolookup_node) =
3834 .name = "tcp6-input-nolookup",
3835 /* Takes a vector of packets. */
3836 .vector_size = sizeof (u32),
3837 .n_errors = TCP_N_ERROR,
3838 .error_strings = tcp_error_strings,
3839 .n_next_nodes = TCP_INPUT_N_NEXT,
3842 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3843 foreach_tcp6_input_next
3846 .format_buffer = format_tcp_header,
3847 .format_trace = format_tcp_rx_trace,
3851 VLIB_NODE_FN (tcp4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3852 vlib_frame_t * from_frame)
3854 return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ ,
3855 0 /* is_nolookup */ );
3858 VLIB_NODE_FN (tcp6_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
3859 vlib_frame_t * from_frame)
3861 return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ ,
3862 0 /* is_nolookup */ );
3866 VLIB_REGISTER_NODE (tcp4_input_node) =
3868 .name = "tcp4-input",
3869 /* Takes a vector of packets. */
3870 .vector_size = sizeof (u32),
3871 .n_errors = TCP_N_ERROR,
3872 .error_strings = tcp_error_strings,
3873 .n_next_nodes = TCP_INPUT_N_NEXT,
3876 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3877 foreach_tcp4_input_next
3880 .format_buffer = format_tcp_header,
3881 .format_trace = format_tcp_rx_trace,
3886 VLIB_REGISTER_NODE (tcp6_input_node) =
3888 .name = "tcp6-input",
3889 /* Takes a vector of packets. */
3890 .vector_size = sizeof (u32),
3891 .n_errors = TCP_N_ERROR,
3892 .error_strings = tcp_error_strings,
3893 .n_next_nodes = TCP_INPUT_N_NEXT,
3896 #define _(s,n) [TCP_INPUT_NEXT_##s] = n,
3897 foreach_tcp6_input_next
3900 .format_buffer = format_tcp_header,
3901 .format_trace = format_tcp_rx_trace,
3905 #ifndef CLIB_MARCH_VARIANT
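/* Build the per-state, per-flags dispatch table. Entries not explicitly
 * listed below default to drop with TCP_ERROR_DISPATCH. */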
3907 tcp_dispatch_table_init (tcp_main_t * tm)
3910 for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
3911 for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
3913 tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
3914 tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
3917 #define _(t,f,n,e) \
3919 tm->dispatch_table[TCP_STATE_##t][f].next = (n); \
3920 tm->dispatch_table[TCP_STATE_##t][f].error = (e); \
3923 /* RFC 793: In LISTEN if RST drop and if ACK return RST */
3924 _(LISTEN, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3925 _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_ACK_INVALID);
3926 _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_INVALID_CONNECTION);
3927 _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
3928 _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
3929 TCP_ERROR_ACK_INVALID);
3930 _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
3931 TCP_ERROR_SEGMENT_INVALID);
3932 _(LISTEN, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3933 TCP_ERROR_SEGMENT_INVALID);
3934 _(LISTEN, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3935 TCP_ERROR_INVALID_CONNECTION);
3936 _(LISTEN, TCP_FLAG_FIN, TCP_INPUT_NEXT_RESET, TCP_ERROR_SEGMENT_INVALID);
3937 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
3938 TCP_ERROR_SEGMENT_INVALID);
3939 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
3940 TCP_ERROR_SEGMENT_INVALID);
3941 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3942 TCP_ERROR_SEGMENT_INVALID);
3943 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_DROP,
3944 TCP_ERROR_SEGMENT_INVALID);
3945 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
3946 TCP_ERROR_SEGMENT_INVALID);
3947 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_DROP,
3948 TCP_ERROR_SEGMENT_INVALID);
3949 _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3950 TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3951 /* ACK for a SYN-ACK -> tcp-rcv-process. */
3952 _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3953 _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3954 _(SYN_RCVD, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3956 _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3957 _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3959 _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3961 _(SYN_RCVD, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3962 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3963 _(SYN_RCVD, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3964 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
3966 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
3968 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3969 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3970 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
3972 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
3973 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3974 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
3975 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3976 _(SYN_RCVD, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
3977 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
3978 _(SYN_RCVD, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
3979 /* SYN-ACK for a SYN */
3980 _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3982 _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3983 _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3984 _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3986 _(SYN_SENT, TCP_FLAG_FIN, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
3987 _(SYN_SENT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
3989 /* ACK for an established connection -> tcp-established. */
3990 _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3991 /* FIN for an established connection -> tcp-established. */
3992 _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3993 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
3995 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
3997 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
3998 TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
3999 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED,
4001 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
4002 TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
4003 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
4004 TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
4005 _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4006 TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
4007 _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
4008 _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
4010 _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
4011 _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
4013 _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED,
4015 _(ESTABLISHED, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4016 TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
4017 _(ESTABLISHED, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
4018 /* ACK or FIN-ACK to our FIN */
4019 _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4020 _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
4022 /* FIN in reply to our FIN from the other side */
4023 _(FIN_WAIT_1, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
4024 _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4025 _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
4027 _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
4028 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4029 _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
4030 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4031 _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4032 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4033 _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
4035 _(FIN_WAIT_1, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
4036 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4037 _(FIN_WAIT_1, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4038 _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
4040 _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4042 _(FIN_WAIT_1, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4043 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4044 _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4045 _(FIN_WAIT_1, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4047 _(CLOSING, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
4048 _(CLOSING, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4049 _(CLOSING, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4050 _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
4052 _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4054 _(CLOSING, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4055 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4056 _(CLOSING, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4057 _(CLOSING, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4059 _(CLOSING, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4060 _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4062 _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
4064 _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
4065 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4066 _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
4068 _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
4069 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4070 _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
4071 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4072 _(CLOSING, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4073 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4074 /* FIN confirming that the peer (app) has closed */
4075 _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4076 _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4077 _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4079 _(FIN_WAIT_2, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4080 _(FIN_WAIT_2, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4082 _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4083 _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4085 _(CLOSE_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4086 _(CLOSE_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4088 _(LAST_ACK, 0, TCP_INPUT_NEXT_DROP, TCP_ERROR_SEGMENT_INVALID);
4089 _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4090 _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4091 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4093 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS,
4095 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_ACK,
4096 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4097 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
4099 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_ACK,
4100 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4101 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST,
4102 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4103 _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4104 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4105 _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4106 _(LAST_ACK, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4108 _(LAST_ACK, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4109 _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4111 _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS,
4113 _(LAST_ACK, TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_ACK,
4114 TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4115 _(TIME_WAIT, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
4116 _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4117 _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4119 _(TIME_WAIT, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4120 _(TIME_WAIT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
4122 _(TIME_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
4123 /* RFC793 CLOSED: An incoming segment containing a RST is discarded. An
4124 * incoming segment not containing a RST causes a RST to be sent in
4126 _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
4127 _(CLOSED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_DROP,
4128 TCP_ERROR_CONNECTION_CLOSED);
4129 _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
4130 _(CLOSED, TCP_FLAG_SYN, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
4131 _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET,
4132 TCP_ERROR_CONNECTION_CLOSED);
4136 static clib_error_t *
4137 tcp_input_init (vlib_main_t * vm)
4139 clib_error_t *error = 0;
4140 tcp_main_t *tm = vnet_get_tcp_main ();
4142 if ((error = vlib_call_init_function (vm, tcp_init)))
4145 /* Initialize dispatch table. */
4146 tcp_dispatch_table_init (tm);
4151 VLIB_INIT_FUNCTION (tcp_input_init);
4153 #endif /* CLIB_MARCH_VARIANT */
4156 * fd.io coding-style-patch-verification: ON
4159 * eval: (c-set-style "gnu")