 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.

#include <vppinfra/sparse_vec.h>
#include <vnet/tcp/tcp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/session/session.h>
static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>

/* All TCP nodes have the same outgoing arcs */
#define foreach_tcp_state_next \
  _ (DROP, "error-drop") \
  _ (TCP4_OUTPUT, "tcp4-output") \
  _ (TCP6_OUTPUT, "tcp6-output")

typedef enum _tcp_established_next
#define _(s,n) TCP_ESTABLISHED_NEXT_##s,
  foreach_tcp_state_next
  TCP_ESTABLISHED_N_NEXT,
} tcp_established_next_t;

typedef enum _tcp_rcv_process_next
#define _(s,n) TCP_RCV_PROCESS_NEXT_##s,
  foreach_tcp_state_next
  TCP_RCV_PROCESS_N_NEXT,
} tcp_rcv_process_next_t;

typedef enum _tcp_syn_sent_next
#define _(s,n) TCP_SYN_SENT_NEXT_##s,
  foreach_tcp_state_next
} tcp_syn_sent_next_t;

typedef enum _tcp_listen_next
#define _(s,n) TCP_LISTEN_NEXT_##s,
  foreach_tcp_state_next

/* Generic, state independent indices */
typedef enum _tcp_state_next
#define _(s,n) TCP_NEXT_##s,
  foreach_tcp_state_next

#define tcp_next_output(is_ip4) (is_ip4 ? TCP_NEXT_TCP4_OUTPUT \
                                        : TCP_NEXT_TCP6_OUTPUT)

vlib_node_registration_t tcp4_established_node;
vlib_node_registration_t tcp6_established_node;
 * Validate segment sequence number. As per RFC793:
 *
 * Segment Receive Test
 * ------- -------  -------------------------------------------
 * 0       0        SEG.SEQ = RCV.NXT
 * 0       >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 * >0      >0       RCV.NXT =< SEG.SEQ < RCV.NXT+RCV.WND
 *                  or RCV.NXT =< SEG.SEQ+SEG.LEN-1 < RCV.NXT+RCV.WND
 *
 * This ultimately consists of checking if the segment falls within the
 * window. The one important difference compared to RFC793 is that we use
 * rcv_las, i.e., the rcv_nxt at the last ack sent, instead of rcv_nxt, since
 * that is the peer's reference when computing our receive window.
 *
 * The check
 *  seq_leq (end_seq, tc->rcv_las + tc->rcv_wnd) && seq_geq (seq, tc->rcv_las)
 * however, is too strict when we have retransmits. Instead we just check that
 * the seq is not beyond the right edge and that the end of the segment is not
 * less than the left edge.
 *
 * N.B. rcv_nxt and rcv_wnd are both updated in this node if acks are sent, so
 * use rcv_nxt in the right edge window test instead of rcv_las.
tcp_segment_in_rcv_wnd (tcp_connection_t * tc, u32 seq, u32 end_seq)
  return (seq_geq (end_seq, tc->rcv_las)
          && seq_leq (seq, tc->rcv_nxt + tc->rcv_wnd));
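/* Worked example of the test above, with invented numbers: assume
 * rcv_las = rcv_nxt = 1000 and rcv_wnd = 500, so the right edge is 1500.
 * A retransmit with seq = 900, end_seq = 1100 passes (end_seq is not left
 * of 1000 and seq is not right of 1500), while seq = 1600 fails the right
 * edge test and end_seq = 900 falls entirely left of the window. */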
 * Parse TCP header options.
 *
 * @param th TCP header
 * @param to TCP options data structure to be populated
 * @return -1 if parsing failed

tcp_options_parse (tcp_header_t * th, tcp_options_t * to)
  u8 opt_len, opts_len, kind;

  opts_len = (tcp_doff (th) << 2) - sizeof (tcp_header_t);
  data = (const u8 *) (th + 1);

  /* Zero out all flags but those set in SYN */
  to->flags &= (TCP_OPTS_FLAG_SACK_PERMITTED | TCP_OPTS_FLAG_WSCALE);

  for (; opts_len > 0; opts_len -= opt_len, data += opt_len)
      /* Get options length */
      if (kind == TCP_OPTION_EOL)
      else if (kind == TCP_OPTION_NOOP)
          /* weird option length */
          if (opt_len < 2 || opt_len > opts_len)
          if ((opt_len == TCP_OPTION_LEN_MSS) && tcp_syn (th))
              to->flags |= TCP_OPTS_FLAG_MSS;
              to->mss = clib_net_to_host_u16 (*(u16 *) (data + 2));
        case TCP_OPTION_WINDOW_SCALE:
          if ((opt_len == TCP_OPTION_LEN_WINDOW_SCALE) && tcp_syn (th))
              to->flags |= TCP_OPTS_FLAG_WSCALE;
              to->wscale = data[2];
              if (to->wscale > TCP_MAX_WND_SCALE)
                  clib_warning ("Illegal window scaling value: %d",
                  to->wscale = TCP_MAX_WND_SCALE;
        case TCP_OPTION_TIMESTAMP:
          if (opt_len == TCP_OPTION_LEN_TIMESTAMP)
              to->flags |= TCP_OPTS_FLAG_TSTAMP;
              to->tsval = clib_net_to_host_u32 (*(u32 *) (data + 2));
              to->tsecr = clib_net_to_host_u32 (*(u32 *) (data + 6));
        case TCP_OPTION_SACK_PERMITTED:
          if (opt_len == TCP_OPTION_LEN_SACK_PERMITTED && tcp_syn (th))
            to->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
        case TCP_OPTION_SACK_BLOCK:
          /* If SACK permitted was not advertised or a SYN, break */
          if ((to->flags & TCP_OPTS_FLAG_SACK_PERMITTED) == 0 || tcp_syn (th))
          /* If too short or not correctly formatted, break */
          if (opt_len < 10 || ((opt_len - 2) % TCP_OPTION_LEN_SACK_BLOCK))
          to->flags |= TCP_OPTS_FLAG_SACK;
          to->n_sack_blocks = (opt_len - 2) / TCP_OPTION_LEN_SACK_BLOCK;
          vec_reset_length (to->sacks);
          for (j = 0; j < to->n_sack_blocks; j++)
              b.start = clib_net_to_host_u32 (*(u32 *) (data + 2 + 4 * j));
              b.end = clib_net_to_host_u32 (*(u32 *) (data + 6 + 4 * j));
              vec_add1 (to->sacks, b);
          /* Nothing to see here */
 * RFC1323: Check against wrapped sequence numbers (PAWS). If we have a
 * timestamp to echo and it's less than tsval_recent, drop the segment
 * but still send an ACK in order to retain TCP's mechanism for detecting
 * and recovering from half-open connections.
 *
 * Or at least that's what the theory says. It seems that this might not work
 * very well with packet reordering and fast retransmit. XXX
tcp_segment_check_paws (tcp_connection_t * tc)
  return tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
    && timestamp_lt (tc->rcv_opts.tsval, tc->tsval_recent);
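/* Example with made-up timestamps: if tsval_recent = 5000, a segment
 * carrying tsval = 4000 fails PAWS and is dropped (after acking), while
 * tsval >= 5000 passes. timestamp_lt uses serial number arithmetic, so a
 * recently wrapped tsval near 0 still compares as newer than a
 * tsval_recent near 2^32. */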
 * Update tsval recent.

tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end)
  /* RFC1323: If Last.ACK.sent falls within the range of sequence numbers
   * of an incoming segment:
   *    SEG.SEQ <= Last.ACK.sent < SEG.SEQ + SEG.LEN
   * then the TSval from the segment is copied to TS.Recent;
   * otherwise, the TSval is ignored.
   */
  if (tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent
      && seq_leq (seq, tc->rcv_las) && seq_leq (tc->rcv_las, seq_end))
      tc->tsval_recent = tc->rcv_opts.tsval;
      tc->tsval_recent_age = tcp_time_now ();
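/* Example of the rule above (illustrative values): with rcv_las = 100, a
 * segment spanning [90, 120] contains Last.ACK.sent, so tsval_recent is
 * refreshed; a purely out-of-order segment spanning [200, 250] leaves it
 * untouched. */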
 * Validate incoming segment as per RFC793 p. 69 and RFC1323 p. 19
 *
 * It first verifies if the segment has a wrapped sequence number (PAWS) and
 * then does the processing associated with the first four steps (ignoring
 * security and precedence): sequence number, rst bit and syn bit checks.
 *
 * @return 0 if segment passes validation.
tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0,
                      vlib_buffer_t * b0, tcp_header_t * th0, u32 * next0)
  if (PREDICT_FALSE (!tcp_ack (th0) && !tcp_rst (th0) && !tcp_syn (th0)))
  if (PREDICT_FALSE (tcp_options_parse (th0, &tc0->rcv_opts)))
  if (tcp_segment_check_paws (tc0))
      clib_warning ("paws failed\n%U", format_tcp_connection, tc0, 2);
      clib_warning ("seq %u seq_end %u ack %u",
                    vnet_buffer (b0)->tcp.seq_number - tc0->irs,
                    vnet_buffer (b0)->tcp.seq_end - tc0->irs,
                    vnet_buffer (b0)->tcp.ack_number - tc0->iss);
      TCP_EVT_DBG (TCP_EVT_PAWS_FAIL, tc0, vnet_buffer (b0)->tcp.seq_number,
                   vnet_buffer (b0)->tcp.seq_end);

      /* If it just so happens that a segment updates tsval_recent for a
       * segment over 24 days old, invalidate tsval_recent. */
      if (timestamp_lt (tc0->tsval_recent_age + TCP_PAWS_IDLE,
          /* Age isn't reset until we get a valid tsval (bsd inspired) */
          tc0->tsval_recent = 0;
          clib_warning ("paws failed - really old segment. REALLY?");
      /* Drop after ack if not rst */
      tcp_make_ack (tc0, b0);
      *next0 = tcp_next_output (tc0->c_is_ip4);
      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0);

  /* 1st: check sequence number */
  if (!tcp_segment_in_rcv_wnd (tc0, vnet_buffer (b0)->tcp.seq_number,
                               vnet_buffer (b0)->tcp.seq_end))
      /* If our window is 0 and the packet is in sequence, let it pass
       * through for ack processing. It should be dropped later. */
      if (tc0->rcv_wnd == 0
          && tc0->rcv_nxt == vnet_buffer (b0)->tcp.seq_number)
          /* TODO Should segment be tagged? */
      /* If not RST, send dup ack */
      tcp_make_ack (tc0, b0);
      *next0 = tcp_next_output (tc0->c_is_ip4);
      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc0);

  /* 2nd: check the RST bit */
      tcp_connection_reset (tc0);

  /* 3rd: check security and precedence (skip) */

  /* 4th: check the SYN bit */
      tcp_send_reset (b0, tc0->c_is_ip4);

  /* If segment in window, save timestamp */
  tcp_update_timestamp (tc0, vnet_buffer (b0)->tcp.seq_number,
                        vnet_buffer (b0)->tcp.seq_end);

tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0)
  /* SND.UNA =< SEG.ACK =< SND.NXT */
  return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number)
          && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt));
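/* E.g., with snd_una = 100 and snd_nxt = 500, acks in [100, 500] are
 * acceptable; 99 acks nothing new and 501 acks data we never sent. */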
 * Compute smoothed RTT as per VJ's '88 SIGCOMM paper and RFC6298
 *
 * Note that although in the original article srtt and rttvar are scaled
 * to minimize round-off errors, here we don't. Instead, we rely on
 * better precision time measurements.
 *
 * TODO: support microsecond (us) rtt resolution
tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt)
      err = mrtt - tc->srtt;
//      tc->srtt += err >> 3;

      /* XXX Drop in RTT results in RTTVAR increase and bigger RTO.
       * The increase should be bound */
//      tc->rttvar += ((int) clib_abs (err) - (int) tc->rttvar) >> 2;

      tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1);
      diff = (clib_abs (err) - (int) tc->rttvar) >> 2;
      tc->rttvar = clib_max ((int) tc->rttvar + diff, 1);

      /* First measurement. */
      tc->rttvar = mrtt >> 1;

tcp_update_rto (tcp_connection_t * tc)
  tc->rto = clib_min (tc->srtt + (tc->rttvar << 2), TCP_RTO_MAX);
  tc->rto = clib_max (tc->rto, TCP_RTO_MIN);
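/* Worked example of the two functions above, with invented round numbers:
 * a first measurement mrtt = 8 seeds srtt = 8, rttvar = 8 >> 1 = 4, so
 * rto = clib_min (8 + (4 << 2), TCP_RTO_MAX) = 24 (or TCP_RTO_MIN if that
 * is larger). A subsequent mrtt = 16 gives err = 8, srtt = 8 + (8 >> 3)
 * = 9, rttvar = 4 + ((8 - 4) >> 2) = 5 and thus rto = 9 + 20 = 29. */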
/** Update RTT estimate and RTO timer
 *
 * Measure RTT: We have two sources of RTT measurements: TSOPT and ACK
 * timing. Middle boxes are known to fiddle with TCP options, so we
 * should give higher priority to ACK timing.
 *
 * @return 1 if we have a valid RTT estimate, 0 otherwise
tcp_update_rtt (tcp_connection_t * tc, u32 ack)
  /* Determine if only rtx bytes are acked. */
  rtx_acked = tcp_in_cong_recovery (tc) || !tc->bytes_acked;

  /* Karn's rule, part 1. Don't use retransmitted segments to estimate
   * RTT because they're ambiguous. */
  if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq) && !rtx_acked)
      mrtt = tcp_time_now () - tc->rtt_ts;
  /* As per RFC7323 TSecr can be used for RTTM only if the segment advances
   * snd_una, i.e., the left side of the send window:
   * seq_lt (tc->snd_una, ack). */
  else if (tcp_opts_tstamp (&tc->rcv_opts) && tc->rcv_opts.tsecr
      mrtt = tcp_time_now () - tc->rcv_opts.tsecr;

  /* Allow measuring of a new RTT */
  /* If ACK moves left side of the wnd make sure boff is 0, even if mrtt is
   * not valid */

  /* Ignore dubious measurements */
  if (mrtt == 0 || mrtt > TCP_RTT_MAX)
  tcp_estimate_rtt (tc, mrtt);

 * Dequeue bytes that have been acked and while at it update RTT estimates.

tcp_dequeue_acked (tcp_connection_t * tc, u32 ack)
  /* Dequeue the newly ACKed and SACKed bytes */
  stream_session_dequeue_drop (&tc->connection,
                               tc->bytes_acked + tc->sack_sb.snd_una_adv);

  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);

  /* Update rtt and rto */
  tcp_update_rtt (tc, ack);

  /* If everything has been acked, stop retransmit timer
   * otherwise update. */
  tcp_retransmit_timer_update (tc);

 * Check if duplicate ack as per RFC5681 Sec. 2

tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
  return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
          && seq_gt (tc->snd_una_max, tc->snd_una)
          && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
          && (prev_snd_wnd == tc->snd_wnd));
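/* Per RFC5681 all four conditions must hold: the ack exactly repeats
 * snd_una, unacked data is outstanding, the segment carries no payload
 * and the advertised window is unchanged. E.g., three consecutive such
 * acks for the same snd_una are the classic fast retransmit trigger. */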
 * Checks if ack is a congestion control event.

tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b,
                     u32 prev_snd_wnd, u32 prev_snd_una, u8 * is_dack)
  /* Check if ack is duplicate. Per RFC 6675, ACKs that SACK new data are
   * defined to be 'duplicate' */
  *is_dack = tc->sack_sb.last_sacked_bytes
    || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una);

  return (*is_dack || tcp_in_cong_recovery (tc));

scoreboard_remove_hole (sack_scoreboard_t * sb, sack_scoreboard_hole_t * hole)
  sack_scoreboard_hole_t *next, *prev;

  if (hole->next != TCP_INVALID_SACK_HOLE_INDEX)
      next = pool_elt_at_index (sb->holes, hole->next);
      next->prev = hole->prev;
      sb->tail = hole->prev;

  if (hole->prev != TCP_INVALID_SACK_HOLE_INDEX)
      prev = pool_elt_at_index (sb->holes, hole->prev);
      prev->next = hole->next;
      sb->head = hole->next;

  if (scoreboard_hole_index (sb, hole) == sb->cur_rxt_hole)
    sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;

  pool_put (sb->holes, hole);

sack_scoreboard_hole_t *
scoreboard_insert_hole (sack_scoreboard_t * sb, u32 prev_index,
  sack_scoreboard_hole_t *hole, *next, *prev;

  pool_get (sb->holes, hole);
  memset (hole, 0, sizeof (*hole));

  hole_index = hole - sb->holes;

  prev = scoreboard_get_hole (sb, prev_index);
      hole->prev = prev_index;
      hole->next = prev->next;

      if ((next = scoreboard_next_hole (sb, hole)))
        next->prev = hole_index;
        sb->tail = hole_index;

      prev->next = hole_index;
      sb->head = hole_index;
      hole->prev = TCP_INVALID_SACK_HOLE_INDEX;
      hole->next = TCP_INVALID_SACK_HOLE_INDEX;

scoreboard_update_bytes (tcp_connection_t * tc, sack_scoreboard_t * sb)
  sack_scoreboard_hole_t *hole, *prev;
  u32 bytes = 0, blks = 0;

  sb->sacked_bytes = 0;
  hole = scoreboard_last_hole (sb);
  if (seq_gt (sb->high_sacked, hole->end))
      bytes = sb->high_sacked - hole->end;
  while ((prev = scoreboard_prev_hole (sb, hole))
         && (bytes < (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss
             && blks < TCP_DUPACK_THRESHOLD))
      bytes += hole->start - prev->end;
      sb->lost_bytes += scoreboard_hole_bytes (hole);
      hole = scoreboard_prev_hole (sb, hole);
      bytes += prev->start - hole->end;
  sb->sacked_bytes = bytes;

 * Figure out the next hole to retransmit
 *
 * Follows logic proposed in RFC6675 Sec. 4, NextSeg()

sack_scoreboard_hole_t *
scoreboard_next_rxt_hole (sack_scoreboard_t * sb,
                          sack_scoreboard_hole_t * start,
                          u8 * can_rescue, u8 * snd_limited)
  sack_scoreboard_hole_t *hole = 0;

  hole = start ? start : scoreboard_first_hole (sb);
  while (hole && seq_leq (hole->end, sb->high_rxt) && hole->is_lost)
    hole = scoreboard_next_hole (sb, hole);

  /* Nothing, return */
      sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;

  /* Rule (1): if higher than rxt, less than high_sacked and lost */
  if (hole->is_lost && seq_lt (hole->start, sb->high_sacked))
      sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
      /* Rule (2): output takes care of transmitting new data */
      if (!have_sent_1_smss)
          sb->cur_rxt_hole = TCP_INVALID_SACK_HOLE_INDEX;
      /* Rule (3): if hole not lost */
      else if (seq_lt (hole->start, sb->high_sacked))
          sb->cur_rxt_hole = scoreboard_hole_index (sb, hole);
      /* Rule (4): if hole beyond high_sacked */
          ASSERT (seq_geq (hole->start, sb->high_sacked));
          /* HighRxt MUST NOT be updated */

  if (hole && seq_lt (sb->high_rxt, hole->start))
    sb->high_rxt = hole->start;

scoreboard_init_high_rxt (sack_scoreboard_t * sb)
  sack_scoreboard_hole_t *hole;
  hole = scoreboard_first_hole (sb);
  sb->high_rxt = hole->start;
  sb->cur_rxt_hole = sb->head;
tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_block_t *blk, tmp;
  sack_scoreboard_hole_t *hole, *next_hole, *last_hole;
  u32 blk_index = 0, old_sacked_bytes, hole_index;

  sb->last_sacked_bytes = 0;
  old_sacked_bytes = sb->sacked_bytes;
  sb->last_bytes_delivered = 0;

  if (!tcp_opts_sack (&tc->rcv_opts)
      && sb->head == TCP_INVALID_SACK_HOLE_INDEX)

  /* Remove invalid blocks */
  blk = tc->rcv_opts.sacks;
  while (blk < vec_end (tc->rcv_opts.sacks))
      if (seq_lt (blk->start, blk->end)
          && seq_gt (blk->start, tc->snd_una)
          && seq_gt (blk->start, ack) && seq_leq (blk->end, tc->snd_nxt))
      vec_del1 (tc->rcv_opts.sacks, blk - tc->rcv_opts.sacks);

  /* Add block for cumulative ack */
  if (seq_gt (ack, tc->snd_una))
      tmp.start = tc->snd_una;
      vec_add1 (tc->rcv_opts.sacks, tmp);

  if (vec_len (tc->rcv_opts.sacks) == 0)

  /* Make sure blocks are ordered */
  for (i = 0; i < vec_len (tc->rcv_opts.sacks); i++)
    for (j = i + 1; j < vec_len (tc->rcv_opts.sacks); j++)
      if (seq_lt (tc->rcv_opts.sacks[j].start, tc->rcv_opts.sacks[i].start))
          tmp = tc->rcv_opts.sacks[i];
          tc->rcv_opts.sacks[i] = tc->rcv_opts.sacks[j];
          tc->rcv_opts.sacks[j] = tmp;

  if (sb->head == TCP_INVALID_SACK_HOLE_INDEX)
      /* If no holes, insert the first that covers all outstanding bytes */
      last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
                                          tc->snd_una, tc->snd_una_max);
      sb->tail = scoreboard_hole_index (sb, last_hole);
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      sb->high_sacked = tmp.end;
      /* If we have holes but snd_una_max is beyond the last hole, update
       * last hole end */
      tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
      last_hole = scoreboard_last_hole (sb);
      if (seq_gt (tc->snd_una_max, sb->high_sacked)
          && seq_gt (tc->snd_una_max, last_hole->end))
        last_hole->end = tc->snd_una_max;
      /* keep track of max byte sacked for when the last hole
       * is removed */
      if (seq_gt (tmp.end, sb->high_sacked))
        sb->high_sacked = tmp.end;

  /* Walk the holes with the SACK blocks */
  hole = pool_elt_at_index (sb->holes, sb->head);
  while (hole && blk_index < vec_len (tc->rcv_opts.sacks))
      blk = &tc->rcv_opts.sacks[blk_index];

      if (seq_leq (blk->start, hole->start))
          /* Block covers hole. Remove hole */
          if (seq_geq (blk->end, hole->end))
              next_hole = scoreboard_next_hole (sb, hole);

              /* Byte accounting: snd_una needs to be advanced */
                  if (seq_lt (ack, next_hole->start))
                    sb->snd_una_adv = next_hole->start - ack;
                  sb->last_bytes_delivered +=
                    next_hole->start - hole->end;
                  sb->snd_una_adv = sb->high_sacked - ack;
                  sb->last_bytes_delivered += sb->high_sacked - hole->end;

              scoreboard_remove_hole (sb, hole);
          /* Partial 'head' overlap */
              if (seq_gt (blk->end, hole->start))
                  hole->start = blk->end;
          /* Hole must be split */
          if (seq_lt (blk->end, hole->end))
              hole_index = scoreboard_hole_index (sb, hole);
              scoreboard_insert_hole (sb, hole_index, blk->end, hole->end);

              /* Pool might've moved */
              hole = scoreboard_get_hole (sb, hole_index);
              hole->end = blk->start;
          else if (seq_lt (blk->start, hole->end))
              hole->end = blk->start;
          hole = scoreboard_next_hole (sb, hole);

  scoreboard_update_bytes (tc, sb);
  sb->last_sacked_bytes = sb->sacked_bytes
    - (old_sacked_bytes - sb->last_bytes_delivered);
  ASSERT (sb->sacked_bytes == 0
          || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack));
  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max
          - seq_max (tc->snd_una, ack));
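/* Illustrative scoreboard trace (numbers invented): with snd_una = 1000
 * and snd_una_max = 5000 the scoreboard starts as a single hole
 * [1000, 5000). A SACK block [2000, 3000] splits it into [1000, 2000) and
 * [3000, 5000), leaving high_sacked = 3000 and sacked_bytes = 1000. A
 * later cumulative ack of 2000 then removes the first hole entirely. */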
 * Try to update snd_wnd based on feedback received from peer.
 *
 * If successful, and new window is 'effectively' 0, activate persist
 * timer.
tcp_update_snd_wnd (tcp_connection_t * tc, u32 seq, u32 ack, u32 snd_wnd)
  /* If (SND.WL1 < SEG.SEQ or (SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK)), set
   * SND.WND <- SEG.WND, set SND.WL1 <- SEG.SEQ, and set SND.WL2 <- SEG.ACK */
  if (seq_lt (tc->snd_wl1, seq)
      || (tc->snd_wl1 == seq && seq_leq (tc->snd_wl2, ack)))
      tc->snd_wnd = snd_wnd;
      TCP_EVT_DBG (TCP_EVT_SND_WND, tc);

      if (tc->snd_wnd < tc->snd_mss)
          /* Set persist timer if not set and we just got 0 wnd */
          if (!tcp_timer_is_active (tc, TCP_TIMER_PERSIST)
              && !tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT))
            tcp_persist_timer_set (tc);
          tcp_persist_timer_reset (tc);
          if (!tcp_in_recovery (tc) && tc->rto_boff > 0)

tcp_cc_init_congestion (tcp_connection_t * tc)
  tcp_fastrecovery_on (tc);
  tc->snd_congestion = tc->snd_una_max;
  tc->cc_algo->congestion (tc);
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 4);

tcp_cc_recovery_exit (tcp_connection_t * tc)
  tcp_recovery_off (tc);

tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
  tc->cc_algo->recovered (tc);
  tc->snd_rxt_bytes = 0;
  tcp_fastrecovery_off (tc);
  tcp_fastrecovery_1_smss_off (tc);

tcp_cc_congestion_undo (tcp_connection_t * tc)
  tc->cwnd = tc->prev_cwnd;
  tc->ssthresh = tc->prev_ssthresh;
  tc->snd_nxt = tc->snd_una_max;
  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  ASSERT (tc->rto_boff == 0);
  /* TODO extend for fastrecovery */

tcp_cc_is_spurious_retransmit (tcp_connection_t * tc)
  return (tc->snd_rxt_ts
          && tcp_opts_tstamp (&tc->rcv_opts)
          && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts));

tcp_cc_recover (tcp_connection_t * tc)
  ASSERT (tcp_in_cong_recovery (tc));
  if (tcp_cc_is_spurious_retransmit (tc))
      tcp_cc_congestion_undo (tc);

  if (tcp_in_recovery (tc))
    tcp_cc_recovery_exit (tc);
  else if (tcp_in_fastrecovery (tc))
    tcp_cc_fastrecovery_exit (tc);

  ASSERT (tc->rto_boff == 0);
  ASSERT (!tcp_in_cong_recovery (tc));
  TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);

tcp_cc_update (tcp_connection_t * tc, vlib_buffer_t * b)
  ASSERT (!tcp_in_cong_recovery (tc));

  /* Congestion avoidance */
  tc->cc_algo->rcv_ack (tc);
  tc->tsecr_last_ack = tc->rcv_opts.tsecr;

  /* If a cumulative ack, make sure dupacks is 0 */
  /* When dupacks hit the threshold we only enter fast retransmit if
   * cumulative ack covers more than snd_congestion. Should snd_una
   * wrap, this test may fail under otherwise valid circumstances.
   * Therefore, proactively update snd_congestion when wrap is detected. */
      (seq_leq (tc->snd_congestion, tc->snd_una - tc->bytes_acked)
       && seq_gt (tc->snd_congestion, tc->snd_una)))
    tc->snd_congestion = tc->snd_una - 1;

tcp_should_fastrecover_sack (tcp_connection_t * tc)
  return (TCP_DUPACK_THRESHOLD - 1) * tc->snd_mss < tc->sack_sb.sacked_bytes;
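/* E.g., assuming the usual TCP_DUPACK_THRESHOLD of 3 and snd_mss = 1460,
 * the test above requires more than 2 * 1460 = 2920 sacked bytes,
 * mirroring the three-dupacks trigger below. */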
tcp_should_fastrecover (tcp_connection_t * tc)
  return (tc->rcv_dupacks == TCP_DUPACK_THRESHOLD
          || tcp_should_fastrecover_sack (tc));
 * One function to rule them all ... and in the darkness bind them

tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
   * Duplicate ACK. Check if we should enter fast recovery, or if already in
   * it, account for the bytes that left the network.

      ASSERT (tc->snd_una != tc->snd_una_max
              || tc->sack_sb.last_sacked_bytes);

      if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked)
          ASSERT (tcp_in_fastrecovery (tc));
          /* Pure duplicate ack. If some data got acked, it's handled below */
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);
      else if (tcp_should_fastrecover (tc))
          /* Things are already bad */
          if (tcp_in_cong_recovery (tc))
              tc->rcv_dupacks = 0;
              goto partial_ack_test;

          /* If either of the two conditions below holds, reset dupacks:
           * 1) Cumulative ack does not cover more than the congestion
           *    threshold, and the following doesn't hold: the congestion
           *    window is greater than SMSS bytes and the difference between
           *    highest_ack and prev_highest_ack is at most 4*SMSS bytes (XXX)
           * 2) RFC6582 heuristic to avoid multiple fast retransmits
          if ((seq_gt (tc->snd_una, tc->snd_congestion)
               || !(tc->cwnd > tc->snd_mss
                    && tc->bytes_acked <= 4 * tc->snd_mss))
              || tc->rcv_opts.tsecr != tc->tsecr_last_ack)
              tc->rcv_dupacks = 0;

          tcp_cc_init_congestion (tc);
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);

          /* The first segment MUST be retransmitted */
          tcp_retransmit_first_unacked (tc);

          /* Post retransmit update cwnd to ssthresh and account for the
           * three segments that have left the network and should've been
           * buffered at the receiver XXX */
          tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss;

          /* If cwnd allows, send more data */
          if (tcp_opts_sack_permitted (&tc->rcv_opts)
              && scoreboard_first_hole (&tc->sack_sb))
              scoreboard_init_high_rxt (&tc->sack_sb);
              tcp_fast_retransmit_sack (tc);
              tcp_fast_retransmit_no_sack (tc);
      else if (!tc->bytes_acked
               || (tc->bytes_acked && !tcp_in_cong_recovery (tc)))
          tc->cc_algo->rcv_cong_ack (tc, TCP_CC_DUPACK);

  if (!tc->bytes_acked)

   * Legitimate ACK. 1) See if we can exit recovery

  /* XXX limit this only to first partial ack? */
  tcp_retransmit_timer_update (tc);

  if (seq_geq (tc->snd_una, tc->snd_congestion))
      /* If spurious return, we've already updated everything */
      if (tcp_cc_recover (tc))
          tc->tsecr_last_ack = tc->rcv_opts.tsecr;

      tc->snd_nxt = tc->snd_una_max;

      /* Treat as congestion avoidance ack */
      tc->cc_algo->rcv_ack (tc);
      tc->tsecr_last_ack = tc->rcv_opts.tsecr;

   * Legitimate ACK. 2) If PARTIAL ACK try to retransmit

  TCP_EVT_DBG (TCP_EVT_CC_PACK, tc);

  /* RFC6675: If the incoming ACK is a cumulative acknowledgment,
   * reset dupacks to 0 */
  tc->rcv_dupacks = 0;

  tcp_retransmit_first_unacked (tc);

  /* Post RTO timeout don't try anything fancy */
  if (tcp_in_recovery (tc))

  /* Remove retransmitted bytes that have been delivered */
  ASSERT (tc->bytes_acked + tc->sack_sb.snd_una_adv
          >= tc->sack_sb.last_bytes_delivered);
  rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv
    - tc->sack_sb.last_bytes_delivered;
  if (rxt_delivered && seq_gt (tc->sack_sb.high_rxt, tc->snd_una))
      /* If we have sacks and we haven't gotten an ack beyond high_rxt,
       * remove sacked bytes delivered */
      ASSERT (tc->snd_rxt_bytes >= rxt_delivered);
      tc->snd_rxt_bytes -= rxt_delivered;
      /* Either all retransmitted holes have been acked, or we're
       * "in the blind" and retransmitting segment by segment */
      tc->snd_rxt_bytes = 0;

  tc->cc_algo->rcv_cong_ack (tc, TCP_CC_PARTIALACK);

   * Since this was a partial ack, try to retransmit some more data

  tcp_fast_retransmit (tc);
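/* In short, the event handler above: feeds dupacks to an ongoing
 * recovery, enters fast retransmit once the dupack/SACK threshold is
 * crossed, exits recovery via tcp_cc_recover () when snd_una reaches
 * snd_congestion, and on partial acks retransmits the first unacked
 * segment and prunes snd_rxt_bytes before asking for more retransmits. */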
tcp_cc_init (tcp_connection_t * tc)
  tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO);
  tc->cc_algo->init (tc);

 * Process incoming ACK

tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b,
             tcp_header_t * th, u32 * next, u32 * error)
  u32 prev_snd_wnd, prev_snd_una;

  TCP_EVT_DBG (TCP_EVT_CC_STAT, tc);

  /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
  if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
      /* If we have outstanding data and this is within the window, accept it,
       * probably retransmit has timed out. Otherwise ACK segment and then
       * drop it */
      if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
          tcp_make_ack (tc, b);
          *next = tcp_next_output (tc->c_is_ip4);
          *error = TCP_ERROR_ACK_INVALID;
          TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
                       vnet_buffer (b)->tcp.ack_number);

      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2,
                   vnet_buffer (b)->tcp.ack_number);

      tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
      *error = TCP_ERROR_ACK_FUTURE;

  /* If old ACK, probably it's an old dupack */
  if (PREDICT_FALSE (seq_lt (vnet_buffer (b)->tcp.ack_number, tc->snd_una)))
      *error = TCP_ERROR_ACK_OLD;
      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 1,
                   vnet_buffer (b)->tcp.ack_number);
      if (tcp_in_fastrecovery (tc) && tc->rcv_dupacks == TCP_DUPACK_THRESHOLD)
          TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc);
          tcp_cc_handle_event (tc, 1);
      /* Don't drop yet */

   * Looks okay, process feedback

  TCP_EVT_DBG (TCP_EVT_ACK_RCVD, tc);

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);

  prev_snd_wnd = tc->snd_wnd;
  prev_snd_una = tc->snd_una;
  tcp_update_snd_wnd (tc, vnet_buffer (b)->tcp.seq_number,
                      vnet_buffer (b)->tcp.ack_number,
                      clib_net_to_host_u16 (th->window) << tc->snd_wscale);
  tc->bytes_acked = vnet_buffer (b)->tcp.ack_number - tc->snd_una;
  tc->snd_una = vnet_buffer (b)->tcp.ack_number + tc->sack_sb.snd_una_adv;
  tcp_validate_txf_size (tc, tc->bytes_acked);

  if (tc->bytes_acked)
    tcp_dequeue_acked (tc, vnet_buffer (b)->tcp.ack_number);

   * Check if we have congestion event

  if (tcp_ack_is_cc_event (tc, b, prev_snd_wnd, prev_snd_una, &is_dack))
      tcp_cc_handle_event (tc, is_dack);
      *error = TCP_ERROR_ACK_DUP;
      TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
      return vnet_buffer (b)->tcp.data_len ? 0 : -1;

   * Update congestion control (slow start/congestion avoidance)

  tcp_cc_update (tc, b);
 * Build SACK list as per RFC2018.
 *
 * Makes sure the first block contains the segment that generated the current
 * ACK and the following ones are the blocks most recently reported in SACK
 * options.
 *
 * @param tc TCP connection for which the SACK list is updated
 * @param start Start sequence number of the newest SACK block
 * @param end End sequence of the newest SACK block
tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
  sack_block_t *new_list = 0, *block = 0;

  /* If the first segment is ooo add it to the list. Last write might've moved
   * rcv_nxt over the first segment. */
  if (seq_lt (tc->rcv_nxt, start))
      vec_add2 (new_list, block, 1);
      block->start = start;

  /* Find the blocks still worth keeping. */
  for (i = 0; i < vec_len (tc->snd_sacks); i++)
      /* Discard if rcv_nxt advanced beyond current block */
      if (seq_leq (tc->snd_sacks[i].start, tc->rcv_nxt))

      /* Merge or drop if segment overlapped by the new segment */
      if (block && (seq_geq (tc->snd_sacks[i].end, new_list[0].start)
                    && seq_leq (tc->snd_sacks[i].start, new_list[0].end)))
          if (seq_lt (tc->snd_sacks[i].start, new_list[0].start))
            new_list[0].start = tc->snd_sacks[i].start;
          if (seq_lt (new_list[0].end, tc->snd_sacks[i].end))
            new_list[0].end = tc->snd_sacks[i].end;

      /* Save to new SACK list if we have space. */
      if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
          vec_add1 (new_list, tc->snd_sacks[i]);
          clib_warning ("sack discarded");

  ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);

  /* Replace old vector with new one */
  vec_free (tc->snd_sacks);
  tc->snd_sacks = new_list;
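/* Example per RFC2018, with made-up sequence numbers: if rcv_nxt = 100
 * and an out-of-order segment [300, 400) just arrived, the first block
 * reported is [300, 400); older blocks still beyond rcv_nxt follow,
 * newest first, merged if they overlap the new block and capped at
 * TCP_MAX_SACK_BLOCKS. */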
/** Enqueue data for delivery to application */
tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b,
  /* Pure ACK. Update rcv_nxt and be done. */
  if (PREDICT_FALSE (data_len == 0))
      tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end;
      return TCP_ERROR_PURE_ACK;

  written = stream_session_enqueue_data (&tc->connection, b, 0,
                                         1 /* queue event */ , 1);

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 0, data_len, written);

  /* Update rcv_nxt */
  if (PREDICT_TRUE (written == data_len))
      tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end;
  /* If more data written than expected, account for out-of-order bytes. */
  else if (written > data_len)
      tc->rcv_nxt = vnet_buffer (b)->tcp.seq_end + written - data_len;

      /* Send ACK confirming the update */
      tc->flags |= TCP_CONN_SNDACK;
  else if (written > 0)
      /* We've written something but FIFO is probably full now */
      tc->rcv_nxt += written;

      /* Depending on how fast the app is, all remaining buffers in burst will
       * not be enqueued. Inform peer */
      tc->flags |= TCP_CONN_SNDACK;
      return TCP_ERROR_PARTIALLY_ENQUEUED;
      tc->flags |= TCP_CONN_SNDACK;
      return TCP_ERROR_FIFO_FULL;

  /* Update SACK list if need be */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
      /* Remove SACK blocks that have been delivered */
      tcp_update_sack_list (tc, tc->rcv_nxt, tc->rcv_nxt);

  return TCP_ERROR_ENQUEUED;

/** Enqueue out-of-order data */
tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b,
  stream_session_t *s0;

  ASSERT (seq_gt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt));

  /* Pure ACK. Do nothing */
  if (PREDICT_FALSE (data_len == 0))
      return TCP_ERROR_PURE_ACK;

  /* Enqueue out-of-order data with relative offset */
  rv = stream_session_enqueue_data (&tc->connection, b,
                                    vnet_buffer (b)->tcp.seq_number -
                                    tc->rcv_nxt, 0 /* queue event */ , 0);

  /* Nothing written */
      TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, 0);
      return TCP_ERROR_FIFO_FULL;

  TCP_EVT_DBG (TCP_EVT_INPUT, tc, 1, data_len, data_len);

  /* Update SACK list if in use */
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
      ooo_segment_t *newest;

      s0 = stream_session_get (tc->c_s_index, tc->c_thread_index);

      /* Get the newest segment from the fifo */
      newest = svm_fifo_newest_ooo_segment (s0->server_rx_fifo);
        tc->rcv_nxt + ooo_segment_offset (s0->server_rx_fifo, newest);
      end = start + ooo_segment_length (s0->server_rx_fifo, newest);
      tcp_update_sack_list (tc, start, end);

      ASSERT (seq_gt (start, tc->rcv_nxt));

  return TCP_ERROR_ENQUEUED;
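/* Usage note: the relative offset handed to the fifo is
 * seq_number - rcv_nxt, so a segment starting 700 bytes past rcv_nxt
 * lands 700 bytes past the current fifo tail, and the newest ooo
 * segment reported back is what feeds tcp_update_sack_list () above. */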
 * Check if ACK could be delayed. If the ack can be delayed, it should
 * return true for a full frame. If we're always acking, return 0.

tcp_can_delack (tcp_connection_t * tc)
  /* Send ack if ... */
      /* just sent a rcv wnd 0 */
      || (tc->flags & TCP_CONN_SENT_RCV_WND0) != 0
      /* constrained to send ack */
      || (tc->flags & TCP_CONN_SNDACK) != 0
      /* we're almost out of tx wnd */
      || tcp_available_snd_space (tc) < 4 * tc->snd_mss)
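/* E.g., an ack goes out immediately right after advertising a zero
 * window, when the enqueue path forced TCP_CONN_SNDACK, or when less
 * than 4 * snd_mss of send space remains; otherwise the delayed-ack
 * timer set in tcp_segment_rcv () below coalesces acks. */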
tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b,
                 u16 n_data_bytes, u32 * next0)
  u32 error = 0, n_bytes_to_drop;

  /* Handle out-of-order data */
  if (PREDICT_FALSE (vnet_buffer (b)->tcp.seq_number != tc->rcv_nxt))
      /* Old sequence numbers allowed through because they overlapped
       * the rx window */
      if (seq_lt (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt))
          error = TCP_ERROR_SEGMENT_OLD;
          *next0 = TCP_NEXT_DROP;

          /* Completely in the past (possible retransmit) */
          if (seq_leq (vnet_buffer (b)->tcp.seq_end, tc->rcv_nxt))

          /* Chop off the bytes in the past */
          n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number;
          n_data_bytes -= n_bytes_to_drop;
          vlib_buffer_advance (b, n_bytes_to_drop);

      error = tcp_session_enqueue_ooo (tc, b, n_data_bytes);
      /* N.B. Should not filter bursts of dupacks. Two issues: 1) dupacks open
       * cwnd on remote peer when congested 2) acks leaving should have the
       * latest rcv_wnd since the burst may have eaten up all of it, so only
       * the old ones could be filtered.
      /* RFC2581: Send DUPACK for fast retransmit */
      tcp_make_ack (tc, b);
      *next0 = tcp_next_output (tc->c_is_ip4);

      /* Mark as DUPACK. We may filter these in output if
       * the burst fills the holes. */
      vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_DUPACK;

      TCP_EVT_DBG (TCP_EVT_DUPACK_SENT, tc);

  /* In order data, enqueue. Fifo figures out by itself if any out-of-order
   * segments can be enqueued after fifo tail offset changes. */
  error = tcp_session_enqueue_data (tc, b, n_data_bytes);

  if (n_data_bytes == 0)
      *next0 = TCP_NEXT_DROP;

  /* Check if ACK can be delayed */
  if (tcp_can_delack (tc))
      if (!tcp_timer_is_active (tc, TCP_TIMER_DELACK))
        tcp_timer_set (tc, TCP_TIMER_DELACK, TCP_DELACK_TIME);

  *next0 = tcp_next_output (tc->c_is_ip4);
  tcp_make_ack (tc, b);

  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;

format_tcp_rx_trace (u8 * s, va_list * args)
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);
  uword indent = format_get_indent (s);

  s = format (s, "%U\n%U%U",
              format_tcp_header, &t->tcp_header, 128,
              format_white_space, indent,
              format_tcp_connection, &t->tcp_connection, 1);

format_tcp_rx_trace_short (u8 * s, va_list * args)
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_rx_trace_t *t = va_arg (*args, tcp_rx_trace_t *);

  s = format (s, "%d -> %d (%U)",
              clib_net_to_host_u16 (t->tcp_header.src_port),
              clib_net_to_host_u16 (t->tcp_header.dst_port), format_tcp_state,
              t->tcp_connection.state);

tcp_set_rx_trace_data (tcp_rx_trace_t * t0, tcp_connection_t * tc0,
                       tcp_header_t * th0, vlib_buffer_t * b0, u8 is_ip4)
      clib_memcpy (&t0->tcp_connection, tc0, sizeof (t0->tcp_connection));
      th0 = tcp_buffer_hdr (b0);
  clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));

tcp_established_inc_counter (vlib_main_t * vm, u8 is_ip4, u8 evt, u8 val)
  if (PREDICT_TRUE (!val))
    vlib_node_increment_counter (vm, tcp4_established_node.index, evt, val);
    vlib_node_increment_counter (vm, tcp6_established_node.index, evt, val);

tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip4)
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index, errors = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
          tcp_header_t *th0 = 0;
          tcp_connection_t *tc0;
          u32 next0 = TCP_ESTABLISHED_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;

          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,

          if (PREDICT_FALSE (tc0 == 0))
              error0 = TCP_ERROR_INVALID_CONNECTION;

          th0 = tcp_buffer_hdr (b0);
          is_fin = (th0->flags & TCP_FLAG_FIN) != 0;

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
            + tcp_is_syn (th0) + is_fin + vnet_buffer (b0)->tcp.data_len;

          /* TODO header prediction fast path */

          /* 1-4: check SEQ, RST, SYN */
          if (PREDICT_FALSE (tcp_segment_validate (vm, tc0, b0, th0, &next0)))
              error0 = TCP_ERROR_SEGMENT_INVALID;
              TCP_EVT_DBG (TCP_EVT_SEG_INVALID, tc0,
                           vnet_buffer (b0)->tcp.seq_number,
                           vnet_buffer (b0)->tcp.seq_end);

          /* 5: check the ACK field */
          if (tcp_rcv_ack (tc0, b0, th0, &next0, &error0))

          /* 6: check the URG bit TODO */

          /* 7: process the segment text */
          vlib_buffer_advance (b0, vnet_buffer (b0)->tcp.data_offset);
          error0 = tcp_segment_rcv (tm, tc0, b0,
                                    vnet_buffer (b0)->tcp.data_len, &next0);

          /* N.B. buffer is rewritten if segment is ooo. Thus, th0 becomes a
           * dangling reference. */

          /* 8: check the FIN bit */
              /* Enter CLOSE-WAIT and notify session. Don't send ACK, instead
               * wait for session to call close. To avoid lingering
               * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */
              tc0->state = TCP_STATE_CLOSE_WAIT;
              TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
              stream_session_disconnect_notify (&tc0->connection);
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);

          b0->error = node->errors[error0];
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
              tcp_rx_trace_t *t0 =
                vlib_add_trace (vm, node, b0, sizeof (*t0));
              tcp_set_rx_trace_data (t0, tc0, th0, b0, is_ip4);

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  errors = session_manager_flush_enqueue_events (my_thread_index);
  tcp_established_inc_counter (vm, is_ip4, TCP_ERROR_EVENT_FIFO_FULL, errors);
  return from_frame->n_vectors;

tcp4_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
  return tcp46_established_inline (vm, node, from_frame, 1 /* is_ip4 */ );

tcp6_established (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
  return tcp46_established_inline (vm, node, from_frame, 0 /* is_ip4 */ );

VLIB_REGISTER_NODE (tcp4_established_node) =
  .function = tcp4_established,
  .name = "tcp4-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
  .format_trace = format_tcp_rx_trace_short,

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_established_node, tcp4_established);

VLIB_REGISTER_NODE (tcp6_established_node) =
  .function = tcp6_established,
  .name = "tcp6-established",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_ESTABLISHED_N_NEXT,
#define _(s,n) [TCP_ESTABLISHED_NEXT_##s] = n,
    foreach_tcp_state_next
  .format_trace = format_tcp_rx_trace_short,

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_established_node, tcp6_established);
vlib_node_registration_t tcp4_syn_sent_node;
vlib_node_registration_t tcp6_syn_sent_node;

tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vlib_frame_t * from_frame, int is_ip4)
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index, errors = 0;
  u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
          u32 bi0, ack0, seq0;
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          tcp_connection_t *new_tc0;
          u32 next0 = TCP_SYN_SENT_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;

          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
            tcp_half_open_connection_get (vnet_buffer (b0)->
                                          tcp.connection_index);

          ack0 = vnet_buffer (b0)->tcp.ack_number;
          seq0 = vnet_buffer (b0)->tcp.seq_number;
          tcp0 = tcp_buffer_hdr (b0);

              (!tcp_ack (tcp0) && !tcp_rst (tcp0) && !tcp_syn (tcp0)))

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = seq0 + tcp_is_syn (tcp0)
            + tcp_is_fin (tcp0) + vnet_buffer (b0)->tcp.data_len;

           *  1. check the ACK bit

           *   If the ACK bit is set
           *     If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send a reset (unless
           *     the RST bit is set, if so drop the segment and return)
           *       <SEQ=SEG.ACK><CTL=RST>
           *     and discard the segment.  Return.
           *     If SND.UNA =< SEG.ACK =< SND.NXT then the ACK is acceptable.

              if (ack0 <= tc0->iss || ack0 > tc0->snd_nxt)
                  if (!tcp_rst (tcp0))
                    tcp_send_reset (b0, is_ip4);

              /* Make sure ACK is valid */
              if (tc0->snd_una > ack0)

           * 2. check the RST bit
              /* If ACK is acceptable, signal client that peer is not
               * willing to accept connection and drop connection */
                  stream_session_connect_notify (&tc0->connection, sst,
                  tcp_connection_cleanup (tc0);

           * 3. check the security and precedence (skipped)

           * 4. check the SYN bit

          /* No SYN flag. Drop. */
          if (!tcp_syn (tcp0))

          /* Stop connection establishment and retransmit timers */
          tcp_timer_reset (tc0, TCP_TIMER_ESTABLISH);
          tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT_SYN);

          /* Valid SYN or SYN-ACK. Move connection from half-open pool to
           * current thread pool. */
          pool_get (tm->connections[my_thread_index], new_tc0);
          clib_memcpy (new_tc0, tc0, sizeof (*new_tc0));

          new_tc0->c_thread_index = my_thread_index;
          new_tc0->c_c_index = new_tc0 - tm->connections[my_thread_index];

          /* Cleanup half-open connection XXX lock */
          pool_put (tm->half_open_connections, tc0);

          new_tc0->rcv_nxt = vnet_buffer (b0)->tcp.seq_end;
          new_tc0->irs = seq0;

          if (tcp_options_parse (tcp0, &new_tc0->rcv_opts))

          if (tcp_opts_tstamp (&new_tc0->rcv_opts))
              new_tc0->tsval_recent = new_tc0->rcv_opts.tsval;
              new_tc0->tsval_recent_age = tcp_time_now ();

          if (tcp_opts_wscale (&new_tc0->rcv_opts))
            new_tc0->snd_wscale = new_tc0->rcv_opts.wscale;

          new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
            << new_tc0->snd_wscale;
          new_tc0->snd_wl1 = seq0;
          new_tc0->snd_wl2 = ack0;

          tcp_connection_init_vars (new_tc0);

          /* SYN-ACK: See if we can switch to ESTABLISHED state */
              /* Our SYN is ACKed: we have iss < ack = snd_una */

              /* TODO Dequeue acknowledged segments if we support Fast Open */
              new_tc0->snd_una = ack0;
              new_tc0->state = TCP_STATE_ESTABLISHED;

              /* Make sure las is initialized for the wnd computation */
              new_tc0->rcv_las = new_tc0->rcv_nxt;

              /* Notify app that we have connection. If session layer can't
               * allocate session send reset */
              if (stream_session_connect_notify (&new_tc0->connection, sst,
                  tcp_connection_cleanup (new_tc0);
                  tcp_send_reset (b0, is_ip4);

              stream_session_init_fifos_pointers (&new_tc0->connection,

              /* Make sure after data segment processing ACK is sent */
              new_tc0->flags |= TCP_CONN_SNDACK;
          /* SYN: Simultaneous open. Change state to SYN-RCVD and send SYN-ACK */
              new_tc0->state = TCP_STATE_SYN_RCVD;

              /* Notify app that we have connection */
              if (stream_session_connect_notify
                  (&new_tc0->connection, sst, 0))
                  tcp_connection_cleanup (new_tc0);
                  tcp_send_reset (b0, is_ip4);

              stream_session_init_fifos_pointers (&new_tc0->connection,

              tcp_make_synack (new_tc0, b0);
              next0 = tcp_next_output (is_ip4);

          /* Read data, if any */
          if (vnet_buffer (b0)->tcp.data_len)
              vlib_buffer_advance (b0, vnet_buffer (b0)->tcp.data_offset);
              error0 = tcp_segment_rcv (tm, new_tc0, b0,
                                        vnet_buffer (b0)->tcp.data_len,
              if (error0 == TCP_ERROR_PURE_ACK)
                error0 = TCP_ERROR_SYN_ACKS_RCVD;

              tcp_make_ack (new_tc0, b0);
              next0 = tcp_next_output (new_tc0->c_is_ip4);

          b0->error = error0 ? node->errors[error0] : 0;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
              clib_memcpy (&t0->tcp_connection, tc0,
                           sizeof (t0->tcp_connection));

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);

  errors = session_manager_flush_enqueue_events (my_thread_index);
      vlib_node_increment_counter (vm, tcp4_established_node.index,
                                   TCP_ERROR_EVENT_FIFO_FULL, errors);
      vlib_node_increment_counter (vm, tcp6_established_node.index,
                                   TCP_ERROR_EVENT_FIFO_FULL, errors);

  return from_frame->n_vectors;

tcp4_syn_sent (vlib_main_t * vm, vlib_node_runtime_t * node,
               vlib_frame_t * from_frame)
  return tcp46_syn_sent_inline (vm, node, from_frame, 1 /* is_ip4 */ );

tcp6_syn_sent_rcv (vlib_main_t * vm, vlib_node_runtime_t * node,
                   vlib_frame_t * from_frame)
  return tcp46_syn_sent_inline (vm, node, from_frame, 0 /* is_ip4 */ );

VLIB_REGISTER_NODE (tcp4_syn_sent_node) =
  .function = tcp4_syn_sent,
  .name = "tcp4-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
  .format_trace = format_tcp_rx_trace_short,

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_syn_sent_node, tcp4_syn_sent);

VLIB_REGISTER_NODE (tcp6_syn_sent_node) =
  .function = tcp6_syn_sent_rcv,
  .name = "tcp6-syn-sent",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_SYN_SENT_N_NEXT,
#define _(s,n) [TCP_SYN_SENT_NEXT_##s] = n,
    foreach_tcp_state_next
  .format_trace = format_tcp_rx_trace_short,

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv);
 * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED
 * as per RFC793 p. 64

tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip4)
  tcp_main_t *tm = vnet_get_tcp_main ();
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index, errors = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
          tcp_header_t *tcp0 = 0;
          tcp_connection_t *tc0;
          u32 next0 = TCP_RCV_PROCESS_NEXT_DROP, error0 = TCP_ERROR_ENQUEUED;

          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index,
          if (PREDICT_FALSE (tc0 == 0))
              error0 = TCP_ERROR_INVALID_CONNECTION;

          tcp0 = tcp_buffer_hdr (b0);

          /* SYNs, FINs and data consume sequence numbers */
          vnet_buffer (b0)->tcp.seq_end = vnet_buffer (b0)->tcp.seq_number
            + tcp_is_syn (tcp0) + tcp_is_fin (tcp0)
            + vnet_buffer (b0)->tcp.data_len;

           * Special treatment for CLOSED

            case TCP_STATE_CLOSED:

           * For all other states (except LISTEN)

          /* 1-4: check SEQ, RST, SYN */
              (tcp_segment_validate (vm, tc0, b0, tcp0, &next0)))
              error0 = TCP_ERROR_SEGMENT_INVALID;

          /* 5: check the ACK field */
            case TCP_STATE_SYN_RCVD:
               * If the segment acknowledgment is not acceptable, form a
               * reset segment,
               *   <SEQ=SEG.ACK><CTL=RST>
               * and send it.
              if (!tcp_rcv_ack_is_acceptable (tc0, b0))
                  tcp_send_reset (b0, is_ip4);

              /* Update rtt and rto */
              tc0->bytes_acked = 1;
              tcp_update_rtt (tc0, vnet_buffer (b0)->tcp.ack_number);

              /* Switch state to ESTABLISHED */
              tc0->state = TCP_STATE_ESTABLISHED;

              /* Initialize session variables */
              tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
              tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window)
                << tc0->rcv_opts.wscale;
              tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
              tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;

              /* Shoulder tap the server */
              stream_session_accept_notify (&tc0->connection);

              /* Reset SYN-ACK retransmit timer */
              tcp_retransmit_timer_reset (tc0);
            case TCP_STATE_ESTABLISHED:
              /* We can get packets in established state here because they
               * were enqueued before state change */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))

            case TCP_STATE_FIN_WAIT_1:
              /* In addition to the processing for the ESTABLISHED state, if
               * our FIN is now acknowledged then enter FIN-WAIT-2 and
               * continue processing in that state. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))

              /* If FIN is ACKed */
              if (tc0->snd_una == tc0->snd_una_max)
                  tc0->state = TCP_STATE_FIN_WAIT_2;
                  /* Stop all timers, 2MSL will be set lower */
                  tcp_connection_timers_reset (tc0);
            case TCP_STATE_FIN_WAIT_2:
              /* In addition to the processing for the ESTABLISHED state, if
               * the retransmission queue is empty, the user's CLOSE can be
               * acknowledged ("ok") but do not delete the TCB. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
              /* check if rtx queue is empty and ack CLOSE TODO */
            case TCP_STATE_CLOSE_WAIT:
              /* Do the same processing as for the ESTABLISHED state. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))
            case TCP_STATE_CLOSING:
              /* In addition to the processing for the ESTABLISHED state, if
               * the ACK acknowledges our FIN then enter the TIME-WAIT state,
               * otherwise ignore the segment. */
              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))

              /* XXX test that send queue empty */
              tc0->state = TCP_STATE_TIME_WAIT;
            case TCP_STATE_LAST_ACK:
              /* The only thing that [should] arrive in this state is an
               * acknowledgment of our FIN. If our FIN is now acknowledged,
               * delete the TCB, enter the CLOSED state, and return. */

              if (!tcp_rcv_ack_is_acceptable (tc0, b0))

              /* Apparently our FIN was lost */
                  /* Don't "make" fin since that increments snd_nxt */

              tc0->state = TCP_STATE_CLOSED;

              /* Don't delete the connection/session yet. Instead, wait a
               * reasonable amount of time until the pipes are cleared. In
               * particular, this makes sure that we won't have dead sessions
               * when processing events on the tx path */
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME);

              /* Stop retransmit */
              tcp_retransmit_timer_reset (tc0);
            case TCP_STATE_TIME_WAIT:
              /* The only thing that can arrive in this state is a
               * retransmission of the remote FIN. Acknowledge it, and restart
               * the 2 MSL timeout. */

              if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0))

              tcp_make_ack (tc0, b0);
              tcp_timer_reset (tc0, TCP_TIMER_WAITCLOSE);
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);

          /* 6: check the URG bit TODO */

          /* 7: process the segment text */
            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_FIN_WAIT_1:
            case TCP_STATE_FIN_WAIT_2:
              vlib_buffer_advance (b0, vnet_buffer (b0)->tcp.data_offset);
              error0 = tcp_segment_rcv (tm, tc0, b0,
                                        vnet_buffer (b0)->tcp.data_len,
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
            case TCP_STATE_TIME_WAIT:
              /* This should not occur, since a FIN has been received from the
               * remote side.  Ignore the segment text. */

          /* 8: check the FIN bit */
          if (!tcp_fin (tcp0))

            case TCP_STATE_ESTABLISHED:
            case TCP_STATE_SYN_RCVD:
              /* Send FIN-ACK notify app and enter CLOSE-WAIT */
              tcp_connection_timers_reset (tc0);
              tcp_make_fin (tc0, b0);
              next0 = tcp_next_output (tc0->c_is_ip4);
              stream_session_disconnect_notify (&tc0->connection);
              tc0->state = TCP_STATE_CLOSE_WAIT;
            case TCP_STATE_CLOSE_WAIT:
            case TCP_STATE_CLOSING:
            case TCP_STATE_LAST_ACK:
            case TCP_STATE_FIN_WAIT_1:
              tc0->state = TCP_STATE_TIME_WAIT;
              tcp_connection_timers_reset (tc0);
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
            case TCP_STATE_FIN_WAIT_2:
              /* Got FIN, send ACK! */
              tc0->state = TCP_STATE_TIME_WAIT;
              tcp_connection_timers_reset (tc0);
              tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
              tcp_make_ack (tc0, b0);
              next0 = tcp_next_output (is_ip4);
            case TCP_STATE_TIME_WAIT:
              /* Remain in the TIME-WAIT state. Restart the 2 MSL time-wait
               * timeout. */
              tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME);
2319 TCP_EVT_DBG (TCP_EVT_FIN_RCVD, tc0);
2322 b0->error = error0 ? node->errors[error0] : 0;
2324 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2326 tcp_rx_trace_t *t0 =
2327 vlib_add_trace (vm, node, b0, sizeof (*t0));
2328 tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
2331 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2332 n_left_to_next, bi0, next0);
2335 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2338 errors = session_manager_flush_enqueue_events (my_thread_index);
2342 vlib_node_increment_counter (vm, tcp4_established_node.index,
2343 TCP_ERROR_EVENT_FIFO_FULL, errors);
2345 vlib_node_increment_counter (vm, tcp6_established_node.index,
2346 TCP_ERROR_EVENT_FIFO_FULL, errors);
2349 return from_frame->n_vectors;
2353 tcp4_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
2354 vlib_frame_t * from_frame)
2356 return tcp46_rcv_process_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2360 tcp6_rcv_process (vlib_main_t * vm, vlib_node_runtime_t * node,
2361 vlib_frame_t * from_frame)
2363 return tcp46_rcv_process_inline (vm, node, from_frame, 0 /* is_ip4 */ );
VLIB_REGISTER_NODE (tcp4_rcv_process_node) =
{
  .function = tcp4_rcv_process,
  .name = "tcp4-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_rcv_process_node, tcp4_rcv_process);
VLIB_REGISTER_NODE (tcp6_rcv_process_node) =
{
  .function = tcp6_rcv_process,
  .name = "tcp6-rcv-process",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RCV_PROCESS_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_RCV_PROCESS_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_rcv_process_node, tcp6_rcv_process);
vlib_node_registration_t tcp4_listen_node;
vlib_node_registration_t tcp6_listen_node;
/**
 * LISTEN state processing as per RFC 793 p. 65
 */
always_inline uword
tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();
  u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  tcp_rx_trace_t *t0;
	  tcp_header_t *th0 = 0;
	  tcp_connection_t *lc0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  tcp_connection_t *child0;
	  u32 error0 = TCP_ERROR_SYNS_RCVD, next0 = TCP_LISTEN_NEXT_DROP;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  lc0 = tcp_listener_get (vnet_buffer (b0)->tcp.connection_index);

	  if (is_ip4)
	    {
	      ip40 = vlib_buffer_get_current (b0);
	      th0 = ip4_next_header (ip40);
	    }
	  else
	    {
	      ip60 = vlib_buffer_get_current (b0);
	      th0 = ip6_next_header (ip60);
	    }
	  /* Create child session. For SYN-flood protection, a filter could
	   * be used here. */

	  /* 1. first check for an RST: handled in dispatch */
	  /* if (tcp_rst (th0))
	     goto drop;
	   */

	  /* 2. second check for an ACK: handled in dispatch */
	  /* if (tcp_ack (th0))
	     {
	       tcp_send_reset (b0, is_ip4);
	       goto drop;
	     }
	   */

	  /* 3. check for a SYN (did that already) */

	  /* Create child session and send SYN-ACK */
	  pool_get (tm->connections[my_thread_index], child0);
	  memset (child0, 0, sizeof (*child0));
	  child0->c_c_index = child0 - tm->connections[my_thread_index];
	  child0->c_lcl_port = lc0->c_lcl_port;
	  child0->c_rmt_port = th0->src_port;
	  child0->c_is_ip4 = is_ip4;
	  child0->c_thread_index = my_thread_index;
	  child0->state = TCP_STATE_SYN_RCVD;

	  if (is_ip4)
	    {
	      child0->c_lcl_ip4.as_u32 = ip40->dst_address.as_u32;
	      child0->c_rmt_ip4.as_u32 = ip40->src_address.as_u32;
	    }
	  else
	    {
	      clib_memcpy (&child0->c_lcl_ip6, &ip60->dst_address,
			   sizeof (ip6_address_t));
	      clib_memcpy (&child0->c_rmt_ip6, &ip60->src_address,
			   sizeof (ip6_address_t));
	    }
	  if (stream_session_accept (&child0->connection, lc0->c_s_index, sst,
				     0 /* notify */ ))
	    {
	      error0 = TCP_ERROR_CREATE_SESSION_FAIL;
	      goto drop;
	    }

	  if (tcp_options_parse (th0, &child0->rcv_opts))
	    goto drop;

	  child0->irs = vnet_buffer (b0)->tcp.seq_number;
	  child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1;
	  child0->rcv_las = child0->rcv_nxt;
	  /* RFC1323: TSval timestamps sent on {SYN} and {SYN,ACK}
	   * segments are used to initialize PAWS. */
	  if (tcp_opts_tstamp (&child0->rcv_opts))
	    {
	      child0->tsval_recent = child0->rcv_opts.tsval;
	      child0->tsval_recent_age = tcp_time_now ();
	    }

	  if (tcp_opts_wscale (&child0->rcv_opts))
	    child0->snd_wscale = child0->rcv_opts.wscale;

	  child0->snd_wnd = clib_net_to_host_u16 (th0->window)
	    << child0->snd_wscale;
	  child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number;
	  child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number;
	  tcp_connection_init_vars (child0);

	  TCP_EVT_DBG (TCP_EVT_SYN_RCVD, child0);

	  /* Reuse buffer to make syn-ack and send */
	  tcp_make_synack (child0, b0);
	  next0 = tcp_next_output (is_ip4);

	  /* Init fifo pointers after we have iss */
	  stream_session_init_fifos_pointers (&child0->connection,
					      child0->irs + 1,
					      child0->iss + 1);
	drop:
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	      clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header));
	      clib_memcpy (&t0->tcp_connection, lc0,
			   sizeof (t0->tcp_connection));
	    }

	  b0->error = node->errors[error0];

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}
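
/* Illustrative sketch only: how the child's send window is derived from
 * the 16-bit window field of the peer's SYN, as done in the listen path
 * above. The clamp to 14 follows RFC 1323's maximum shift and is an
 * assumption of this sketch; the node uses the parsed option directly. */
static inline u32
tcp_scaled_snd_wnd_sketch (u16 wnd_net_order, u8 snd_wscale)
{
  u8 shift = snd_wscale > 14 ? 14 : snd_wscale;
  return (u32) clib_net_to_host_u16 (wnd_net_order) << shift;
}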
static uword
tcp4_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
	     vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}
static uword
tcp6_listen (vlib_main_t * vm, vlib_node_runtime_t * node,
	     vlib_frame_t * from_frame)
{
  return tcp46_listen_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_listen_node) =
{
  .function = tcp4_listen,
  .name = "tcp4-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_listen_node, tcp4_listen);
VLIB_REGISTER_NODE (tcp6_listen_node) =
{
  .function = tcp6_listen,
  .name = "tcp6-listen",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_LISTEN_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_LISTEN_NEXT_##s] = n,
    foreach_tcp_state_next
#undef _
  },
  .format_trace = format_tcp_rx_trace_short,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_listen_node, tcp6_listen);
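
/* Illustrative sketch only: the receive-side sequence bootstrap performed
 * by the listen path above. The SYN consumes one sequence number
 * (RFC 793), so the first expected data byte is irs + 1, and rcv_las
 * starts out equal to rcv_nxt since no ack has been sent yet. The helper
 * name is hypothetical. */
static inline void
tcp_rcv_seq_init_sketch (u32 syn_seq, u32 * irs, u32 * rcv_nxt,
			 u32 * rcv_las)
{
  *irs = syn_seq;		/* initial receive sequence number */
  *rcv_nxt = syn_seq + 1;	/* next byte we expect */
  *rcv_las = *rcv_nxt;		/* rcv_nxt at last ack sent */
}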
vlib_node_registration_t tcp4_input_node;
vlib_node_registration_t tcp6_input_node;

typedef enum _tcp_input_next
{
  TCP_INPUT_NEXT_DROP,
  TCP_INPUT_NEXT_LISTEN,
  TCP_INPUT_NEXT_RCV_PROCESS,
  TCP_INPUT_NEXT_SYN_SENT,
  TCP_INPUT_NEXT_ESTABLISHED,
  TCP_INPUT_NEXT_RESET,
  TCP_INPUT_N_NEXT
} tcp_input_next_t;
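
/* For illustration only: what the ip4 .next_nodes initializer below
 * expands to once the `_' macro is applied to foreach_tcp4_input_next.
 * The macros remain authoritative and keep indices and arc names in sync;
 * this hand-expanded copy is unused. */
static const char *tcp4_input_next_nodes_sketch[TCP_INPUT_N_NEXT]
  __attribute__ ((unused)) = {
  [TCP_INPUT_NEXT_DROP] = "error-drop",
  [TCP_INPUT_NEXT_LISTEN] = "tcp4-listen",
  [TCP_INPUT_NEXT_RCV_PROCESS] = "tcp4-rcv-process",
  [TCP_INPUT_NEXT_SYN_SENT] = "tcp4-syn-sent",
  [TCP_INPUT_NEXT_ESTABLISHED] = "tcp4-established",
  [TCP_INPUT_NEXT_RESET] = "tcp4-reset",
};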
#define foreach_tcp4_input_next                \
  _ (DROP, "error-drop")                       \
  _ (LISTEN, "tcp4-listen")                    \
  _ (RCV_PROCESS, "tcp4-rcv-process")          \
  _ (SYN_SENT, "tcp4-syn-sent")                \
  _ (ESTABLISHED, "tcp4-established")          \
  _ (RESET, "tcp4-reset")

#define foreach_tcp6_input_next                \
  _ (DROP, "error-drop")                       \
  _ (LISTEN, "tcp6-listen")                    \
  _ (RCV_PROCESS, "tcp6-rcv-process")          \
  _ (SYN_SENT, "tcp6-syn-sent")                \
  _ (ESTABLISHED, "tcp6-established")          \
  _ (RESET, "tcp6-reset")

#define filter_flags (TCP_FLAG_SYN|TCP_FLAG_ACK|TCP_FLAG_RST|TCP_FLAG_FIN)
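
/* Illustrative sketch only: dispatch uses the connection state and the
 * TCP flags masked down to SYN/ACK/RST/FIN as table indices, so PSH, URG
 * and the ECN bits never influence the chosen next node. */
static inline u8
tcp_dispatch_flags_sketch (u8 tcp_flags)
{
  return tcp_flags & filter_flags;
}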
always_inline uword
tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * from_frame, int is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  u32 my_thread_index = vm->thread_index;
  tcp_main_t *tm = vnet_get_tcp_main ();

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  int n_advance_bytes0, n_data_bytes0;
	  u32 bi0;
	  vlib_buffer_t *b0;
	  tcp_header_t *tcp0 = 0;
	  tcp_connection_t *tc0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  u8 flags0;
	  u32 error0 = TCP_ERROR_NO_LISTENER, next0 = TCP_INPUT_NEXT_DROP;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  vnet_buffer (b0)->tcp.flags = 0;
	  /* Checksum was already validated by ip4/ip6-local; no need to
	   * compute it again */

	  if (is_ip4)
	    {
	      ip40 = vlib_buffer_get_current (b0);
	      tcp0 = ip4_next_header (ip40);
	      n_advance_bytes0 = (ip4_header_bytes (ip40)
				  + tcp_header_bytes (tcp0));
	      n_data_bytes0 = clib_net_to_host_u16 (ip40->length)
		- n_advance_bytes0;

	      /* lookup session */
	      tc0 =
		(tcp_connection_t *)
		stream_session_lookup_transport4 (&ip40->dst_address,
						  &ip40->src_address,
						  tcp0->dst_port,
						  tcp0->src_port,
						  SESSION_TYPE_IP4_TCP,
						  my_thread_index);
	    }
	  else
	    {
	      ip60 = vlib_buffer_get_current (b0);
	      tcp0 = ip6_next_header (ip60);
	      n_advance_bytes0 = tcp_header_bytes (tcp0);
	      n_data_bytes0 = clib_net_to_host_u16 (ip60->payload_length)
		- n_advance_bytes0;
	      n_advance_bytes0 += sizeof (ip60[0]);

	      tc0 =
		(tcp_connection_t *)
		stream_session_lookup_transport6 (&ip60->src_address,
						  &ip60->dst_address,
						  tcp0->src_port,
						  tcp0->dst_port,
						  SESSION_TYPE_IP6_TCP,
						  my_thread_index);
	    }

	  /* Length check: a truncated segment yields a negative payload */
	  if (PREDICT_FALSE (n_data_bytes0 < 0))
	    {
	      error0 = TCP_ERROR_LENGTH;
	      goto done;
	    }
	  /* Session exists */
	  if (PREDICT_TRUE (0 != tc0))
	    {
	      /* Save connection index */
	      vnet_buffer (b0)->tcp.connection_index = tc0->c_c_index;
	      vnet_buffer (b0)->tcp.seq_number =
		clib_net_to_host_u32 (tcp0->seq_number);
	      vnet_buffer (b0)->tcp.ack_number =
		clib_net_to_host_u32 (tcp0->ack_number);

	      vnet_buffer (b0)->tcp.hdr_offset = (u8 *) tcp0
		- (u8 *) vlib_buffer_get_current (b0);
	      vnet_buffer (b0)->tcp.data_offset = n_advance_bytes0;
	      vnet_buffer (b0)->tcp.data_len = n_data_bytes0;

	      flags0 = tcp0->flags & filter_flags;
	      next0 = tm->dispatch_table[tc0->state][flags0].next;
	      error0 = tm->dispatch_table[tc0->state][flags0].error;
	      if (PREDICT_FALSE (error0 == TCP_ERROR_DISPATCH
				 || next0 == TCP_INPUT_NEXT_RESET))
		{
		  /* Overload tcp flags to store state */
		  tcp_state_t state0 = tc0->state;
		  vnet_buffer (b0)->tcp.flags = tc0->state;

		  if (error0 == TCP_ERROR_DISPATCH)
		    clib_warning ("disp error state %U flags %U",
				  format_tcp_state, state0,
				  format_tcp_flags, (u32) flags0);
		}
	    }
	  else
	    {
	      /* No listener: send reset */
	      next0 = TCP_INPUT_NEXT_RESET;
	      error0 = TCP_ERROR_NO_LISTENER;
	    }
	done:
	  b0->error = error0 ? node->errors[error0] : 0;

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      tcp_rx_trace_t *t0 =
		vlib_add_trace (vm, node, b0, sizeof (*t0));
	      tcp_set_rx_trace_data (t0, tc0, tcp0, b0, is_ip4);
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return from_frame->n_vectors;
}
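
/* Illustrative sketch only, mirroring the ip4 arithmetic above: the
 * advance is the bytes from the current ip header to the TCP payload,
 * and the remainder of the ip total length is the payload size; a
 * negative result marks a truncated segment (TCP_ERROR_LENGTH). The
 * helper name is hypothetical. */
static inline int
tcp4_data_len_sketch (ip4_header_t * ip4, tcp_header_t * tcp,
		      int *n_advance_bytes)
{
  *n_advance_bytes = ip4_header_bytes (ip4) + tcp_header_bytes (tcp);
  return (int) clib_net_to_host_u16 (ip4->length) - *n_advance_bytes;
}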
static uword
tcp4_input (vlib_main_t * vm, vlib_node_runtime_t * node,
	    vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}
static uword
tcp6_input (vlib_main_t * vm, vlib_node_runtime_t * node,
	    vlib_frame_t * from_frame)
{
  return tcp46_input_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_input_node) =
{
  .function = tcp4_input,
  .name = "tcp4-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp4_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp4_input_node, tcp4_input);
VLIB_REGISTER_NODE (tcp6_input_node) =
{
  .function = tcp6_input,
  .name = "tcp6-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_INPUT_N_NEXT,
  .next_nodes =
  {
#define _(s,n) [TCP_INPUT_NEXT_##s] = n,
    foreach_tcp6_input_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_rx_trace,
};

VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input);
static void
tcp_dispatch_table_init (tcp_main_t * tm)
{
  int i, j;
  for (i = 0; i < ARRAY_LEN (tm->dispatch_table); i++)
    for (j = 0; j < ARRAY_LEN (tm->dispatch_table[i]); j++)
      {
	tm->dispatch_table[i][j].next = TCP_INPUT_NEXT_DROP;
	tm->dispatch_table[i][j].error = TCP_ERROR_DISPATCH;
      }

#define _(t,f,n,e)                                           	\
do {                                                       	\
    tm->dispatch_table[TCP_STATE_##t][f].next = (n);         	\
    tm->dispatch_table[TCP_STATE_##t][f].error = (e);        	\
} while (0)
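
  /* Note: the do { ... } while (0) wrapper makes the two assignments
   * behave as a single statement, so a row written as
   * `if (cond) _(LISTEN, ...); else ...' would still parse as intended. */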
  /* SYNs for new connections -> tcp-listen. */
  _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE);
  _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_NONE);
  /* ACK for a SYN-ACK -> tcp-rcv-process. */
  _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* SYN-ACK for a SYN */
  _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE);
  _(SYN_SENT, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT,
    TCP_ERROR_NONE);
  /* ACK for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  /* FIN for an established connection -> tcp-established. */
  _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE);
  _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED,
    TCP_ERROR_NONE);
  /* ACK or FIN-ACK to our FIN */
  _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  /* FIN in reply to our FIN from the other side */
  _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  /* FIN confirming that the peer (app) has closed */
  _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(FIN_WAIT_2, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(CLOSE_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(LAST_ACK, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE);
  _(TIME_WAIT, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS,
    TCP_ERROR_NONE);
  _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED);
  _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED);
#undef _
}
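
/* Illustrative usage sketch only: once initialized, dispatch is two array
 * reads keyed by state and masked flags, exactly as tcp46_input_inline
 * does above; cells never populated keep the defaults set at the top of
 * tcp_dispatch_table_init (drop, TCP_ERROR_DISPATCH). The helper name is
 * hypothetical. */
static inline void
tcp_dispatch_lookup_sketch (tcp_main_t * tm, tcp_state_t state, u8 flags,
			    u32 * next, u32 * error)
{
  *next = tm->dispatch_table[state][flags & filter_flags].next;
  *error = tm->dispatch_table[state][flags & filter_flags].error;
}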
clib_error_t *
tcp_input_init (vlib_main_t * vm)
{
  clib_error_t *error = 0;
  tcp_main_t *tm = vnet_get_tcp_main ();

  if ((error = vlib_call_init_function (vm, tcp_init)))
    return error;

  /* Initialize dispatch table. */
  tcp_dispatch_table_init (tm);

  return error;
}

VLIB_INIT_FUNCTION (tcp_input_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */