2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/tcp/tcp.h>
19 typedef enum _tcp_output_next
22 TCP_OUTPUT_NEXT_IP_LOOKUP,
23 TCP_OUTPUT_NEXT_IP_REWRITE,
24 TCP_OUTPUT_NEXT_IP_ARP,
28 #define foreach_tcp4_output_next \
29 _ (DROP, "error-drop") \
30 _ (IP_LOOKUP, "ip4-lookup") \
31 _ (IP_REWRITE, "ip4-rewrite") \
34 #define foreach_tcp6_output_next \
35 _ (DROP, "error-drop") \
36 _ (IP_LOOKUP, "ip6-lookup") \
37 _ (IP_REWRITE, "ip6-rewrite") \
38 _ (IP_ARP, "ip6-discover-neighbor")
40 static char *tcp_error_strings[] = {
41 #define tcp_error(n,s) s,
42 #include <vnet/tcp/tcp_error.def>
48 tcp_header_t tcp_header;
49 tcp_connection_t tcp_connection;
53 format_tcp_tx_trace (u8 * s, va_list * args)
55 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
56 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
57 tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
58 u32 indent = format_get_indent (s);
60 s = format (s, "%U\n%U%U",
61 format_tcp_header, &t->tcp_header, 128,
62 format_white_space, indent,
63 format_tcp_connection, &t->tcp_connection, 1);
68 #ifndef CLIB_MARCH_VARIANT
70 tcp_window_compute_scale (u32 window)
73 while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
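/* Example (informal): for a 4 MB fifo the loop yields wnd_scale = 7,
 * since (4 << 20) >> 7 = 32768 fits the 16-bit window field while
 * (4 << 20) >> 6 = 65536 does not. */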
79 * Update max segment size we're able to process.
81 * The value is constrained by our interface's MTU and IP options. It is
82 * also what we advertise to our peer.
85 tcp_update_rcv_mss (tcp_connection_t * tc)
87 /* TODO find our iface MTU */
88 tc->mss = tcp_main.default_mtu - sizeof (tcp_header_t);
92 * TCP's initial window
95 tcp_initial_wnd_unscaled (tcp_connection_t * tc)
97 /* RFC 6928 recommends the value below (TCP_IW_N_SEGMENTS * mss). However,
98 * at the time our connections are initialized, fifos may not be allocated.
99 * Therefore, advertise the smallest possible unscaled window size and update
100 * it once fifos are assigned to the session.
103 tcp_update_rcv_mss (tc);
104 /* The RFC 6928 value would be TCP_IW_N_SEGMENTS * tc->mss (see above) */
106 return TCP_MIN_RX_FIFO_SIZE;
110 * Compute initial window and scale factor. As per RFC1323, window field in
111 * SYN and SYN-ACK segments is never scaled.
114 tcp_initial_window_to_advertise (tcp_connection_t * tc)
116 tcp_main_t *tm = &tcp_main;
119 /* Initial wnd for SYN. Fifos are not allocated yet.
120 * Use some predefined value. For SYN-ACK we still want the
121 * scale to be computed in the same way */
122 max_fifo = tm->max_rx_fifo ? tm->max_rx_fifo : TCP_MAX_RX_FIFO_SIZE;
124 /* Compute rcv wscale only if peer advertised support for it */
125 if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
126 tc->rcv_wscale = tcp_window_compute_scale (max_fifo);
128 tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
130 return clib_min (tc->rcv_wnd, TCP_WND_MAX);
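/* Example (informal): even with a large fifo and rcv_wscale = 7, the value
 * returned here, and thus the window field in the SYN or SYN-ACK, never
 * exceeds TCP_WND_MAX (65535); the scaled window only applies to segments
 * sent after the handshake. */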
134 tcp_update_rcv_wnd (tcp_connection_t * tc)
136 u32 available_space, wnd;
139 ASSERT (tc->rcv_opts.mss < transport_rx_fifo_size (&tc->connection));
142 * Figure out how much space we have available
144 available_space = transport_max_rx_enqueue (&tc->connection);
145 if (PREDICT_FALSE (available_space < tc->rcv_opts.mss))
149 * Use the above and what we know about what we've previously advertised
150 * to compute the new window
152 observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
154 /* Bad. Thou shalt not shrink */
155 if (PREDICT_FALSE ((i32) available_space < observed_wnd))
157 wnd = clib_max (observed_wnd, 0);
158 TCP_EVT_DBG (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
162 wnd = available_space;
165 /* Make sure we have a multiple of rcv_wscale */
166 if (wnd && tc->rcv_wscale)
168 wnd &= ~((1 << tc->rcv_wscale) - 1);
170 wnd = 1 << tc->rcv_wscale;
173 tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
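/* Example (informal): with rcv_wscale = 7, an available window of 100000
 * bytes is rounded down to 99968 (a multiple of 1 << 7), so we never
 * advertise a window we cannot represent exactly after scaling. */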
177 * Compute and return window to advertise, scaled as per RFC1323
180 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
182 if (state < TCP_STATE_ESTABLISHED)
183 return tcp_initial_window_to_advertise (tc);
185 tcp_update_rcv_wnd (tc);
186 return tc->rcv_wnd >> tc->rcv_wscale;
190 * Write TCP options to segment.
193 tcp_options_write (u8 * data, tcp_options_t * opts)
196 u32 buf, seq_len = 4;
198 if (tcp_opts_mss (opts))
200 *data++ = TCP_OPTION_MSS;
201 *data++ = TCP_OPTION_LEN_MSS;
202 buf = clib_host_to_net_u16 (opts->mss);
203 clib_memcpy_fast (data, &buf, sizeof (opts->mss));
204 data += sizeof (opts->mss);
205 opts_len += TCP_OPTION_LEN_MSS;
208 if (tcp_opts_wscale (opts))
210 *data++ = TCP_OPTION_WINDOW_SCALE;
211 *data++ = TCP_OPTION_LEN_WINDOW_SCALE;
212 *data++ = opts->wscale;
213 opts_len += TCP_OPTION_LEN_WINDOW_SCALE;
216 if (tcp_opts_sack_permitted (opts))
218 *data++ = TCP_OPTION_SACK_PERMITTED;
219 *data++ = TCP_OPTION_LEN_SACK_PERMITTED;
220 opts_len += TCP_OPTION_LEN_SACK_PERMITTED;
223 if (tcp_opts_tstamp (opts))
225 *data++ = TCP_OPTION_TIMESTAMP;
226 *data++ = TCP_OPTION_LEN_TIMESTAMP;
227 buf = clib_host_to_net_u32 (opts->tsval);
228 clib_memcpy_fast (data, &buf, sizeof (opts->tsval));
229 data += sizeof (opts->tsval);
230 buf = clib_host_to_net_u32 (opts->tsecr);
231 clib_memcpy_fast (data, &buf, sizeof (opts->tsecr));
232 data += sizeof (opts->tsecr);
233 opts_len += TCP_OPTION_LEN_TIMESTAMP;
236 if (tcp_opts_sack (opts))
240 if (opts->n_sack_blocks != 0)
242 *data++ = TCP_OPTION_SACK_BLOCK;
243 *data++ = 2 + opts->n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
244 for (i = 0; i < opts->n_sack_blocks; i++)
246 buf = clib_host_to_net_u32 (opts->sacks[i].start);
247 clib_memcpy_fast (data, &buf, seq_len);
249 buf = clib_host_to_net_u32 (opts->sacks[i].end);
250 clib_memcpy_fast (data, &buf, seq_len);
253 opts_len += 2 + opts->n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
257 /* Terminate TCP options */
260 *data++ = TCP_OPTION_EOL;
261 opts_len += TCP_OPTION_LEN_EOL;
264 /* Pad with zeroes to a u32 boundary */
267 *data++ = TCP_OPTION_NOOP;
268 opts_len += TCP_OPTION_LEN_NOOP;
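/* Illustrative sketch (not part of the original file): how a caller might
 * serialize an options struct with tcp_options_write(). The helper name and
 * the 40-byte scratch size (TCP's option-space maximum) are ours, not from
 * this codebase; the block is disabled and only meant as an example. */
#if 0
static void
tcp_options_write_example (void)
{
  tcp_options_t opts = { 0 };
  u8 wire[40] = { 0 };

  /* A lone MSS option is kind (1) + length (1) + value (2) = 4 bytes,
   * already 4-byte aligned, so no EOL/NOOP padding is appended. */
  opts.flags = TCP_OPTS_FLAG_MSS;
  opts.mss = 1460;
  ASSERT (tcp_options_write (wire, &opts) == TCP_OPTION_LEN_MSS);
}
#endif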
274 tcp_make_syn_options (tcp_options_t * opts, u8 wnd_scale)
278 opts->flags |= TCP_OPTS_FLAG_MSS;
279 opts->mss = tcp_main.default_mtu; /*XXX discover that */
280 len += TCP_OPTION_LEN_MSS;
282 opts->flags |= TCP_OPTS_FLAG_WSCALE;
283 opts->wscale = wnd_scale;
284 len += TCP_OPTION_LEN_WINDOW_SCALE;
286 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
287 opts->tsval = tcp_time_now ();
289 len += TCP_OPTION_LEN_TIMESTAMP;
293 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
294 len += TCP_OPTION_LEN_SACK_PERMITTED;
297 /* Align to needed boundary */
298 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
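/* Example (informal): MSS (4) + window scale (3) + timestamp (10) +
 * SACK permitted (2) = 19 bytes, padded here to 20 so the header length
 * stays a multiple of 4. */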
303 tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
307 opts->flags |= TCP_OPTS_FLAG_MSS;
309 len += TCP_OPTION_LEN_MSS;
311 if (tcp_opts_wscale (&tc->rcv_opts))
313 opts->flags |= TCP_OPTS_FLAG_WSCALE;
314 opts->wscale = tc->rcv_wscale;
315 len += TCP_OPTION_LEN_WINDOW_SCALE;
318 if (tcp_opts_tstamp (&tc->rcv_opts))
320 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
321 opts->tsval = tcp_time_now ();
322 opts->tsecr = tc->tsval_recent;
323 len += TCP_OPTION_LEN_TIMESTAMP;
326 if (tcp_opts_sack_permitted (&tc->rcv_opts))
328 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
329 len += TCP_OPTION_LEN_SACK_PERMITTED;
332 /* Align to needed boundary */
333 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
338 tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
344 if (tcp_opts_tstamp (&tc->rcv_opts))
346 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
347 opts->tsval = tcp_tstamp (tc);
348 opts->tsecr = tc->tsval_recent;
349 len += TCP_OPTION_LEN_TIMESTAMP;
351 if (tcp_opts_sack_permitted (&tc->rcv_opts))
353 if (vec_len (tc->snd_sacks))
355 opts->flags |= TCP_OPTS_FLAG_SACK;
356 if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
357 tc->snd_sack_pos = 0;
358 opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
359 opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
360 opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
361 TCP_OPTS_MAX_SACK_BLOCKS);
362 tc->snd_sack_pos += opts->n_sack_blocks;
363 len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
367 /* Align to needed boundary */
368 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
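/* Example (informal): timestamps (10) plus a SACK option with the maximum
 * of TCP_OPTS_MAX_SACK_BLOCKS = 3 blocks (2 + 3 * 8 = 26) total 36 bytes,
 * already aligned and within the 40-byte option space. */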
373 tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
378 case TCP_STATE_ESTABLISHED:
379 case TCP_STATE_CLOSE_WAIT:
380 case TCP_STATE_FIN_WAIT_1:
381 case TCP_STATE_LAST_ACK:
382 case TCP_STATE_CLOSING:
383 case TCP_STATE_FIN_WAIT_2:
384 case TCP_STATE_TIME_WAIT:
385 case TCP_STATE_CLOSED:
386 return tcp_make_established_options (tc, opts);
387 case TCP_STATE_SYN_RCVD:
388 return tcp_make_synack_options (tc, opts);
389 case TCP_STATE_SYN_SENT:
390 return tcp_make_syn_options (opts, tc->rcv_wscale);
392 clib_warning ("State not handled! %d", state);
398 * Update burst send vars
400 * - Updates snd_mss to reflect the effective segment size that we can send
401 * by taking into account all TCP options, including SACKs.
402 * - Caches 'on the wire' options for reuse
403 * - Updates receive window which can be reused for a burst.
405 * This should *only* be called when doing bursts
408 tcp_update_burst_snd_vars (tcp_connection_t * tc)
410 tcp_main_t *tm = &tcp_main;
412 /* Compute options to be used for connection. These may be reused when
413 * sending data or to compute the effective mss (snd_mss) */
414 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
415 TCP_STATE_ESTABLISHED);
417 /* XXX check if MTU has been updated */
418 tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
419 ASSERT (tc->snd_mss > 0);
421 tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
424 tcp_update_rcv_wnd (tc);
426 if (tc->flags & TCP_CONN_RATE_SAMPLE)
427 tc->flags |= TCP_CONN_TRACK_BURST;
429 if (tc->snd_una == tc->snd_nxt)
430 tcp_cc_event (tc, TCP_CC_EVT_START_TX);
434 tcp_init_mss (tcp_connection_t * tc)
436 u16 default_min_mss = 536;
437 tcp_update_rcv_mss (tc);
439 /* TODO cache mss and consider PMTU discovery */
440 tc->snd_mss = clib_min (tc->rcv_opts.mss, tc->mss);
442 if (tc->snd_mss < 45)
444 /* Assume that at least the min default mss works */
445 tc->snd_mss = default_min_mss;
446 tc->rcv_opts.mss = default_min_mss;
449 /* We should have enough space for 40 bytes of options */
450 ASSERT (tc->snd_mss > 45);
452 /* If we use timestamp option, account for it */
453 if (tcp_opts_tstamp (&tc->rcv_opts))
454 tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
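/* Example (informal, assuming the default MTU of 1500): our mss is 1480,
 * so a peer advertising 1460 gives snd_mss = 1460; with timestamps in use
 * another 10 bytes are reserved, leaving 1450 bytes of payload per segment. */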
456 #endif /* CLIB_MARCH_VARIANT */
459 tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
461 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
462 vlib_buffer_free_one (vm, b->next_buffer);
463 /* Zero all flags but free list index and trace flag */
464 b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
466 b->current_length = 0;
467 b->total_length_not_including_first_buffer = 0;
468 vnet_buffer (b)->tcp.flags = 0;
470 /* Leave enough space for headers */
471 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
474 #ifndef CLIB_MARCH_VARIANT
476 tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
478 ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
479 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
480 b->total_length_not_including_first_buffer = 0;
482 vnet_buffer (b)->tcp.flags = 0;
483 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
484 /* Leave enough space for headers */
485 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
492 tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
495 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
496 u8 tcp_opts_len, tcp_hdr_opts_len;
500 wnd = tcp_window_to_advertise (tc, state);
502 /* Make and write options */
503 tcp_opts_len = tcp_make_established_options (tc, snd_opts);
504 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
506 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
507 tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);
509 tcp_options_write ((u8 *) (th + 1), snd_opts);
510 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
513 tcp_zero_rwnd_sent_on (tc);
515 tcp_zero_rwnd_sent_off (tc);
519 * Convert buffer to ACK
522 tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
524 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
525 TCP_EVT_DBG (TCP_EVT_ACK_SENT, tc);
526 tc->rcv_las = tc->rcv_nxt;
530 * Convert buffer to FIN-ACK
533 tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
535 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
539 * Convert buffer to SYN
542 tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
544 u8 tcp_hdr_opts_len, tcp_opts_len;
547 tcp_options_t snd_opts;
549 initial_wnd = tcp_initial_window_to_advertise (tc);
551 /* Make and write options */
552 clib_memset (&snd_opts, 0, sizeof (snd_opts));
553 tcp_opts_len = tcp_make_syn_options (&snd_opts, tc->rcv_wscale);
554 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
556 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
557 tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
559 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
560 tcp_options_write ((u8 *) (th + 1), &snd_opts);
564 * Convert buffer to SYN-ACK
567 tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
569 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
570 u8 tcp_opts_len, tcp_hdr_opts_len;
574 clib_memset (snd_opts, 0, sizeof (*snd_opts));
575 initial_wnd = tcp_initial_window_to_advertise (tc);
576 tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
577 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
579 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
580 tc->rcv_nxt, tcp_hdr_opts_len,
581 TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
582 tcp_options_write ((u8 *) (th + 1), snd_opts);
584 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
588 tcp_enqueue_to_ip_lookup_i (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
589 u8 is_ip4, u32 fib_index, u8 flush)
591 vlib_main_t *vm = wrk->vm;
592 u32 *to_next, next_index;
595 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
598 vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
599 vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
601 /* Send to IP lookup */
602 next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
603 tcp_trajectory_add_start (b, 1);
605 f = wrk->ip_lookup_tx_frames[!is_ip4];
608 f = vlib_get_frame_to_node (vm, next_index);
610 wrk->ip_lookup_tx_frames[!is_ip4] = f;
613 to_next = vlib_frame_vector_args (f);
614 to_next[f->n_vectors] = bi;
616 if (flush || f->n_vectors == VLIB_FRAME_SIZE)
618 vlib_put_frame_to_node (vm, next_index, f);
619 wrk->ip_lookup_tx_frames[!is_ip4] = 0;
624 tcp_enqueue_to_ip_lookup_now (tcp_worker_ctx_t * wrk, vlib_buffer_t * b,
625 u32 bi, u8 is_ip4, u32 fib_index)
627 tcp_enqueue_to_ip_lookup_i (wrk, b, bi, is_ip4, fib_index, 1);
631 tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
632 u8 is_ip4, u32 fib_index)
634 tcp_enqueue_to_ip_lookup_i (wrk, b, bi, is_ip4, fib_index, 0);
635 if (wrk->vm->thread_index == 0 && vlib_num_workers ())
636 session_flush_frames_main_thread (wrk->vm);
640 tcp_enqueue_to_output_i (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
643 u32 *to_next, next_index;
646 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
649 /* Decide where to send the packet */
650 next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
651 tcp_trajectory_add_start (b, 2);
653 /* Get frame to v4/6 output node */
654 f = wrk->tx_frames[!is_ip4];
657 f = vlib_get_frame_to_node (wrk->vm, next_index);
659 wrk->tx_frames[!is_ip4] = f;
661 to_next = vlib_frame_vector_args (f);
662 to_next[f->n_vectors] = bi;
664 if (flush || f->n_vectors == VLIB_FRAME_SIZE)
666 vlib_put_frame_to_node (wrk->vm, next_index, f);
667 wrk->tx_frames[!is_ip4] = 0;
672 tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
675 tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 0);
679 tcp_enqueue_to_output_now (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
682 tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 1);
684 #endif /* CLIB_MARCH_VARIANT */
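/* Illustrative sketch (not part of the original file): the usual pattern for
 * emitting a control segment with the helpers above. The function name is
 * hypothetical; tcp_send_ack() further below is the real counterpart. The
 * buffer lands in the per-worker tx frame and is handed to
 * tcp4-output/tcp6-output when the frame fills or is flushed. Disabled,
 * for illustration only. */
#if 0
static void
tcp_send_ack_sketch (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_buffer_t *b;
  u32 bi;

  if (PREDICT_FALSE (!vlib_buffer_alloc (wrk->vm, &bi, 1)))
    return;			/* out of buffers; caller must reschedule */
  b = vlib_get_buffer (wrk->vm, bi);
  tcp_init_buffer (wrk->vm, b);
  tcp_make_ack (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}
#endif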
687 tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b0,
688 tcp_state_t state, u8 thread_index, u8 is_ip4)
693 ip4_address_t src_ip40, dst_ip40;
694 ip6_address_t src_ip60, dst_ip60;
695 u16 src_port, dst_port;
700 /* Find IP and TCP headers */
701 th0 = tcp_buffer_hdr (b0);
703 /* Save src and dst ip */
706 ih4 = vlib_buffer_get_current (b0);
707 ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
708 src_ip40.as_u32 = ih4->src_address.as_u32;
709 dst_ip40.as_u32 = ih4->dst_address.as_u32;
713 ih6 = vlib_buffer_get_current (b0);
714 ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
715 clib_memcpy_fast (&src_ip60, &ih6->src_address, sizeof (ip6_address_t));
716 clib_memcpy_fast (&dst_ip60, &ih6->dst_address, sizeof (ip6_address_t));
719 src_port = th0->src_port;
720 dst_port = th0->dst_port;
722 /* Try to determine what/why we're actually resetting */
723 if (state == TCP_STATE_CLOSED)
728 tmp = clib_net_to_host_u32 (th0->seq_number);
730 /* Got a SYN for no listener. */
731 flags = TCP_FLAG_RST | TCP_FLAG_ACK;
732 ack = clib_host_to_net_u32 (tmp + 1);
737 flags = TCP_FLAG_RST;
738 seq = th0->ack_number;
742 tcp_reuse_buffer (vm, b0);
743 tcp_trajectory_add_start (b0, 4);
744 th0 = vlib_buffer_push_tcp_net_order (b0, dst_port, src_port, seq, ack,
745 sizeof (tcp_header_t), flags, 0);
749 ih4 = vlib_buffer_push_ip4 (vm, b0, &dst_ip40, &src_ip40,
751 th0->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ih4);
756 ih6 = vlib_buffer_push_ip6 (vm, b0, &dst_ip60, &src_ip60,
758 th0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ih6, &bogus);
765 #ifndef CLIB_MARCH_VARIANT
767 * Send reset without reusing existing buffer
769 * Connection info is extracted from the original packet.
772 tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
773 u32 thread_index, u8 is_ip4)
775 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
776 vlib_main_t *vm = wrk->vm;
778 u32 bi, sw_if_index, fib_index;
779 u8 tcp_hdr_len, flags = 0;
780 tcp_header_t *th, *pkt_th;
782 ip4_header_t *ih4, *pkt_ih4;
783 ip6_header_t *ih6, *pkt_ih6;
784 fib_protocol_t fib_proto;
786 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
789 b = vlib_get_buffer (vm, bi);
790 sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
791 fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
792 fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
793 tcp_init_buffer (vm, b);
795 /* Make and write options */
796 tcp_hdr_len = sizeof (tcp_header_t);
800 pkt_ih4 = vlib_buffer_get_current (pkt);
801 pkt_th = ip4_next_header (pkt_ih4);
805 pkt_ih6 = vlib_buffer_get_current (pkt);
806 pkt_th = ip6_next_header (pkt_ih6);
809 if (tcp_ack (pkt_th))
811 flags = TCP_FLAG_RST;
812 seq = pkt_th->ack_number;
813 ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
817 flags = TCP_FLAG_RST | TCP_FLAG_ACK;
819 ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
822 th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
823 seq, ack, tcp_hdr_len, flags, 0);
825 /* Swap src and dst ip */
828 ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
829 ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
830 &pkt_ih4->src_address, IP_PROTOCOL_TCP, 1);
831 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
836 ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
838 ih6 = vlib_buffer_push_ip6 (vm, b, &pkt_ih6->dst_address,
839 &pkt_ih6->src_address, IP_PROTOCOL_TCP);
840 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
844 tcp_enqueue_to_ip_lookup_now (wrk, b, bi, is_ip4, fib_index);
845 TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
846 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
847 TCP_ERROR_RST_SENT, 1);
851 * Build and send a reset packet for the connection
854 tcp_send_reset (tcp_connection_t * tc)
856 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
857 vlib_main_t *vm = wrk->vm;
861 u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
864 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
866 b = vlib_get_buffer (vm, bi);
867 tcp_init_buffer (vm, b);
869 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
870 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
871 advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
872 flags = TCP_FLAG_RST;
873 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
874 tc->rcv_nxt, tcp_hdr_opts_len, flags,
876 opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
877 ASSERT (opts_write_len == tc->snd_opts_len);
878 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
882 ih4 = vlib_buffer_push_ip4 (vm, b, &tc->c_lcl_ip.ip4,
883 &tc->c_rmt_ip.ip4, IP_PROTOCOL_TCP, 0);
884 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
890 ih6 = vlib_buffer_push_ip6 (vm, b, &tc->c_lcl_ip.ip6,
891 &tc->c_rmt_ip.ip6, IP_PROTOCOL_TCP);
892 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
895 tcp_enqueue_to_ip_lookup_now (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
896 TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
897 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
898 TCP_ERROR_RST_SENT, 1);
902 tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
905 tcp_header_t *th = vlib_buffer_get_current (b);
906 vlib_main_t *vm = wrk->vm;
910 ih = vlib_buffer_push_ip4 (vm, b, &tc->c_lcl_ip4,
911 &tc->c_rmt_ip4, IP_PROTOCOL_TCP, 1);
912 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih);
919 ih = vlib_buffer_push_ip6 (vm, b, &tc->c_lcl_ip6,
920 &tc->c_rmt_ip6, IP_PROTOCOL_TCP);
921 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih, &bogus);
929 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
930 * The packet is not forwarded through tcpx_output to avoid doing lookups
931 * in the half_open pool.
934 tcp_send_syn (tcp_connection_t * tc)
936 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
937 vlib_main_t *vm = wrk->vm;
942 * Set up retransmit and establish timers before requesting a buffer
943 * such that we can return if we've run out.
945 tcp_timer_set (tc, TCP_TIMER_ESTABLISH_AO, TCP_ESTABLISH_TIME);
946 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
947 tc->rto * TCP_TO_TIMER_TICK);
949 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
951 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
955 b = vlib_get_buffer (vm, bi);
956 tcp_init_buffer (vm, b);
957 tcp_make_syn (tc, b);
959 /* Measure RTT with this */
960 tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
961 tc->rtt_seq = tc->snd_nxt;
964 tcp_push_ip_hdr (wrk, tc, b);
965 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
966 TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc);
970 tcp_send_synack (tcp_connection_t * tc)
972 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
973 vlib_main_t *vm = wrk->vm;
977 tcp_retransmit_timer_force_update (tc);
979 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
981 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
985 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
986 b = vlib_get_buffer (vm, bi);
987 tcp_init_buffer (vm, b);
988 tcp_make_synack (tc, b);
989 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
990 TCP_EVT_DBG (TCP_EVT_SYNACK_SENT, tc);
994 * Flush tx frame populated by retransmits and timer pops
997 tcp_flush_frame_to_output (tcp_worker_ctx_t * wrk, u8 is_ip4)
999 if (wrk->tx_frames[!is_ip4])
1002 next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
1003 vlib_put_frame_to_node (wrk->vm, next_index, wrk->tx_frames[!is_ip4]);
1004 wrk->tx_frames[!is_ip4] = 0;
1009 * Flush ip lookup tx frames populated by timer pops
1012 tcp_flush_frame_to_ip_lookup (tcp_worker_ctx_t * wrk, u8 is_ip4)
1014 if (wrk->ip_lookup_tx_frames[!is_ip4])
1017 next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
1018 vlib_put_frame_to_node (wrk->vm, next_index,
1019 wrk->ip_lookup_tx_frames[!is_ip4]);
1020 wrk->ip_lookup_tx_frames[!is_ip4] = 0;
1025 * Flush v4 and v6 tcp and ip-lookup tx frames for thread index
1028 tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk)
1030 tcp_flush_frame_to_output (wrk, 1);
1031 tcp_flush_frame_to_output (wrk, 0);
1032 tcp_flush_frame_to_ip_lookup (wrk, 1);
1033 tcp_flush_frame_to_ip_lookup (wrk, 0);
1040 tcp_send_fin (tcp_connection_t * tc)
1042 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1043 vlib_main_t *vm = wrk->vm;
1048 fin_snt = tc->flags & TCP_CONN_FINSNT;
1052 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1054 /* Out of buffers so program fin retransmit ASAP */
1055 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
1059 /* Make sure retransmit retries a FIN, not data */
1060 tc->flags |= TCP_CONN_FINSNT;
1064 tcp_retransmit_timer_force_update (tc);
1065 b = vlib_get_buffer (vm, bi);
1066 tcp_init_buffer (vm, b);
1067 tcp_make_fin (tc, b);
1068 tcp_enqueue_to_output_now (wrk, b, bi, tc->c_is_ip4);
1069 TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
1070 /* Account for the FIN */
1074 tc->flags |= TCP_CONN_FINSNT;
1075 tc->flags &= ~TCP_CONN_FINPNDG;
1076 tc->snd_una_max = seq_max (tc->snd_una_max, tc->snd_nxt);
1081 * Push TCP header and update connection variables. Should only be called
1082 * for segments with data, not for 'control' packets.
1085 tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
1086 u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
1088 u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
1089 u32 advertise_wnd, data_len;
1090 tcp_main_t *tm = &tcp_main;
1093 data_len = b->current_length;
1094 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
1095 data_len += b->total_length_not_including_first_buffer;
1097 vnet_buffer (b)->tcp.flags = 0;
1098 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
1101 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1103 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
1106 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
1108 advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
1110 if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
1112 if (seq_geq (tc->psh_seq, snd_nxt)
1113 && seq_lt (tc->psh_seq, snd_nxt + data_len))
1114 flags |= TCP_FLAG_PSH;
1116 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
1117 tc->rcv_nxt, tcp_hdr_opts_len, flags,
1122 clib_memcpy_fast ((u8 *) (th + 1),
1123 tm->wrk_ctx[tc->c_thread_index].cached_opts,
1128 u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
1129 ASSERT (len == tc->snd_opts_len);
1133 * Update connection variables
1137 tc->snd_nxt += data_len;
1138 tc->rcv_las = tc->rcv_nxt;
1140 TCP_EVT_DBG (TCP_EVT_PKTIZE, tc);
1144 tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
1146 tcp_connection_t *tc = (tcp_connection_t *) tconn;
1148 if (tc->flags & TCP_CONN_TRACK_BURST)
1150 tcp_bt_check_app_limited (tc);
1151 tcp_bt_track_tx (tc);
1152 tc->flags &= ~TCP_CONN_TRACK_BURST;
1155 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
1156 /* update_snd_nxt */ 1);
1158 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1159 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
1160 /* If not tracking an ACK, start tracking */
1161 if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
1163 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
1164 tc->rtt_seq = tc->snd_nxt;
1166 if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
1168 tcp_retransmit_timer_set (tc);
1171 tcp_trajectory_add_start (b, 3);
1176 tcp_send_ack (tcp_connection_t * tc)
1178 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1179 vlib_main_t *vm = wrk->vm;
1183 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1185 tcp_update_rcv_wnd (tc);
1188 b = vlib_get_buffer (vm, bi);
1189 tcp_init_buffer (vm, b);
1190 tcp_make_ack (tc, b);
1191 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1195 tcp_program_ack (tcp_connection_t * tc)
1197 if (!(tc->flags & TCP_CONN_SNDACK))
1199 session_add_self_custom_tx_evt (&tc->connection, 1);
1200 tc->flags |= TCP_CONN_SNDACK;
1205 tcp_program_dupack (tcp_connection_t * tc)
1207 if (!(tc->flags & TCP_CONN_SNDACK))
1209 session_add_self_custom_tx_evt (&tc->connection, 1);
1210 tc->flags |= TCP_CONN_SNDACK;
1212 if (tc->pending_dupacks < 255)
1213 tc->pending_dupacks += 1;
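/* Note (informal): both helpers above only queue a custom tx event; the
 * actual (dup)ACKs are built later by tcp_send_acks() when
 * tcp_session_custom_tx() runs, with pending_dupacks bounding how many
 * duplicate ACKs are generated. */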
1217 tcp_program_fastretransmit (tcp_connection_t * tc)
1219 if (!(tc->flags & TCP_CONN_FRXT_PENDING))
1221 session_add_self_custom_tx_evt (&tc->connection, 0);
1222 tc->flags |= TCP_CONN_FRXT_PENDING;
1227 * Delayed ack timer handler
1229 * Sends delayed ACK when timer expires
1232 tcp_timer_delack_handler (u32 index)
1234 u32 thread_index = vlib_get_thread_index ();
1235 tcp_connection_t *tc;
1237 tc = tcp_connection_get (index, thread_index);
1238 tc->timers[TCP_TIMER_DELACK] = TCP_TIMER_HANDLE_INVALID;
1243 * Send window update ACK,
1244 * ensuring it is sent only once when the receive window becomes non-zero
1245 * after a zero window was previously advertised in an ACK.
1248 tcp_send_window_update_ack (tcp_connection_t * tc)
1252 if (tcp_zero_rwnd_sent (tc))
1254 win = tcp_window_to_advertise (tc, tc->state);
1257 tcp_zero_rwnd_sent_off (tc);
1258 tcp_program_ack (tc);
1264 * Allocate a new buffer and build a new tcp segment
1266 * @param wrk tcp worker
1267 * @param tc connection for which the segment will be allocated
1268 * @param offset offset of the first byte in the tx fifo
1269 * @param max_deq_bytes segment size
1270 * @param[out] b pointer to buffer allocated
1272 * @return the number of bytes in the segment or 0 if buffer cannot be
1273 * allocated or no data available
1276 tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1277 u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
1279 u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
1280 vlib_main_t *vm = wrk->vm;
1285 seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;
1290 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1293 * Allocate and fill in buffer(s)
1296 /* Easy case, buffer size greater than mss */
1297 if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
1299 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1301 *b = vlib_get_buffer (vm, bi);
1302 data = tcp_init_buffer (vm, *b);
1303 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1305 ASSERT (n_bytes == max_deq_bytes);
1306 b[0]->current_length = n_bytes;
1307 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1308 /* burst */ 0, /* update_snd_nxt */ 0);
1310 /* Split mss into multiple buffers */
1313 u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
1314 u16 n_peeked, len_to_deq;
1315 vlib_buffer_t *chain_b, *prev_b;
1318 /* Make sure we have enough buffers */
1319 n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
1320 vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
1321 CLIB_CACHE_LINE_BYTES);
1322 n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
1323 if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
1326 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1330 *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
1331 data = tcp_init_buffer (vm, *b);
1332 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1334 TRANSPORT_MAX_HDRS_LEN);
1335 b[0]->current_length = n_bytes;
1336 b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1337 b[0]->total_length_not_including_first_buffer = 0;
1338 max_deq_bytes -= n_bytes;
1341 for (i = 1; i < n_bufs_per_seg; i++)
1344 len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
1345 chain_bi = wrk->tx_buffers[--n_bufs];
1346 chain_b = vlib_get_buffer (vm, chain_bi);
1347 chain_b->current_data = 0;
1348 data = vlib_buffer_get_current (chain_b);
1349 n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
1352 ASSERT (n_peeked == len_to_deq);
1353 n_bytes += n_peeked;
1354 chain_b->current_length = n_peeked;
1355 chain_b->next_buffer = 0;
1357 /* update previous buffer */
1358 prev_b->next_buffer = chain_bi;
1359 prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1361 max_deq_bytes -= n_peeked;
1362 b[0]->total_length_not_including_first_buffer += n_peeked;
1365 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1366 /* burst */ 0, /* update_snd_nxt */ 0);
1368 if (PREDICT_FALSE (n_bufs))
1370 clib_warning ("not all buffers consumed");
1371 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1375 ASSERT (n_bytes > 0);
1376 ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
1382 * Build a retransmit segment
1384 * @return the number of bytes in the segment or 0 if there's nothing to retransmit
1388 tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
1389 tcp_connection_t * tc, u32 offset,
1390 u32 max_deq_bytes, vlib_buffer_t ** b)
1392 u32 start, available_bytes;
1395 ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
1396 ASSERT (max_deq_bytes != 0);
1399 * Make sure we can retransmit something
1401 available_bytes = transport_max_tx_dequeue (&tc->connection);
1402 ASSERT (available_bytes >= offset);
1403 available_bytes -= offset;
1404 if (!available_bytes)
1407 max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
1408 max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
1410 /* Start is beyond snd_congestion */
1411 start = tc->snd_una + offset;
1412 if (seq_geq (start, tc->snd_congestion))
1415 /* Don't overshoot snd_congestion */
1416 if (seq_gt (start + max_deq_bytes, tc->snd_congestion))
1418 max_deq_bytes = tc->snd_congestion - start;
1419 if (max_deq_bytes == 0)
1423 n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
1427 if (tcp_in_fastrecovery (tc))
1429 tc->snd_rxt_bytes += n_bytes;
1430 if (tc->flags & TCP_CONN_RATE_SAMPLE)
1431 tcp_bt_track_rxt (tc, start, start + n_bytes);
1435 TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes);
1440 * Reset congestion control, switch cwnd to loss window and try again.
1443 tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
1445 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 6);
1446 tc->prev_ssthresh = tc->ssthresh;
1447 tc->prev_cwnd = tc->cwnd;
1449 /* Clear fast recovery state if needed */
1450 if (tcp_in_fastrecovery (tc))
1451 tcp_cc_fastrecovery_clear (tc);
1453 /* Let cc algo decide loss cwnd and ssthresh */
1456 /* Start again from the beginning */
1457 tc->snd_congestion = tc->snd_nxt;
1458 tc->rcv_dupacks = 0;
1460 tc->cwnd_acc_bytes = 0;
1461 tcp_connection_tx_pacer_reset (tc, tc->cwnd, 2 * tc->snd_mss);
1462 tcp_recovery_on (tc);
1466 tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
1468 u32 thread_index = vlib_get_thread_index ();
1469 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
1470 vlib_main_t *vm = wrk->vm;
1471 tcp_connection_t *tc;
1472 vlib_buffer_t *b = 0;
1477 tc = tcp_half_open_connection_get (index);
1478 /* Note: the connection may have transitioned to ESTABLISHED... */
1479 if (PREDICT_FALSE (tc == 0 || tc->state != TCP_STATE_SYN_SENT))
1481 tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID;
1485 tc = tcp_connection_get (index, thread_index);
1486 /* Note: the connection may have been closed and returned to the pool (pool_put) */
1487 if (PREDICT_FALSE (tc == 0 || tc->state == TCP_STATE_SYN_SENT))
1489 tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID;
1490 /* Wait-close and retransmit could pop at the same time */
1491 if (tc->state == TCP_STATE_CLOSED)
1495 if (tc->state >= TCP_STATE_ESTABLISHED)
1497 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
1499 /* Lost FIN, retransmit and return */
1500 if (tc->flags & TCP_CONN_FINSNT)
1504 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1508 /* Shouldn't be here. This condition is tricky because it has to take
1509 * into account boff > 0 due to persist timeout. */
1510 if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
1511 || (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
1512 && !tcp_flight_size (tc)))
1514 ASSERT (!tcp_in_recovery (tc));
1519 /* We're not in recovery so make sure rto_boff is 0. Can be non 0 due
1520 * to persist timer timeout */
1521 if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
1524 tcp_update_rto (tc);
1527 /* Peer is dead or network connectivity is lost. Close connection.
1528 * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
1529 * a min rto of 0.2s we need to retry about 8 times. */
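/* Informally: with a 0.2s rto that doubles on every timeout, the
 * cumulative wait after n attempts is roughly 0.2 * (2^n - 1) seconds,
 * which is on the order of 100s after 8-9 retries. */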
1530 if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
1532 tcp_send_reset (tc);
1533 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1534 session_transport_closing_notify (&tc->connection);
1535 tcp_connection_timers_reset (tc);
1536 tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME);
1540 /* Increment RTO backoff (also equal to number of retries) and go back
1541 * to first un-acked byte */
1544 /* TODO be less aggressive about clearing scoreboard */
1545 scoreboard_clear (&tc->sack_sb);
1547 /* First retransmit timeout */
1548 if (tc->rto_boff == 1)
1549 tcp_cc_init_rxt_timeout (tc);
1551 if (tc->flags & TCP_CONN_RATE_SAMPLE)
1552 tcp_bt_flush_samples (tc);
1554 /* If we've sent beyond snd_congestion, update it */
1555 tc->snd_congestion = seq_max (tc->snd_nxt, tc->snd_congestion);
1557 tc->snd_nxt = tc->snd_una;
1558 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1560 /* Send one segment. Note that n_bytes may be zero due to buffer shortage */
1562 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
1565 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
1569 bi = vlib_get_buffer_index (vm, b);
1571 /* For first retransmit, record timestamp (Eifel detection RFC3522) */
1572 if (tc->rto_boff == 1)
1573 tc->snd_rxt_ts = tcp_tstamp (tc);
1575 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1576 tcp_retransmit_timer_force_update (tc);
1578 /* Retransmit for SYN */
1579 else if (tc->state == TCP_STATE_SYN_SENT)
1581 /* Half-open connection actually moved to established but we were
1582 * waiting for syn retransmit to pop to call cleanup from the right thread. */
1584 if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
1586 if (tcp_half_open_connection_cleanup (tc))
1587 TCP_DBG ("could not remove half-open connection");
1591 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
1593 /* Try without increasing RTO a number of times. If this fails,
1594 * start growing RTO exponentially */
1596 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1597 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1599 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
1600 tc->rto * TCP_TO_TIMER_TICK);
1602 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1604 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
1608 b = vlib_get_buffer (vm, bi);
1609 tcp_init_buffer (vm, b);
1610 tcp_make_syn (tc, b);
1613 TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 0);
1615 /* This goes straight to ipx_lookup. Retransmit timer set already */
1616 tcp_push_ip_hdr (wrk, tc, b);
1617 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
1619 /* Retransmit SYN-ACK */
1620 else if (tc->state == TCP_STATE_SYN_RCVD)
1622 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
1625 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1626 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1629 tcp_retransmit_timer_force_update (tc);
1631 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1633 tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
1637 b = vlib_get_buffer (vm, bi);
1638 tcp_init_buffer (vm, b);
1639 tcp_make_synack (tc, b);
1640 TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 1);
1642 /* Retransmit timer already updated, just enqueue to output */
1643 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1647 ASSERT (tc->state == TCP_STATE_CLOSED);
1653 tcp_timer_retransmit_handler (u32 index)
1655 tcp_timer_retransmit_handler_i (index, 0);
1659 tcp_timer_retransmit_syn_handler (u32 index)
1661 tcp_timer_retransmit_handler_i (index, 1);
1665 * Got 0 snd_wnd from peer, try to do something about it.
1669 tcp_timer_persist_handler (u32 index)
1671 u32 thread_index = vlib_get_thread_index ();
1672 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
1673 u32 bi, max_snd_bytes, available_bytes, offset;
1674 tcp_main_t *tm = vnet_get_tcp_main ();
1675 vlib_main_t *vm = wrk->vm;
1676 tcp_connection_t *tc;
1681 tc = tcp_connection_get_if_valid (index, thread_index);
1685 /* Make sure timer handle is set to invalid */
1686 tc->timers[TCP_TIMER_PERSIST] = TCP_TIMER_HANDLE_INVALID;
1688 /* Problem already solved or worse */
1689 if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
1690 || (tc->flags & TCP_CONN_FINSNT))
1693 available_bytes = transport_max_tx_dequeue (&tc->connection);
1694 offset = tc->snd_nxt - tc->snd_una;
1696 /* Reprogram persist if no new bytes available to send; we may have data to send by the time the timer pops again */
1698 if (!available_bytes)
1700 tcp_persist_timer_set (tc);
1704 if (available_bytes <= offset)
1706 ASSERT (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT));
1710 /* Increment RTO backoff */
1712 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1715 * Try to force the first unsent segment (or buffer)
1717 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1719 tcp_persist_timer_set (tc);
1722 b = vlib_get_buffer (vm, bi);
1723 data = tcp_init_buffer (vm, b);
1725 tcp_validate_txf_size (tc, offset);
1726 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1727 max_snd_bytes = clib_min (tc->snd_mss,
1728 tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
1729 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1731 b->current_length = n_bytes;
1732 ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
1733 || tc->snd_nxt == tc->snd_una_max
1734 || tc->rto_boff > 1));
1736 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
1737 /* burst */ 0, /* update_snd_nxt */ 1);
1738 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1739 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
1740 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1742 /* Just sent new data, enable retransmit */
1743 tcp_retransmit_timer_update (tc);
1747 * Retransmit first unacked segment
1750 tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1752 vlib_main_t *vm = wrk->vm;
1756 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
1758 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
1762 bi = vlib_get_buffer_index (vm, b);
1763 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1769 tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1772 u32 offset, n_segs = 0, n_written, bi, available_wnd;
1773 vlib_main_t *vm = wrk->vm;
1774 vlib_buffer_t *b = 0;
1776 offset = tc->snd_nxt - tc->snd_una;
1777 available_wnd = tc->snd_wnd - offset;
1778 burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
1780 while (n_segs < burst_size)
1782 n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
1786 bi = vlib_get_buffer_index (vm, b);
1787 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1788 offset += n_written;
1791 tc->snd_nxt += n_written;
1792 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1799 #define scoreboard_rescue_rxt_valid(_sb, _tc) \
1800 (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
1801 && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
1804 * Do fast retransmit with SACKs
1807 tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1810 u32 n_written = 0, offset, max_bytes, n_segs = 0, n_segs_now;
1811 sack_scoreboard_hole_t *hole;
1812 vlib_main_t *vm = wrk->vm;
1813 vlib_buffer_t *b = 0;
1814 sack_scoreboard_t *sb;
1817 u8 snd_limited = 0, can_rescue = 0;
1819 ASSERT (tcp_in_fastrecovery (tc));
1821 snd_space = tcp_available_cc_snd_space (tc);
1822 if (snd_space < tc->snd_mss)
1824 tcp_program_fastretransmit (tc);
1828 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
1830 hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
1832 max_deq = transport_max_tx_dequeue (&tc->connection);
1833 max_deq -= tc->snd_nxt - tc->snd_una;
1835 while (snd_space > 0 && n_segs < burst_size)
1837 hole = scoreboard_next_rxt_hole (sb, hole, max_deq, &can_rescue,
1843 snd_space = clib_min (max_deq, snd_space);
1844 burst_size = clib_min (burst_size - n_segs,
1845 snd_space / tc->snd_mss);
1846 n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
1847 if (max_deq > n_segs_now * tc->snd_mss)
1848 tcp_program_fastretransmit (tc);
1849 n_segs += n_segs_now;
1853 if (!can_rescue || scoreboard_rescue_rxt_valid (sb, tc))
1856 /* If rescue rxt undefined or less than snd_una then one segment of
1857 * up to SMSS octets that MUST include the highest outstanding
1858 * unSACKed sequence number SHOULD be returned, and RescueRxt set to
1859 * RecoveryPoint. HighRxt MUST NOT be updated.
1861 max_bytes = clib_min (tc->snd_mss,
1862 tc->snd_congestion - tc->snd_una);
1863 max_bytes = clib_min (max_bytes, snd_space);
1864 offset = tc->snd_congestion - tc->snd_una - max_bytes;
1865 sb->rescue_rxt = tc->snd_congestion;
1866 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
1871 bi = vlib_get_buffer_index (vm, b);
1872 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1877 max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
1878 max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
1882 offset = sb->high_rxt - tc->snd_una;
1883 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1885 ASSERT (n_written <= snd_space);
1887 /* Nothing left to retransmit */
1891 bi = vlib_get_buffer_index (vm, b);
1892 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1894 sb->high_rxt += n_written;
1895 snd_space -= n_written;
1900 tcp_program_fastretransmit (tc);
1907 * Fast retransmit without SACK info
1910 tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1913 u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now;
1914 vlib_main_t *vm = wrk->vm;
1915 int snd_space, n_segs = 0;
1918 ASSERT (tcp_in_fastrecovery (tc));
1919 TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
1921 snd_space = tcp_available_cc_snd_space (tc);
1923 if (!tcp_fastrecovery_first (tc))
1926 /* RFC 6582: [If a partial ack], retransmit the first unacknowledged segment. */
1928 while (snd_space > 0 && n_segs < burst_size)
1930 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
1933 /* Nothing left to retransmit */
1937 bi = vlib_get_buffer_index (vm, b);
1938 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1939 snd_space -= n_written;
1940 offset += n_written;
1944 if (n_segs == burst_size)
1949 /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
1950 if (snd_space < tc->snd_mss || tc->snd_mss == 0)
1953 max_deq = transport_max_tx_dequeue (&tc->connection);
1954 max_deq -= tc->snd_nxt - tc->snd_una;
1957 snd_space = clib_min (max_deq, snd_space);
1958 burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
1959 n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size);
1960 if (max_deq > n_segs_now * tc->snd_mss)
1961 tcp_program_fastretransmit (tc);
1962 n_segs += n_segs_now;
1966 tcp_fastrecovery_first_off (tc);
1971 * Do fast retransmit
1974 tcp_fast_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1977 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1978 return tcp_fast_retransmit_sack (wrk, tc, burst_size);
1980 return tcp_fast_retransmit_no_sack (wrk, tc, burst_size);
1984 tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
1988 if (!tc->pending_dupacks)
1994 /* If we're supposed to send dupacks but have no ooo data
1995 * send only one ack */
1996 if (!vec_len (tc->snd_sacks))
2002 /* Start with first sack block */
2003 tc->snd_sack_pos = 0;
2005 /* Generate enough dupacks to cover all sack blocks. Do not generate
2006 * more sacks than the number of packets received. But do generate at
2007 * least 3, i.e., the number needed to signal congestion.
2008 n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
2009 n_acks = clib_min (n_acks, tc->pending_dupacks);
2010 n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
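/* Example (informal): with 7 sack blocks and 5 pending dupacks,
 * n_acks = 7 / 3 = 2, stays 2 after the min with pending_dupacks, and is
 * then raised to 3 by the max, so three ACKs are generated. */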
2011 for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
2014 if (n_acks < max_burst_size)
2016 tc->pending_dupacks = 0;
2017 tc->snd_sack_pos = 0;
2022 TCP_DBG ("constrained by burst size");
2023 tc->pending_dupacks = n_acks - max_burst_size;
2024 tcp_program_dupack (tc);
2025 return max_burst_size;
2030 tcp_do_fastretransmit (tcp_connection_t * tc, u32 max_burst_size)
2032 u32 n_segs = 0, burst_size, sent_bytes, burst_bytes;
2033 tcp_worker_ctx_t *wrk;
2035 wrk = tcp_get_worker (tc->c_thread_index);
2036 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection,
2038 clib_time.last_cpu_time);
2039 burst_size = clib_min (max_burst_size, burst_bytes / tc->snd_mss);
2042 tcp_program_fastretransmit (tc);
2046 n_segs = tcp_fast_retransmit (wrk, tc, burst_size);
2047 sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
2048 transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
2053 tcp_session_custom_tx (void *conn, u32 max_burst_size)
2055 tcp_connection_t *tc = (tcp_connection_t *) conn;
2058 if (tcp_in_fastrecovery (tc) && (tc->flags & TCP_CONN_FRXT_PENDING))
2060 tc->flags &= ~TCP_CONN_FRXT_PENDING;
2061 n_segs = tcp_do_fastretransmit (tc, max_burst_size);
2062 max_burst_size -= n_segs;
2065 if (!(tc->flags & TCP_CONN_SNDACK))
2068 tc->flags &= ~TCP_CONN_SNDACK;
2070 /* We have retransmitted packets and no dupack */
2071 if (n_segs && !tc->pending_dupacks)
2074 if (!max_burst_size)
2076 tcp_program_ack (tc);
2077 return max_burst_size;
2080 n_segs += tcp_send_acks (tc, max_burst_size);
2084 #endif /* CLIB_MARCH_VARIANT */
2087 tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
2088 u16 * next0, u32 * error0)
2090 ip_adjacency_t *adj;
2093 /* Not thread safe but as long as the connection exists the adj should not be removed */
2095 ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
2097 if (ai == ADJ_INDEX_INVALID)
2099 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2100 *next0 = TCP_OUTPUT_NEXT_DROP;
2101 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2106 if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
2107 *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
2108 else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
2109 *next0 = TCP_OUTPUT_NEXT_IP_ARP;
2112 *next0 = TCP_OUTPUT_NEXT_DROP;
2113 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2115 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
2119 tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2120 u32 * to_next, u32 n_bufs)
2122 u32 n_trace = vlib_get_trace_count (vm, node);
2123 tcp_connection_t *tc;
2129 for (i = 0; i < clib_min (n_trace, n_bufs); i++)
2131 b = vlib_get_buffer (vm, to_next[i]);
2132 th = vlib_buffer_get_current (b);
2133 tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2135 t = vlib_add_trace (vm, node, b, sizeof (*t));
2136 clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
2137 clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
2142 tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
2143 tcp_connection_t * tc0, u8 is_ip4)
2145 tcp_header_t *th0 = 0;
2147 th0 = vlib_buffer_get_current (b0);
2148 TCP_EVT_DBG (TCP_EVT_OUTPUT, tc0, th0->flags, b0->current_length);
2151 vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
2152 IP_PROTOCOL_TCP, 1);
2153 b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
2154 vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data;
2160 ih0 = vlib_buffer_push_ip6 (vm, b0, &tc0->c_lcl_ip6,
2161 &tc0->c_rmt_ip6, IP_PROTOCOL_TCP);
2162 b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
2163 vnet_buffer (b0)->l3_hdr_offset = (u8 *) ih0 - b0->data;
2164 vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data;
2170 tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
2171 u32 * error0, u16 * next0, u8 is_ip4)
2174 if (PREDICT_FALSE (tc0->state == TCP_STATE_CLOSED))
2176 *error0 = TCP_ERROR_INVALID_CONNECTION;
2177 *next0 = TCP_OUTPUT_NEXT_DROP;
2181 /* If next_index is not drop use it */
2182 if (tc0->next_node_index)
2184 *next0 = tc0->next_node_index;
2185 vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
2188 vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
2189 vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
2193 if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
2194 tcp_output_handle_link_local (tc0, b0, next0, error0);
2197 if (!TCP_ALWAYS_ACK)
2198 tcp_timer_reset (tc0, TCP_TIMER_DELACK);
2202 tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2203 vlib_frame_t * frame, int is_ip4)
2205 u32 n_left_from, *from, thread_index = vm->thread_index;
2206 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2207 u16 nexts[VLIB_FRAME_SIZE], *next;
2209 from = vlib_frame_vector_args (frame);
2210 n_left_from = frame->n_vectors;
2211 tcp_set_time_now (tcp_get_worker (thread_index));
2213 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2214 tcp46_output_trace_frame (vm, node, from, n_left_from);
2216 vlib_get_buffers (vm, from, bufs, n_left_from);
2220 while (n_left_from >= 4)
2222 u32 error0 = TCP_ERROR_PKTS_SENT, error1 = TCP_ERROR_PKTS_SENT;
2223 tcp_connection_t *tc0, *tc1;
2226 vlib_prefetch_buffer_header (b[2], STORE);
2227 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2229 vlib_prefetch_buffer_header (b[3], STORE);
2230 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
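/* Note (informal): buffers are processed two at a time while the next
 * pair's headers and data are prefetched, so the pushes below mostly hit
 * warm cache lines. */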
2233 next[0] = next[1] = TCP_OUTPUT_NEXT_IP_LOOKUP;
2235 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2237 tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
2240 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2241 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2243 tcp_output_handle_packet (tc0, b[0], &error0, &next[0], is_ip4);
2244 tcp_output_handle_packet (tc1, b[1], &error1, &next[1], is_ip4);
2250 while (n_left_from > 0)
2252 u32 error0 = TCP_ERROR_PKTS_SENT;
2253 tcp_connection_t *tc0;
2255 if (n_left_from > 1)
2257 vlib_prefetch_buffer_header (b[1], STORE);
2258 CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2261 next[0] = TCP_OUTPUT_NEXT_IP_LOOKUP;
2262 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2265 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2266 tcp_output_handle_packet (tc0, b[0], &error0, &next[0], is_ip4);
2273 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2274 return frame->n_vectors;
2277 VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2278 vlib_frame_t * from_frame)
2280 return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2283 VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2284 vlib_frame_t * from_frame)
2286 return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2290 VLIB_REGISTER_NODE (tcp4_output_node) =
2292 .name = "tcp4-output",
2293 /* Takes a vector of packets. */
2294 .vector_size = sizeof (u32),
2295 .n_errors = TCP_N_ERROR,
2296 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2297 .error_strings = tcp_error_strings,
2298 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2300 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2301 foreach_tcp4_output_next
2304 .format_buffer = format_tcp_header,
2305 .format_trace = format_tcp_tx_trace,
2310 VLIB_REGISTER_NODE (tcp6_output_node) =
2312 .name = "tcp6-output",
2313 /* Takes a vector of packets. */
2314 .vector_size = sizeof (u32),
2315 .n_errors = TCP_N_ERROR,
2316 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2317 .error_strings = tcp_error_strings,
2318 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2320 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2321 foreach_tcp6_output_next
2324 .format_buffer = format_tcp_header,
2325 .format_trace = format_tcp_tx_trace,
2329 typedef enum _tcp_reset_next
2331 TCP_RESET_NEXT_DROP,
2332 TCP_RESET_NEXT_IP_LOOKUP,
2336 #define foreach_tcp4_reset_next \
2337 _(DROP, "error-drop") \
2338 _(IP_LOOKUP, "ip4-lookup")
2340 #define foreach_tcp6_reset_next \
2341 _(DROP, "error-drop") \
2342 _(IP_LOOKUP, "ip6-lookup")
2345 tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2346 vlib_frame_t * from_frame, u8 is_ip4)
2348 u32 n_left_from, next_index, *from, *to_next;
2349 u32 my_thread_index = vm->thread_index;
2351 from = vlib_frame_vector_args (from_frame);
2352 n_left_from = from_frame->n_vectors;
2354 next_index = node->cached_next_index;
2356 while (n_left_from > 0)
2360 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2362 while (n_left_from > 0 && n_left_to_next > 0)
2368 u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
2375 n_left_to_next -= 1;
2377 b0 = vlib_get_buffer (vm, bi0);
2379 if (tcp_make_reset_in_place (vm, b0, vnet_buffer (b0)->tcp.flags,
2380 my_thread_index, is_ip4))
2382 error0 = TCP_ERROR_LOOKUP_DROPS;
2383 next0 = TCP_RESET_NEXT_DROP;
2387 /* Prepare to send to IP lookup */
2388 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2389 next0 = TCP_RESET_NEXT_IP_LOOKUP;
2392 b0->error = node->errors[error0];
2393 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
2394 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2396 th0 = vlib_buffer_get_current (b0);
2398 th0 = ip4_next_header ((ip4_header_t *) th0);
2400 th0 = ip6_next_header ((ip6_header_t *) th0);
2401 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2402 clib_memcpy_fast (&t0->tcp_header, th0,
2403 sizeof (t0->tcp_header));
2406 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2407 n_left_to_next, bi0, next0);
2409 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2411 return from_frame->n_vectors;
2414 VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2415 vlib_frame_t * from_frame)
2417 return tcp46_send_reset_inline (vm, node, from_frame, 1);
2420 VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2421 vlib_frame_t * from_frame)
2423 return tcp46_send_reset_inline (vm, node, from_frame, 0);
2427 VLIB_REGISTER_NODE (tcp4_reset_node) = {
2428 .name = "tcp4-reset",
2429 .vector_size = sizeof (u32),
2430 .n_errors = TCP_N_ERROR,
2431 .error_strings = tcp_error_strings,
2432 .n_next_nodes = TCP_RESET_N_NEXT,
2434 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2435 foreach_tcp4_reset_next
2438 .format_trace = format_tcp_tx_trace,
2443 VLIB_REGISTER_NODE (tcp6_reset_node) = {
2444 .name = "tcp6-reset",
2445 .vector_size = sizeof (u32),
2446 .n_errors = TCP_N_ERROR,
2447 .error_strings = tcp_error_strings,
2448 .n_next_nodes = TCP_RESET_N_NEXT,
2450 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2451 foreach_tcp6_reset_next
2454 .format_trace = format_tcp_tx_trace,
2459 * fd.io coding-style-patch-verification: ON
2462 * eval: (c-set-style "gnu")