/*
 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/tcp/tcp.h>
#include <math.h>
typedef enum _tcp_output_next
{
  TCP_OUTPUT_NEXT_DROP,
  TCP_OUTPUT_NEXT_IP_LOOKUP,
  TCP_OUTPUT_NEXT_IP_REWRITE,
  TCP_OUTPUT_NEXT_IP_ARP,
  TCP_OUTPUT_N_NEXT
} tcp_output_next_t;
#define foreach_tcp4_output_next		\
  _ (DROP, "error-drop")			\
  _ (IP_LOOKUP, "ip4-lookup")			\
  _ (IP_REWRITE, "ip4-rewrite")			\
  _ (IP_ARP, "ip4-arp")

#define foreach_tcp6_output_next		\
  _ (DROP, "error-drop")			\
  _ (IP_LOOKUP, "ip6-lookup")			\
  _ (IP_REWRITE, "ip6-rewrite")			\
  _ (IP_ARP, "ip6-discover-neighbor")
static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};
typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_tx_trace_t;
static u8 *
format_tcp_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
  tcp_connection_t *tc = &t->tcp_connection;
  u32 indent = format_get_indent (s);

  s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
	      format_tcp_state, tc->state, format_white_space, indent,
	      format_tcp_header, &t->tcp_header, 128);

  return s;
}
#ifndef CLIB_MARCH_VARIANT
static u8
tcp_window_compute_scale (u32 window)
{
  u8 wnd_scale = 0;
  while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
    wnd_scale++;
  return wnd_scale;
}
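/* Worked example (illustrative, not from the original source): assuming
 * TCP_WND_MAX is 65535, a 4MB rx fifo (4194304 bytes) needs a scale of 7,
 * since 4194304 >> 6 = 65536 > 65535 but 4194304 >> 7 = 32768 <= 65535.
 * The peer left-shifts the advertised 16-bit window by 7 to recover up to
 * ~4MB of usable window. */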
/**
 * TCP's initial window
 */
always_inline u32
tcp_initial_wnd_unscaled (tcp_connection_t * tc)
{
  /* RFC 6928 recommends a larger initial window, i.e., something on the
   * order of TCP_IW_N_SEGMENTS * tc->mss. However, at the time our
   * connections are initialized, fifos may not be allocated. Therefore,
   * advertise the smallest possible unscaled window size and update once
   * fifos are assigned to the session.
   */
  tcp_update_rcv_mss (tc);
  return tcp_cfg.min_rx_fifo;
}
/**
 * Compute initial window and scale factor. As per RFC1323, the window field
 * in SYN and SYN-ACK segments is never scaled.
 */
always_inline u32
tcp_initial_window_to_advertise (tcp_connection_t * tc)
{
  /* Compute rcv wscale only if peer advertised support for it */
  if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
    tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);

  tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);

  return clib_min (tc->rcv_wnd, TCP_WND_MAX);
}
void
tcp_update_rcv_wnd (tcp_connection_t * tc)
{
  u32 available_space, wnd;
  i32 observed_wnd;

  ASSERT (tc->rcv_opts.mss < transport_rx_fifo_size (&tc->connection));

  /*
   * Figure out how much space we have available
   */
  available_space = transport_max_rx_enqueue (&tc->connection);
  if (PREDICT_FALSE (available_space < tc->rcv_opts.mss))
    {
      tc->rcv_wnd = 0;
      return;
    }

  /*
   * Use the above and what we know about what we've previously advertised
   * to compute the new window
   */
  observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);

  /* Bad. Thou shalt not shrink */
  if (PREDICT_FALSE ((i32) available_space < observed_wnd))
    {
      wnd = clib_max (observed_wnd, 0);
      TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
    }
  else
    {
      wnd = available_space;
    }

  /* Make sure we have a multiple of rcv_wscale */
  if (wnd && tc->rcv_wscale)
    {
      wnd &= ~((1 << tc->rcv_wscale) - 1);
      if (wnd == 0)
	wnd = 1 << tc->rcv_wscale;
    }

  tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
}
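/* Worked example (illustrative, not from the original source): if we last
 * advertised rcv_wnd = 20000 and 5000 bytes arrived since then
 * (rcv_nxt - rcv_las = 5000), the peer may still legitimately send
 * observed_wnd = 20000 - 5000 = 15000 bytes. If the app drained slowly and
 * available_space dropped to 10000, we keep advertising 15000 rather than
 * move the window's right edge left, which RFC 793 strongly discourages. */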
/**
 * Compute and return window to advertise, scaled as per RFC1323
 */
static u32
tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
{
  if (state < TCP_STATE_ESTABLISHED)
    return tcp_initial_window_to_advertise (tc);

  tcp_update_rcv_wnd (tc);
  return tc->rcv_wnd >> tc->rcv_wscale;
}
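/* Illustrative: once established, a connection with rcv_wnd = 256000 and
 * rcv_wscale = 3 puts 256000 >> 3 = 32000 into the 16-bit header field;
 * in SYN/SYN-ACK segments the field carries the unscaled value instead. */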
/**
 * Write TCP options to segment.
 */
static u32
tcp_options_write (u8 * data, tcp_options_t * opts)
{
  u32 opts_len = 0;
  u32 buf, seq_len = 4;

  if (tcp_opts_mss (opts))
    {
      *data++ = TCP_OPTION_MSS;
      *data++ = TCP_OPTION_LEN_MSS;
      buf = clib_host_to_net_u16 (opts->mss);
      clib_memcpy_fast (data, &buf, sizeof (opts->mss));
      data += sizeof (opts->mss);
      opts_len += TCP_OPTION_LEN_MSS;
    }

  if (tcp_opts_wscale (opts))
    {
      *data++ = TCP_OPTION_WINDOW_SCALE;
      *data++ = TCP_OPTION_LEN_WINDOW_SCALE;
      *data++ = opts->wscale;
      opts_len += TCP_OPTION_LEN_WINDOW_SCALE;
    }

  if (tcp_opts_sack_permitted (opts))
    {
      *data++ = TCP_OPTION_SACK_PERMITTED;
      *data++ = TCP_OPTION_LEN_SACK_PERMITTED;
      opts_len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  if (tcp_opts_tstamp (opts))
    {
      *data++ = TCP_OPTION_TIMESTAMP;
      *data++ = TCP_OPTION_LEN_TIMESTAMP;
      buf = clib_host_to_net_u32 (opts->tsval);
      clib_memcpy_fast (data, &buf, sizeof (opts->tsval));
      data += sizeof (opts->tsval);
      buf = clib_host_to_net_u32 (opts->tsecr);
      clib_memcpy_fast (data, &buf, sizeof (opts->tsecr));
      data += sizeof (opts->tsecr);
      opts_len += TCP_OPTION_LEN_TIMESTAMP;
    }

  if (tcp_opts_sack (opts))
    {
      int i;

      if (opts->n_sack_blocks != 0)
	{
	  *data++ = TCP_OPTION_SACK_BLOCK;
	  *data++ = 2 + opts->n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
	  for (i = 0; i < opts->n_sack_blocks; i++)
	    {
	      buf = clib_host_to_net_u32 (opts->sacks[i].start);
	      clib_memcpy_fast (data, &buf, seq_len);
	      data += seq_len;
	      buf = clib_host_to_net_u32 (opts->sacks[i].end);
	      clib_memcpy_fast (data, &buf, seq_len);
	      data += seq_len;
	    }
	  opts_len += 2 + opts->n_sack_blocks * TCP_OPTION_LEN_SACK_BLOCK;
	}
    }

  /* Terminate TCP options */
  if (opts_len % 4)
    {
      *data++ = TCP_OPTION_EOL;
      opts_len += TCP_OPTION_LEN_EOL;
    }

  /* Pad with zeroes to a u32 boundary */
  while (opts_len % 4)
    {
      *data++ = TCP_OPTION_NOOP;
      opts_len += TCP_OPTION_LEN_NOOP;
    }
  return opts_len;
}
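/* Illustrative wire layout produced above for a SYN carrying MSS, window
 * scale, SACK-permitted and timestamps (19 bytes of options):
 *
 *   02 04 mm mm          kind=2  len=4   MSS
 *   03 03 ws             kind=3  len=3   window scale
 *   04 02                kind=4  len=2   SACK permitted
 *   08 0a tv x4 te x4    kind=8  len=10  timestamps
 *   00                   kind=0  EOL, pads 19 -> 20 bytes (u32 aligned)
 */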
always_inline int
tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = tc->mss;
  len += TCP_OPTION_LEN_MSS;

  opts->flags |= TCP_OPTS_FLAG_WSCALE;
  opts->wscale = tc->rcv_wscale;
  len += TCP_OPTION_LEN_WINDOW_SCALE;

  opts->flags |= TCP_OPTS_FLAG_TSTAMP;
  opts->tsval = tcp_time_now ();
  opts->tsecr = 0;
  len += TCP_OPTION_LEN_TIMESTAMP;

  if (TCP_USE_SACKS)
    {
      opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
      len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
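/* The alignment term is a no-op when len is already a multiple of
 * TCP_OPTS_ALIGN (4). Worked example (illustrative): for the full SYN
 * option set, len = 4 + 3 + 10 + 2 = 19 and 19 + (4 - 19 % 4) % 4 = 20. */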
always_inline int
tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = tc->mss;
  len += TCP_OPTION_LEN_MSS;

  if (tcp_opts_wscale (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_WSCALE;
      opts->wscale = tc->rcv_wscale;
      len += TCP_OPTION_LEN_WINDOW_SCALE;
    }

  if (tcp_opts_tstamp (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_time_now ();
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
      len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
always_inline int
tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags = 0;

  if (tcp_opts_tstamp (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_tstamp (tc);
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      if (vec_len (tc->snd_sacks))
	{
	  opts->flags |= TCP_OPTS_FLAG_SACK;
	  if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
	    tc->snd_sack_pos = 0;
	  opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
	  opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
	  opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
					  TCP_OPTS_MAX_SACK_BLOCKS);
	  tc->snd_sack_pos += opts->n_sack_blocks;
	  len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
	}
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
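/* Note (illustrative, assuming TCP_OPTS_MAX_SACK_BLOCKS is 3): with
 * timestamps in use, at most 3 SACK blocks fit, since 40 bytes of option
 * space minus 12 for timestamps plus padding leaves 28, and
 * 2 + 3 * TCP_OPTION_LEN_SACK_BLOCK (8) = 26 <= 28 while 4 blocks would
 * need 34. Remaining blocks are rotated into subsequent (dup)acks via
 * snd_sack_pos. */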
always_inline int
tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
		  tcp_state_t state)
{
  switch (state)
    {
    case TCP_STATE_ESTABLISHED:
    case TCP_STATE_CLOSE_WAIT:
    case TCP_STATE_FIN_WAIT_1:
    case TCP_STATE_LAST_ACK:
    case TCP_STATE_CLOSING:
    case TCP_STATE_FIN_WAIT_2:
    case TCP_STATE_TIME_WAIT:
    case TCP_STATE_CLOSED:
      return tcp_make_established_options (tc, opts);
    case TCP_STATE_SYN_RCVD:
      return tcp_make_synack_options (tc, opts);
    case TCP_STATE_SYN_SENT:
      return tcp_make_syn_options (tc, opts);
    default:
      clib_warning ("State not handled! %d", state);
      return 0;
    }
}
/**
 * Update burst send vars
 *
 * - Updates snd_mss to reflect the effective segment size that we can send
 *   by taking into account all TCP options, including SACKs.
 * - Caches 'on the wire' options for reuse
 * - Updates receive window, which can be reused for a burst.
 *
 * This should *only* be called when doing bursts
 */
void
tcp_update_burst_snd_vars (tcp_connection_t * tc)
{
  tcp_main_t *tm = &tcp_main;

  /* Compute options to be used for connection. These may be reused when
   * sending data or to compute the effective mss (snd_mss) */
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
				       TCP_STATE_ESTABLISHED);

  /* XXX check if MTU has been updated */
  tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
  ASSERT (tc->snd_mss > 0);

  tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
		     &tc->snd_opts);

  tcp_update_rcv_wnd (tc);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_check_app_limited (tc);

  if (tc->snd_una == tc->snd_nxt)
    {
      tcp_cc_event (tc, TCP_CC_EVT_START_TX);
      tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST);
    }
}
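/* Illustrative: for a connection negotiated at mss = 1460 with timestamps
 * on, snd_opts_len is 12 (10 bytes plus padding), so the effective
 * snd_mss = 1460 - 12 = 1448 bytes of payload per segment for the burst. */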
#endif /* CLIB_MARCH_VARIANT */
always_inline void *
tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    vlib_buffer_free_one (vm, b->next_buffer);
  /* Zero all flags but free list index and trace flag */
  b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
  b->current_data = 0;
  b->current_length = 0;
  b->total_length_not_including_first_buffer = 0;
  vnet_buffer (b)->tcp.flags = 0;

  /* Leave enough space for headers */
  return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}
#ifndef CLIB_MARCH_VARIANT
always_inline void *
tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->total_length_not_including_first_buffer = 0;
  b->current_data = 0;
  vnet_buffer (b)->tcp.flags = 0;
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
  /* Leave enough space for headers */
  return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}
/* Compute TCP checksum in software when offloading is disabled for a
 * connection */
u16
ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
				 ip46_address_t * src, ip46_address_t * dst)
{
  ip_csum_t sum0;
  u16 payload_length_host_byte_order;
  u32 i;

  /* Initialize checksum with ip header. */
  sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) +
    clib_host_to_net_u16 (IP_PROTOCOL_TCP);
  payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);

  for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++)
    {
      sum0 = ip_csum_with_carry
	(sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword));
      sum0 = ip_csum_with_carry
	(sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword));
    }

  return ip_calculate_l4_checksum (vm, p0, sum0,
				   payload_length_host_byte_order, NULL, 0,
				   NULL);
}

u16
ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
				 ip46_address_t * src, ip46_address_t * dst)
{
  ip_csum_t sum0;
  u32 payload_length_host_byte_order;

  payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
  sum0 =
    clib_host_to_net_u32 (payload_length_host_byte_order +
			  (IP_PROTOCOL_TCP << 16));

  sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32));
  sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32));

  return ip_calculate_l4_checksum (vm, p0, sum0,
				   payload_length_host_byte_order, NULL, 0,
				   NULL);
}

static inline u16
tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u16 checksum = 0;
  if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
    {
      tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
      vlib_main_t *vm = wrk->vm;

      if (tc->c_is_ip4)
	checksum = ip4_tcp_compute_checksum_custom
	  (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
      else
	checksum = ip6_tcp_compute_checksum_custom
	  (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
    }
  else
    {
      b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
    }
  return checksum;
}
/**
 * Prepare ACK
 */
static inline void
tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
		u8 flags)
{
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 wnd;

  wnd = tcp_window_to_advertise (tc, state);

  /* Make and write options */
  tcp_opts_len = tcp_make_established_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
			     tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);

  tcp_options_write ((u8 *) (th + 1), snd_opts);

  th->checksum = tcp_compute_checksum (tc, b);

  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

  if (wnd == 0)
    tcp_zero_rwnd_sent_on (tc);
  else
    tcp_zero_rwnd_sent_off (tc);
}
/**
 * Convert buffer to ACK
 */
static inline void
tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
  TCP_EVT (TCP_EVT_ACK_SENT, tc);
  tc->rcv_las = tc->rcv_nxt;
}

/**
 * Convert buffer to FIN-ACK
 */
static void
tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
}
/**
 * Convert buffer to SYN
 */
void
tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u8 tcp_hdr_opts_len, tcp_opts_len;
  tcp_header_t *th;
  u16 initial_wnd;
  tcp_options_t snd_opts;

  initial_wnd = tcp_initial_window_to_advertise (tc);

  /* Make and write options */
  clib_memset (&snd_opts, 0, sizeof (snd_opts));
  tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
			     tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
			     initial_wnd);
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  tcp_options_write ((u8 *) (th + 1), &snd_opts);
  th->checksum = tcp_compute_checksum (tc, b);
}
/**
 * Convert buffer to SYN-ACK
 */
void
tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 initial_wnd;

  clib_memset (snd_opts, 0, sizeof (*snd_opts));
  initial_wnd = tcp_initial_window_to_advertise (tc);
  tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
			     tc->rcv_nxt, tcp_hdr_opts_len,
			     TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
  tcp_options_write ((u8 *) (th + 1), snd_opts);

  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  th->checksum = tcp_compute_checksum (tc, b);
}
always_inline void
tcp_enqueue_to_ip_lookup_i (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
			    u8 is_ip4, u32 fib_index, u8 flush)
{
  vlib_main_t *vm = wrk->vm;
  u32 *to_next, next_index;
  vlib_frame_t *f;

  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->error = 0;

  vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;

  /* Send to IP lookup */
  next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
  tcp_trajectory_add_start (b, 1);

  f = wrk->ip_lookup_tx_frames[!is_ip4];
  if (!f)
    {
      f = vlib_get_frame_to_node (vm, next_index);
      ASSERT (f);
      wrk->ip_lookup_tx_frames[!is_ip4] = f;
    }

  to_next = vlib_frame_vector_args (f);
  to_next[f->n_vectors] = bi;
  f->n_vectors += 1;

  if (flush || f->n_vectors == VLIB_FRAME_SIZE)
    {
      vlib_put_frame_to_node (vm, next_index, f);
      wrk->ip_lookup_tx_frames[!is_ip4] = 0;
    }
}

static void
tcp_enqueue_to_ip_lookup_now (tcp_worker_ctx_t * wrk, vlib_buffer_t * b,
			      u32 bi, u8 is_ip4, u32 fib_index)
{
  tcp_enqueue_to_ip_lookup_i (wrk, b, bi, is_ip4, fib_index, 1);
}

static void
tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
			  u8 is_ip4, u32 fib_index)
{
  tcp_enqueue_to_ip_lookup_i (wrk, b, bi, is_ip4, fib_index, 0);
  if (wrk->vm->thread_index == 0 && vlib_num_workers ())
    session_flush_frames_main_thread (wrk->vm);
}

static void
tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
		       u8 is_ip4)
{
  session_type_t st;

  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->error = 0;

  st = session_type_from_proto_and_ip (TRANSPORT_PROTO_TCP, is_ip4);
  session_add_pending_tx_buffer (st, wrk->vm->thread_index, bi);
}

#endif /* CLIB_MARCH_VARIANT */
static void
tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b, u8 is_ip4)
{
  ip4_header_t *ih4;
  ip6_header_t *ih6;
  tcp_header_t *th;
  ip4_address_t src_ip4, dst_ip4;
  ip6_address_t src_ip6, dst_ip6;
  u16 src_port, dst_port;
  u32 tmp, len, seq, ack;
  u8 flags;

  /* Find IP and TCP headers */
  th = tcp_buffer_hdr (b);

  /* Save src and dst ip */
  if (is_ip4)
    {
      ih4 = vlib_buffer_get_current (b);
      ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
      src_ip4.as_u32 = ih4->src_address.as_u32;
      dst_ip4.as_u32 = ih4->dst_address.as_u32;
    }
  else
    {
      ih6 = vlib_buffer_get_current (b);
      ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
      clib_memcpy_fast (&src_ip6, &ih6->src_address, sizeof (ip6_address_t));
      clib_memcpy_fast (&dst_ip6, &ih6->dst_address, sizeof (ip6_address_t));
    }

  src_port = th->src_port;
  dst_port = th->dst_port;
  flags = TCP_FLAG_RST;

  /*
   * RFC 793. If the ACK bit is off, sequence number zero is used,
   *   <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
   * If the ACK bit is on,
   *   <SEQ=SEG.ACK><CTL=RST>
   */
  if (tcp_ack (th))
    {
      seq = th->ack_number;
      ack = 0;
    }
  else
    {
      flags |= TCP_FLAG_ACK;
      tmp = clib_net_to_host_u32 (th->seq_number);
      len = vnet_buffer (b)->tcp.data_len + tcp_is_syn (th) + tcp_is_fin (th);
      ack = clib_host_to_net_u32 (tmp + len);
      seq = 0;
    }

  tcp_reuse_buffer (vm, b);
  tcp_trajectory_add_start (b, 4);
  th = vlib_buffer_push_tcp_net_order (b, dst_port, src_port, seq, ack,
				       sizeof (tcp_header_t), flags, 0);

  if (is_ip4)
    {
      ih4 = vlib_buffer_push_ip4 (vm, b, &dst_ip4, &src_ip4,
				  IP_PROTOCOL_TCP, 1);
      th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
    }
  else
    {
      int bogus = ~0;
      ih6 = vlib_buffer_push_ip6 (vm, b, &dst_ip6, &src_ip6, IP_PROTOCOL_TCP);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
      ASSERT (!bogus);
    }
}
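/* Worked example (illustrative): for an incoming SYN with SEQ=1000 and no
 * ACK bit, the RST goes out as <SEQ=0><ACK=1001><CTL=RST,ACK>, since the
 * SYN counts as one octet of sequence space; for a stray segment with the
 * ACK bit on and ACK=5000, it goes out as <SEQ=5000><CTL=RST>. */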
#ifndef CLIB_MARCH_VARIANT
/**
 *  Send reset without reusing existing buffer
 *
 *  It extracts connection info out of the original packet
 */
void
tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
		      u32 thread_index, u8 is_ip4)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi, sw_if_index, fib_index;
  u8 tcp_hdr_len, flags = 0;
  tcp_header_t *th, *pkt_th;
  u32 seq, ack;
  ip4_header_t *ih4, *pkt_ih4;
  ip6_header_t *ih6, *pkt_ih6;
  fib_protocol_t fib_proto;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    return;

  b = vlib_get_buffer (vm, bi);
  sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
  fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
  fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
  tcp_init_buffer (vm, b);

  /* Make and write options */
  tcp_hdr_len = sizeof (tcp_header_t);

  if (is_ip4)
    {
      pkt_ih4 = vlib_buffer_get_current (pkt);
      pkt_th = ip4_next_header (pkt_ih4);
    }
  else
    {
      pkt_ih6 = vlib_buffer_get_current (pkt);
      pkt_th = ip6_next_header (pkt_ih6);
    }

  if (tcp_ack (pkt_th))
    {
      flags = TCP_FLAG_RST;
      seq = pkt_th->ack_number;
      ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
      ack = clib_host_to_net_u32 (ack);
    }
  else
    {
      flags = TCP_FLAG_RST | TCP_FLAG_ACK;
      seq = 0;
      ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
    }

  th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
				       seq, ack, tcp_hdr_len, flags, 0);

  /* Swap src and dst ip */
  if (is_ip4)
    {
      ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
      ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
				  &pkt_ih4->src_address, IP_PROTOCOL_TCP,
				  tcp_csum_offload (tc));
      th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
    }
  else
    {
      int bogus = ~0;
      ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
	      0x60);
      ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address,
					 &pkt_ih6->src_address,
					 IP_PROTOCOL_TCP,
					 tc->ipv6_flow_label);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
      ASSERT (!bogus);
    }

  tcp_enqueue_to_ip_lookup_now (wrk, b, bi, is_ip4, fib_index);
  TCP_EVT (TCP_EVT_RST_SENT, tc);
  vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
			       TCP_ERROR_RST_SENT, 1);
}
/**
 * Build and set reset packet for connection
 */
void
tcp_send_reset (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;
  tcp_header_t *th;
  u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
  u8 flags;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    return;
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);

  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
  tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
  advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
  flags = TCP_FLAG_RST;
  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
			     tc->rcv_nxt, tcp_hdr_opts_len, flags,
			     advertise_wnd);
  opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
  th->checksum = tcp_compute_checksum (tc, b);
  ASSERT (opts_write_len == tc->snd_opts_len);
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_RST_SENT, tc);
  vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
			       TCP_ERROR_RST_SENT, 1);
}
static void
tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		 vlib_buffer_t * b)
{
  if (tc->c_is_ip4)
    {
      vlib_buffer_push_ip4 (wrk->vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4,
			    IP_PROTOCOL_TCP, tcp_csum_offload (tc));
    }
  else
    {
      vlib_buffer_push_ip6_custom (wrk->vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6,
				   IP_PROTOCOL_TCP, tc->ipv6_flow_label);
    }
}
/**
 * Send SYN
 *
 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
 * The packet is not forwarded through tcpx_output to avoid doing lookups
 * in the half_open pool.
 */
void
tcp_send_syn (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  /*
   * Setup retransmit and establish timers before requesting buffer
   * such that we can return if we've run out.
   */
  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
		    tc->rto * TCP_TO_TIMER_TICK);

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_syn (tc, b);

  /* Measure RTT with this */
  tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
  tc->rtt_seq = tc->snd_nxt;
  tc->rto_boff = 0;

  tcp_push_ip_hdr (wrk, tc, b);
  tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
  TCP_EVT (TCP_EVT_SYN_SENT, tc);
}
void
tcp_send_synack (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  tcp_retransmit_timer_force_update (tc);

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
      return;
    }

  tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_synack (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
}
/**
 * Flush ip lookup tx frames populated by timer pops
 */
static void
tcp_flush_frame_to_ip_lookup (tcp_worker_ctx_t * wrk, u8 is_ip4)
{
  if (wrk->ip_lookup_tx_frames[!is_ip4])
    {
      u32 next_index;
      next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
      vlib_put_frame_to_node (wrk->vm, next_index,
			      wrk->ip_lookup_tx_frames[!is_ip4]);
      wrk->ip_lookup_tx_frames[!is_ip4] = 0;
    }
}

/**
 * Flush v4 and v6 tcp and ip-lookup tx frames for thread index
 */
void
tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk)
{
  tcp_flush_frame_to_ip_lookup (wrk, 1);
  tcp_flush_frame_to_ip_lookup (wrk, 0);
}
/**
 *  Send FIN
 */
void
tcp_send_fin (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;
  u8 fin_snt = 0;

  fin_snt = tc->flags & TCP_CONN_FINSNT;
  if (fin_snt)
    tc->snd_nxt -= 1;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      /* Out of buffers so program fin retransmit ASAP */
      tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
      if (fin_snt)
	tc->snd_nxt += 1;
      else
	/* Make sure retransmit retries a fin not data */
	tc->flags |= TCP_CONN_FINSNT;
      return;
    }

  /* If we have non-dupacks programmed, no need to send them */
  if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
    tc->flags &= ~TCP_CONN_SNDACK;

  tcp_retransmit_timer_force_update (tc);
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_fin (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_FIN_SENT, tc);
  /* Account for the FIN */
  tc->snd_nxt += 1;
  if (!fin_snt)
    {
      tc->flags |= TCP_CONN_FINSNT;
      tc->flags &= ~TCP_CONN_FINPNDG;
      tc->snd_una_max = seq_max (tc->snd_una_max, tc->snd_nxt);
    }
}
/**
 * Push TCP header and update connection variables. Should only be called
 * for segments with data, not for 'control' packets.
 */
always_inline void
tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
		u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
{
  u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
  u32 advertise_wnd, data_len;
  tcp_main_t *tm = &tcp_main;
  tcp_header_t *th;

  data_len = b->current_length;
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    data_len += b->total_length_not_including_first_buffer;

  vnet_buffer (b)->tcp.flags = 0;
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

  if (compute_opts)
    tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);

  tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);

  if (maybe_burst)
    advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
  else
    advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);

  if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
    {
      if (seq_geq (tc->psh_seq, snd_nxt)
	  && seq_lt (tc->psh_seq, snd_nxt + data_len))
	flags |= TCP_FLAG_PSH;
    }
  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
			     tc->rcv_nxt, tcp_hdr_opts_len, flags,
			     advertise_wnd);

  if (maybe_burst)
    {
      clib_memcpy_fast ((u8 *) (th + 1),
			tm->wrk_ctx[tc->c_thread_index].cached_opts,
			tc->snd_opts_len);
    }
  else
    {
      u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
      ASSERT (len == tc->snd_opts_len);
    }

  /*
   * Update connection variables
   */

  if (update_snd_nxt)
    tc->snd_nxt += data_len;
  tc->rcv_las = tc->rcv_nxt;

  tc->bytes_out += data_len;
  tc->data_segs_out += 1;

  th->checksum = tcp_compute_checksum (tc, b);

  TCP_EVT (TCP_EVT_PKTIZE, tc);
}
always_inline u32
tcp_buffer_len (vlib_buffer_t * b)
{
  u32 data_len = b->current_length;
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    data_len += b->total_length_not_including_first_buffer;
  return data_len;
}
u32
tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
{
  tcp_connection_t *tc = (tcp_connection_t *) tconn;

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_track_tx (tc, tcp_buffer_len (b));

  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
		  /* update_snd_nxt */ 1);

  tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
  /* If not tracking an ACK, start tracking */
  if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
    {
      tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
      tc->rtt_seq = tc->snd_nxt;
    }
  if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
    {
      tcp_retransmit_timer_set (tc);
      tc->rto_boff = 0;
    }
  tcp_trajectory_add_start (b, 3);
  return 0;
}
void
tcp_send_ack (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_update_rcv_wnd (tc);
      return;
    }
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_ack (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}
void
tcp_program_ack (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_SNDACK))
    {
      session_add_self_custom_tx_evt (&tc->connection, 1);
      tc->flags |= TCP_CONN_SNDACK;
    }
}

void
tcp_program_dupack (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_SNDACK))
    {
      session_add_self_custom_tx_evt (&tc->connection, 1);
      tc->flags |= TCP_CONN_SNDACK;
    }
  if (tc->pending_dupacks < 255)
    tc->pending_dupacks += 1;
}

void
tcp_program_retransmit (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_RXT_PENDING))
    {
      session_add_self_custom_tx_evt (&tc->connection, 0);
      tc->flags |= TCP_CONN_RXT_PENDING;
    }
}
/**
 * Delayed ack timer handler
 *
 * Sends delayed ACK when timer expires
 */
void
tcp_timer_delack_handler (u32 index, u32 thread_index)
{
  tcp_connection_t *tc;

  tc = tcp_connection_get (index, thread_index);
  tcp_send_ack (tc);
}
/**
 * Send window update ack
 *
 * Ensures that it will be sent only once, after a zero rwnd has been
 * advertised in a previous ack, and only if rwnd has grown beyond a
 * configurable value.
 */
void
tcp_send_window_update_ack (tcp_connection_t * tc)
{
  if (tcp_zero_rwnd_sent (tc))
    {
      tcp_update_rcv_wnd (tc);
      if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
	{
	  tcp_zero_rwnd_sent_off (tc);
	  tcp_program_ack (tc);
	}
    }
}
/**
 * Allocate a new buffer and build a new tcp segment
 *
 * @param wrk		tcp worker
 * @param tc		connection for which the segment will be allocated
 * @param offset	offset of the first byte in the tx fifo
 * @param max_deq_bytes	segment size
 * @param[out] b	pointer to buffer allocated
 *
 * @return 	the number of bytes in the segment or 0 if buffer cannot be
 * 		allocated or no data available
 */
static int
tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		     u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
{
  u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
  vlib_main_t *vm = wrk->vm;
  u32 bi, seg_size;
  int n_bytes = 0;
  u8 *data;

  seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;

  /*
   * Prepare options
   */
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);

  /*
   * Allocate and fill in buffer(s)
   */

  /* Easy case, buffer size greater than mss */
  if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
    {
      if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
	return 0;
      *b = vlib_get_buffer (vm, bi);
      data = tcp_init_buffer (vm, *b);
      n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
					    max_deq_bytes);
      ASSERT (n_bytes == max_deq_bytes);
      b[0]->current_length = n_bytes;
      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
		      /* burst */ 0, /* update_snd_nxt */ 0);
    }
  /* Split mss into multiple buffers */
  else
    {
      u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
      u16 n_peeked, len_to_deq;
      vlib_buffer_t *chain_b, *prev_b;
      int i;

      /* Make sure we have enough buffers */
      n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
      vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
			    CLIB_CACHE_LINE_BYTES);
      n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
      if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
	{
	  if (n_bufs)
	    vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
	  return 0;
	}

      *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
      data = tcp_init_buffer (vm, *b);
      n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
					    bytes_per_buffer -
					    TRANSPORT_MAX_HDRS_LEN);
      b[0]->current_length = n_bytes;
      b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      b[0]->total_length_not_including_first_buffer = 0;
      max_deq_bytes -= n_bytes;

      chain_b = *b;
      for (i = 1; i < n_bufs_per_seg; i++)
	{
	  prev_b = chain_b;
	  len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
	  chain_bi = wrk->tx_buffers[--n_bufs];
	  chain_b = vlib_get_buffer (vm, chain_bi);
	  chain_b->current_data = 0;
	  data = vlib_buffer_get_current (chain_b);
	  n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
						 offset + n_bytes,
						 len_to_deq);
	  ASSERT (n_peeked == len_to_deq);
	  n_bytes += n_peeked;
	  chain_b->current_length = n_peeked;
	  chain_b->next_buffer = 0;

	  /* update previous buffer */
	  prev_b->next_buffer = chain_bi;
	  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

	  max_deq_bytes -= n_peeked;
	  b[0]->total_length_not_including_first_buffer += n_peeked;
	}

      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
		      /* burst */ 0, /* update_snd_nxt */ 0);

      if (PREDICT_FALSE (n_bufs))
	{
	  clib_warning ("not all buffers consumed");
	  vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
	}
    }

  ASSERT (n_bytes > 0);
  ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);

  return n_bytes;
}
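/* Illustrative (assuming TRANSPORT_MAX_HDRS_LEN of ~140 bytes and
 * 2048-byte buffers): a 9000-byte TSO segment gives
 * seg_size = 9000 + 140 = 9140 and n_bufs_per_seg = ceil(9140/2048) = 5.
 * The first buffer also reserves header room; the rest carry pure payload
 * chained via next_buffer. */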
/**
 * Build a retransmit segment
 *
 * @return the number of bytes in the segment or 0 if there's nothing to
 *         retransmit
 */
static u32
tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
				tcp_connection_t * tc, u32 offset,
				u32 max_deq_bytes, vlib_buffer_t ** b)
{
  u32 start, available_bytes;
  int n_bytes = 0;

  ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
  ASSERT (max_deq_bytes != 0);

  /*
   * Make sure we can retransmit something
   */
  available_bytes = transport_max_tx_dequeue (&tc->connection);
  ASSERT (available_bytes >= offset);
  available_bytes -= offset;
  if (!available_bytes)
    return 0;

  max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
  max_deq_bytes = clib_min (available_bytes, max_deq_bytes);

  start = tc->snd_una + offset;
  ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));

  n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
  if (!n_bytes)
    return 0;

  tc->snd_rxt_bytes += n_bytes;

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_track_rxt (tc, start, start + n_bytes);

  tc->bytes_retrans += n_bytes;
  tc->segs_retrans += 1;
  tcp_workerp_stats_inc (wrk, rxt_segs, 1);
  TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);

  return n_bytes;
}
static void
tcp_check_sack_reneging (tcp_connection_t * tc)
{
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_scoreboard_hole_t *hole;

  hole = scoreboard_first_hole (sb);
  if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
    return;

  scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
}
/**
 * Reset congestion control, switch cwnd to loss window and try again.
 */
static void
tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
{
  TCP_EVT (TCP_EVT_CC_EVT, tc, 6);

  tc->prev_ssthresh = tc->ssthresh;
  tc->prev_cwnd = tc->cwnd;

  /* If we entered loss without fast recovery, notify cc algo of the
   * congestion event such that it can update ssthresh and its state */
  if (!tcp_in_fastrecovery (tc))
    tcp_cc_congestion (tc);

  /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
  tcp_cc_loss (tc);

  tc->rtt_ts = 0;
  tc->cwnd_acc_bytes = 0;
  tc->tr_occurences += 1;
  tcp_recovery_on (tc);
}
void
tcp_timer_retransmit_handler (u32 tc_index, u32 thread_index)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  vlib_main_t *vm = wrk->vm;
  tcp_connection_t *tc;
  vlib_buffer_t *b = 0;
  u32 bi, n_bytes;

  tcp_workerp_stats_inc (wrk, tr_events, 1);
  tc = tcp_connection_get (tc_index, thread_index);

  /* Note: the connection may have been closed and pool_put */
  if (PREDICT_FALSE (tc == 0 || tc->state == TCP_STATE_SYN_SENT))
    return;

  /* Wait-close and retransmit could pop at the same time */
  if (tc->state == TCP_STATE_CLOSED)
    return;

  if (tc->state >= TCP_STATE_ESTABLISHED)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      /* Lost FIN, retransmit and return */
      if (tc->flags & TCP_CONN_FINSNT)
	{
	  tcp_send_fin (tc);
	  tc->rto_boff += 1;
	  tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
	  return;
	}

      /* Shouldn't be here. This condition is tricky because it has to take
       * into account boff > 0 due to persist timeout. */
      if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
	  || (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
	      && !tcp_flight_size (tc)))
	{
	  ASSERT (!tcp_in_recovery (tc));
	  tc->rto_boff = 0;
	  return;
	}

      /* We're not in recovery so make sure rto_boff is 0. Can be non 0 due
       * to persist timer timeout */
      if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
	{
	  tc->rto_boff = 0;
	  tcp_update_rto (tc);
	}

      /* Peer is dead or network connectivity is lost. Close connection.
       * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
       * a min rto of 0.2s we need to retry about 8 times. */
      if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
	{
	  tcp_send_reset (tc);
	  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
	  session_transport_closing_notify (&tc->connection);
	  session_transport_closed_notify (&tc->connection);
	  tcp_connection_timers_reset (tc);
	  tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time);
	  tcp_workerp_stats_inc (wrk, tr_abort, 1);
	  return;
	}

      if (tcp_opts_sack_permitted (&tc->rcv_opts))
	tcp_check_sack_reneging (tc);

      /* Update send congestion to make sure that rxt has data to send */
      tc->snd_congestion = tc->snd_nxt;

      /* Send the first unacked segment. If we're short on buffers, return
       * as soon as possible */
      n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
      n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
      if (!n_bytes)
	{
	  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
	  return;
	}

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
      tcp_retransmit_timer_force_update (tc);

      tc->rto_boff += 1;
      if (tc->rto_boff == 1)
	{
	  tcp_cc_init_rxt_timeout (tc);
	  /* Record timestamp. Eifel detection algorithm RFC3522 */
	  tc->snd_rxt_ts = tcp_tstamp (tc);
	}

      if (tcp_opts_sack_permitted (&tc->rcv_opts))
	scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);

      tcp_program_retransmit (tc);
    }
  /* Retransmit SYN-ACK */
  else if (tc->state == TCP_STATE_SYN_RCVD)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      tc->rtt_ts = 0;

      /* Passive open establish timeout */
      if (tc->rto > TCP_ESTABLISH_TIME >> 1)
	{
	  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
	  tcp_connection_timers_reset (tc);
	  tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time);
	  tcp_workerp_stats_inc (wrk, tr_abort, 1);
	  return;
	}

      if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
	{
	  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
	  return;
	}

      tc->rto_boff += 1;
      if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
	tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

      tcp_retransmit_timer_force_update (tc);

      b = vlib_get_buffer (vm, bi);
      tcp_init_buffer (vm, b);
      tcp_make_synack (tc, b);
      TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);

      /* Retransmit timer already updated, just enqueue to output */
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
    }
  else
    {
      ASSERT (tc->state == TCP_STATE_CLOSED);
      return;
    }
}
/**
 * SYN retransmit timer handler. Active open only.
 */
void
tcp_timer_retransmit_syn_handler (u32 tc_index, u32 thread_index)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  vlib_main_t *vm = wrk->vm;
  tcp_connection_t *tc;
  vlib_buffer_t *b = 0;
  u32 bi;

  tc = tcp_half_open_connection_get (tc_index);

  /* Note: the connection may have transitioned to ESTABLISHED... */
  if (PREDICT_FALSE (tc == 0 || tc->state != TCP_STATE_SYN_SENT))
    return;

  /* Half-open connection actually moved to established but we were
   * waiting for syn retransmit to pop to call cleanup from the right
   * thread. */
  if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
    {
      if (tcp_half_open_connection_cleanup (tc))
	TCP_DBG ("could not remove half-open connection");
      return;
    }

  TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
  tc->rtt_ts = 0;

  /* Active open establish timeout */
  if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
    {
      session_stream_connect_notify (&tc->connection, 1 /* fail */ );
      tcp_connection_cleanup (tc);
      return;
    }

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN, 1);
      return;
    }

  /* Try without increasing RTO a number of times. If this fails,
   * start growing RTO exponentially */
  tc->rto_boff += 1;
  if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
    tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_syn (tc, b);

  TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);

  /* This goes straight to ipx_lookup */
  tcp_push_ip_hdr (wrk, tc, b);
  tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);

  tcp_timer_update (tc, TCP_TIMER_RETRANSMIT_SYN,
		    tc->rto * TCP_TO_TIMER_TICK);
}
/**
 * Got 0 snd_wnd from peer, try to do something about it.
 */
void
tcp_timer_persist_handler (u32 index, u32 thread_index)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  u32 bi, max_snd_bytes, available_bytes, offset;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = wrk->vm;
  tcp_connection_t *tc;
  vlib_buffer_t *b;
  int n_bytes = 0;
  u8 *data;

  tc = tcp_connection_get_if_valid (index, thread_index);
  if (!tc)
    return;

  /* Problem already solved or worse */
  if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
      || (tc->flags & TCP_CONN_FINSNT))
    return;

  available_bytes = transport_max_tx_dequeue (&tc->connection);
  offset = tc->snd_nxt - tc->snd_una;

  /* Reprogram persist if no new bytes available to send. We may have data
   * next time */
  if (!available_bytes)
    {
      tcp_persist_timer_set (tc);
      return;
    }

  if (available_bytes <= offset)
    return;

  /* Increment RTO backoff */
  tc->rto_boff += 1;
  tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  /*
   * Try to force the first unsent segment (or buffer)
   */
  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_persist_timer_set (tc);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  data = tcp_init_buffer (vm, b);

  tcp_validate_txf_size (tc, offset);
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
  max_snd_bytes = clib_min (tc->snd_mss,
			    tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
  n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
					max_snd_bytes);
  b->current_length = n_bytes;
  ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
			   || tc->snd_nxt == tc->snd_una_max
			   || tc->rto_boff > 1));

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    {
      tcp_bt_check_app_limited (tc);
      tcp_bt_track_tx (tc, n_bytes);
    }

  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
		  /* burst */ 0, /* update_snd_nxt */ 1);
  tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

  /* Just sent new data, enable retransmit */
  tcp_retransmit_timer_update (tc);
}
/**
 * Retransmit first unacked segment
 */
int
tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi, n_bytes;

  TCP_EVT (TCP_EVT_CC_EVT, tc, 1);

  n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
  if (!n_bytes)
    return -1;

  bi = vlib_get_buffer_index (vm, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

  return 0;
}
static int
tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		     u32 burst_size)
{
  u32 offset, n_segs = 0, n_written, bi, available_wnd;
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;

  offset = tc->snd_nxt - tc->snd_una;
  available_wnd = tc->snd_wnd - offset;
  burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_check_app_limited (tc);

  while (n_segs < burst_size)
    {
      n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
      if (!n_written)
	goto done;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      offset += n_written;
      n_segs += 1;

      if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
	tcp_bt_track_tx (tc, n_written);

      tc->snd_nxt += n_written;
      tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
    }

done:
  return n_segs;
}
/**
 * Estimate send space using proportional rate reduction (RFC6937)
 */
static int
tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
{
  u32 pipe, prr_out;
  int space;

  pipe = tcp_flight_size (tc);
  prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);

  if (pipe > tc->ssthresh)
    {
      space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
	- prr_out;
    }
  else
    {
      int limit;
      limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
      space = clib_min (tc->ssthresh - pipe, limit);
    }
  space = clib_max (space, prr_out ? 0 : tc->snd_mss);
  return space;
}
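/* Worked example (illustrative, per RFC 6937): with prev_cwnd = 20
 * segments and ssthresh = 10, the reduction ratio is 10/20 = 0.5. While
 * pipe > ssthresh, the send allowance is prr_delivered * 0.5 - prr_out,
 * i.e., roughly one (re)transmission for every two segments that leave
 * the network, so cwnd glides down to ssthresh instead of collapsing. */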
static inline u8
tcp_retransmit_should_retry_head (tcp_connection_t * tc,
				  sack_scoreboard_t * sb)
{
  u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
  f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;

  if (tcp_fastrecovery_first (tc))
    return 1;

  return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
}

static inline u32
tcp_max_tx_deq (tcp_connection_t * tc)
{
  return (transport_max_tx_dequeue (&tc->connection)
	  - (tc->snd_nxt - tc->snd_una));
}
#define scoreboard_rescue_rxt_valid(_sb, _tc)			\
    (seq_geq (_sb->rescue_rxt, _tc->snd_una) 			\
     && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
/**
 * Do retransmit with SACKs
 */
static int
tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		     u32 burst_size)
{
  u32 n_written = 0, offset, max_bytes, n_segs = 0;
  u8 snd_limited = 0, can_rescue = 0;
  u32 bi, max_deq, burst_bytes;
  sack_scoreboard_hole_t *hole;
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  sack_scoreboard_t *sb;
  int snd_space;

  ASSERT (tcp_in_cong_recovery (tc));

  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
  if (!burst_size)
    {
      tcp_program_retransmit (tc);
      return 0;
    }

  if (tcp_in_recovery (tc))
    snd_space = tcp_available_cc_snd_space (tc);
  else
    snd_space = tcp_fastrecovery_prr_snd_space (tc);

  if (snd_space < tc->snd_mss)
    goto done;

  sb = &tc->sack_sb;

  /* Check if snd_una is a lost retransmit */
  if (pool_elts (sb->holes)
      && seq_gt (sb->high_sacked, tc->snd_congestion)
      && tc->rxt_head != tc->snd_una
      && tcp_retransmit_should_retry_head (tc, sb))
    {
      max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
      n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b);
      if (!n_written)
	{
	  tcp_program_retransmit (tc);
	  goto done;
	}
      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      n_segs = 1;

      tc->rxt_head = tc->snd_una;
      tc->rxt_delivered += n_written;
      tc->prr_delivered += n_written;
      ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
      goto done;
    }

  tcp_fastrecovery_first_off (tc);

  TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
  hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);

  max_deq = transport_max_tx_dequeue (&tc->connection);
  max_deq -= tc->snd_nxt - tc->snd_una;

  while (snd_space > 0 && n_segs < burst_size)
    {
      hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
				       &snd_limited);
      if (!hole)
	{
	  /* We are out of lost holes to retransmit so send some new data. */
	  if (max_deq > tc->snd_mss)
	    {
	      u32 n_segs_new;
	      int av_wnd;

	      /* Make sure we don't exceed available window and leave space
	       * for one more packet, to avoid zero window acks */
	      av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
	      av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
	      snd_space = clib_min (snd_space, av_wnd);
	      snd_space = clib_min (max_deq, snd_space);
	      burst_size = clib_min (burst_size - n_segs,
				     snd_space / tc->snd_mss);
	      burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
	      n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
	      if (max_deq > n_segs_new * tc->snd_mss)
		tcp_program_retransmit (tc);

	      n_segs += n_segs_new;
	      goto done;
	    }

	  if (tcp_in_recovery (tc) || !can_rescue
	      || scoreboard_rescue_rxt_valid (sb, tc))
	    break;

	  /* If rescue rxt undefined or less than snd_una then one segment of
	   * up to SMSS octets that MUST include the highest outstanding
	   * unSACKed sequence number SHOULD be returned, and RescueRxt set to
	   * RecoveryPoint. HighRxt MUST NOT be updated.
	   */
	  hole = scoreboard_last_hole (sb);
	  max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
	  max_bytes = clib_min (max_bytes, snd_space);
	  offset = hole->end - tc->snd_una - max_bytes;
	  n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
						      max_bytes, &b);
	  if (!n_written)
	    goto done;

	  sb->rescue_rxt = tc->snd_congestion;
	  bi = vlib_get_buffer_index (vm, b);
	  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
	  n_segs += 1;
	  break;
	}

      max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
      max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
      if (max_bytes == 0)
	break;

      offset = sb->high_rxt - tc->snd_una;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
						  &b);
      ASSERT (n_written <= snd_space);

      /* Nothing left to retransmit */
      if (n_written == 0)
	break;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      sb->high_rxt += n_written;
      ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));

      snd_space -= n_written;
      n_segs += 1;
    }

  if (hole)
    tcp_program_retransmit (tc);

done:
  transport_connection_tx_pacer_reset_bucket (&tc->connection, 0);
  return n_segs;
}
/**
 * Fast retransmit without SACK info
 */
static int
tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
			u32 burst_size)
{
  u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
  u32 burst_bytes, sent_bytes;
  vlib_main_t *vm = wrk->vm;
  int snd_space, n_segs = 0;
  u8 cc_limited = 0;
  vlib_buffer_t *b;

  ASSERT (tcp_in_cong_recovery (tc));
  TCP_EVT (TCP_EVT_CC_EVT, tc, 0);

  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
  if (!burst_size)
    {
      tcp_program_retransmit (tc);
      return 0;
    }

  snd_space = tcp_available_cc_snd_space (tc);
  cc_limited = snd_space < burst_bytes;

  if (!tcp_fastrecovery_first (tc))
    goto send_unsent;

  /* RFC 6582: [If a partial ack], retransmit the first unacknowledged
   * segment. */
  while (snd_space > 0 && n_segs < burst_size)
    {
      max_bytes = clib_min (tc->snd_mss,
			    tc->snd_congestion - tc->snd_una - offset);
      if (!max_bytes)
	break;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
						  &b);

      /* Nothing left to retransmit */
      if (n_written == 0)
	break;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      snd_space -= n_written;
      offset += n_written;
      n_segs += 1;
    }

  if (n_segs == burst_size)
    goto done;

send_unsent:

  /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
  if (snd_space < tc->snd_mss || tc->snd_mss == 0)
    goto done;

  max_deq = transport_max_tx_dequeue (&tc->connection);
  max_deq -= tc->snd_nxt - tc->snd_una;
  if (max_deq)
    {
      snd_space = clib_min (max_deq, snd_space);
      burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
      n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
      if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
	tcp_program_retransmit (tc);
      n_segs += n_segs_now;
    }

done:
  tcp_fastrecovery_first_off (tc);

  sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
  sent_bytes = cc_limited ? burst_bytes : sent_bytes;
  transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);

  return n_segs;
}
static int
tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
{
  int j, n_acks;

  if (!tc->pending_dupacks)
    {
      if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
	  || tc->state != TCP_STATE_ESTABLISHED)
	{
	  tcp_send_ack (tc);
	  return 1;
	}
      return 0;
    }

  /* If we're supposed to send dupacks but have no ooo data
   * send only one ack */
  if (!vec_len (tc->snd_sacks))
    {
      tcp_send_ack (tc);
      tc->dupacks_out += 1;
      tc->pending_dupacks = 0;
      return 1;
    }

  /* Start with first sack block */
  tc->snd_sack_pos = 0;

  /* Generate enough dupacks to cover all sack blocks. Do not generate
   * more sacks than the number of packets received. But do generate at
   * least 3, i.e., the number needed to signal congestion, if needed. */
  n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
  n_acks = clib_min (n_acks, tc->pending_dupacks);
  n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
  for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
    tcp_send_ack (tc);

  if (n_acks < max_burst_size)
    {
      tc->pending_dupacks = 0;
      tc->snd_sack_pos = 0;
      tc->dupacks_out += n_acks;
      return n_acks;
    }
  else
    {
      TCP_DBG ("constrained by burst size");
      tc->pending_dupacks = n_acks - max_burst_size;
      tc->dupacks_out += max_burst_size;
      tcp_program_dupack (tc);
      return max_burst_size;
    }
}
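/* Illustrative (assuming TCP_OPTS_MAX_SACK_BLOCKS is 3): 9 queued sack
 * blocks yield n_acks = 9 / 3 = 3 dupacks. The count is clamped by
 * pending_dupacks and raised to at least min(pending_dupacks, 3) so the
 * peer can still detect loss via triple duplicate acks. */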
static int
tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
{
  tcp_worker_ctx_t *wrk;
  u32 n_segs;

  if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
    return 0;

  wrk = tcp_get_worker (tc->c_thread_index);

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
  else
    n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);

  return n_segs;
}

int
tcp_session_custom_tx (void *conn, u32 max_burst_size)
{
  tcp_connection_t *tc = (tcp_connection_t *) conn;
  u32 n_segs = 0;

  if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
    {
      tc->flags &= ~TCP_CONN_RXT_PENDING;
      n_segs = tcp_do_retransmit (tc, max_burst_size);
      max_burst_size -= n_segs;
    }

  if (!(tc->flags & TCP_CONN_SNDACK))
    return n_segs;

  tc->flags &= ~TCP_CONN_SNDACK;

  /* We have retransmitted packets and no dupack */
  if (n_segs && !tc->pending_dupacks)
    return n_segs;

  if (!max_burst_size)
    {
      tcp_program_ack (tc);
      return max_burst_size;
    }

  n_segs += tcp_send_acks (tc, max_burst_size);

  return n_segs;
}
#endif /* CLIB_MARCH_VARIANT */
static void
tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
			      u16 * next0, u32 * error0)
{
  ip_adjacency_t *adj;
  adj_index_t ai;

  /* Not thread safe but as long as the connection exists the adj should
   * not be removed */
  ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
		     tc0->sw_if_index);
  if (ai == ADJ_INDEX_INVALID)
    {
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
      *next0 = TCP_OUTPUT_NEXT_DROP;
      *error0 = TCP_ERROR_LINK_LOCAL_RW;
      return;
    }

  adj = adj_get (ai);
  if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
    *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
  else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
    *next0 = TCP_OUTPUT_NEXT_IP_ARP;
  else
    {
      *next0 = TCP_OUTPUT_NEXT_DROP;
      *error0 = TCP_ERROR_LINK_LOCAL_RW;
    }
  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
}
static void
tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			  u32 * to_next, u32 n_bufs)
{
  tcp_connection_t *tc;
  tcp_tx_trace_t *t;
  vlib_buffer_t *b;
  tcp_header_t *th;
  int i;

  for (i = 0; i < n_bufs; i++)
    {
      b = vlib_get_buffer (vm, to_next[i]);
      if (!(b->flags & VLIB_BUFFER_IS_TRACED))
	continue;
      th = vlib_buffer_get_current (b);
      tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
			       vm->thread_index);
      t = vlib_add_trace (vm, node, b, sizeof (*t));
      clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
      clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
    }
}
always_inline void
tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
		    tcp_connection_t * tc0, u8 is_ip4)
{
  TCP_EVT (TCP_EVT_OUTPUT, tc0,
	   ((tcp_header_t *) vlib_buffer_get_current (b0))->flags,
	   b0->current_length);

  if (is_ip4)
    vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
			  IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
  else
    vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
				 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
}
always_inline void
tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u16 data_len;

  if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
    return;

  data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;

  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    data_len += b->total_length_not_including_first_buffer;

  if (PREDICT_TRUE (data_len <= tc->snd_mss))
    return;

  ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
  ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
  b->flags |= VNET_BUFFER_F_GSO;
  vnet_buffer2 (b)->gso_l4_hdr_sz = sizeof (tcp_header_t) + tc->snd_opts_len;
  vnet_buffer2 (b)->gso_size = tc->snd_mss;
}
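/* Illustrative: a 4344-byte payload with snd_mss = 1448 exceeds the mss,
 * so the buffer is flagged VNET_BUFFER_F_GSO with gso_size = 1448 and the
 * NIC (or a software gso node) later splits it into 3 on-wire segments,
 * each carrying a copy of the 20-byte TCP header plus options. */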
always_inline void
tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
			  vlib_node_runtime_t * error_node, u16 * next0,
			  u8 is_ip4)
{
  /* If next_index is not drop use it */
  if (tc0->next_node_index)
    {
      *next0 = tc0->next_node_index;
      vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
    }
  else
    {
      *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
    }

  vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
  vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;

  if (!is_ip4)
    {
      u32 error0 = 0;

      if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
	tcp_output_handle_link_local (tc0, b0, next0, &error0);

      if (PREDICT_FALSE (error0))
	{
	  b0->error = error_node->errors[error0];
	  return;
	}
    }

  if (!TCP_ALWAYS_ACK)
    tcp_timer_reset (tc0, TCP_TIMER_DELACK);
}
always_inline uword
tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * frame, int is_ip4)
{
  u32 n_left_from, *from, thread_index = vm->thread_index;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  vlib_node_runtime_t *error_node;

  error_node = vlib_node_get_runtime (vm, tcp_node_index (output, is_ip4));

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  tcp_set_time_now (tcp_get_worker (thread_index));

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    tcp46_output_trace_frame (vm, node, from, n_left_from);

  vlib_get_buffers (vm, from, bufs, n_left_from);
  b = bufs;
  next = nexts;

  while (n_left_from >= 4)
    {
      tcp_connection_t *tc0, *tc1;

      {
	vlib_prefetch_buffer_header (b[2], STORE);
	CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);

	vlib_prefetch_buffer_header (b[3], STORE);
	CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
      }

      tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
				thread_index);
      tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
				thread_index);

      if (PREDICT_TRUE (!tc0 + !tc1 == 0))
	{
	  tcp_output_push_ip (vm, b[0], tc0, is_ip4);
	  tcp_output_push_ip (vm, b[1], tc1, is_ip4);

	  tcp_check_if_gso (tc0, b[0]);
	  tcp_check_if_gso (tc1, b[1]);

	  tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4);
	  tcp_output_handle_packet (tc1, b[1], error_node, &next[1], is_ip4);
	}
      else
	{
	  if (tc0 != 0)
	    {
	      tcp_output_push_ip (vm, b[0], tc0, is_ip4);
	      tcp_check_if_gso (tc0, b[0]);
	      tcp_output_handle_packet (tc0, b[0], error_node, &next[0],
					is_ip4);
	    }
	  else
	    {
	      b[0]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION];
	      next[0] = TCP_OUTPUT_NEXT_DROP;
	    }
	  if (tc1 != 0)
	    {
	      tcp_output_push_ip (vm, b[1], tc1, is_ip4);
	      tcp_check_if_gso (tc1, b[1]);
	      tcp_output_handle_packet (tc1, b[1], error_node, &next[1],
					is_ip4);
	    }
	  else
	    {
	      b[1]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION];
	      next[1] = TCP_OUTPUT_NEXT_DROP;
	    }
	}

      b += 2;
      next += 2;
      n_left_from -= 2;
    }
  while (n_left_from > 0)
    {
      tcp_connection_t *tc0;

      if (n_left_from > 1)
	{
	  vlib_prefetch_buffer_header (b[1], STORE);
	  CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
	}

      tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
				thread_index);

      if (PREDICT_TRUE (tc0 != 0))
	{
	  tcp_output_push_ip (vm, b[0], tc0, is_ip4);
	  tcp_check_if_gso (tc0, b[0]);
	  tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4);
	}
      else
	{
	  b[0]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION];
	  next[0] = TCP_OUTPUT_NEXT_DROP;
	}

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
			       TCP_ERROR_PKTS_SENT, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_output_node) =
{
  .name = "tcp4-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp4_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_output_node) =
{
  .name = "tcp6-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp6_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */
typedef enum _tcp_reset_next
{
  TCP_RESET_NEXT_DROP,
  TCP_RESET_NEXT_IP_LOOKUP,
  TCP_RESET_N_NEXT
} tcp_reset_next_t;

#define foreach_tcp4_reset_next		\
  _(DROP, "error-drop")			\
  _(IP_LOOKUP, "ip4-lookup")

#define foreach_tcp6_reset_next		\
  _(DROP, "error-drop")			\
  _(IP_LOOKUP, "ip6-lookup")
static uword
tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * from_frame, u8 is_ip4)
{
  u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *b0;
	  tcp_tx_trace_t *t0;
	  tcp_header_t *th0;
	  u32 bi0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  tcp_make_reset_in_place (vm, b0, is_ip4);

	  /* Prepare to send to IP lookup */
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

	  b0->error = node->errors[error0];
	  b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      th0 = vlib_buffer_get_current (b0);
	      if (is_ip4)
		th0 = ip4_next_header ((ip4_header_t *) th0);
	      else
		th0 = ip6_next_header ((ip6_header_t *) th0);
	      t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	      clib_memcpy_fast (&t0->tcp_header, th0,
				sizeof (t0->tcp_header));
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}
VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 1);
}

VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 0);
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp4_reset_node) = {
  .name = "tcp4-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp4_reset_next
#undef _
  },
  .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (tcp6_reset_node) = {
  .name = "tcp6-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp6_reset_next
#undef _
  },
  .format_trace = format_tcp_tx_trace,
};
/* *INDENT-ON* */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */