/*
 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/tcp/tcp.h>
#include <vnet/tcp/tcp_inlines.h>
#include <math.h>
typedef enum _tcp_output_next
{
  TCP_OUTPUT_NEXT_DROP,
  TCP_OUTPUT_NEXT_IP_LOOKUP,
  TCP_OUTPUT_NEXT_IP_REWRITE,
  TCP_OUTPUT_NEXT_IP_ARP,
  TCP_OUTPUT_N_NEXT
} tcp_output_next_t;
#define foreach_tcp4_output_next                \
  _ (DROP, "error-drop")                        \
  _ (IP_LOOKUP, "ip4-lookup")                   \
  _ (IP_REWRITE, "ip4-rewrite")                 \
  _ (IP_ARP, "ip4-arp")

#define foreach_tcp6_output_next                \
  _ (DROP, "error-drop")                        \
  _ (IP_LOOKUP, "ip6-lookup")                   \
  _ (IP_REWRITE, "ip6-rewrite")                 \
  _ (IP_ARP, "ip6-discover-neighbor")
static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};

typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_tx_trace_t;
static u8 *
format_tcp_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
  tcp_connection_t *tc = &t->tcp_connection;
  u32 indent = format_get_indent (s);

  s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
              format_tcp_state, tc->state, format_white_space, indent,
              format_tcp_header, &t->tcp_header, 128);

  return s;
}
#ifndef CLIB_MARCH_VARIANT
static u8
tcp_window_compute_scale (u32 window)
{
  u8 wnd_scale = 0;
  while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
    wnd_scale++;
  return wnd_scale;
}
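/* Example, assuming TCP_WND_MAX is the unscaled 16-bit maximum (65535):
 * for a 4MB max_rx_fifo, 4MB >> 6 = 65536 still exceeds TCP_WND_MAX, so
 * the loop settles on wnd_scale = 7 and the advertised window is
 * expressed in units of 128 bytes. */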
/**
 * TCP's initial window
 */
always_inline u32
tcp_initial_wnd_unscaled (tcp_connection_t * tc)
{
  /* RFC 6928 recommends the value below. However, at the time our
   * connections are initialized, fifos may not be allocated. Therefore,
   * advertise the smallest possible unscaled window size and update once
   * fifos are assigned to the session.
  tcp_update_rcv_mss (tc);
  TCP_IW_N_SEGMENTS * tc->mss;
  */
  return tcp_cfg.min_rx_fifo;
}
/**
 * Compute initial window and scale factor. As per RFC1323, window field in
 * SYN and SYN-ACK segments is never scaled.
 */
u32
tcp_initial_window_to_advertise (tcp_connection_t * tc)
{
  /* Compute rcv wscale only if peer advertised support for it */
  if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
    tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);

  tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);

  return clib_min (tc->rcv_wnd, TCP_WND_MAX);
}
void
tcp_update_rcv_wnd (tcp_connection_t * tc)
{
  u32 available_space, wnd;
  i32 observed_wnd;

  /*
   * Figure out how much space we have available
   */
  available_space = transport_max_rx_enqueue (&tc->connection);
  if (PREDICT_FALSE (available_space < tc->rcv_opts.mss))
    {
      tc->rcv_wnd = 0;
      return;
    }

  /*
   * Use the above and what we know about what we've previously advertised
   * to compute the new window
   */
  observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);

  /* Bad. Thou shalt not shrink */
  if (PREDICT_FALSE ((i32) available_space < observed_wnd))
    {
      wnd = clib_max (observed_wnd, 0);
      TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
    }
  else
    {
      wnd = available_space;
    }

  /* Make sure we have a multiple of 1 << rcv_wscale. We round up to
   * avoid advertising a window less than mss which could happen if
   * 1 << rcv_wscale < mss */
  if (wnd && tc->rcv_wscale)
    wnd = round_pow2 (wnd, 1 << tc->rcv_wscale);

  tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
}
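/* Example: with rcv_wscale = 10, an unaligned window of 1500 bytes would
 * be advertised as 1500 >> 10 = 1, i.e., only 1024 bytes, less than one
 * mss; rounding up to 2048 keeps the advertisement at or above one mss. */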
/**
 * Compute and return window to advertise, scaled as per RFC1323
 */
static inline u32
tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
{
  if (state < TCP_STATE_ESTABLISHED)
    return tcp_initial_window_to_advertise (tc);

  tcp_update_rcv_wnd (tc);
  return tc->rcv_wnd >> tc->rcv_wscale;
}
static int
tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = tc->mss;
  len += TCP_OPTION_LEN_MSS;

  opts->flags |= TCP_OPTS_FLAG_WSCALE;
  opts->wscale = tc->rcv_wscale;
  len += TCP_OPTION_LEN_WINDOW_SCALE;

  opts->flags |= TCP_OPTS_FLAG_TSTAMP;
  opts->tsval = tcp_time_now ();
  opts->tsecr = 0;
  len += TCP_OPTION_LEN_TIMESTAMP;

  if (TCP_USE_SACKS)
    {
      opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
      len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
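/* Worked example: MSS (4) + window scale (3) + timestamp (10) +
 * SACK permitted (2) = 19 option bytes, padded to a 20-byte option
 * block, i.e., a 40-byte TCP header on SYNs. */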
static int
tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = tc->mss;
  len += TCP_OPTION_LEN_MSS;

  if (tcp_opts_wscale (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_WSCALE;
      opts->wscale = tc->rcv_wscale;
      len += TCP_OPTION_LEN_WINDOW_SCALE;
    }

  if (tcp_opts_tstamp (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_time_now ();
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
      len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
static int
tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags = 0;

  if (tcp_opts_tstamp (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_tstamp (tc);
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      if (vec_len (tc->snd_sacks))
        {
          opts->flags |= TCP_OPTS_FLAG_SACK;
          if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
            tc->snd_sack_pos = 0;
          opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
          opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
          opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
                                          TCP_OPTS_MAX_SACK_BLOCKS);
          tc->snd_sack_pos += opts->n_sack_blocks;
          len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
        }
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
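/* snd_sack_pos rotates through the pending SACK blocks so that successive
 * (dup)acks eventually cover all of them, even though at most
 * TCP_OPTS_MAX_SACK_BLOCKS fit into a single header. */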
always_inline int
tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
                  tcp_state_t state)
{
  switch (state)
    {
    case TCP_STATE_ESTABLISHED:
    case TCP_STATE_CLOSE_WAIT:
    case TCP_STATE_FIN_WAIT_1:
    case TCP_STATE_LAST_ACK:
    case TCP_STATE_CLOSING:
    case TCP_STATE_FIN_WAIT_2:
    case TCP_STATE_TIME_WAIT:
    case TCP_STATE_CLOSED:
      return tcp_make_established_options (tc, opts);
    case TCP_STATE_SYN_RCVD:
      return tcp_make_synack_options (tc, opts);
    case TCP_STATE_SYN_SENT:
      return tcp_make_syn_options (tc, opts);
    default:
      clib_warning ("State not handled! %d", state);
      return 0;
    }
}
/**
 * Update burst send vars
 *
 * - Updates snd_mss to reflect the effective segment size that we can send
 *   by taking into account all TCP options, including SACKs.
 * - Caches 'on the wire' options for reuse.
 * - Updates receive window, which can be reused for a burst.
 *
 * This should *only* be called when doing bursts
 */
void
tcp_update_burst_snd_vars (tcp_connection_t * tc)
{
  tcp_main_t *tm = &tcp_main;

  /* Compute options to be used for connection. These may be reused when
   * sending data or to compute the effective mss (snd_mss) */
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
                                       TCP_STATE_ESTABLISHED);

  /* XXX check if MTU has been updated */
  tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
  ASSERT (tc->snd_mss > 0);

  tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
                     &tc->snd_opts);

  tcp_update_rcv_wnd (tc);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_check_app_limited (tc);

  if (tc->snd_una == tc->snd_nxt)
    {
      tcp_cc_event (tc, TCP_CC_EVT_START_TX);
      tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST);
    }
}

#endif /* CLIB_MARCH_VARIANT */
static void *
tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    vlib_buffer_free_one (vm, b->next_buffer);
  /* Zero all flags but free list index and trace flag */
  b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
  b->current_data = 0;
  b->current_length = 0;
  b->total_length_not_including_first_buffer = 0;
  vnet_buffer (b)->tcp.flags = 0;

  /* Leave enough space for headers */
  return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}
#ifndef CLIB_MARCH_VARIANT
static void *
tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->total_length_not_including_first_buffer = 0;
  b->current_data = 0;
  vnet_buffer (b)->tcp.flags = 0;
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
  /* Leave enough space for headers */
  return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}
/* Compute TCP checksum in software when offloading is disabled for a
 * connection */
u16
ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
                                 ip46_address_t * src, ip46_address_t * dst)
{
  ip_csum_t sum0;
  u16 payload_length_host_byte_order;
  u32 i;

  /* Initialize checksum with ip header. */
  sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) +
    clib_host_to_net_u16 (IP_PROTOCOL_TCP);
  payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);

  for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++)
    {
      sum0 = ip_csum_with_carry
        (sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword));
      sum0 = ip_csum_with_carry
        (sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword));
    }

  return ip_calculate_l4_checksum (vm, p0, sum0,
                                   payload_length_host_byte_order, NULL, 0,
                                   NULL);
}
u16
ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
                                 ip46_address_t * src, ip46_address_t * dst)
{
  ip_csum_t sum0;
  u32 payload_length_host_byte_order;

  payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
  sum0 =
    clib_host_to_net_u32 (payload_length_host_byte_order +
                          (IP_PROTOCOL_TCP << 16));

  sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32));
  sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32));

  return ip_calculate_l4_checksum (vm, p0, sum0,
                                   payload_length_host_byte_order, NULL, 0,
                                   NULL);
}
always_inline u16
tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u16 checksum = 0;
  if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
    {
      tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
      vlib_main_t *vm = wrk->vm;

      if (tc->c_is_ip4)
        checksum = ip4_tcp_compute_checksum_custom
          (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
      else
        checksum = ip6_tcp_compute_checksum_custom
          (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
    }
  else
    {
      b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
    }
  return checksum;
}
/**
 * Prepare ACK
 */
always_inline void
tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
                u8 flags)
{
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 wnd;

  wnd = tcp_window_to_advertise (tc, state);

  /* Make and write options */
  tcp_opts_len = tcp_make_established_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
                             tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);

  tcp_options_write ((u8 *) (th + 1), snd_opts);

  th->checksum = tcp_compute_checksum (tc, b);

  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

  if (wnd == 0)
    tcp_zero_rwnd_sent_on (tc);
  else
    tcp_zero_rwnd_sent_off (tc);
}
/**
 * Convert buffer to ACK
 */
static inline void
tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
  TCP_EVT (TCP_EVT_ACK_SENT, tc);
  tc->rcv_las = tc->rcv_nxt;
}
/**
 * Convert buffer to FIN-ACK
 */
static inline void
tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
}
/**
 * Convert buffer to SYN
 */
void
tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u8 tcp_hdr_opts_len, tcp_opts_len;
  tcp_header_t *th;
  u16 initial_wnd;
  tcp_options_t snd_opts;

  initial_wnd = tcp_initial_window_to_advertise (tc);

  /* Make and write options */
  clib_memset (&snd_opts, 0, sizeof (snd_opts));
  tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
                             tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
                             initial_wnd);
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  tcp_options_write ((u8 *) (th + 1), &snd_opts);
  th->checksum = tcp_compute_checksum (tc, b);
}
/**
 * Convert buffer to SYN-ACK
 */
static void
tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 initial_wnd;

  clib_memset (snd_opts, 0, sizeof (*snd_opts));
  initial_wnd = tcp_initial_window_to_advertise (tc);
  tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
                             tc->rcv_nxt, tcp_hdr_opts_len,
                             TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
  tcp_options_write ((u8 *) (th + 1), snd_opts);

  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  th->checksum = tcp_compute_checksum (tc, b);
}
always_inline void
tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
                          u8 is_ip4, u32 fib_index)
{
  tcp_main_t *tm = &tcp_main;
  vlib_main_t *vm = wrk->vm;

  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->error = 0;

  vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;

  tcp_trajectory_add_start (b, 1);

  session_add_pending_tx_buffer (vm->thread_index, bi,
                                 tm->ipl_next_node[!is_ip4]);

  if (vm->thread_index == 0 && vlib_num_workers ())
    session_queue_run_on_main_thread (wrk->vm);
}
static void
tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
                       u8 is_ip4)
{
  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->error = 0;

  session_add_pending_tx_buffer (wrk->vm->thread_index, bi,
                                 wrk->tco_next_node[!is_ip4]);
}

#endif /* CLIB_MARCH_VARIANT */
static void
tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b, u8 is_ip4)
{
  ip4_header_t *ih4;
  ip6_header_t *ih6;
  tcp_header_t *th;
  ip4_address_t src_ip4, dst_ip4;
  ip6_address_t src_ip6, dst_ip6;
  u16 src_port, dst_port;
  u32 tmp, len, seq, ack;
  u8 flags;

  /* Find IP and TCP headers */
  th = tcp_buffer_hdr (b);

  /* Save src and dst ip */
  if (is_ip4)
    {
      ih4 = vlib_buffer_get_current (b);
      ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
      src_ip4.as_u32 = ih4->src_address.as_u32;
      dst_ip4.as_u32 = ih4->dst_address.as_u32;
    }
  else
    {
      ih6 = vlib_buffer_get_current (b);
      ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
      clib_memcpy_fast (&src_ip6, &ih6->src_address, sizeof (ip6_address_t));
      clib_memcpy_fast (&dst_ip6, &ih6->dst_address, sizeof (ip6_address_t));
    }

  src_port = th->src_port;
  dst_port = th->dst_port;
  flags = TCP_FLAG_RST;

  /*
   * RFC 793. If the ACK bit is off, sequence number zero is used,
   *   <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
   * If the ACK bit is on,
   *   <SEQ=SEG.ACK><CTL=RST>
   */
  if (tcp_ack (th))
    {
      seq = th->ack_number;
      ack = 0;
    }
  else
    {
      flags |= TCP_FLAG_ACK;
      tmp = clib_net_to_host_u32 (th->seq_number);
      len = vnet_buffer (b)->tcp.data_len + tcp_is_syn (th) + tcp_is_fin (th);
      ack = clib_host_to_net_u32 (tmp + len);
      seq = 0;
    }
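  /* Example: an unacceptable SYN carries no data, but the SYN flag counts
   * as one unit of sequence space, so the reply is
   * <SEQ=0><ACK=SEG.SEQ+1><CTL=RST,ACK>. */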
  tcp_reuse_buffer (vm, b);
  tcp_trajectory_add_start (b, 4);
  th = vlib_buffer_push_tcp_net_order (b, dst_port, src_port, seq, ack,
                                       sizeof (tcp_header_t), flags, 0);

  if (is_ip4)
    {
      ih4 = vlib_buffer_push_ip4 (vm, b, &dst_ip4, &src_ip4,
                                  IP_PROTOCOL_TCP, 1);
      th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
    }
  else
    {
      int bogus = ~0;
      ih6 = vlib_buffer_push_ip6 (vm, b, &dst_ip6, &src_ip6, IP_PROTOCOL_TCP);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
      ASSERT (!bogus);
    }
}
#ifndef CLIB_MARCH_VARIANT
/**
 *  Send reset without reusing existing buffer
 *
 *  It extracts connection info out of original packet
 */
void
tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
                      u32 thread_index, u8 is_ip4)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi, sw_if_index, fib_index;
  u8 tcp_hdr_len, flags = 0;
  tcp_header_t *th, *pkt_th;
  u32 seq, ack;
  ip4_header_t *ih4, *pkt_ih4;
  ip6_header_t *ih6, *pkt_ih6;
  fib_protocol_t fib_proto;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
  fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
  fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
  tcp_init_buffer (vm, b);

  /* Make and write options */
  tcp_hdr_len = sizeof (tcp_header_t);

  if (is_ip4)
    {
      pkt_ih4 = vlib_buffer_get_current (pkt);
      pkt_th = ip4_next_header (pkt_ih4);
    }
  else
    {
      pkt_ih6 = vlib_buffer_get_current (pkt);
      pkt_th = ip6_next_header (pkt_ih6);
    }

  if (tcp_ack (pkt_th))
    {
      flags = TCP_FLAG_RST;
      seq = pkt_th->ack_number;
      ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
    }
  else
    {
      flags = TCP_FLAG_RST | TCP_FLAG_ACK;
      seq = 0;
      ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
    }

  th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
                                       seq, ack, tcp_hdr_len, flags, 0);

  /* Swap src and dst ip */
  if (is_ip4)
    {
      ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
      ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
                                  &pkt_ih4->src_address, IP_PROTOCOL_TCP,
                                  tcp_csum_offload (tc));
      th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
    }
  else
    {
      int bogus = ~0;
      ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
              0x60);
      ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address,
                                         &pkt_ih6->src_address,
                                         IP_PROTOCOL_TCP,
                                         tc->ipv6_flow_label);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
      ASSERT (!bogus);
    }

  tcp_enqueue_to_ip_lookup (wrk, b, bi, is_ip4, fib_index);
  TCP_EVT (TCP_EVT_RST_SENT, tc);
  vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
                               TCP_ERROR_RST_SENT, 1);
}
/**
 * Build and set reset packet for connection
 */
void
tcp_send_reset (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;
  tcp_header_t *th;
  u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
  u8 flags;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);

  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
  tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
  advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
  flags = TCP_FLAG_RST;
  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
                             tc->rcv_nxt, tcp_hdr_opts_len, flags,
                             advertise_wnd);
  opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
  th->checksum = tcp_compute_checksum (tc, b);
  ASSERT (opts_write_len == tc->snd_opts_len);
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_RST_SENT, tc);
  vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
                               TCP_ERROR_RST_SENT, 1);
}
static void
tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                 vlib_buffer_t * b)
{
  if (tc->c_is_ip4)
    {
      vlib_buffer_push_ip4 (wrk->vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4,
                            IP_PROTOCOL_TCP, tcp_csum_offload (tc));
    }
  else
    {
      vlib_buffer_push_ip6_custom (wrk->vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6,
                                   IP_PROTOCOL_TCP, tc->ipv6_flow_label);
    }
}
/**
 * Send SYN
 *
 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
 * The packet is not forwarded through tcpx_output to avoid doing lookups
 * in the half_open pool.
 */
void
tcp_send_syn (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  /*
   * Setup retransmit and establish timers before requesting buffer
   * such that we can return if we've run out.
   */
  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
                    tc->rto * TCP_TO_TIMER_TICK);

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_syn (tc, b);

  /* Measure RTT with this */
  tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
  tc->rtt_seq = tc->snd_nxt;
  tc->rto_boff = 0;

  tcp_push_ip_hdr (wrk, tc, b);
  tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
  TCP_EVT (TCP_EVT_SYN_SENT, tc);
}
void
tcp_send_synack (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_synack (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
}
/**
 * Send FIN
 */
void
tcp_send_fin (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;
  u8 fin_snt = 0;

  fin_snt = tc->flags & TCP_CONN_FINSNT;
  if (fin_snt)
    tc->snd_nxt -= 1;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      /* Out of buffers so program fin retransmit ASAP */
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
      if (fin_snt)
        tc->snd_nxt += 1;
      else
        /* Make sure retransmit retries a fin not data */
        tc->flags |= TCP_CONN_FINSNT;
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  /* If we have non-dupacks programmed, no need to send them */
  if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
    tc->flags &= ~TCP_CONN_SNDACK;

  tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_fin (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_FIN_SENT, tc);
  /* Account for the FIN */
  tc->snd_nxt += 1;
  if (!fin_snt)
    {
      tc->flags |= TCP_CONN_FINSNT;
      tc->flags &= ~TCP_CONN_FINPNDG;
      tc->snd_una_max = seq_max (tc->snd_una_max, tc->snd_nxt);
    }
}
/**
 * Push TCP header and update connection variables. Should only be called
 * for segments with data, not for 'control' packets.
 */
always_inline void
tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
                u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
{
  u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
  u32 advertise_wnd, data_len;
  tcp_main_t *tm = &tcp_main;
  tcp_header_t *th;

  data_len = b->current_length;
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    data_len += b->total_length_not_including_first_buffer;

  vnet_buffer (b)->tcp.flags = 0;
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

  if (compute_opts)
    tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);

  tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);

  if (maybe_burst)
    advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
  else
    advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);

  if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
    {
      if (seq_geq (tc->psh_seq, snd_nxt)
          && seq_lt (tc->psh_seq, snd_nxt + data_len))
        flags |= TCP_FLAG_PSH;
    }
  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
                             tc->rcv_nxt, tcp_hdr_opts_len, flags,
                             advertise_wnd);

  if (maybe_burst)
    {
      clib_memcpy_fast ((u8 *) (th + 1),
                        tm->wrk_ctx[tc->c_thread_index].cached_opts,
                        tc->snd_opts_len);
    }
  else
    {
      u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
      ASSERT (len == tc->snd_opts_len);
    }

  /*
   * Update connection variables
   */

  if (update_snd_nxt)
    tc->snd_nxt += data_len;
  tc->rcv_las = tc->rcv_nxt;

  tc->bytes_out += data_len;
  tc->data_segs_out += 1;

  th->checksum = tcp_compute_checksum (tc, b);

  TCP_EVT (TCP_EVT_PKTIZE, tc);
}
always_inline u32
tcp_buffer_len (vlib_buffer_t * b)
{
  u32 data_len = b->current_length;
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    data_len += b->total_length_not_including_first_buffer;
  return data_len;
}
u32
tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
{
  tcp_connection_t *tc = (tcp_connection_t *) tconn;

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_track_tx (tc, tcp_buffer_len (b));

  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
                  /* update_snd_nxt */ 1);

  tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
  /* If not tracking an ACK, start tracking */
  if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
    {
      tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
      tc->rtt_seq = tc->snd_nxt;
    }
  if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
    {
      tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
      tcp_retransmit_timer_set (&wrk->timer_wheel, tc);
      tc->rto_boff = 0;
    }
  tcp_trajectory_add_start (b, 3);
  return 0;
}
void
tcp_send_ack (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_update_rcv_wnd (tc);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_ack (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}
void
tcp_program_ack (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_SNDACK))
    {
      session_add_self_custom_tx_evt (&tc->connection, 1);
      tc->flags |= TCP_CONN_SNDACK;
    }
}

void
tcp_program_dupack (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_SNDACK))
    {
      session_add_self_custom_tx_evt (&tc->connection, 1);
      tc->flags |= TCP_CONN_SNDACK;
    }
  if (tc->pending_dupacks < 255)
    tc->pending_dupacks += 1;
}

void
tcp_program_retransmit (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_RXT_PENDING))
    {
      session_add_self_custom_tx_evt (&tc->connection, 0);
      tc->flags |= TCP_CONN_RXT_PENDING;
    }
}
/**
 * Delayed ack timer handler
 *
 * Sends delayed ACK when timer expires
 */
void
tcp_timer_delack_handler (tcp_connection_t * tc)
{
  tcp_send_ack (tc);
}
/**
 * Send window update ack
 *
 * Ensures that it will be sent only once, after a zero rwnd has been
 * advertised in a previous ack, and only if rwnd has grown beyond a
 * configurable value.
 */
void
tcp_send_window_update_ack (tcp_connection_t * tc)
{
  if (tcp_zero_rwnd_sent (tc))
    {
      tcp_update_rcv_wnd (tc);
      if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
        {
          tcp_zero_rwnd_sent_off (tc);
          tcp_program_ack (tc);
        }
    }
}
/**
 * Allocate a new buffer and build a new tcp segment
 *
 * @param wrk		tcp worker
 * @param tc		connection for which the segment will be allocated
 * @param offset	offset of the first byte in the tx fifo
 * @param max_deq_bytes	segment size
 * @param[out] b	pointer to buffer allocated
 *
 * @return	the number of bytes in the segment or 0 if buffer cannot be
 *		allocated or no data available
 */
static int
tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                     u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
{
  u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
  vlib_main_t *vm = wrk->vm;
  u32 bi, seg_size;
  int n_bytes = 0;
  u8 *data;

  seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;

  /*
   * Prepare options
   */
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);

  /*
   * Allocate and fill in buffer(s)
   */

  /* Easy case, buffer size greater than mss */
  if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
    {
      if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
        {
          tcp_worker_stats_inc (wrk, no_buffer, 1);
          return 0;
        }
      *b = vlib_get_buffer (vm, bi);
      data = tcp_init_buffer (vm, *b);
      n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
                                            max_deq_bytes);
      ASSERT (n_bytes == max_deq_bytes);
      b[0]->current_length = n_bytes;
      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
                      /* burst */ 0, /* update_snd_nxt */ 0);
    }
  /* Split mss into multiple buffers */
  else
    {
      u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
      u16 n_peeked, len_to_deq;
      vlib_buffer_t *chain_b, *prev_b;
      int i;

      /* Make sure we have enough buffers */
      n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
      vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
                            CLIB_CACHE_LINE_BYTES);
      n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
      if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
        {
          if (n_bufs)
            vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
          tcp_worker_stats_inc (wrk, no_buffer, 1);
          return 0;
        }

      *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
      data = tcp_init_buffer (vm, *b);
      n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
                                            bytes_per_buffer -
                                            TRANSPORT_MAX_HDRS_LEN);
      b[0]->current_length = n_bytes;
      b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      b[0]->total_length_not_including_first_buffer = 0;
      max_deq_bytes -= n_bytes;

      chain_b = *b;
      for (i = 1; i < n_bufs_per_seg; i++)
        {
          prev_b = chain_b;
          len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
          chain_bi = wrk->tx_buffers[--n_bufs];
          chain_b = vlib_get_buffer (vm, chain_bi);
          chain_b->current_data = 0;
          data = vlib_buffer_get_current (chain_b);
          n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
                                                 offset + n_bytes,
                                                 len_to_deq);
          ASSERT (n_peeked == len_to_deq);
          n_bytes += n_peeked;
          chain_b->current_length = n_peeked;
          chain_b->next_buffer = 0;

          /* update previous buffer */
          prev_b->next_buffer = chain_bi;
          prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

          max_deq_bytes -= n_peeked;
          b[0]->total_length_not_including_first_buffer += n_peeked;
        }

      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
                      /* burst */ 0, /* update_snd_nxt */ 0);

      if (PREDICT_FALSE (n_bufs))
        {
          clib_warning ("not all buffers consumed");
          vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
        }
    }

  ASSERT (n_bytes > 0);
  ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);

  return n_bytes;
}
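/* Note: when seg_size exceeds bytes_per_buffer (typically 2048), the code
 * above chains ceil (seg_size / bytes_per_buffer) buffers via next_buffer
 * and VLIB_BUFFER_NEXT_PRESENT; e.g., a 9000-byte segment plus header
 * room spans 5 chained 2048-byte buffers. */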
/**
 * Build a retransmit segment
 *
 * @return the number of bytes in the segment or 0 if there's nothing to
 *         retransmit
 */
static u32
tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
                                tcp_connection_t * tc, u32 offset,
                                u32 max_deq_bytes, vlib_buffer_t ** b)
{
  u32 start, available_bytes;
  int n_bytes = 0;

  ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
  ASSERT (max_deq_bytes != 0);

  /*
   * Make sure we can retransmit something
   */
  available_bytes = transport_max_tx_dequeue (&tc->connection);
  ASSERT (available_bytes >= offset);
  available_bytes -= offset;
  if (!available_bytes)
    return 0;

  max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
  max_deq_bytes = clib_min (available_bytes, max_deq_bytes);

  start = tc->snd_una + offset;
  ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));

  n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
  if (!n_bytes)
    return 0;

  tc->snd_rxt_bytes += n_bytes;

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_track_rxt (tc, start, start + n_bytes);

  tc->bytes_retrans += n_bytes;
  tc->segs_retrans += 1;
  tcp_worker_stats_inc (wrk, rxt_segs, 1);
  TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);

  return n_bytes;
}
static void
tcp_check_sack_reneging (tcp_connection_t * tc)
{
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_scoreboard_hole_t *hole;

  hole = scoreboard_first_hole (sb);
  if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
    return;

  scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
}
/**
 * Reset congestion control, switch cwnd to loss window and try again.
 */
static void
tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
{
  TCP_EVT (TCP_EVT_CC_EVT, tc, 6);

  tc->prev_ssthresh = tc->ssthresh;
  tc->prev_cwnd = tc->cwnd;

  /* If we entered loss without fast recovery, notify cc algo of the
   * congestion event such that it can update ssthresh and its state */
  if (!tcp_in_fastrecovery (tc))
    tcp_cc_congestion (tc);

  /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
  tcp_cc_loss (tc);

  tc->rtt_ts = 0;
  tc->cwnd_acc_bytes = 0;
  tc->tr_occurences += 1;
  tcp_recovery_on (tc);
}
void
tcp_timer_retransmit_handler (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  u32 bi, n_bytes;

  tcp_worker_stats_inc (wrk, tr_events, 1);

  /* Should be handled by a different handler */
  if (PREDICT_FALSE (tc->state == TCP_STATE_SYN_SENT))
    return;

  /* Wait-close and retransmit could pop at the same time */
  if (tc->state == TCP_STATE_CLOSED)
    return;

  if (tc->state >= TCP_STATE_ESTABLISHED)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      /* Lost FIN, retransmit and return */
      if (tc->flags & TCP_CONN_FINSNT)
        {
          tcp_send_fin (tc);
          tc->rto_boff += 1;
          tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
          return;
        }

      /* Shouldn't be here. This condition is tricky because it has to take
       * into account boff > 0 due to persist timeout. */
      if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
          || (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
              && !tcp_flight_size (tc)))
        {
          ASSERT (!tcp_in_recovery (tc));
          tc->rto_boff = 0;
          return;
        }

      /* We're not in recovery so make sure rto_boff is 0. Can be non 0 due
       * to persist timer timeout */
      if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
        {
          tc->rto_boff = 0;
          tcp_update_rto (tc);
        }

      /* Peer is dead or network connectivity is lost. Close connection.
       * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
       * a min rto of 0.2s we need to retry about 8 times. */
      if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
        {
          tcp_send_reset (tc);
          tcp_connection_set_state (tc, TCP_STATE_CLOSED);
          session_transport_closing_notify (&tc->connection);
          session_transport_closed_notify (&tc->connection);
          tcp_connection_timers_reset (tc);
          tcp_program_cleanup (wrk, tc);
          tcp_worker_stats_inc (wrk, tr_abort, 1);
          return;
        }

      if (tcp_opts_sack_permitted (&tc->rcv_opts))
        tcp_check_sack_reneging (tc);

      /* Update send congestion to make sure that rxt has data to send */
      tc->snd_congestion = tc->snd_nxt;

      /* Send the first unacked segment. If we're short on buffers, return
       * as soon as possible */
      n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
      n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
      if (!n_bytes)
        {
          tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
          return;
        }

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
      tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);

      tc->rto_boff += 1;
      if (tc->rto_boff == 1)
        {
          tcp_cc_init_rxt_timeout (tc);
          /* Record timestamp. Eifel detection algorithm RFC3522 */
          tc->snd_rxt_ts = tcp_tstamp (tc);
        }

      if (tcp_opts_sack_permitted (&tc->rcv_opts))
        scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);

      tcp_program_retransmit (tc);
    }
  /* Retransmit SYN-ACK */
  else if (tc->state == TCP_STATE_SYN_RCVD)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      tc->rtt_ts = 0;

      /* Passive open establish timeout */
      if (tc->rto > TCP_ESTABLISH_TIME >> 1)
        {
          tcp_connection_set_state (tc, TCP_STATE_CLOSED);
          tcp_connection_timers_reset (tc);
          tcp_program_cleanup (wrk, tc);
          tcp_worker_stats_inc (wrk, tr_abort, 1);
          return;
        }

      if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
        {
          tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
          tcp_worker_stats_inc (wrk, no_buffer, 1);
          return;
        }

      tc->rto_boff += 1;
      if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
        tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

      tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);

      b = vlib_get_buffer (vm, bi);
      tcp_init_buffer (vm, b);
      tcp_make_synack (tc, b);
      TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);

      /* Retransmit timer already updated, just enqueue to output */
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
    }
  else
    {
      ASSERT (tc->state == TCP_STATE_CLOSED);
      return;
    }
}
/**
 * SYN retransmit timer handler. Active open only.
 */
void
tcp_timer_retransmit_syn_handler (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  u32 bi;

  /* Note: the connection may have transitioned to ESTABLISHED... */
  if (PREDICT_FALSE (tc->state != TCP_STATE_SYN_SENT))
    return;

  /* Half-open connection actually moved to established but we were
   * waiting for syn retransmit to pop to call cleanup from the right
   * thread. */
  if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
    {
      if (tcp_half_open_connection_cleanup (tc))
        TCP_DBG ("could not remove half-open connection");
      return;
    }

  TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
  tc->rtt_ts = 0;

  /* Active open establish timeout */
  if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
    {
      session_stream_connect_notify (&tc->connection, SESSION_E_TIMEDOUT);
      tcp_connection_cleanup (tc);
      return;
    }

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  /* Try without increasing RTO a number of times. If this fails,
   * start growing RTO exponentially */
  tc->rto_boff += 1;
  if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
    tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_syn (tc, b);

  TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);

  /* This goes straight to ipx_lookup */
  tcp_push_ip_hdr (wrk, tc, b);
  tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);

  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
                    tc->rto * TCP_TO_TIMER_TICK);
}
/**
 * Got 0 snd_wnd from peer, try to do something about it.
 *
 */
void
tcp_timer_persist_handler (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  u32 bi, max_snd_bytes, available_bytes, offset;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  int n_bytes = 0;
  u8 *data;

  /* Problem already solved or worse */
  if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
      || (tc->flags & TCP_CONN_FINSNT))
    goto update_scheduler;

  available_bytes = transport_max_tx_dequeue (&tc->connection);
  offset = tc->snd_nxt - tc->snd_una;

  /* Reprogram persist if no new bytes available to send. We may have data
   * next time */
  if (!available_bytes)
    {
      tcp_persist_timer_set (&wrk->timer_wheel, tc);
      return;
    }

  if (available_bytes <= offset)
    goto update_scheduler;

  /* Increment RTO backoff */
  tc->rto_boff += 1;
  tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  /*
   * Try to force the first unsent segment (or buffer)
   */
  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_persist_timer_set (&wrk->timer_wheel, tc);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  data = tcp_init_buffer (vm, b);

  tcp_validate_txf_size (tc, offset);
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
  max_snd_bytes = clib_min (tc->snd_mss,
                            tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
  n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
                                        max_snd_bytes);
  b->current_length = n_bytes;
  ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
                           || tc->snd_nxt == tc->snd_una_max
                           || tc->rto_boff > 1));

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    {
      tcp_bt_check_app_limited (tc);
      tcp_bt_track_tx (tc, n_bytes);
    }

  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
                  /* burst */ 0, /* update_snd_nxt */ 1);
  tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
  tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

  /* Just sent new data, enable retransmit */
  tcp_retransmit_timer_update (&wrk->timer_wheel, tc);

  return;

update_scheduler:

  /* Persist timer and all variables reset */
  if (tcp_is_descheduled (tc))
    transport_connection_reschedule (&tc->connection);
}
/**
 * Retransmit first unacked segment
 */
int
tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi, n_bytes;

  TCP_EVT (TCP_EVT_CC_EVT, tc, 1);

  n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
  if (!n_bytes)
    return -1;

  bi = vlib_get_buffer_index (vm, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

  return 0;
}
static int
tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                     u32 burst_size)
{
  u32 offset, n_segs = 0, n_written, bi, available_wnd;
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;

  offset = tc->snd_nxt - tc->snd_una;
  available_wnd = tc->snd_wnd - offset;
  burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_check_app_limited (tc);

  while (n_segs < burst_size)
    {
      n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
      if (!n_written)
        goto done;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      offset += n_written;
      n_segs += 1;

      if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
        tcp_bt_track_tx (tc, n_written);

      tc->snd_nxt += n_written;
      tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
    }

done:
  return n_segs;
}
/**
 * Estimate send space using proportional rate reduction (RFC6937)
 */
static int
tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
{
  u32 pipe, prr_out;
  int space;

  pipe = tcp_flight_size (tc);
  prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);

  if (pipe > tc->ssthresh)
    {
      space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
        - prr_out;
    }
  else
    {
      int limit;
      limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
      space = clib_min (tc->ssthresh - pipe, limit);
    }
  space = clib_max (space, prr_out ? 0 : tc->snd_mss);
  return space;
}
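/* RFC 6937 in brief: while pipe > ssthresh, transmission is limited to
 * prr_delivered * ssthresh / RecoverFS (prev_cwnd here) minus prr_out,
 * the bytes already sent during recovery; once pipe drops below ssthresh,
 * grow back toward ssthresh, bounded by the limit computed above. */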
static inline u8
tcp_retransmit_should_retry_head (tcp_connection_t * tc,
                                  sack_scoreboard_t * sb)
{
  u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
  f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;

  if (tcp_fastrecovery_first (tc))
    return 1;

  return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
}

static inline u32
tcp_max_tx_deq (tcp_connection_t * tc)
{
  return (transport_max_tx_dequeue (&tc->connection)
          - (tc->snd_nxt - tc->snd_una));
}

#define scoreboard_rescue_rxt_valid(_sb, _tc)			\
    (seq_geq (_sb->rescue_rxt, _tc->snd_una)			\
     && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
/**
 * Do retransmit with SACKs
 */
static int
tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                     u32 burst_size)
{
  u32 n_written = 0, offset, max_bytes, n_segs = 0;
  u8 snd_limited = 0, can_rescue = 0;
  u32 bi, max_deq, burst_bytes;
  sack_scoreboard_hole_t *hole;
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  sack_scoreboard_t *sb;
  int snd_space;

  ASSERT (tcp_in_cong_recovery (tc));

  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
  if (!burst_size)
    {
      tcp_program_retransmit (tc);
      return 0;
    }

  if (tcp_in_recovery (tc))
    snd_space = tcp_available_cc_snd_space (tc);
  else
    snd_space = tcp_fastrecovery_prr_snd_space (tc);

  if (snd_space < tc->snd_mss)
    goto done;

  sb = &tc->sack_sb;

  /* Check if snd_una is a lost retransmit */
  if (pool_elts (sb->holes)
      && seq_gt (sb->high_sacked, tc->snd_congestion)
      && tc->rxt_head != tc->snd_una
      && tcp_retransmit_should_retry_head (tc, sb))
    {
      max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
      n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b);
      if (!n_written)
        {
          tcp_program_retransmit (tc);
          goto done;
        }
      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      n_segs = 1;

      tc->rxt_head = tc->snd_una;
      tc->rxt_delivered += n_written;
      tc->prr_delivered += n_written;
      ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
    }

  tcp_fastrecovery_first_off (tc);

  TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
  hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);

  max_deq = transport_max_tx_dequeue (&tc->connection);
  max_deq -= tc->snd_nxt - tc->snd_una;

  while (snd_space > 0 && n_segs < burst_size)
    {
      hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
                                       &snd_limited);
      if (!hole)
        {
          /* We are out of lost holes to retransmit so send some new data. */
          if (max_deq > tc->snd_mss)
            {
              u32 n_segs_new;
              int av_wnd;

              /* Make sure we don't exceed available window and leave space
               * for one more packet, to avoid zero window acks */
              av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
              av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
              snd_space = clib_min (snd_space, av_wnd);
              snd_space = clib_min (max_deq, snd_space);
              burst_size = clib_min (burst_size - n_segs,
                                     snd_space / tc->snd_mss);
              burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
              n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
              if (max_deq > n_segs_new * tc->snd_mss)
                tcp_program_retransmit (tc);

              n_segs += n_segs_new;
              goto done;
            }

          if (tcp_in_recovery (tc) || !can_rescue
              || scoreboard_rescue_rxt_valid (sb, tc))
            break;

          /* If rescue rxt undefined or less than snd_una then one segment of
           * up to SMSS octets that MUST include the highest outstanding
           * unSACKed sequence number SHOULD be returned, and RescueRxt set to
           * RecoveryPoint. HighRxt MUST NOT be updated.
           */
          hole = scoreboard_last_hole (sb);
          max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
          max_bytes = clib_min (max_bytes, snd_space);
          offset = hole->end - tc->snd_una - max_bytes;
          n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
                                                      max_bytes, &b);
          if (!n_written)
            goto done;

          sb->rescue_rxt = tc->snd_congestion;
          bi = vlib_get_buffer_index (vm, b);
          tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
          n_segs += 1;
          break;
        }

      max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
      max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
      if (max_bytes == 0)
        break;

      offset = sb->high_rxt - tc->snd_una;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
                                                  &b);
      ASSERT (n_written <= snd_space);

      /* Nothing left to retransmit */
      if (n_written == 0)
        break;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      sb->high_rxt += n_written;
      ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));

      snd_space -= n_written;
      n_segs += 1;
    }

  if (hole)
    tcp_program_retransmit (tc);

done:

  transport_connection_tx_pacer_reset_bucket (&tc->connection, 0);
  return n_segs;
}
/**
 * Fast retransmit without SACK info
 */
static int
tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                        u32 burst_size)
{
  u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
  u32 burst_bytes, sent_bytes;
  vlib_main_t *vm = wrk->vm;
  int snd_space, n_segs = 0;
  u8 cc_limited = 0;
  vlib_buffer_t *b;

  ASSERT (tcp_in_cong_recovery (tc));
  TCP_EVT (TCP_EVT_CC_EVT, tc, 0);

  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
  if (!burst_size)
    {
      tcp_program_retransmit (tc);
      return 0;
    }

  snd_space = tcp_available_cc_snd_space (tc);
  cc_limited = snd_space < burst_bytes;

  if (!tcp_fastrecovery_first (tc))
    goto send_unsent;

  /* RFC 6582: [If a partial ack], retransmit the first unacknowledged
   * segment. */
  while (snd_space > 0 && n_segs < burst_size)
    {
      max_bytes = clib_min (tc->snd_mss,
                            tc->snd_congestion - tc->snd_una - offset);
      if (!max_bytes)
        break;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
                                                  &b);

      /* Nothing left to retransmit */
      if (n_written == 0)
        break;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      snd_space -= n_written;
      offset += n_written;
      n_segs += 1;
    }

  if (n_segs == burst_size)
    goto done;

send_unsent:

  /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
  if (snd_space < tc->snd_mss || tc->snd_mss == 0)
    goto done;

  max_deq = transport_max_tx_dequeue (&tc->connection);
  max_deq -= tc->snd_nxt - tc->snd_una;
  if (max_deq)
    {
      snd_space = clib_min (max_deq, snd_space);
      burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
      n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
      if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
        tcp_program_retransmit (tc);
      n_segs += n_segs_now;
    }

done:
  tcp_fastrecovery_first_off (tc);

  sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
  sent_bytes = cc_limited ? burst_bytes : sent_bytes;
  transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);

  return n_segs;
}
static int
tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
{
  int j, n_acks;

  if (!tc->pending_dupacks)
    {
      if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
          || tc->state != TCP_STATE_ESTABLISHED)
        return 0;

      tcp_send_ack (tc);
      return 1;
    }

  /* If we're supposed to send dupacks but have no ooo data
   * send only one ack */
  if (!vec_len (tc->snd_sacks))
    {
      tcp_send_ack (tc);
      tc->dupacks_out += 1;
      tc->pending_dupacks = 0;
      return 1;
    }

  /* Start with first sack block */
  tc->snd_sack_pos = 0;

  /* Generate enough dupacks to cover all sack blocks. Do not generate
   * more sacks than the number of packets received. But do generate at
   * least 3, i.e., the number needed to signal congestion. */
  n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
  n_acks = clib_min (n_acks, tc->pending_dupacks);
  n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
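  /* Example: 12 sack blocks at up to TCP_OPTS_MAX_SACK_BLOCKS (3) per
   * header yields n_acks = 4; with only 2 pending dupacks this is clamped
   * to 2, while the final clib_max guarantees at least
   * min (pending_dupacks, 3) acks so loss can still be signaled. */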
  for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
    tcp_send_ack (tc);

  if (n_acks < max_burst_size)
    {
      tc->pending_dupacks = 0;
      tc->snd_sack_pos = 0;
      tc->dupacks_out += n_acks;
      return n_acks;
    }
  else
    {
      TCP_DBG ("constrained by burst size");
      tc->pending_dupacks = n_acks - max_burst_size;
      tc->dupacks_out += max_burst_size;
      tcp_program_dupack (tc);
      return max_burst_size;
    }
}
static int
tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
{
  tcp_worker_ctx_t *wrk;
  u32 n_segs;

  if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
    return 0;

  wrk = tcp_get_worker (tc->c_thread_index);

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
  else
    n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);

  return n_segs;
}
int
tcp_session_custom_tx (void *conn, transport_send_params_t * sp)
{
  tcp_connection_t *tc = (tcp_connection_t *) conn;
  u32 n_segs = 0;

  if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
    {
      tc->flags &= ~TCP_CONN_RXT_PENDING;
      n_segs = tcp_do_retransmit (tc, sp->max_burst_size);
    }

  if (!(tc->flags & TCP_CONN_SNDACK))
    return n_segs;

  tc->flags &= ~TCP_CONN_SNDACK;

  /* We have retransmitted packets and no dupack */
  if (n_segs && !tc->pending_dupacks)
    return n_segs;

  if (sp->max_burst_size <= n_segs)
    {
      tcp_program_ack (tc);
      return n_segs;
    }

  n_segs += tcp_send_acks (tc, sp->max_burst_size - n_segs);

  return n_segs;
}
#endif /* CLIB_MARCH_VARIANT */
static void
tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
                              u16 * next0, u32 * error0)
{
  ip_adjacency_t *adj;
  adj_index_t ai;

  /* Not thread safe but as long as the connection exists the adj should
   * not be removed */
  ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
                     tc0->sw_if_index);
  if (ai == ADJ_INDEX_INVALID)
    {
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
      *next0 = TCP_OUTPUT_NEXT_DROP;
      *error0 = TCP_ERROR_LINK_LOCAL_RW;
      return;
    }

  adj = adj_get (ai);
  if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
    *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
  else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
    *next0 = TCP_OUTPUT_NEXT_IP_ARP;
  else
    {
      *next0 = TCP_OUTPUT_NEXT_DROP;
      *error0 = TCP_ERROR_LINK_LOCAL_RW;
    }
  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
}
static void
tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                          u32 * to_next, u32 n_bufs)
{
  tcp_connection_t *tc;
  tcp_tx_trace_t *t;
  vlib_buffer_t *b;
  tcp_header_t *th;
  int i;

  for (i = 0; i < n_bufs; i++)
    {
      b = vlib_get_buffer (vm, to_next[i]);
      if (!(b->flags & VLIB_BUFFER_IS_TRACED))
        continue;
      th = vlib_buffer_get_current (b);
      tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
                               vm->thread_index);
      t = vlib_add_trace (vm, node, b, sizeof (*t));
      clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
      clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
    }
}
always_inline void
tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
                    tcp_connection_t * tc0, u8 is_ip4)
{
  TCP_EVT (TCP_EVT_OUTPUT, tc0,
           ((tcp_header_t *) vlib_buffer_get_current (b0))->flags,
           b0->current_length);

  if (is_ip4)
    vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
                          IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
  else
    vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
                                 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
}
always_inline void
tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
{
  if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
    return;

  u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;

  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    data_len += b->total_length_not_including_first_buffer;

  if (PREDICT_TRUE (data_len <= tc->snd_mss))
    return;

  ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
  ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
  b->flags |= VNET_BUFFER_F_GSO;
  vnet_buffer2 (b)->gso_l4_hdr_sz =
    sizeof (tcp_header_t) + tc->snd_opts_len;
  vnet_buffer2 (b)->gso_size = tc->snd_mss;
}
always_inline void
tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
                          vlib_node_runtime_t * error_node, u16 * next0,
                          u8 is_ip4)
{
  /* If next_index is not drop use it */
  if (tc0->next_node_index)
    {
      *next0 = tc0->next_node_index;
      vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
    }
  else
    {
      *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
    }

  vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
  vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;

  if (!is_ip4)
    {
      u32 error0 = 0;

      if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
        tcp_output_handle_link_local (tc0, b0, next0, &error0);

      if (PREDICT_FALSE (error0))
        {
          b0->error = error_node->errors[error0];
          return;
        }
    }
}
always_inline uword
tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * frame, int is_ip4)
{
  u32 n_left_from, *from, thread_index = vm->thread_index;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  tcp_set_time_now (tcp_get_worker (thread_index));

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    tcp46_output_trace_frame (vm, node, from, n_left_from);

  vlib_get_buffers (vm, from, bufs, n_left_from);
  b = bufs;
  next = nexts;

  while (n_left_from >= 4)
    {
      tcp_connection_t *tc0, *tc1;

      {
        vlib_prefetch_buffer_header (b[2], STORE);
        CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);

        vlib_prefetch_buffer_header (b[3], STORE);
        CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
      }

      tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
                                thread_index);
      tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
                                thread_index);
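      /* !tc0 + !tc1 == 0 only if both lookups succeeded, so a single
       * branch covers the common case of two valid connections. */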
      if (PREDICT_TRUE (!tc0 + !tc1 == 0))
        {
          tcp_output_push_ip (vm, b[0], tc0, is_ip4);
          tcp_output_push_ip (vm, b[1], tc1, is_ip4);

          tcp_check_if_gso (tc0, b[0]);
          tcp_check_if_gso (tc1, b[1]);

          tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
          tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
        }
      else
        {
          if (tc0 != 0)
            {
              tcp_output_push_ip (vm, b[0], tc0, is_ip4);
              tcp_check_if_gso (tc0, b[0]);
              tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
            }
          else
            {
              b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
              next[0] = TCP_OUTPUT_NEXT_DROP;
            }
          if (tc1 != 0)
            {
              tcp_output_push_ip (vm, b[1], tc1, is_ip4);
              tcp_check_if_gso (tc1, b[1]);
              tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
            }
          else
            {
              b[1]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
              next[1] = TCP_OUTPUT_NEXT_DROP;
            }
        }

      b += 2;
      next += 2;
      n_left_from -= 2;
    }
  while (n_left_from > 0)
    {
      tcp_connection_t *tc0;

      if (n_left_from > 1)
        {
          vlib_prefetch_buffer_header (b[1], STORE);
          CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
        }

      tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
                                thread_index);

      if (PREDICT_TRUE (tc0 != 0))
        {
          tcp_output_push_ip (vm, b[0], tc0, is_ip4);
          tcp_check_if_gso (tc0, b[0]);
          tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
        }
      else
        {
          b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
          next[0] = TCP_OUTPUT_NEXT_DROP;
        }

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
                               TCP_ERROR_PKTS_SENT, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                 vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}
VLIB_REGISTER_NODE (tcp4_output_node) =
{
  .name = "tcp4-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp4_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};

VLIB_REGISTER_NODE (tcp6_output_node) =
{
  .name = "tcp6-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp6_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};
typedef enum _tcp_reset_next
{
  TCP_RESET_NEXT_DROP,
  TCP_RESET_NEXT_IP_LOOKUP,
  TCP_RESET_N_NEXT
} tcp_reset_next_t;

#define foreach_tcp4_reset_next        	\
  _(DROP, "error-drop")                 \
  _(IP_LOOKUP, "ip4-lookup")

#define foreach_tcp6_reset_next        	\
  _(DROP, "error-drop")                 \
  _(IP_LOOKUP, "ip6-lookup")
static uword
tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, u8 is_ip4)
{
  u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          vlib_buffer_t *b0;
          tcp_tx_trace_t *t0;
          tcp_header_t *th0;
          u32 bi0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          tcp_make_reset_in_place (vm, b0, is_ip4);

          /* Prepare to send to IP lookup */
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

          b0->error = node->errors[error0];
          b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              th0 = vlib_buffer_get_current (b0);
              if (is_ip4)
                th0 = ip4_next_header ((ip4_header_t *) th0);
              else
                th0 = ip6_next_header ((ip6_header_t *) th0);
              t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
              clib_memcpy_fast (&t0->tcp_header, th0,
                                sizeof (t0->tcp_header));
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}
VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 1);
}

VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 0);
}
VLIB_REGISTER_NODE (tcp4_reset_node) = {
  .name = "tcp4-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp4_reset_next
#undef _
  },
  .format_trace = format_tcp_tx_trace,
};

VLIB_REGISTER_NODE (tcp6_reset_node) = {
  .name = "tcp6-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp6_reset_next
#undef _
  },
  .format_trace = format_tcp_tx_trace,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */