2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/tcp/tcp.h>
17 #include <vnet/tcp/tcp_inlines.h>
19 #include <vnet/ip/ip4_inlines.h>
20 #include <vnet/ip/ip6_inlines.h>
22 typedef enum _tcp_output_next
25 TCP_OUTPUT_NEXT_IP_LOOKUP,
26 TCP_OUTPUT_NEXT_IP_REWRITE,
27 TCP_OUTPUT_NEXT_IP_ARP,
31 #define foreach_tcp4_output_next \
32 _ (DROP, "error-drop") \
33 _ (IP_LOOKUP, "ip4-lookup") \
34 _ (IP_REWRITE, "ip4-rewrite") \
37 #define foreach_tcp6_output_next \
38 _ (DROP, "error-drop") \
39 _ (IP_LOOKUP, "ip6-lookup") \
40 _ (IP_REWRITE, "ip6-rewrite") \
41 _ (IP_ARP, "ip6-discover-neighbor")
43 static char *tcp_error_strings[] = {
44 #define tcp_error(n,s) s,
45 #include <vnet/tcp/tcp_error.def>
51 tcp_header_t tcp_header;
52 tcp_connection_t tcp_connection;
56 format_tcp_tx_trace (u8 * s, va_list * args)
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
61 tcp_connection_t *tc = &t->tcp_connection;
62 u32 indent = format_get_indent (s);
64 s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
65 format_tcp_state, tc->state, format_white_space, indent,
66 format_tcp_header, &t->tcp_header, 128);
71 #ifndef CLIB_MARCH_VARIANT
73 tcp_window_compute_scale (u32 window)
76 while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
82 * TCP's initial window
85 tcp_initial_wnd_unscaled (tcp_connection_t * tc)
87 /* RFC 6928 recommends the value below (TCP_IW_N_SEGMENTS * tc->mss).
88 * However, at the time our connections are initialized, fifos may not be
89 * allocated. Therefore, advertise the smallest possible unscaled window
90 * size and update it once fifos are assigned to the session. */
93 tcp_update_rcv_mss (tc);
96 return tcp_cfg.min_rx_fifo;
100 * Compute initial window and scale factor. As per RFC1323, window field in
101 * SYN and SYN-ACK segments is never scaled.
104 tcp_initial_window_to_advertise (tcp_connection_t * tc)
106 /* Compute rcv wscale only if peer advertised support for it */
107 if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
108 tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);
110 tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
112 return clib_min (tc->rcv_wnd, TCP_WND_MAX);
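/* Rough illustration, assuming TCP_WND_MAX is 0xffff and TCP_MAX_WND_SCALE
 * is 14: with a 4MB max_rx_fifo, tcp_window_compute_scale stops at 7, since
 * 4MB >> 7 = 32KB is the first value that fits the 16-bit window field. The
 * SYN itself still carries the unscaled clib_min (rcv_wnd, TCP_WND_MAX). */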
116 tcp_update_rcv_wnd (tcp_connection_t * tc)
118 u32 available_space, wnd;
122 * Figure out how much space we have available
124 available_space = transport_max_rx_enqueue (&tc->connection);
127 * Use the above and what we know about what we've previously advertised
128 * to compute the new window
130 observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
132 /* Check if we are about to retract the window. Do the comparison before
133 * rounding to avoid errors. Per RFC7323 sec. 2.4 we could remove this */
134 if (PREDICT_FALSE ((i32) available_space < observed_wnd))
136 wnd = round_down_pow2 (clib_max (observed_wnd, 0), 1 << tc->rcv_wscale);
137 TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
141 /* Make sure we have a multiple of 1 << rcv_wscale. We round down to
142 * avoid advertising a window larger than what can be buffered */
143 wnd = round_down_pow2 (available_space, 1 << tc->rcv_wscale);
146 if (PREDICT_FALSE (wnd < tc->rcv_opts.mss))
149 tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
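/* Illustration: with rcv_wscale of 7 the advertised window moves in
 * 128-byte steps, so 1000 bytes of available space is rounded down and
 * advertised as 896 (a multiple of 1 << 7). */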
153 * Compute and return window to advertise, scaled as per RFC1323
156 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
158 if (state < TCP_STATE_ESTABLISHED)
159 return tcp_initial_window_to_advertise (tc);
161 tcp_update_rcv_wnd (tc);
162 return tc->rcv_wnd >> tc->rcv_wscale;
166 tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
170 opts->flags |= TCP_OPTS_FLAG_MSS;
172 len += TCP_OPTION_LEN_MSS;
174 opts->flags |= TCP_OPTS_FLAG_WSCALE;
175 opts->wscale = tc->rcv_wscale;
176 len += TCP_OPTION_LEN_WINDOW_SCALE;
178 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
179 opts->tsval = tcp_time_now ();
181 len += TCP_OPTION_LEN_TIMESTAMP;
185 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
186 len += TCP_OPTION_LEN_SACK_PERMITTED;
189 /* Align to needed boundary */
190 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
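/* Illustration, assuming TCP_OPTS_ALIGN is 4 and the standard option
 * lengths: a SYN with MSS (4) + window scale (3) + timestamp (10) +
 * sack-permitted (2) comes to 19 bytes and is padded to 20 so the data
 * offset stays a multiple of 4. */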
195 tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
199 opts->flags |= TCP_OPTS_FLAG_MSS;
201 len += TCP_OPTION_LEN_MSS;
203 if (tcp_opts_wscale (&tc->rcv_opts))
205 opts->flags |= TCP_OPTS_FLAG_WSCALE;
206 opts->wscale = tc->rcv_wscale;
207 len += TCP_OPTION_LEN_WINDOW_SCALE;
210 if (tcp_opts_tstamp (&tc->rcv_opts))
212 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
213 opts->tsval = tcp_time_now ();
214 opts->tsecr = tc->tsval_recent;
215 len += TCP_OPTION_LEN_TIMESTAMP;
218 if (tcp_opts_sack_permitted (&tc->rcv_opts))
220 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
221 len += TCP_OPTION_LEN_SACK_PERMITTED;
224 /* Align to needed boundary */
225 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
230 tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
236 if (tcp_opts_tstamp (&tc->rcv_opts))
238 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
239 opts->tsval = tcp_tstamp (tc);
240 opts->tsecr = tc->tsval_recent;
241 len += TCP_OPTION_LEN_TIMESTAMP;
243 if (tcp_opts_sack_permitted (&tc->rcv_opts))
245 if (vec_len (tc->snd_sacks))
247 opts->flags |= TCP_OPTS_FLAG_SACK;
248 if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
249 tc->snd_sack_pos = 0;
250 opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
251 opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
252 opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
253 TCP_OPTS_MAX_SACK_BLOCKS);
254 tc->snd_sack_pos += opts->n_sack_blocks;
255 len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
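/* Each sack block takes TCP_OPTION_LEN_SACK_BLOCK (8) bytes plus 2 bytes of
 * kind/length. Assuming TCP_OPTS_MAX_SACK_BLOCKS is 3, timestamps (10) plus
 * 3 blocks (26) stay within the 40-byte TCP option space; a 4th would not. */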
259 /* Align to needed boundary */
260 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
265 tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
270 case TCP_STATE_ESTABLISHED:
271 case TCP_STATE_CLOSE_WAIT:
272 case TCP_STATE_FIN_WAIT_1:
273 case TCP_STATE_LAST_ACK:
274 case TCP_STATE_CLOSING:
275 case TCP_STATE_FIN_WAIT_2:
276 case TCP_STATE_TIME_WAIT:
277 case TCP_STATE_CLOSED:
278 return tcp_make_established_options (tc, opts);
279 case TCP_STATE_SYN_RCVD:
280 return tcp_make_synack_options (tc, opts);
281 case TCP_STATE_SYN_SENT:
282 return tcp_make_syn_options (tc, opts);
284 clib_warning ("State not handled! %d", state);
290 * Update burst send vars
292 * - Updates snd_mss to reflect the effective segment size that we can send
293 * by taking into account all TCP options, including SACKs.
294 * - Caches 'on the wire' options for reuse
295 * - Updates receive window which can be reused for a burst.
297 * This should *only* be called when doing bursts
300 tcp_update_burst_snd_vars (tcp_connection_t * tc)
302 tcp_main_t *tm = &tcp_main;
304 /* Compute options to be used for connection. These may be reused when
305 * sending data or to compute the effective mss (snd_mss) */
306 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
307 TCP_STATE_ESTABLISHED);
309 /* XXX check if MTU has been updated */
310 tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
311 ASSERT (tc->snd_mss > 0);
313 tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
316 tcp_update_rcv_wnd (tc);
318 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
319 tcp_bt_check_app_limited (tc);
321 if (tc->snd_una == tc->snd_nxt)
323 tcp_cc_event (tc, TCP_CC_EVT_START_TX);
324 tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST);
327 if (tc->flags & TCP_CONN_PSH_PENDING)
329 u32 max_deq = transport_max_tx_dequeue (&tc->connection);
330 /* Last byte marked for push */
331 tc->psh_seq = tc->snd_una + max_deq - 1;
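/* tcp_push_hdr_i sets TCP_FLAG_PSH on the segment that ends up carrying
 * this sequence number, i.e., the last byte currently enqueued. */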
335 #endif /* CLIB_MARCH_VARIANT */
338 tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
340 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
341 vlib_buffer_free_one (vm, b->next_buffer);
342 /* Zero all flags but free list index and trace flag */
343 b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
345 b->current_length = 0;
346 b->total_length_not_including_first_buffer = 0;
347 vnet_buffer (b)->tcp.flags = 0;
349 /* Leave enough space for headers */
350 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
353 #ifndef CLIB_MARCH_VARIANT
355 tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
357 ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
358 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
359 b->total_length_not_including_first_buffer = 0;
361 vnet_buffer (b)->tcp.flags = 0;
362 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
363 /* Leave enough space for headers */
364 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
368 /* Compute TCP checksum in software when offloading is disabled for a connection */
370 ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
371 ip46_address_t * src, ip46_address_t * dst)
374 u16 payload_length_host_byte_order;
377 /* Initialize checksum with ip header. */
378 sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) +
379 clib_host_to_net_u16 (IP_PROTOCOL_TCP);
380 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
382 for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++)
384 sum0 = ip_csum_with_carry
385 (sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword));
386 sum0 = ip_csum_with_carry
387 (sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword));
390 return ip_calculate_l4_checksum (vm, p0, sum0,
391 payload_length_host_byte_order, NULL, 0,
396 ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
397 ip46_address_t * src, ip46_address_t * dst)
400 u32 payload_length_host_byte_order;
402 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
404 clib_host_to_net_u32 (payload_length_host_byte_order +
405 (IP_PROTOCOL_TCP << 16));
407 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32));
408 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32));
410 return ip_calculate_l4_checksum (vm, p0, sum0,
411 payload_length_host_byte_order, NULL, 0,
416 tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
419 if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
421 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
422 vlib_main_t *vm = wrk->vm;
425 checksum = ip4_tcp_compute_checksum_custom
426 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
428 checksum = ip6_tcp_compute_checksum_custom
429 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
433 vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
442 tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
445 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
446 u8 tcp_opts_len, tcp_hdr_opts_len;
450 wnd = tcp_window_to_advertise (tc, state);
452 /* Make and write options */
453 tcp_opts_len = tcp_make_established_options (tc, snd_opts);
454 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
456 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
457 tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);
459 tcp_options_write ((u8 *) (th + 1), snd_opts);
461 th->checksum = tcp_compute_checksum (tc, b);
463 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
466 tcp_zero_rwnd_sent_on (tc);
468 tcp_zero_rwnd_sent_off (tc);
472 * Convert buffer to ACK
475 tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
477 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
478 TCP_EVT (TCP_EVT_ACK_SENT, tc);
479 tc->rcv_las = tc->rcv_nxt;
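/* rcv_las is the receive window edge last advertised to the peer;
 * tcp_update_rcv_wnd uses it to compute how much of the previously
 * advertised window is still open. */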
483 * Convert buffer to FIN-ACK
486 tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
488 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
492 * Convert buffer to SYN
495 tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
497 u8 tcp_hdr_opts_len, tcp_opts_len;
500 tcp_options_t snd_opts;
502 initial_wnd = tcp_initial_window_to_advertise (tc);
504 /* Make and write options */
505 clib_memset (&snd_opts, 0, sizeof (snd_opts));
506 tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
507 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
509 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
510 tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
512 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
513 tcp_options_write ((u8 *) (th + 1), &snd_opts);
514 th->checksum = tcp_compute_checksum (tc, b);
518 * Convert buffer to SYN-ACK
521 tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
523 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
524 u8 tcp_opts_len, tcp_hdr_opts_len;
528 clib_memset (snd_opts, 0, sizeof (*snd_opts));
529 initial_wnd = tcp_initial_window_to_advertise (tc);
530 tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
531 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
533 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
534 tc->rcv_nxt, tcp_hdr_opts_len,
535 TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
536 tcp_options_write ((u8 *) (th + 1), snd_opts);
538 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
539 th->checksum = tcp_compute_checksum (tc, b);
543 tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
544 u8 is_ip4, u32 fib_index)
546 tcp_main_t *tm = &tcp_main;
547 vlib_main_t *vm = wrk->vm;
549 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
552 vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
553 vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
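/* ip4-lookup/ip6-lookup take the FIB index for locally generated packets
 * from the TX sw_if_index slot, so this selects the table, not an
 * interface. */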
555 tcp_trajectory_add_start (b, 1);
557 session_add_pending_tx_buffer (vm->thread_index, bi,
558 tm->ipl_next_node[!is_ip4]);
560 if (vm->thread_index == 0 && vlib_num_workers ())
561 session_queue_run_on_main_thread (wrk->vm);
565 tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
568 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
571 session_add_pending_tx_buffer (wrk->vm->thread_index, bi,
572 wrk->tco_next_node[!is_ip4]);
575 #endif /* CLIB_MARCH_VARIANT */
578 tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b, u8 is_ip4)
583 ip4_address_t src_ip4, dst_ip4;
584 ip6_address_t src_ip6, dst_ip6;
585 u16 src_port, dst_port;
586 u32 tmp, len, seq, ack;
589 /* Find IP and TCP headers */
590 th = tcp_buffer_hdr (b);
592 /* Save src and dst ip */
595 ih4 = vlib_buffer_get_current (b);
596 ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
597 src_ip4.as_u32 = ih4->src_address.as_u32;
598 dst_ip4.as_u32 = ih4->dst_address.as_u32;
602 ih6 = vlib_buffer_get_current (b);
603 ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
604 clib_memcpy_fast (&src_ip6, &ih6->src_address, sizeof (ip6_address_t));
605 clib_memcpy_fast (&dst_ip6, &ih6->dst_address, sizeof (ip6_address_t));
608 src_port = th->src_port;
609 dst_port = th->dst_port;
610 flags = TCP_FLAG_RST;
613 * RFC 793. If the ACK bit is off, sequence number zero is used,
614 * <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
615 * If the ACK bit is on,
616 * <SEQ=SEG.ACK><CTL=RST>
620 seq = th->ack_number;
625 flags |= TCP_FLAG_ACK;
626 tmp = clib_net_to_host_u32 (th->seq_number);
627 len = vnet_buffer (b)->tcp.data_len + tcp_is_syn (th) + tcp_is_fin (th);
628 ack = clib_host_to_net_u32 (tmp + len);
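/* SEG.LEN counts SYN and FIN as one sequence number each, so the RST
 * acknowledges the entire incoming segment. */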
632 tcp_reuse_buffer (vm, b);
633 tcp_trajectory_add_start (b, 4);
634 th = vlib_buffer_push_tcp_net_order (b, dst_port, src_port, seq, ack,
635 sizeof (tcp_header_t), flags, 0);
639 ih4 = vlib_buffer_push_ip4 (vm, b, &dst_ip4, &src_ip4,
641 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
646 ih6 = vlib_buffer_push_ip6 (vm, b, &dst_ip6, &src_ip6, IP_PROTOCOL_TCP);
647 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
654 #ifndef CLIB_MARCH_VARIANT
656 * Send reset without reusing the existing buffer
658 * Extracts connection info out of the original packet
661 tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
662 u32 thread_index, u8 is_ip4)
664 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
665 vlib_main_t *vm = wrk->vm;
667 u32 bi, sw_if_index, fib_index;
668 u8 tcp_hdr_len, flags = 0;
669 tcp_header_t *th, *pkt_th;
671 ip4_header_t *ih4, *pkt_ih4;
672 ip6_header_t *ih6, *pkt_ih6;
673 fib_protocol_t fib_proto;
675 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
677 tcp_worker_stats_inc (wrk, no_buffer, 1);
681 b = vlib_get_buffer (vm, bi);
682 sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
683 fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
684 fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
685 tcp_init_buffer (vm, b);
687 /* Make and write options */
688 tcp_hdr_len = sizeof (tcp_header_t);
692 pkt_ih4 = vlib_buffer_get_current (pkt);
693 pkt_th = ip4_next_header (pkt_ih4);
697 pkt_ih6 = vlib_buffer_get_current (pkt);
698 pkt_th = ip6_next_header (pkt_ih6);
701 if (tcp_ack (pkt_th))
703 flags = TCP_FLAG_RST;
704 seq = pkt_th->ack_number;
705 ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
709 flags = TCP_FLAG_RST | TCP_FLAG_ACK;
711 ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
714 th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
715 seq, ack, tcp_hdr_len, flags, 0);
717 /* Swap src and dst ip */
720 ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
721 ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
722 &pkt_ih4->src_address, IP_PROTOCOL_TCP,
723 tcp_csum_offload (tc));
724 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
729 ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
731 ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address,
732 &pkt_ih6->src_address,
734 tc->ipv6_flow_label);
735 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
739 tcp_enqueue_to_ip_lookup (wrk, b, bi, is_ip4, fib_index);
740 TCP_EVT (TCP_EVT_RST_SENT, tc);
741 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
742 TCP_ERROR_RST_SENT, 1);
746 * Build and set reset packet for connection
749 tcp_send_reset (tcp_connection_t * tc)
751 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
752 vlib_main_t *vm = wrk->vm;
756 u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
759 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
761 tcp_worker_stats_inc (wrk, no_buffer, 1);
764 b = vlib_get_buffer (vm, bi);
765 tcp_init_buffer (vm, b);
767 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
768 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
769 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
770 flags = TCP_FLAG_RST;
771 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
772 tc->rcv_nxt, tcp_hdr_opts_len, flags,
774 opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
775 th->checksum = tcp_compute_checksum (tc, b);
776 ASSERT (opts_write_len == tc->snd_opts_len);
777 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
778 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
779 TCP_EVT (TCP_EVT_RST_SENT, tc);
780 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
781 TCP_ERROR_RST_SENT, 1);
785 tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
790 vlib_buffer_push_ip4 (wrk->vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4,
791 IP_PROTOCOL_TCP, tcp_csum_offload (tc));
795 vlib_buffer_push_ip6_custom (wrk->vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6,
796 IP_PROTOCOL_TCP, tc->ipv6_flow_label);
803 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
804 * The packet is not forwarded through tcpx_output to avoid doing lookups
805 * in the half_open pool.
808 tcp_send_syn (tcp_connection_t * tc)
810 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
811 vlib_main_t *vm = wrk->vm;
816 * Set up retransmit and establish timers before requesting a buffer
817 * so that we can return if we've run out.
819 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
820 tc->rto * TCP_TO_TIMER_TICK);
822 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
824 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
825 tcp_worker_stats_inc (wrk, no_buffer, 1);
829 b = vlib_get_buffer (vm, bi);
830 tcp_init_buffer (vm, b);
831 tcp_make_syn (tc, b);
833 /* Measure RTT with this */
834 tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
835 tc->rtt_seq = tc->snd_nxt;
838 tcp_push_ip_hdr (wrk, tc, b);
839 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
840 TCP_EVT (TCP_EVT_SYN_SENT, tc);
844 tcp_send_synack (tcp_connection_t * tc)
846 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
847 vlib_main_t *vm = wrk->vm;
851 ASSERT (tc->snd_una != tc->snd_nxt);
852 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
854 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
856 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
857 tcp_worker_stats_inc (wrk, no_buffer, 1);
861 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
862 b = vlib_get_buffer (vm, bi);
863 tcp_init_buffer (vm, b);
864 tcp_make_synack (tc, b);
865 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
866 TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
873 tcp_send_fin (tcp_connection_t * tc)
875 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
876 vlib_main_t *vm = wrk->vm;
881 fin_snt = tc->flags & TCP_CONN_FINSNT;
885 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
887 /* Out of buffers so program fin retransmit ASAP */
888 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
892 /* Make sure retransmit retries a FIN, not data */
893 tc->flags |= TCP_CONN_FINSNT;
894 tcp_worker_stats_inc (wrk, no_buffer, 1);
898 /* If a non-dupack ack was programmed, the FIN-ACK below makes it redundant */
899 if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
900 tc->flags &= ~TCP_CONN_SNDACK;
902 b = vlib_get_buffer (vm, bi);
903 tcp_init_buffer (vm, b);
904 tcp_make_fin (tc, b);
905 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
906 TCP_EVT (TCP_EVT_FIN_SENT, tc);
907 /* Account for the FIN */
909 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
912 tc->flags |= TCP_CONN_FINSNT;
913 tc->flags &= ~TCP_CONN_FINPNDG;
918 * Push TCP header and update connection variables. Should only be called
919 * for segments with data, not for 'control' packets.
922 tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
923 u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
925 u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
926 u32 advertise_wnd, data_len;
927 tcp_main_t *tm = &tcp_main;
930 data_len = b->current_length;
931 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
932 data_len += b->total_length_not_including_first_buffer;
934 vnet_buffer (b)->tcp.flags = 0;
935 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
938 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
940 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
943 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
945 advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
947 if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
949 if (seq_geq (tc->psh_seq, snd_nxt)
950 && seq_lt (tc->psh_seq, snd_nxt + data_len))
951 flags |= TCP_FLAG_PSH;
953 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
954 tc->rcv_nxt, tcp_hdr_opts_len, flags,
959 clib_memcpy_fast ((u8 *) (th + 1),
960 tm->wrk_ctx[tc->c_thread_index].cached_opts,
965 u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
966 ASSERT (len == tc->snd_opts_len);
970 * Update connection variables
974 tc->snd_nxt += data_len;
975 tc->rcv_las = tc->rcv_nxt;
977 tc->bytes_out += data_len;
978 tc->data_segs_out += 1;
980 th->checksum = tcp_compute_checksum (tc, b);
982 TCP_EVT (TCP_EVT_PKTIZE, tc);
986 tcp_buffer_len (vlib_buffer_t * b)
988 u32 data_len = b->current_length;
989 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
990 data_len += b->total_length_not_including_first_buffer;
995 tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
997 tcp_connection_t *tc = (tcp_connection_t *) tconn;
999 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1000 tcp_bt_track_tx (tc, tcp_buffer_len (b));
1002 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
1003 /* update_snd_nxt */ 1);
1005 tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
1006 /* If not tracking an ACK, start tracking */
1007 if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
1009 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
1010 tc->rtt_seq = tc->snd_nxt;
1012 if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
1014 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1015 tcp_retransmit_timer_set (&wrk->timer_wheel, tc);
1018 tcp_trajectory_add_start (b, 3);
1023 tcp_send_ack (tcp_connection_t * tc)
1025 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1026 vlib_main_t *vm = wrk->vm;
1030 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1032 tcp_update_rcv_wnd (tc);
1033 tcp_worker_stats_inc (wrk, no_buffer, 1);
1036 b = vlib_get_buffer (vm, bi);
1037 tcp_init_buffer (vm, b);
1038 tcp_make_ack (tc, b);
1039 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1043 tcp_program_ack (tcp_connection_t * tc)
1045 if (!(tc->flags & TCP_CONN_SNDACK))
1047 session_add_self_custom_tx_evt (&tc->connection, 1);
1048 tc->flags |= TCP_CONN_SNDACK;
1053 tcp_program_dupack (tcp_connection_t * tc)
1055 if (!(tc->flags & TCP_CONN_SNDACK))
1057 session_add_self_custom_tx_evt (&tc->connection, 1);
1058 tc->flags |= TCP_CONN_SNDACK;
1060 if (tc->pending_dupacks < 255)
1061 tc->pending_dupacks += 1;
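/* Dupacks are not sent inline; the counter (capped at 255 above) is
 * drained by tcp_send_acks when the session layer runs custom tx. */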
1065 tcp_program_retransmit (tcp_connection_t * tc)
1067 if (!(tc->flags & TCP_CONN_RXT_PENDING))
1069 session_add_self_custom_tx_evt (&tc->connection, 0);
1070 tc->flags |= TCP_CONN_RXT_PENDING;
1075 * Send window update ack
1077 * Ensures that it will be sent only once, after a zero rwnd has been
1078 * advertised in a previous ack, and only if rwnd has grown beyond a
1079 * configurable value.
1082 tcp_send_window_update_ack (tcp_connection_t * tc)
1084 if (tcp_zero_rwnd_sent (tc))
1086 tcp_update_rcv_wnd (tc);
1087 if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
1089 tcp_zero_rwnd_sent_off (tc);
1090 tcp_program_ack (tc);
1096 * Allocate a new buffer and build a new tcp segment
1098 * @param wrk tcp worker
1099 * @param tc connection for which the segment will be allocated
1100 * @param offset offset of the first byte in the tx fifo
1101 * @param max_deq_bytes maximum bytes to dequeue, i.e., segment size
1102 * @param[out] b pointer to buffer allocated
1104 * @return the number of bytes in the segment or 0 if buffer cannot be
1105 * allocated or no data available
1108 tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1109 u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
1111 u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
1112 vlib_main_t *vm = wrk->vm;
1117 seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;
1122 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1125 * Allocate and fill in buffer(s)
1128 /* Easy case, segment fits into a single buffer */
1129 if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
1131 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1133 tcp_worker_stats_inc (wrk, no_buffer, 1);
1136 *b = vlib_get_buffer (vm, bi);
1137 data = tcp_init_buffer (vm, *b);
1138 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1140 ASSERT (n_bytes == max_deq_bytes);
1141 b[0]->current_length = n_bytes;
1142 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1143 /* burst */ 0, /* update_snd_nxt */ 0);
1145 /* Split segment into multiple buffers */
1148 u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
1149 u16 n_peeked, len_to_deq;
1150 vlib_buffer_t *chain_b, *prev_b;
1153 /* Make sure we have enough buffers */
1154 n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
1155 vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
1156 CLIB_CACHE_LINE_BYTES);
1157 n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
1158 if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
1161 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1162 tcp_worker_stats_inc (wrk, no_buffer, 1);
1166 *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
1167 data = tcp_init_buffer (vm, *b);
1168 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1170 TRANSPORT_MAX_HDRS_LEN);
1171 b[0]->current_length = n_bytes;
1172 b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1173 b[0]->total_length_not_including_first_buffer = 0;
1174 max_deq_bytes -= n_bytes;
1177 for (i = 1; i < n_bufs_per_seg; i++)
1180 len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
1181 chain_bi = wrk->tx_buffers[--n_bufs];
1182 chain_b = vlib_get_buffer (vm, chain_bi);
1183 chain_b->current_data = 0;
1184 data = vlib_buffer_get_current (chain_b);
1185 n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
1188 ASSERT (n_peeked == len_to_deq);
1189 n_bytes += n_peeked;
1190 chain_b->current_length = n_peeked;
1191 chain_b->next_buffer = 0;
1193 /* update previous buffer */
1194 prev_b->next_buffer = chain_bi;
1195 prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1197 max_deq_bytes -= n_peeked;
1198 b[0]->total_length_not_including_first_buffer += n_peeked;
1201 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1202 /* burst */ 0, /* update_snd_nxt */ 0);
1204 if (PREDICT_FALSE (n_bufs))
1206 clib_warning ("not all buffers consumed");
1207 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1211 ASSERT (n_bytes > 0);
1212 ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
1218 * Build a retransmit segment
1220 * @return the number of bytes in the segment or 0 if there's nothing to retransmit
1224 tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
1225 tcp_connection_t * tc, u32 offset,
1226 u32 max_deq_bytes, vlib_buffer_t ** b)
1228 u32 start, available_bytes;
1231 ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
1232 ASSERT (max_deq_bytes != 0);
1235 * Make sure we can retransmit something
1237 available_bytes = transport_max_tx_dequeue (&tc->connection);
1238 ASSERT (available_bytes >= offset);
1239 available_bytes -= offset;
1240 if (!available_bytes)
1243 max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
1244 max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
1246 start = tc->snd_una + offset;
1247 ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));
1249 n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
1253 tc->snd_rxt_bytes += n_bytes;
1255 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1256 tcp_bt_track_rxt (tc, start, start + n_bytes);
1258 tc->bytes_retrans += n_bytes;
1259 tc->segs_retrans += 1;
1260 tcp_worker_stats_inc (wrk, rxt_segs, 1);
1261 TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);
1267 tcp_check_sack_reneging (tcp_connection_t * tc)
1269 sack_scoreboard_t *sb = &tc->sack_sb;
1270 sack_scoreboard_hole_t *hole;
1272 hole = scoreboard_first_hole (sb);
1273 if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
1276 scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
1280 * Reset congestion control, switch cwnd to loss window and try again.
1283 tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
1285 TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
1287 tc->prev_ssthresh = tc->ssthresh;
1288 tc->prev_cwnd = tc->cwnd;
1290 /* If we entered loss without fast recovery, notify the cc algo of the
1291 * congestion event so that it can update ssthresh and its state */
1292 if (!tcp_in_fastrecovery (tc))
1293 tcp_cc_congestion (tc);
1295 /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
1299 tc->cwnd_acc_bytes = 0;
1300 tc->tr_occurences += 1;
1301 tc->sack_sb.reorder = TCP_DUPACK_THRESHOLD;
1302 tcp_recovery_on (tc);
1306 tcp_timer_retransmit_handler (tcp_connection_t * tc)
1308 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1309 vlib_main_t *vm = wrk->vm;
1310 vlib_buffer_t *b = 0;
1313 tcp_worker_stats_inc (wrk, tr_events, 1);
1315 /* Should be handled by a different handler */
1316 if (PREDICT_FALSE (tc->state == TCP_STATE_SYN_SENT))
1319 /* Wait-close and retransmit could pop at the same time */
1320 if (tc->state == TCP_STATE_CLOSED)
1323 if (tc->state >= TCP_STATE_ESTABLISHED)
1325 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1327 /* Lost FIN, retransmit and return */
1328 if (tc->flags & TCP_CONN_FINSNT)
1332 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1336 /* Shouldn't be here */
1337 if (tc->snd_una == tc->snd_nxt)
1339 ASSERT (!tcp_in_recovery (tc));
1344 /* We're not in recovery, so make sure rto_boff is 0. It can be non-zero
1345 * due to a persist timer timeout */
1346 if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
1349 tcp_update_rto (tc);
1352 /* Peer is dead or network connectivity is lost. Close connection.
1353 * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
1354 * a min rto of 0.2s we need to retry about 8 times. */
1355 if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
1357 tcp_send_reset (tc);
1358 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1359 session_transport_closing_notify (&tc->connection);
1360 session_transport_closed_notify (&tc->connection);
1361 tcp_connection_timers_reset (tc);
1362 tcp_program_cleanup (wrk, tc);
1363 tcp_worker_stats_inc (wrk, tr_abort, 1);
1367 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1368 tcp_check_sack_reneging (tc);
1370 /* Update send congestion to make sure that rxt has data to send */
1371 tc->snd_congestion = tc->snd_nxt;
1373 /* Send the first unacked segment. If we're short on buffers, return
1374 * as soon as possible */
1375 n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
1376 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
1379 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
1383 bi = vlib_get_buffer_index (vm, b);
1384 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1386 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1387 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
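/* Classic exponential backoff: each timeout doubles rto, capped at
 * TCP_RTO_MAX, before the retransmit timer is re-armed. */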
1390 if (tc->rto_boff == 1)
1392 tcp_cc_init_rxt_timeout (tc);
1393 /* Record timestamp. Eifel detection algorithm RFC3522 */
1394 tc->snd_rxt_ts = tcp_tstamp (tc);
1397 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1398 scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);
1400 tcp_program_retransmit (tc);
1402 /* Retransmit SYN-ACK */
1403 else if (tc->state == TCP_STATE_SYN_RCVD)
1405 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1409 /* Passive open establish timeout */
1410 if (tc->rto > TCP_ESTABLISH_TIME >> 1)
1412 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1413 tcp_connection_timers_reset (tc);
1414 tcp_program_cleanup (wrk, tc);
1415 tcp_worker_stats_inc (wrk, tr_abort, 1);
1419 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1421 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
1422 tcp_worker_stats_inc (wrk, no_buffer, 1);
1427 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1428 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1430 ASSERT (tc->snd_una != tc->snd_nxt);
1431 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
1433 b = vlib_get_buffer (vm, bi);
1434 tcp_init_buffer (vm, b);
1435 tcp_make_synack (tc, b);
1436 TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);
1438 /* Retransmit timer already updated, just enqueue to output */
1439 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1443 ASSERT (tc->state == TCP_STATE_CLOSED);
1449 * SYN retransmit timer handler. Active open only.
1452 tcp_timer_retransmit_syn_handler (tcp_connection_t * tc)
1454 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1455 vlib_main_t *vm = wrk->vm;
1456 vlib_buffer_t *b = 0;
1459 /* Note: the connection may have transitioned to ESTABLISHED... */
1460 if (PREDICT_FALSE (tc->state != TCP_STATE_SYN_SENT))
1463 /* Half-open connection actually moved to established but we were
1464 * waiting for the syn retransmit to pop to call cleanup from the right thread */
1466 if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
1468 if (tcp_half_open_connection_cleanup (tc))
1469 TCP_DBG ("could not remove half-open connection");
1473 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1476 /* Active open establish timeout */
1477 if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
1479 session_stream_connect_notify (&tc->connection, SESSION_E_TIMEDOUT);
1480 tcp_connection_cleanup (tc);
1484 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1486 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
1487 tcp_worker_stats_inc (wrk, no_buffer, 1);
1491 /* Try without increasing RTO a number of times. If this fails,
1492 * start growing RTO exponentially */
1494 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1495 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1497 b = vlib_get_buffer (vm, bi);
1498 tcp_init_buffer (vm, b);
1499 tcp_make_syn (tc, b);
1501 TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);
1503 /* This goes straight to ipx_lookup */
1504 tcp_push_ip_hdr (wrk, tc, b);
1505 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
1507 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
1508 tc->rto * TCP_TO_TIMER_TICK);
1512 * Got 0 snd_wnd from peer, try to do something about it.
1516 tcp_timer_persist_handler (tcp_connection_t * tc)
1518 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1519 u32 bi, max_snd_bytes, available_bytes, offset;
1520 tcp_main_t *tm = vnet_get_tcp_main ();
1521 vlib_main_t *vm = wrk->vm;
1526 /* Problem already solved or worse */
1527 if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
1528 || (tc->flags & TCP_CONN_FINSNT))
1529 goto update_scheduler;
1531 available_bytes = transport_max_tx_dequeue (&tc->connection);
1532 offset = tc->snd_nxt - tc->snd_una;
1534 /* Reprogram persist if no new bytes available to send. We may have data
1536 if (!available_bytes)
1538 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1542 if (available_bytes <= offset)
1543 goto update_scheduler;
1545 /* Increment RTO backoff */
1547 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1550 * Try to force the first unsent segment (or buffer)
1552 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1554 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1555 tcp_worker_stats_inc (wrk, no_buffer, 1);
1559 b = vlib_get_buffer (vm, bi);
1560 data = tcp_init_buffer (vm, b);
1562 tcp_validate_txf_size (tc, offset);
1563 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1564 max_snd_bytes = clib_min (tc->snd_mss,
1565 tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
1566 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1568 b->current_length = n_bytes;
1569 ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
1570 || tc->snd_una == tc->snd_nxt
1571 || tc->rto_boff > 1));
1573 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1575 tcp_bt_check_app_limited (tc);
1576 tcp_bt_track_tx (tc, n_bytes);
1579 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
1580 /* burst */ 0, /* update_snd_nxt */ 1);
1581 tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
1582 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1584 /* Just sent new data, enable retransmit */
1585 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
1591 if (tcp_is_descheduled (tc))
1592 transport_connection_reschedule (&tc->connection);
1596 * Retransmit first unacked segment
1599 tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1601 vlib_main_t *vm = wrk->vm;
1605 TCP_EVT (TCP_EVT_CC_EVT, tc, 1);
1607 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
1611 bi = vlib_get_buffer_index (vm, b);
1612 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1618 tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1621 u32 offset, n_segs = 0, n_written, bi, available_wnd;
1622 vlib_main_t *vm = wrk->vm;
1623 vlib_buffer_t *b = 0;
1625 offset = tc->snd_nxt - tc->snd_una;
1626 available_wnd = tc->snd_wnd - offset;
1627 burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
1629 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1630 tcp_bt_check_app_limited (tc);
1632 while (n_segs < burst_size)
1634 n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
1638 bi = vlib_get_buffer_index (vm, b);
1639 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1640 offset += n_written;
1643 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1644 tcp_bt_track_tx (tc, n_written);
1646 tc->snd_nxt += n_written;
1654 * Estimate send space using proportional rate reduction (RFC6937)
1657 tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
1662 pipe = tcp_flight_size (tc);
1663 prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);
1665 if (pipe > tc->ssthresh)
1667 space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
1673 limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
1674 space = clib_min (tc->ssthresh - pipe, limit);
1676 space = clib_max (space, prr_out ? 0 : tc->snd_mss);
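/* RFC 6937 in brief: while pipe exceeds ssthresh, sending is scaled by
 * ssthresh / prev_cwnd (proportional reduction); once pipe drops below
 * ssthresh, grow roughly like slow start but never beyond ssthresh - pipe. */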
1681 tcp_retransmit_should_retry_head (tcp_connection_t * tc,
1682 sack_scoreboard_t * sb)
1684 u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
1685 f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;
1687 if (tcp_fastrecovery_first (tc))
1690 return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
1694 tcp_max_tx_deq (tcp_connection_t * tc)
1696 return (transport_max_tx_dequeue (&tc->connection)
1697 - (tc->snd_nxt - tc->snd_una));
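/* i.e., bytes sitting in the tx fifo that have not been sent yet */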
1700 #define scoreboard_rescue_rxt_valid(_sb, _tc) \
1701 (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
1702 && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
1705 * Do retransmit with SACKs
1708 tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1711 u32 n_written = 0, offset, max_bytes, n_segs = 0;
1712 u8 snd_limited = 0, can_rescue = 0;
1713 u32 bi, max_deq, burst_bytes;
1714 sack_scoreboard_hole_t *hole;
1715 vlib_main_t *vm = wrk->vm;
1716 vlib_buffer_t *b = 0;
1717 sack_scoreboard_t *sb;
1720 ASSERT (tcp_in_cong_recovery (tc));
1722 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1723 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1726 tcp_program_retransmit (tc);
1730 if (tcp_in_recovery (tc))
1731 snd_space = tcp_available_cc_snd_space (tc);
1733 snd_space = tcp_fastrecovery_prr_snd_space (tc);
1735 if (snd_space < tc->snd_mss)
1740 /* Check if snd_una is a lost retransmit */
1741 if (pool_elts (sb->holes)
1742 && seq_gt (sb->high_sacked, tc->snd_congestion)
1743 && tc->rxt_head != tc->snd_una
1744 && tcp_retransmit_should_retry_head (tc, sb))
1746 max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
1747 n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b);
1750 tcp_program_retransmit (tc);
1753 bi = vlib_get_buffer_index (vm, b);
1754 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1757 tc->rxt_head = tc->snd_una;
1758 tc->rxt_delivered += n_written;
1759 tc->prr_delivered += n_written;
1760 ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
1763 tcp_fastrecovery_first_off (tc);
1765 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1766 hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
1768 max_deq = transport_max_tx_dequeue (&tc->connection);
1769 max_deq -= tc->snd_nxt - tc->snd_una;
1771 while (snd_space > 0 && n_segs < burst_size)
1773 hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
1777 /* We are out of lost holes to retransmit so send some new data. */
1778 if (max_deq > tc->snd_mss)
1783 /* Make sure we don't exceed available window and leave space
1784 * for one more packet, to avoid zero window acks */
1785 av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
1786 av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
1787 snd_space = clib_min (snd_space, av_wnd);
1788 snd_space = clib_min (max_deq, snd_space);
1789 burst_size = clib_min (burst_size - n_segs,
1790 snd_space / tc->snd_mss);
1791 burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
1792 n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
1793 if (max_deq > n_segs_new * tc->snd_mss)
1794 tcp_program_retransmit (tc);
1796 n_segs += n_segs_new;
1800 if (tcp_in_recovery (tc) || !can_rescue
1801 || scoreboard_rescue_rxt_valid (sb, tc))
1804 /* If rescue rxt undefined or less than snd_una then one segment of
1805 * up to SMSS octets that MUST include the highest outstanding
1806 * unSACKed sequence number SHOULD be returned, and RescueRxt set to
1807 * RecoveryPoint. HighRxt MUST NOT be updated.
1809 hole = scoreboard_last_hole (sb);
1810 max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
1811 max_bytes = clib_min (max_bytes, snd_space);
1812 offset = hole->end - tc->snd_una - max_bytes;
1813 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
1818 sb->rescue_rxt = tc->snd_congestion;
1819 bi = vlib_get_buffer_index (vm, b);
1820 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1825 max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
1826 max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
1830 offset = sb->high_rxt - tc->snd_una;
1831 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1833 ASSERT (n_written <= snd_space);
1835 /* Nothing left to retransmit */
1839 bi = vlib_get_buffer_index (vm, b);
1840 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1842 sb->high_rxt += n_written;
1843 ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));
1845 snd_space -= n_written;
1850 tcp_program_retransmit (tc);
1854 transport_connection_tx_pacer_reset_bucket (&tc->connection, 0);
1859 * Fast retransmit without SACK info
1862 tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1865 u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
1866 u32 burst_bytes, sent_bytes;
1867 vlib_main_t *vm = wrk->vm;
1868 int snd_space, n_segs = 0;
1872 ASSERT (tcp_in_cong_recovery (tc));
1873 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1875 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1876 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1879 tcp_program_retransmit (tc);
1883 snd_space = tcp_available_cc_snd_space (tc);
1884 cc_limited = snd_space < burst_bytes;
1886 if (!tcp_fastrecovery_first (tc))
1889 /* RFC 6582: [If a partial ack], retransmit the first unacknowledged segment */
1891 while (snd_space > 0 && n_segs < burst_size)
1893 max_bytes = clib_min (tc->snd_mss,
1894 tc->snd_congestion - tc->snd_una - offset);
1897 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1900 /* Nothing left to retransmit */
1904 bi = vlib_get_buffer_index (vm, b);
1905 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1906 snd_space -= n_written;
1907 offset += n_written;
1911 if (n_segs == burst_size)
1916 /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
1917 if (snd_space < tc->snd_mss || tc->snd_mss == 0)
1920 max_deq = transport_max_tx_dequeue (&tc->connection);
1921 max_deq -= tc->snd_nxt - tc->snd_una;
1924 snd_space = clib_min (max_deq, snd_space);
1925 burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
1926 n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
1927 if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
1928 tcp_program_retransmit (tc);
1929 n_segs += n_segs_now;
1933 tcp_fastrecovery_first_off (tc);
1935 sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
1936 sent_bytes = cc_limited ? burst_bytes : sent_bytes;
1937 transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
1943 tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
1947 if (!tc->pending_dupacks)
1949 if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
1950 || tc->state != TCP_STATE_ESTABLISHED)
1958 /* If we're supposed to send dupacks but have no ooo data
1959 * send only one ack */
1960 if (!vec_len (tc->snd_sacks))
1963 tc->dupacks_out += 1;
1964 tc->pending_dupacks = 0;
1968 /* Start with first sack block */
1969 tc->snd_sack_pos = 0;
1971 /* Generate enough dupacks to cover all sack blocks. Do not generate
1972 * more acks than the number of packets received. But do generate at
1973 * least 3, i.e., the number needed to signal congestion, if needed. */
1974 n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
1975 n_acks = clib_min (n_acks, tc->pending_dupacks);
1976 n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
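/* Example, assuming 3 sack blocks per ack: 7 sack blocks and 5 pending
 * dupacks yield 7 / 3 = 2 acks, bumped to 3 so the peer still sees enough
 * dupacks to trigger fast retransmit. */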
1977 for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
1980 if (n_acks < max_burst_size)
1982 tc->pending_dupacks = 0;
1983 tc->snd_sack_pos = 0;
1984 tc->dupacks_out += n_acks;
1989 TCP_DBG ("constrained by burst size");
1990 tc->pending_dupacks = n_acks - max_burst_size;
1991 tc->dupacks_out += max_burst_size;
1992 tcp_program_dupack (tc);
1993 return max_burst_size;
1998 tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
2000 tcp_worker_ctx_t *wrk;
2003 if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
2006 wrk = tcp_get_worker (tc->c_thread_index);
2008 if (tcp_opts_sack_permitted (&tc->rcv_opts))
2009 n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
2011 n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);
2017 tcp_session_custom_tx (void *conn, transport_send_params_t * sp)
2019 tcp_connection_t *tc = (tcp_connection_t *) conn;
2022 if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
2024 tc->flags &= ~TCP_CONN_RXT_PENDING;
2025 n_segs = tcp_do_retransmit (tc, sp->max_burst_size);
2028 if (!(tc->flags & TCP_CONN_SNDACK))
2031 tc->flags &= ~TCP_CONN_SNDACK;
2033 /* We have retransmitted packets and no dupack */
2034 if (n_segs && !tc->pending_dupacks)
2037 if (sp->max_burst_size <= n_segs)
2039 tcp_program_ack (tc);
2043 n_segs += tcp_send_acks (tc, sp->max_burst_size - n_segs);
2047 #endif /* CLIB_MARCH_VARIANT */
2050 tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
2051 u16 * next0, u32 * error0)
2053 ip_adjacency_t *adj;
2056 /* Not thread safe, but as long as the connection exists the adj should not be removed */
2058 ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
2060 if (ai == ADJ_INDEX_INVALID)
2062 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2063 *next0 = TCP_OUTPUT_NEXT_DROP;
2064 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2069 if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
2070 *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
2071 else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
2072 *next0 = TCP_OUTPUT_NEXT_IP_ARP;
2075 *next0 = TCP_OUTPUT_NEXT_DROP;
2076 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2078 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
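/* Link-local destinations are not resolvable through the regular FIB
 * lookup, so the neighbor adjacency is found directly and the packet goes
 * straight to ip6-rewrite, or to neighbor discovery if unresolved. */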
2082 tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2083 u32 * to_next, u32 n_bufs)
2085 tcp_connection_t *tc;
2091 for (i = 0; i < n_bufs; i++)
2093 b = vlib_get_buffer (vm, to_next[i]);
2094 if (!(b->flags & VLIB_BUFFER_IS_TRACED))
2096 th = vlib_buffer_get_current (b);
2097 tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2099 t = vlib_add_trace (vm, node, b, sizeof (*t));
2100 clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
2101 clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
2106 tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
2107 tcp_connection_t * tc0, u8 is_ip4)
2109 TCP_EVT (TCP_EVT_OUTPUT, tc0,
2110 ((tcp_header_t *) vlib_buffer_get_current (b0))->flags,
2111 b0->current_length);
2114 vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
2115 IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
2117 vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
2118 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
2122 tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
2124 if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
2127 u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;
2129 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
2130 data_len += b->total_length_not_including_first_buffer;
2132 if (PREDICT_TRUE (data_len <= tc->snd_mss))
2136 ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
2137 ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
2138 b->flags |= VNET_BUFFER_F_GSO;
2139 vnet_buffer2 (b)->gso_l4_hdr_sz =
2140 sizeof (tcp_header_t) + tc->snd_opts_len;
2141 vnet_buffer2 (b)->gso_size = tc->snd_mss;
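/* The oversized segment is left as one buffer chain; gso_size tells the
 * driver/hw to slice the payload into snd_mss sized segments on transmit. */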
2146 tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
2147 vlib_node_runtime_t * error_node, u16 * next0,
2150 /* If next_index is not drop use it */
2151 if (tc0->next_node_index)
2153 *next0 = tc0->next_node_index;
2154 vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
2158 *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
2161 vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
2162 vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
2168 if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
2169 tcp_output_handle_link_local (tc0, b0, next0, &error0);
2171 if (PREDICT_FALSE (error0))
2173 b0->error = error_node->errors[error0];
2182 tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2183 vlib_frame_t * frame, int is_ip4)
2185 u32 n_left_from, *from, thread_index = vm->thread_index;
2186 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2187 u16 nexts[VLIB_FRAME_SIZE], *next;
2189 from = vlib_frame_vector_args (frame);
2190 n_left_from = frame->n_vectors;
2191 tcp_set_time_now (tcp_get_worker (thread_index));
2193 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2194 tcp46_output_trace_frame (vm, node, from, n_left_from);
2196 vlib_get_buffers (vm, from, bufs, n_left_from);
2200 while (n_left_from >= 4)
2202 tcp_connection_t *tc0, *tc1;
2205 vlib_prefetch_buffer_header (b[2], STORE);
2206 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2208 vlib_prefetch_buffer_header (b[3], STORE);
2209 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2212 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2214 tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
2217 if (PREDICT_TRUE (!tc0 + !tc1 == 0))
2219 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2220 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2222 tcp_check_if_gso (tc0, b[0]);
2223 tcp_check_if_gso (tc1, b[1]);
2225 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2226 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2232 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2233 tcp_check_if_gso (tc0, b[0]);
2234 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2238 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2239 next[0] = TCP_OUTPUT_NEXT_DROP;
2243 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2244 tcp_check_if_gso (tc1, b[1]);
2245 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2249 b[1]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2250 next[1] = TCP_OUTPUT_NEXT_DROP;
2258 while (n_left_from > 0)
2260 tcp_connection_t *tc0;
2262 if (n_left_from > 1)
2264 vlib_prefetch_buffer_header (b[1], STORE);
2265 CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2268 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2271 if (PREDICT_TRUE (tc0 != 0))
2273 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2274 tcp_check_if_gso (tc0, b[0]);
2275 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2279 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2280 next[0] = TCP_OUTPUT_NEXT_DROP;
2288 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2289 vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
2290 TCP_ERROR_PKTS_SENT, frame->n_vectors);
2291 return frame->n_vectors;
2294 VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2295 vlib_frame_t * from_frame)
2297 return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2300 VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2301 vlib_frame_t * from_frame)
2303 return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2307 VLIB_REGISTER_NODE (tcp4_output_node) =
2309 .name = "tcp4-output",
2310 /* Takes a vector of packets. */
2311 .vector_size = sizeof (u32),
2312 .n_errors = TCP_N_ERROR,
2313 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2314 .error_strings = tcp_error_strings,
2315 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2317 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2318 foreach_tcp4_output_next
2321 .format_buffer = format_tcp_header,
2322 .format_trace = format_tcp_tx_trace,
2327 VLIB_REGISTER_NODE (tcp6_output_node) =
2329 .name = "tcp6-output",
2330 /* Takes a vector of packets. */
2331 .vector_size = sizeof (u32),
2332 .n_errors = TCP_N_ERROR,
2333 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2334 .error_strings = tcp_error_strings,
2335 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2337 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2338 foreach_tcp6_output_next
2341 .format_buffer = format_tcp_header,
2342 .format_trace = format_tcp_tx_trace,
2346 typedef enum _tcp_reset_next
2348 TCP_RESET_NEXT_DROP,
2349 TCP_RESET_NEXT_IP_LOOKUP,
2353 #define foreach_tcp4_reset_next \
2354 _(DROP, "error-drop") \
2355 _(IP_LOOKUP, "ip4-lookup")
2357 #define foreach_tcp6_reset_next \
2358 _(DROP, "error-drop") \
2359 _(IP_LOOKUP, "ip6-lookup")
2362 tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2363 vlib_frame_t * from_frame, u8 is_ip4)
2365 u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
2366 u32 n_left_from, next_index, *from, *to_next;
2368 from = vlib_frame_vector_args (from_frame);
2369 n_left_from = from_frame->n_vectors;
2371 next_index = node->cached_next_index;
2373 while (n_left_from > 0)
2377 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2379 while (n_left_from > 0 && n_left_to_next > 0)
2391 n_left_to_next -= 1;
2393 b0 = vlib_get_buffer (vm, bi0);
2394 tcp_make_reset_in_place (vm, b0, is_ip4);
2396 /* Prepare to send to IP lookup */
2397 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2399 b0->error = node->errors[error0];
2400 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
2401 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2403 th0 = vlib_buffer_get_current (b0);
2405 th0 = ip4_next_header ((ip4_header_t *) th0);
2407 th0 = ip6_next_header ((ip6_header_t *) th0);
2408 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2409 clib_memcpy_fast (&t0->tcp_header, th0,
2410 sizeof (t0->tcp_header));
2413 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2414 n_left_to_next, bi0, next0);
2416 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2418 return from_frame->n_vectors;
2421 VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2422 vlib_frame_t * from_frame)
2424 return tcp46_send_reset_inline (vm, node, from_frame, 1);
2427 VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2428 vlib_frame_t * from_frame)
2430 return tcp46_send_reset_inline (vm, node, from_frame, 0);
2434 VLIB_REGISTER_NODE (tcp4_reset_node) = {
2435 .name = "tcp4-reset",
2436 .vector_size = sizeof (u32),
2437 .n_errors = TCP_N_ERROR,
2438 .error_strings = tcp_error_strings,
2439 .n_next_nodes = TCP_RESET_N_NEXT,
2441 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2442 foreach_tcp4_reset_next
2445 .format_trace = format_tcp_tx_trace,
2450 VLIB_REGISTER_NODE (tcp6_reset_node) = {
2451 .name = "tcp6-reset",
2452 .vector_size = sizeof (u32),
2453 .n_errors = TCP_N_ERROR,
2454 .error_strings = tcp_error_strings,
2455 .n_next_nodes = TCP_RESET_N_NEXT,
2457 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2458 foreach_tcp6_reset_next
2461 .format_trace = format_tcp_tx_trace,
2466 * fd.io coding-style-patch-verification: ON
2469 * eval: (c-set-style "gnu")