/*
 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/tcp/tcp.h>
#include <vnet/tcp/tcp_inlines.h>
#include <math.h>
#include <vnet/ip/ip4_inlines.h>
#include <vnet/ip/ip6_inlines.h>

typedef enum _tcp_output_next
{
  TCP_OUTPUT_NEXT_DROP,
  TCP_OUTPUT_NEXT_IP_LOOKUP,
  TCP_OUTPUT_NEXT_IP_REWRITE,
  TCP_OUTPUT_NEXT_IP_ARP,
  TCP_OUTPUT_N_NEXT
} tcp_output_next_t;

#define foreach_tcp4_output_next		\
  _ (DROP, "error-drop")			\
  _ (IP_LOOKUP, "ip4-lookup")			\
  _ (IP_REWRITE, "ip4-rewrite")			\
  _ (IP_ARP, "ip4-arp")

#define foreach_tcp6_output_next		\
  _ (DROP, "error-drop")			\
  _ (IP_LOOKUP, "ip6-lookup")			\
  _ (IP_REWRITE, "ip6-rewrite")			\
  _ (IP_ARP, "ip6-discover-neighbor")

static char *tcp_error_strings[] = {
#define tcp_error(n,s) s,
#include <vnet/tcp/tcp_error.def>
#undef tcp_error
};

typedef struct
{
  tcp_header_t tcp_header;
  tcp_connection_t tcp_connection;
} tcp_tx_trace_t;

static u8 *
format_tcp_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
  tcp_connection_t *tc = &t->tcp_connection;
  u32 indent = format_get_indent (s);

  s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
	      format_tcp_state, tc->state, format_white_space, indent,
	      format_tcp_header, &t->tcp_header, 128);

  return s;
}

#ifndef CLIB_MARCH_VARIANT
static u8
tcp_window_compute_scale (u32 window)
{
  u8 wnd_scale = 0;
  while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
    wnd_scale++;
  return wnd_scale;
}
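
/* Worked example (illustrative arithmetic, not code from this file): with
 * TCP_WND_MAX of 65535, a 4MB (4194304 byte) fifo needs a scale of 7,
 * since 4194304 >> 6 == 65536 still exceeds the 16-bit window field while
 * 4194304 >> 7 == 32768 fits. */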

/**
 * TCP's initial window
 */
static u32
tcp_initial_wnd_unscaled (tcp_connection_t * tc)
{
  /* RFC 6928 recommends the value below. However, at the time our
   * connections are initialized, fifos may not be allocated. Therefore,
   * advertise the smallest possible unscaled window size and update once
   * fifos are assigned to the session.
   */
  /*
     tcp_update_rcv_mss (tc);
     TCP_IW_N_SEGMENTS * tc->mss;
   */
  return tcp_cfg.min_rx_fifo;
}

/**
 * Compute initial window and scale factor. As per RFC1323, window field in
 * SYN and SYN-ACK segments is never scaled.
 */
static u32
tcp_initial_window_to_advertise (tcp_connection_t * tc)
{
  /* Compute rcv wscale only if peer advertised support for it */
  if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
    tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);

  tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);

  return clib_min (tc->rcv_wnd, TCP_WND_MAX);
}

static void
tcp_update_rcv_wnd (tcp_connection_t * tc)
{
  u32 available_space, wnd;
  i32 observed_wnd;

  /*
   * Figure out how much space we have available
   */
  available_space = transport_max_rx_enqueue (&tc->connection);

  /*
   * Use the above and what we know about what we've previously advertised
   * to compute the new window
   */
  observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);

  /* Check if we are about to retract the window. Do the comparison before
   * rounding to avoid errors. Per RFC7323 sec. 2.4 we could remove this */
  if (PREDICT_FALSE ((i32) available_space < observed_wnd))
    {
      wnd = round_down_pow2 (clib_max (observed_wnd, 0), 1 << tc->rcv_wscale);
      TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
    }
  else
    {
      /* Make sure we have a multiple of 1 << rcv_wscale. We round down to
       * avoid advertising a window larger than what can be buffered */
      wnd = round_down_pow2 (available_space, 1 << tc->rcv_wscale);
    }

  if (PREDICT_FALSE (wnd < tc->rcv_opts.mss))
    wnd = 0;

  tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
}
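
/* Note on the rounding above (sketch, assuming a rcv_wscale of 7): the peer
 * reconstructs the window as advertised_value << 7, so any window that is
 * not a multiple of 128 would effectively be rounded remotely and could
 * overstate the space we can buffer. Rounding down locally keeps the
 * advertised window conservative. */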

/**
 * Compute and return window to advertise, scaled as per RFC1323
 */
static inline u32
tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
{
  if (state < TCP_STATE_ESTABLISHED)
    return tcp_initial_window_to_advertise (tc);

  tcp_update_rcv_wnd (tc);
  return tc->rcv_wnd >> tc->rcv_wscale;
}

static int
tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = tc->mss;
  len += TCP_OPTION_LEN_MSS;

  opts->flags |= TCP_OPTS_FLAG_WSCALE;
  opts->wscale = tc->rcv_wscale;
  len += TCP_OPTION_LEN_WINDOW_SCALE;

  opts->flags |= TCP_OPTS_FLAG_TSTAMP;
  opts->tsval = tcp_time_tstamp (tc->c_thread_index);
  opts->tsecr = 0;
  len += TCP_OPTION_LEN_TIMESTAMP;

  opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
  len += TCP_OPTION_LEN_SACK_PERMITTED;

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
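
/* Length check (illustrative, using the standard TCP option sizes): MSS (4)
 * + window scale (3) + timestamp (10) + SACK permitted (2) = 19 bytes,
 * which the alignment step above pads to 20 so the header length stays a
 * multiple of 4. */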

static int
tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags |= TCP_OPTS_FLAG_MSS;
  opts->mss = tc->mss;
  len += TCP_OPTION_LEN_MSS;

  if (tcp_opts_wscale (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_WSCALE;
      opts->wscale = tc->rcv_wscale;
      len += TCP_OPTION_LEN_WINDOW_SCALE;
    }

  if (tcp_opts_tstamp (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_time_tstamp (tc->c_thread_index);
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
      len += TCP_OPTION_LEN_SACK_PERMITTED;
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}

static int
tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
{
  u8 len = 0;

  opts->flags = 0;

  if (tcp_opts_tstamp (&tc->rcv_opts))
    {
      opts->flags |= TCP_OPTS_FLAG_TSTAMP;
      opts->tsval = tcp_tstamp (tc);
      opts->tsecr = tc->tsval_recent;
      len += TCP_OPTION_LEN_TIMESTAMP;
    }
  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    {
      if (vec_len (tc->snd_sacks))
	{
	  opts->flags |= TCP_OPTS_FLAG_SACK;
	  if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
	    tc->snd_sack_pos = 0;
	  opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
	  opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
	  opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
					  TCP_OPTS_MAX_SACK_BLOCKS);
	  tc->snd_sack_pos += opts->n_sack_blocks;
	  len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
	}
    }

  /* Align to needed boundary */
  len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
  return len;
}
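
/* Sizing sketch for the SACK branch above: each block costs
 * TCP_OPTION_LEN_SACK_BLOCK (8) bytes plus 2 bytes of kind/length, so e.g.
 * timestamps (10) + two SACK blocks (2 + 16) = 28 bytes, already 4-byte
 * aligned; the 40-byte option space therefore fits at most 3 blocks next
 * to timestamps, which is what TCP_OPTS_MAX_SACK_BLOCKS enforces. */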

always_inline int
tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
		  tcp_state_t state)
{
  switch (state)
    {
    case TCP_STATE_ESTABLISHED:
    case TCP_STATE_CLOSE_WAIT:
    case TCP_STATE_FIN_WAIT_1:
    case TCP_STATE_LAST_ACK:
    case TCP_STATE_CLOSING:
    case TCP_STATE_FIN_WAIT_2:
    case TCP_STATE_TIME_WAIT:
    case TCP_STATE_CLOSED:
      return tcp_make_established_options (tc, opts);
    case TCP_STATE_SYN_RCVD:
      return tcp_make_synack_options (tc, opts);
    case TCP_STATE_SYN_SENT:
      return tcp_make_syn_options (tc, opts);
    default:
      clib_warning ("State not handled! %d", state);
      return 0;
    }
}

/**
 * Update burst send vars
 *
 * - Updates snd_mss to reflect the effective segment size that we can send
 *   by taking into account all TCP options, including SACKs.
 * - Cache 'on the wire' options for reuse
 * - Updates receive window which can be reused for a burst.
 *
 * This should *only* be called when doing bursts.
 */
void
tcp_update_burst_snd_vars (tcp_connection_t * tc)
{
  tcp_main_t *tm = &tcp_main;

  /* Compute options to be used for connection. These may be reused when
   * sending data or to compute the effective mss (snd_mss) */
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
				       TCP_STATE_ESTABLISHED);

  /* XXX check if MTU has been updated */
  tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
  ASSERT (tc->snd_mss > 0);
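
  /* Illustration (assumed numbers): with an MTU-derived mss of 1460 and 12
   * bytes of timestamp options, snd_mss works out to 1448, i.e. the payload
   * bytes that actually fit in one segment on the wire. */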

  tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
		     &tc->snd_opts);

  tcp_update_rcv_wnd (tc);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_check_app_limited (tc);

  if (tc->snd_una == tc->snd_nxt)
    {
      tcp_cc_event (tc, TCP_CC_EVT_START_TX);
      tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST);
    }

  if (tc->flags & TCP_CONN_PSH_PENDING)
    {
      u32 max_deq = transport_max_tx_dequeue (&tc->connection);
      /* Last byte marked for push */
      tc->psh_seq = tc->snd_una + max_deq - 1;
    }
}
#endif /* CLIB_MARCH_VARIANT */

always_inline void *
tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    vlib_buffer_free_one (vm, b->next_buffer);
  /* Zero all flags but free list index and trace flag */
  b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
  b->current_data = 0;
  b->current_length = 0;
  b->total_length_not_including_first_buffer = 0;
  vnet_buffer (b)->tcp.flags = 0;
  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
  /* Leave enough space for headers */
  return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}

#ifndef CLIB_MARCH_VARIANT
static void *
tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
{
  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
  b->total_length_not_including_first_buffer = 0;
  b->current_data = 0;
  vnet_buffer (b)->tcp.flags = 0;
  /* Leave enough space for headers */
  return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
}

/* Compute TCP checksum in software when offloading is disabled for a
 * connection */
u16
ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
				 ip46_address_t * src, ip46_address_t * dst)
{
  ip_csum_t sum0;
  u16 payload_length_host_byte_order;
  u32 i;

  /* Initialize checksum with ip header. */
  sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) +
    clib_host_to_net_u16 (IP_PROTOCOL_TCP);
  payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);

  for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++)
    {
      sum0 = ip_csum_with_carry
	(sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword));
      sum0 = ip_csum_with_carry
	(sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword));
    }

  return ip_calculate_l4_checksum (vm, p0, sum0,
				   payload_length_host_byte_order, NULL, 0,
				   NULL);
}

u16
ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
				 ip46_address_t * src, ip46_address_t * dst)
{
  ip_csum_t sum0;
  u32 payload_length_host_byte_order;

  payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
  sum0 =
    clib_host_to_net_u32 (payload_length_host_byte_order +
			  (IP_PROTOCOL_TCP << 16));

  sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32));
  sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32));

  return ip_calculate_l4_checksum (vm, p0, sum0,
				   payload_length_host_byte_order, NULL, 0,
				   NULL);
}

static inline u16
tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u16 checksum = 0;

  if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
    {
      tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
      vlib_main_t *vm = wrk->vm;

      if (tc->c_is_ip4)
	checksum = ip4_tcp_compute_checksum_custom
	  (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
      else
	checksum = ip6_tcp_compute_checksum_custom
	  (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
    }
  else
    {
      vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
    }

  return checksum;
}

static inline void
tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
		u8 flags)
{
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 wnd;

  wnd = tcp_window_to_advertise (tc, state);

  /* Make and write options */
  tcp_opts_len = tcp_make_established_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
			     tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);

  tcp_options_write ((u8 *) (th + 1), snd_opts);

  th->checksum = tcp_compute_checksum (tc, b);

  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

  if (wnd == 0)
    {
      transport_rx_fifo_req_deq_ntf (&tc->connection);
      tcp_zero_rwnd_sent_on (tc);
    }
  else
    tcp_zero_rwnd_sent_off (tc);
}

/**
 * Convert buffer to ACK
 */
static inline void
tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
  TCP_EVT (TCP_EVT_ACK_SENT, tc);
  tc->rcv_las = tc->rcv_nxt;
}

/**
 * Convert buffer to FIN-ACK
 */
static void
tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
}

/**
 * Convert buffer to SYN
 */
void
tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
{
  u8 tcp_hdr_opts_len, tcp_opts_len;
  tcp_header_t *th;
  u16 initial_wnd;
  tcp_options_t snd_opts;

  initial_wnd = tcp_initial_window_to_advertise (tc);

  /* Make and write options */
  clib_memset (&snd_opts, 0, sizeof (snd_opts));
  tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
			     tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
			     initial_wnd);
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  tcp_options_write ((u8 *) (th + 1), &snd_opts);
  th->checksum = tcp_compute_checksum (tc, b);
}

/**
 * Convert buffer to SYN-ACK
 */
void
tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
{
  tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
  u8 tcp_opts_len, tcp_hdr_opts_len;
  tcp_header_t *th;
  u16 initial_wnd;

  clib_memset (snd_opts, 0, sizeof (*snd_opts));
  initial_wnd = tcp_initial_window_to_advertise (tc);
  tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
  tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);

  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
			     tc->rcv_nxt, tcp_hdr_opts_len,
			     TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
  tcp_options_write ((u8 *) (th + 1), snd_opts);

  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  th->checksum = tcp_compute_checksum (tc, b);
}

always_inline void
tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
			  u8 is_ip4, u32 fib_index)
{
  tcp_main_t *tm = &tcp_main;
  vlib_main_t *vm = wrk->vm;

  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;

  vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
  vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;

  session_add_pending_tx_buffer (vm->thread_index, bi,
				 tm->ipl_next_node[!is_ip4]);

  if (vm->thread_index == 0 && vlib_num_workers ())
    session_queue_run_on_main_thread (wrk->vm);
}

always_inline void
tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
		       u8 is_ip4)
{
  b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;

  session_add_pending_tx_buffer (wrk->vm->thread_index, bi,
				 wrk->tco_next_node[!is_ip4]);
}
#endif /* CLIB_MARCH_VARIANT */

static void
tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b, u8 is_ip4)
{
  ip4_header_t *ih4;
  ip6_header_t *ih6;
  tcp_header_t *th;
  ip4_address_t src_ip4, dst_ip4;
  ip6_address_t src_ip6, dst_ip6;
  u16 src_port, dst_port;
  u32 tmp, len, seq, ack;
  u8 flags;

  /* Find IP and TCP headers */
  th = tcp_buffer_hdr (b);

  /* Save src and dst ip */
  if (is_ip4)
    {
      ih4 = vlib_buffer_get_current (b);
      ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
      src_ip4.as_u32 = ih4->src_address.as_u32;
      dst_ip4.as_u32 = ih4->dst_address.as_u32;
    }
  else
    {
      ih6 = vlib_buffer_get_current (b);
      ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
      clib_memcpy_fast (&src_ip6, &ih6->src_address, sizeof (ip6_address_t));
      clib_memcpy_fast (&dst_ip6, &ih6->dst_address, sizeof (ip6_address_t));
    }

  src_port = th->src_port;
  dst_port = th->dst_port;
  flags = TCP_FLAG_RST;

  /*
   * RFC 793. If the ACK bit is off, sequence number zero is used,
   *  <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
   * If the ACK bit is on,
   *  <SEQ=SEG.ACK><CTL=RST>
   */
  if (tcp_ack (th))
    {
      seq = th->ack_number;
      ack = 0;
    }
  else
    {
      flags |= TCP_FLAG_ACK;
      tmp = clib_net_to_host_u32 (th->seq_number);
      len = vnet_buffer (b)->tcp.data_len + tcp_is_syn (th) + tcp_is_fin (th);
      ack = clib_host_to_net_u32 (tmp + len);
      seq = 0;
    }
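
  /* Example of the RFC 793 rules above (hypothetical segment): for an
   * unacceptable SYN with seq 1000 and no data, the ACK bit is off, so the
   * RST carries seq 0 and ack 1001 (the SYN counts for one sequence
   * number). For a stray ACK segment, the RST simply reuses the peer's ack
   * number as its seq. */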

  tcp_reuse_buffer (vm, b);
  th = vlib_buffer_push_tcp_net_order (b, dst_port, src_port, seq, ack,
				       sizeof (tcp_header_t), flags, 0);

  if (is_ip4)
    {
      ih4 = vlib_buffer_push_ip4 (vm, b, &dst_ip4, &src_ip4,
				  IP_PROTOCOL_TCP, 1);
      th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
    }
  else
    {
      int bogus = ~0;
      ih6 = vlib_buffer_push_ip6 (vm, b, &dst_ip6, &src_ip6, IP_PROTOCOL_TCP);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
      ASSERT (!bogus);
    }
}

#ifndef CLIB_MARCH_VARIANT
/**
 * Send reset without reusing existing buffer
 *
 * It extracts connection info out of the original packet
 */
void
tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
		      u32 thread_index, u8 is_ip4)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi, sw_if_index, fib_index;
  u8 tcp_hdr_len, flags = 0;
  tcp_header_t *th, *pkt_th;
  u32 seq, ack;
  ip4_header_t *ih4, *pkt_ih4;
  ip6_header_t *ih6, *pkt_ih6;
  fib_protocol_t fib_proto;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
  fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
  fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
  tcp_init_buffer (vm, b);

  /* Make and write options */
  tcp_hdr_len = sizeof (tcp_header_t);

  if (is_ip4)
    {
      pkt_ih4 = vlib_buffer_get_current (pkt);
      pkt_th = ip4_next_header (pkt_ih4);
    }
  else
    {
      pkt_ih6 = vlib_buffer_get_current (pkt);
      pkt_th = ip6_next_header (pkt_ih6);
    }

  if (tcp_ack (pkt_th))
    {
      flags = TCP_FLAG_RST;
      seq = pkt_th->ack_number;
      ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
    }
  else
    {
      flags = TCP_FLAG_RST | TCP_FLAG_ACK;
      seq = 0;
      ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
    }

  th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
				       seq, ack, tcp_hdr_len, flags, 0);

  /* Swap src and dst ip */
  if (is_ip4)
    {
      ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
      ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
				  &pkt_ih4->src_address, IP_PROTOCOL_TCP,
				  tcp_csum_offload (tc));
      th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
    }
  else
    {
      int bogus = ~0;
      ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
	      0x60);
      ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address,
					 &pkt_ih6->src_address,
					 IP_PROTOCOL_TCP,
					 tc->ipv6_flow_label);
      th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
      ASSERT (!bogus);
    }

  tcp_enqueue_to_ip_lookup (wrk, b, bi, is_ip4, fib_index);
  TCP_EVT (TCP_EVT_RST_SENT, tc);
  vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
			       TCP_ERROR_RST_SENT, 1);
}

/**
 * Build and set reset packet for connection
 */
void
tcp_send_reset (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;
  tcp_header_t *th;
  u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
  u8 flags;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);

  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
  tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
  advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
  flags = TCP_FLAG_RST;
  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
			     tc->rcv_nxt, tcp_hdr_opts_len, flags,
			     advertise_wnd);
  opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
  th->checksum = tcp_compute_checksum (tc, b);
  ASSERT (opts_write_len == tc->snd_opts_len);
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_RST_SENT, tc);
  vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
			       TCP_ERROR_RST_SENT, 1);
}

static void
tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		 vlib_buffer_t * b)
{
  if (tc->c_is_ip4)
    {
      vlib_buffer_push_ip4 (wrk->vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4,
			    IP_PROTOCOL_TCP, tcp_csum_offload (tc));
    }
  else
    {
      vlib_buffer_push_ip6_custom (wrk->vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6,
				   IP_PROTOCOL_TCP, tc->ipv6_flow_label);
    }
}

/**
 * Send SYN
 *
 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
 * The packet is not forwarded through tcpx_output to avoid doing lookups
 * in the half_open pool.
 */
void
tcp_send_syn (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  /*
   * Setup retransmit and establish timers before requesting buffer
   * such that we can return if we've run out.
   */
  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
		    tc->rto * TCP_TO_TIMER_TICK);

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_syn (tc, b);

  /* Measure RTT with this */
  tc->rtt_ts = tcp_time_now_us (vlib_num_workers () ? 1 : 0);
  tc->rtt_seq = tc->snd_nxt;

  tcp_push_ip_hdr (wrk, tc, b);
  tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
  TCP_EVT (TCP_EVT_SYN_SENT, tc);
}

void
tcp_send_synack (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  ASSERT (tc->snd_una != tc->snd_nxt);
  tcp_retransmit_timer_update (&wrk->timer_wheel, tc);

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_synack (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
}

/**
 * Send FIN
 */
void
tcp_send_fin (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;
  u8 fin_snt = 0;

  fin_snt = tc->flags & TCP_CONN_FINSNT;
  if (fin_snt)
    tc->snd_nxt -= 1;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      /* Out of buffers so program fin retransmit ASAP */
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
      if (fin_snt)
	tc->snd_nxt += 1;
      else
	/* Make sure retransmit retries a fin not data */
	tc->flags |= TCP_CONN_FINSNT;
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  /* If we have non-dupacks programmed, no need to send them */
  if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
    tc->flags &= ~TCP_CONN_SNDACK;

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_fin (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
  TCP_EVT (TCP_EVT_FIN_SENT, tc);
  /* Account for the FIN */
  tc->snd_nxt += 1;
  tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
  if (!fin_snt)
    {
      tc->flags |= TCP_CONN_FINSNT;
      tc->flags &= ~TCP_CONN_FINPNDG;
    }
}

/**
 * Push TCP header and update connection variables. Should only be called
 * for segments with data, not for 'control' packets.
 */
always_inline void
tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
		u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
{
  u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
  u32 advertise_wnd, data_len;
  tcp_main_t *tm = &tcp_main;
  tcp_header_t *th;

  data_len = b->current_length;
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    data_len += b->total_length_not_including_first_buffer;

  vnet_buffer (b)->tcp.flags = 0;
  vnet_buffer (b)->tcp.connection_index = tc->c_c_index;

  if (compute_opts)
    tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);

  tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);

  if (maybe_burst)
    advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
  else
    advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);

  if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
    {
      if (seq_geq (tc->psh_seq, snd_nxt)
	  && seq_lt (tc->psh_seq, snd_nxt + data_len))
	flags |= TCP_FLAG_PSH;
    }
  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
			     tc->rcv_nxt, tcp_hdr_opts_len, flags,
			     advertise_wnd);

  if (maybe_burst)
    {
      clib_memcpy_fast ((u8 *) (th + 1),
			tm->wrk_ctx[tc->c_thread_index].cached_opts,
			tc->snd_opts_len);
    }
  else
    {
      u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
      ASSERT (len == tc->snd_opts_len);
    }

  /*
   * Update connection variables
   */

  if (update_snd_nxt)
    tc->snd_nxt += data_len;
  tc->rcv_las = tc->rcv_nxt;

  tc->bytes_out += data_len;
  tc->data_segs_out += 1;

  th->checksum = tcp_compute_checksum (tc, b);

  TCP_EVT (TCP_EVT_PKTIZE, tc);
}

always_inline u32
tcp_buffer_len (vlib_buffer_t * b)
{
  u32 data_len = b->current_length;
  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    data_len += b->total_length_not_including_first_buffer;
  return data_len;
}

u32
tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
{
  tcp_connection_t *tc = (tcp_connection_t *) tconn;

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_track_tx (tc, tcp_buffer_len (b));

  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
		  /* update_snd_nxt */ 1);

  tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
  /* If not tracking an ACK, start tracking */
  if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
    {
      tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
      tc->rtt_seq = tc->snd_nxt;
    }
  if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
    {
      tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
      tcp_retransmit_timer_set (&wrk->timer_wheel, tc);
      tc->rto_boff = 0;
    }
  return 0;
}

void
tcp_send_ack (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi;

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_update_rcv_wnd (tc);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }
  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_ack (tc, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}

void
tcp_program_ack (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_SNDACK))
    {
      session_add_self_custom_tx_evt (&tc->connection, 1);
      tc->flags |= TCP_CONN_SNDACK;
    }
}
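
/* Note: tcp_program_ack does not build a packet itself; it requests a
 * custom tx event from the session layer, and the ACK is emitted later from
 * tcp_session_custom_tx via tcp_send_acks. The flag guards against
 * programming more than one event per dispatch cycle. */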

void
tcp_program_dupack (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_SNDACK))
    {
      session_add_self_custom_tx_evt (&tc->connection, 1);
      tc->flags |= TCP_CONN_SNDACK;
    }
  if (tc->pending_dupacks < 255)
    tc->pending_dupacks += 1;
}

void
tcp_program_retransmit (tcp_connection_t * tc)
{
  if (!(tc->flags & TCP_CONN_RXT_PENDING))
    {
      session_add_self_custom_tx_evt (&tc->connection, 0);
      tc->flags |= TCP_CONN_RXT_PENDING;
    }
}

/**
 * Send window update ack
 *
 * Ensures that it will be sent only once, after a zero rwnd has been
 * advertised in a previous ack, and only if rwnd has grown beyond a
 * configurable value.
 */
void
tcp_send_window_update_ack (tcp_connection_t * tc)
{
  if (tcp_zero_rwnd_sent (tc))
    {
      tcp_update_rcv_wnd (tc);
      if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
	{
	  tcp_zero_rwnd_sent_off (tc);
	  tcp_program_ack (tc);
	}
    }
}

/**
 * Allocate a new buffer and build a new tcp segment
 *
 * @param wrk		tcp worker
 * @param tc		connection for which the segment will be allocated
 * @param offset	offset of the first byte in the tx fifo
 * @param max_deq_bytes	segment size
 * @param[out] b	pointer to buffer allocated
 *
 * @return	the number of bytes in the segment or 0 if buffer cannot be
 *		allocated or no data available
 */
static int
tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		     u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
{
  u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
  vlib_main_t *vm = wrk->vm;
  u32 bi, seg_size;
  int n_bytes = 0;
  u8 *data;

  seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;

  /*
   * Prepare options
   */
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);

  /*
   * Allocate and fill in buffer(s)
   */

  /* Easy case, buffer size greater than mss */
  if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
    {
      if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
	{
	  tcp_worker_stats_inc (wrk, no_buffer, 1);
	  return 0;
	}
      *b = vlib_get_buffer (vm, bi);
      data = tcp_init_buffer (vm, *b);
      n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
					    max_deq_bytes);
      ASSERT (n_bytes == max_deq_bytes);
      b[0]->current_length = n_bytes;
      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
		      /* burst */ 0, /* update_snd_nxt */ 0);
    }
  /* Split mss into multiple buffers */
  else
    {
      u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
      u16 n_peeked, len_to_deq;
      vlib_buffer_t *chain_b, *prev_b;
      int i;

      /* Make sure we have enough buffers */
      n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
      vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
			    CLIB_CACHE_LINE_BYTES);
      n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
      if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
	{
	  if (n_bufs)
	    vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
	  tcp_worker_stats_inc (wrk, no_buffer, 1);
	  return 0;
	}

      *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
      data = tcp_init_buffer (vm, *b);
      n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
					    bytes_per_buffer -
					    TRANSPORT_MAX_HDRS_LEN);
      b[0]->current_length = n_bytes;
      b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      b[0]->total_length_not_including_first_buffer = 0;
      max_deq_bytes -= n_bytes;

      chain_b = *b;
      for (i = 1; i < n_bufs_per_seg; i++)
	{
	  prev_b = chain_b;
	  len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
	  chain_bi = wrk->tx_buffers[--n_bufs];
	  chain_b = vlib_get_buffer (vm, chain_bi);
	  chain_b->current_data = 0;
	  data = vlib_buffer_get_current (chain_b);
	  n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
						 offset + n_bytes,
						 len_to_deq);
	  ASSERT (n_peeked == len_to_deq);
	  n_bytes += n_peeked;
	  chain_b->current_length = n_peeked;
	  chain_b->next_buffer = 0;

	  /* update previous buffer */
	  prev_b->next_buffer = chain_bi;
	  prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;

	  max_deq_bytes -= n_peeked;
	  b[0]->total_length_not_including_first_buffer += n_peeked;
	}

      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
		      /* burst */ 0, /* update_snd_nxt */ 0);

      if (PREDICT_FALSE (n_bufs))
	{
	  clib_warning ("not all buffers consumed");
	  vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
	}
    }

  ASSERT (n_bytes > 0);
  ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);

  return n_bytes;
}
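
/* Chaining sketch (assumed sizes): with 2048-byte buffers and a
 * max_deq_bytes of 4096, seg_size slightly exceeds two buffers once header
 * room is added, so ceil() yields 3 buffers; the first takes what fits
 * after TRANSPORT_MAX_HDRS_LEN of headroom and the rest are linked through
 * next_buffer with VLIB_BUFFER_NEXT_PRESENT set. */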

/**
 * Build a retransmit segment
 *
 * @return the number of bytes in the segment or 0 if there's nothing to
 *         retransmit
 */
static u32
tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
				tcp_connection_t * tc, u32 offset,
				u32 max_deq_bytes, vlib_buffer_t ** b)
{
  u32 start, available_bytes;
  int n_bytes = 0;

  ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
  ASSERT (max_deq_bytes != 0);

  /*
   * Make sure we can retransmit something
   */
  available_bytes = transport_max_tx_dequeue (&tc->connection);
  ASSERT (available_bytes >= offset);
  available_bytes -= offset;
  if (!available_bytes)
    return 0;

  max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
  max_deq_bytes = clib_min (available_bytes, max_deq_bytes);

  start = tc->snd_una + offset;
  ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));

  n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
  if (!n_bytes)
    return 0;

  tc->snd_rxt_bytes += n_bytes;

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_track_rxt (tc, start, start + n_bytes);

  tc->bytes_retrans += n_bytes;
  tc->segs_retrans += 1;
  tcp_worker_stats_inc (wrk, rxt_segs, 1);
  TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);

  return n_bytes;
}

static void
tcp_check_sack_reneging (tcp_connection_t * tc)
{
  sack_scoreboard_t *sb = &tc->sack_sb;
  sack_scoreboard_hole_t *hole;

  hole = scoreboard_first_hole (sb);
  if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
    return;

  scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
}

/**
 * Reset congestion control, switch cwnd to loss window and try again.
 */
static void
tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
{
  TCP_EVT (TCP_EVT_CC_EVT, tc, 6);

  tc->prev_ssthresh = tc->ssthresh;
  tc->prev_cwnd = tc->cwnd;

  /* If we entered loss without fast recovery, notify cc algo of the
   * congestion event such that it can update ssthresh and its state */
  if (!tcp_in_fastrecovery (tc))
    tcp_cc_congestion (tc);

  /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
  tcp_cc_loss (tc);

  tc->cwnd_acc_bytes = 0;
  tc->tr_occurences += 1;
  tc->sack_sb.reorder = TCP_DUPACK_THRESHOLD;
  tcp_recovery_on (tc);
}

void
tcp_timer_retransmit_handler (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  u32 bi, n_bytes;

  tcp_worker_stats_inc (wrk, tr_events, 1);

  /* Should be handled by a different handler */
  if (PREDICT_FALSE (tc->state == TCP_STATE_SYN_SENT))
    return;

  /* Wait-close and retransmit could pop at the same time */
  if (tc->state == TCP_STATE_CLOSED)
    return;

  if (tc->state >= TCP_STATE_ESTABLISHED)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      /* Lost FIN, retransmit and return */
      if (tc->flags & TCP_CONN_FINSNT)
	{
	  tcp_send_fin (tc);
	  tc->rto_boff += 1;
	  tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
	  return;
	}

      /* Shouldn't be here */
      if (tc->snd_una == tc->snd_nxt)
	{
	  ASSERT (!tcp_in_recovery (tc));
	  tc->rto_boff = 0;
	  return;
	}

      /* We're not in recovery so make sure rto_boff is 0. Can be non 0 due
       * to persist timer timeout */
      if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
	{
	  tc->rto_boff = 0;
	  tcp_update_rto (tc);
	}

      /* Peer is dead or network connectivity is lost. Close connection.
       * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
       * a min rto of 0.2s we need to retry about 8 times. */
      if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
	{
	  tcp_send_reset (tc);
	  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
	  session_transport_closing_notify (&tc->connection);
	  session_transport_closed_notify (&tc->connection);
	  tcp_connection_timers_reset (tc);
	  tcp_program_cleanup (wrk, tc);
	  tcp_worker_stats_inc (wrk, tr_abort, 1);
	  return;
	}

      if (tcp_opts_sack_permitted (&tc->rcv_opts))
	tcp_check_sack_reneging (tc);

      /* Update send congestion to make sure that rxt has data to send */
      tc->snd_congestion = tc->snd_nxt;

      /* Send the first unacked segment. If we're short on buffers, return
       * as soon as possible */
      n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
      n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
      if (!n_bytes)
	{
	  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
	  return;
	}

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      tc->rto_boff += 1;
      tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
      tcp_retransmit_timer_update (&wrk->timer_wheel, tc);

      if (tc->rto_boff == 1)
	{
	  tcp_cc_init_rxt_timeout (tc);
	  /* Record timestamp. Eifel detection algorithm RFC3522 */
	  tc->snd_rxt_ts = tcp_tstamp (tc);
	}

      if (tcp_opts_sack_permitted (&tc->rcv_opts))
	scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);

      tcp_program_retransmit (tc);
    }
  /* Retransmit SYN-ACK */
  else if (tc->state == TCP_STATE_SYN_RCVD)
    {
      TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

      /* Passive open establish timeout */
      if (tc->rto > TCP_ESTABLISH_TIME >> 1)
	{
	  tcp_connection_set_state (tc, TCP_STATE_CLOSED);
	  tcp_connection_timers_reset (tc);
	  tcp_program_cleanup (wrk, tc);
	  tcp_worker_stats_inc (wrk, tr_abort, 1);
	  return;
	}

      if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
	{
	  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
	  tcp_worker_stats_inc (wrk, no_buffer, 1);
	  return;
	}

      tc->rto_boff += 1;
      if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
	tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

      ASSERT (tc->snd_una != tc->snd_nxt);
      tcp_retransmit_timer_update (&wrk->timer_wheel, tc);

      b = vlib_get_buffer (vm, bi);
      tcp_init_buffer (vm, b);
      tcp_make_synack (tc, b);
      TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);

      /* Retransmit timer already updated, just enqueue to output */
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
    }
  else
    {
      ASSERT (tc->state == TCP_STATE_CLOSED);
      return;
    }
}

/**
 * SYN retransmit timer handler. Active open only.
 */
void
tcp_timer_retransmit_syn_handler (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  u32 bi;

  /* Note: the connection may have transitioned to ESTABLISHED... */
  if (PREDICT_FALSE (tc->state != TCP_STATE_SYN_SENT))
    return;

  /* Half-open connection actually moved to established but we were
   * waiting for syn retransmit to pop to call cleanup from the right
   * thread */
  if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
    {
      if (tcp_half_open_connection_cleanup (tc))
	TCP_DBG ("could not remove half-open connection");
      return;
    }

  TCP_EVT (TCP_EVT_CC_EVT, tc, 2);

  /* Active open establish timeout */
  if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
    {
      session_stream_connect_notify (&tc->connection, SESSION_E_TIMEDOUT);
      tcp_connection_cleanup (tc);
      return;
    }

  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  /* Try without increasing RTO a number of times. If this fails,
   * start growing RTO exponentially */
  tc->rto_boff += 1;
  if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
    tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  b = vlib_get_buffer (vm, bi);
  tcp_init_buffer (vm, b);
  tcp_make_syn (tc, b);

  TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);

  /* This goes straight to ipx_lookup */
  tcp_push_ip_hdr (wrk, tc, b);
  tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);

  tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
		    tc->rto * TCP_TO_TIMER_TICK);
}

/**
 * Got 0 snd_wnd from peer, try to do something about it.
 */
void
tcp_timer_persist_handler (tcp_connection_t * tc)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
  u32 bi, max_snd_bytes, available_bytes, offset;
  tcp_main_t *tm = vnet_get_tcp_main ();
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  int n_bytes = 0;
  u8 *data;

  /* Problem already solved or worse */
  if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
      || (tc->flags & TCP_CONN_FINSNT))
    goto update_scheduler;

  available_bytes = transport_max_tx_dequeue (&tc->connection);
  offset = tc->snd_nxt - tc->snd_una;

  /* Reprogram persist if no new bytes available to send. We may have data
   * next time */
  if (!available_bytes)
    {
      tcp_persist_timer_set (&wrk->timer_wheel, tc);
      return;
    }

  if (available_bytes <= offset)
    goto update_scheduler;

  /* Increment RTO backoff */
  tc->rto_boff += 1;
  tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);

  /*
   * Try to force the first unsent segment (or buffer)
   */
  if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
    {
      tcp_persist_timer_set (&wrk->timer_wheel, tc);
      tcp_worker_stats_inc (wrk, no_buffer, 1);
      return;
    }

  b = vlib_get_buffer (vm, bi);
  data = tcp_init_buffer (vm, b);

  tcp_validate_txf_size (tc, offset);
  tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
  max_snd_bytes = clib_min (tc->snd_mss,
			    tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
  n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
					max_snd_bytes);
  b->current_length = n_bytes;
  ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
			   || tc->snd_una == tc->snd_nxt
			   || tc->rto_boff > 1));

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    {
      tcp_bt_check_app_limited (tc);
      tcp_bt_track_tx (tc, n_bytes);
    }

  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
		  /* burst */ 0, /* update_snd_nxt */ 1);
  tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

  /* Just sent new data, enable retransmit */
  tcp_retransmit_timer_update (&wrk->timer_wheel, tc);

  return;

update_scheduler:

  if (tcp_is_descheduled (tc))
    transport_connection_reschedule (&tc->connection);
}

/**
 * Retransmit first unacked segment
 */
void
tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
{
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b;
  u32 bi, n_bytes;

  TCP_EVT (TCP_EVT_CC_EVT, tc, 1);

  n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
  if (!n_bytes)
    return;

  bi = vlib_get_buffer_index (vm, b);
  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
}

static int
tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		     u32 burst_size)
{
  u32 offset, n_segs = 0, n_written, bi, available_wnd;
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;

  offset = tc->snd_nxt - tc->snd_una;
  available_wnd = tc->snd_wnd - offset;
  burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);

  if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
    tcp_bt_check_app_limited (tc);

  while (n_segs < burst_size)
    {
      n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
      if (!n_written)
	goto done;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      offset += n_written;

      if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
	tcp_bt_track_tx (tc, n_written);

      tc->snd_nxt += n_written;
      n_segs += 1;
    }

done:
  return n_segs;
}

/**
 * Estimate send space using proportional rate reduction (RFC6937)
 */
int
tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
{
  u32 pipe, prr_out;
  int space;

  pipe = tcp_flight_size (tc);
  prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);

  if (pipe > tc->ssthresh)
    {
      space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
	- prr_out;
    }
  else
    {
      int limit;
      limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
      space = clib_min (tc->ssthresh - pipe, limit);
    }
  space = clib_max (space, prr_out ? 0 : tc->snd_mss);
  return space;
}
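
/* The branches above follow RFC 6937: while pipe > ssthresh,
 * sndcnt = prr_delivered * ssthresh / RecoverFS - prr_out (here prev_cwnd
 * stands in for RecoverFS), which paces transmissions proportionally to
 * deliveries; once pipe falls to or below ssthresh, the slow-start-like
 * limit takes over and grows pipe back toward ssthresh. */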

static int
tcp_retransmit_should_retry_head (tcp_connection_t * tc,
				  sack_scoreboard_t * sb)
{
  u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
  f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;

  if (tcp_fastrecovery_first (tc))
    return 1;

  return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
}

static int
tcp_max_tx_deq (tcp_connection_t * tc)
{
  return (transport_max_tx_dequeue (&tc->connection)
	  - (tc->snd_nxt - tc->snd_una));
}

#define scoreboard_rescue_rxt_valid(_sb, _tc)		\
  (seq_geq (_sb->rescue_rxt, _tc->snd_una)		\
   && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))

/**
 * Do retransmit with SACKs
 */
static int
tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
		     u32 burst_size)
{
  u32 n_written = 0, offset, max_bytes, n_segs = 0;
  u8 snd_limited = 0, can_rescue = 0;
  u32 bi, max_deq, burst_bytes;
  sack_scoreboard_hole_t *hole;
  vlib_main_t *vm = wrk->vm;
  vlib_buffer_t *b = 0;
  sack_scoreboard_t *sb;
  int snd_space;

  ASSERT (tcp_in_cong_recovery (tc));

  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
  if (!burst_size)
    {
      tcp_program_retransmit (tc);
      return 0;
    }

  if (tcp_in_recovery (tc))
    snd_space = tcp_available_cc_snd_space (tc);
  else
    snd_space = tcp_fastrecovery_prr_snd_space (tc);

  if (snd_space < tc->snd_mss)
    goto done;

  sb = &tc->sack_sb;

  /* Check if snd_una is a lost retransmit */
  if (pool_elts (sb->holes)
      && seq_gt (sb->high_sacked, tc->snd_congestion)
      && tc->rxt_head != tc->snd_una
      && tcp_retransmit_should_retry_head (tc, sb))
    {
      max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
      n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b);
      if (!n_written)
	{
	  tcp_program_retransmit (tc);
	  goto done;
	}
      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      n_segs += 1;

      tc->rxt_head = tc->snd_una;
      tc->rxt_delivered += n_written;
      tc->prr_delivered += n_written;
      ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
    }

  tcp_fastrecovery_first_off (tc);

  TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
  hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);

  max_deq = transport_max_tx_dequeue (&tc->connection);
  max_deq -= tc->snd_nxt - tc->snd_una;

  while (snd_space > 0 && n_segs < burst_size)
    {
      hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
				       &snd_limited);
      if (!hole)
	{
	  /* We are out of lost holes to retransmit so send some new data. */
	  if (max_deq > tc->snd_mss)
	    {
	      u32 n_segs_new;
	      int av_wnd;

	      /* Make sure we don't exceed available window and leave space
	       * for one more packet, to avoid zero window acks */
	      av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
	      av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
	      snd_space = clib_min (snd_space, av_wnd);
	      snd_space = clib_min (max_deq, snd_space);
	      burst_size = clib_min (burst_size - n_segs,
				     snd_space / tc->snd_mss);
	      burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
	      n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
	      if (max_deq > n_segs_new * tc->snd_mss)
		tcp_program_retransmit (tc);

	      n_segs += n_segs_new;
	      goto done;
	    }

	  if (tcp_in_recovery (tc) || !can_rescue
	      || scoreboard_rescue_rxt_valid (sb, tc))
	    break;

	  /* If rescue rxt undefined or less than snd_una then one segment of
	   * up to SMSS octets that MUST include the highest outstanding
	   * unSACKed sequence number SHOULD be returned, and RescueRxt set to
	   * RecoveryPoint. HighRxt MUST NOT be updated.
	   */
	  hole = scoreboard_last_hole (sb);
	  max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
	  max_bytes = clib_min (max_bytes, snd_space);
	  offset = hole->end - tc->snd_una - max_bytes;
	  n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
						      max_bytes, &b);
	  if (!n_written)
	    goto done;

	  sb->rescue_rxt = tc->snd_congestion;
	  bi = vlib_get_buffer_index (vm, b);
	  tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
	  n_segs += 1;
	  break;
	}

      max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
      max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
      if (max_bytes == 0)
	break;

      offset = sb->high_rxt - tc->snd_una;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
						  &b);
      ASSERT (n_written <= snd_space);

      /* Nothing left to retransmit */
      if (n_written == 0)
	break;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);

      sb->high_rxt += n_written;
      ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));

      snd_space -= n_written;
      n_segs += 1;
    }

  if (hole)
    tcp_program_retransmit (tc);

done:
  transport_connection_tx_pacer_reset_bucket (&tc->connection, 0);
  return n_segs;
}

/**
 * Fast retransmit without SACK info
 */
static int
tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
			u32 burst_size)
{
  u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
  u32 burst_bytes, sent_bytes;
  vlib_main_t *vm = wrk->vm;
  int snd_space, n_segs = 0;
  u8 cc_limited = 0;
  vlib_buffer_t *b;

  ASSERT (tcp_in_cong_recovery (tc));
  TCP_EVT (TCP_EVT_CC_EVT, tc, 0);

  burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
  burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
  if (!burst_size)
    {
      tcp_program_retransmit (tc);
      return 0;
    }

  snd_space = tcp_available_cc_snd_space (tc);
  cc_limited = snd_space < burst_bytes;

  if (!tcp_fastrecovery_first (tc))
    goto send_unsent;

  /* RFC 6582: [If a partial ack], retransmit the first unacknowledged
   * segment. */
  while (snd_space > 0 && n_segs < burst_size)
    {
      max_bytes = clib_min (tc->snd_mss,
			    tc->snd_congestion - tc->snd_una - offset);
      if (!max_bytes)
	break;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
						  &b);

      /* Nothing left to retransmit */
      if (n_written == 0)
	break;

      bi = vlib_get_buffer_index (vm, b);
      tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
      snd_space -= n_written;
      offset += n_written;
      n_segs += 1;
    }

  if (n_segs == burst_size)
    goto done;

send_unsent:

  /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
  if (snd_space < tc->snd_mss || tc->snd_mss == 0)
    goto done;

  max_deq = transport_max_tx_dequeue (&tc->connection);
  max_deq -= tc->snd_nxt - tc->snd_una;
  if (max_deq)
    {
      snd_space = clib_min (max_deq, snd_space);
      burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
      n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
      if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
	tcp_program_retransmit (tc);
      n_segs += n_segs_now;
    }

done:
  tcp_fastrecovery_first_off (tc);

  sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
  sent_bytes = cc_limited ? burst_bytes : sent_bytes;
  transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);

  return n_segs;
}

static int
tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
{
  int j, n_acks;

  if (!tc->pending_dupacks)
    {
      if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
	  || tc->state != TCP_STATE_ESTABLISHED)
	{
	  tcp_send_ack (tc);
	  return 1;
	}
      return 0;
    }

  /* If we're supposed to send dupacks but have no ooo data
   * send only one ack */
  if (!vec_len (tc->snd_sacks))
    {
      tcp_send_ack (tc);
      tc->dupacks_out += 1;
      tc->pending_dupacks = 0;
      return 1;
    }

  /* Start with first sack block */
  tc->snd_sack_pos = 0;

  /* Generate enough dupacks to cover all sack blocks. Do not generate
   * more sacks than the number of packets received. But do generate at
   * least 3, i.e., the number needed to signal congestion, if needed. */
  n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
  n_acks = clib_min (n_acks, tc->pending_dupacks);
  n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
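
  /* Worked example (hypothetical numbers): with 7 sack blocks and 5 pending
   * dupacks, 7 / TCP_OPTS_MAX_SACK_BLOCKS (3) gives 2, capped by pending
   * dupacks to 2, then raised to min (5, 3) = 3 acks, each carrying up to 3
   * blocks starting from snd_sack_pos. */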

  for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
    tcp_send_ack (tc);

  if (n_acks < max_burst_size)
    {
      tc->pending_dupacks = 0;
      tc->snd_sack_pos = 0;
      tc->dupacks_out += n_acks;
      return n_acks;
    }
  else
    {
      TCP_DBG ("constrained by burst size");
      tc->pending_dupacks = n_acks - max_burst_size;
      tc->dupacks_out += max_burst_size;
      tcp_program_dupack (tc);
      return max_burst_size;
    }
}

static int
tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
{
  tcp_worker_ctx_t *wrk;
  u32 n_segs;

  if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
    return 0;

  wrk = tcp_get_worker (tc->c_thread_index);

  if (tcp_opts_sack_permitted (&tc->rcv_opts))
    n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
  else
    n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);

  return n_segs;
}

int
tcp_session_custom_tx (void *conn, transport_send_params_t * sp)
{
  tcp_connection_t *tc = (tcp_connection_t *) conn;
  u32 n_segs = 0;

  if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
    {
      tc->flags &= ~TCP_CONN_RXT_PENDING;
      n_segs = tcp_do_retransmit (tc, sp->max_burst_size);
    }

  if (!(tc->flags & TCP_CONN_SNDACK))
    return n_segs;

  tc->flags &= ~TCP_CONN_SNDACK;

  /* We have retransmitted packets and no dupack */
  if (n_segs && !tc->pending_dupacks)
    return n_segs;

  if (sp->max_burst_size <= n_segs)
    {
      tcp_program_ack (tc);
      return n_segs;
    }

  n_segs += tcp_send_acks (tc, sp->max_burst_size - n_segs);

  return n_segs;
}
#endif /* CLIB_MARCH_VARIANT */

always_inline void
tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
			      u16 * next0, u32 * error0)
{
  ip_adjacency_t *adj;
  adj_index_t ai;

  /* Not thread safe but as long as the connection exists the adj should
   * not be removed */
  ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
		     tc0->sw_if_index);
  if (ai == ADJ_INDEX_INVALID)
    {
      vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
      *next0 = TCP_OUTPUT_NEXT_DROP;
      *error0 = TCP_ERROR_LINK_LOCAL_RW;
      return;
    }

  adj = adj_get (ai);
  if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
    *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
  else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
    *next0 = TCP_OUTPUT_NEXT_IP_ARP;
  else
    {
      *next0 = TCP_OUTPUT_NEXT_DROP;
      *error0 = TCP_ERROR_LINK_LOCAL_RW;
    }
  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
}

static void
tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			  u32 * to_next, u32 n_bufs)
{
  tcp_connection_t *tc;
  tcp_tx_trace_t *t;
  vlib_buffer_t *b;
  tcp_header_t *th;
  int i;

  for (i = 0; i < n_bufs; i++)
    {
      b = vlib_get_buffer (vm, to_next[i]);
      if (!(b->flags & VLIB_BUFFER_IS_TRACED))
	continue;
      th = vlib_buffer_get_current (b);
      tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
			       vm->thread_index);
      t = vlib_add_trace (vm, node, b, sizeof (*t));
      clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
      clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
    }
}

always_inline void
tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
		    tcp_connection_t * tc0, u8 is_ip4)
{
  TCP_EVT (TCP_EVT_OUTPUT, tc0,
	   ((tcp_header_t *) vlib_buffer_get_current (b0))->flags,
	   b0->current_length);

  if (is_ip4)
    vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
			  IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
  else
    vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
				 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
}

always_inline void
tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
{
  if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
    return;

  u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;

  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
    data_len += b->total_length_not_including_first_buffer;

  if (PREDICT_TRUE (data_len <= tc->snd_mss))
    return;

  ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
  ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
  b->flags |= VNET_BUFFER_F_GSO;
  vnet_buffer2 (b)->gso_l4_hdr_sz = sizeof (tcp_header_t) + tc->snd_opts_len;
  vnet_buffer2 (b)->gso_size = tc->snd_mss;
}
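
/* Effect sketch (assumed values): for a connection with snd_mss of 1448 and
 * 12 bytes of options, a 10KB chained buffer gets gso_size 1448 and
 * gso_l4_hdr_sz 32, and the NIC (or the software GSO path) splits it into
 * MSS-sized segments on transmit. */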

always_inline void
tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
			  vlib_node_runtime_t * error_node, u16 * next0,
			  u8 is_ip4)
{
  /* If next_index is not drop use it */
  if (tc0->next_node_index)
    {
      *next0 = tc0->next_node_index;
      vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
    }
  else
    {
      *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
    }

  vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
  vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;

  if (!is_ip4)
    {
      u32 error0 = 0;

      if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
	tcp_output_handle_link_local (tc0, b0, next0, &error0);

      if (PREDICT_FALSE (error0))
	{
	  b0->error = error_node->errors[error0];
	  return;
	}
    }
}

always_inline uword
tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * frame, int is_ip4)
{
  u32 n_left_from, *from, thread_index = vm->thread_index;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  tcp_update_time_now (tcp_get_worker (thread_index));

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    tcp46_output_trace_frame (vm, node, from, n_left_from);

  vlib_get_buffers (vm, from, bufs, n_left_from);
  b = bufs;
  next = nexts;

  while (n_left_from >= 4)
    {
      tcp_connection_t *tc0, *tc1;

      {
	vlib_prefetch_buffer_header (b[2], STORE);
	CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);

	vlib_prefetch_buffer_header (b[3], STORE);
	CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
      }

      tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
				thread_index);
      tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
				thread_index);

      if (PREDICT_TRUE (!tc0 + !tc1 == 0))
	{
	  tcp_output_push_ip (vm, b[0], tc0, is_ip4);
	  tcp_output_push_ip (vm, b[1], tc1, is_ip4);

	  tcp_check_if_gso (tc0, b[0]);
	  tcp_check_if_gso (tc1, b[1]);

	  tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
	  tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
	}
      else
	{
	  if (tc0 != 0)
	    {
	      tcp_output_push_ip (vm, b[0], tc0, is_ip4);
	      tcp_check_if_gso (tc0, b[0]);
	      tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
	    }
	  else
	    {
	      b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
	      next[0] = TCP_OUTPUT_NEXT_DROP;
	    }
	  if (tc1 != 0)
	    {
	      tcp_output_push_ip (vm, b[1], tc1, is_ip4);
	      tcp_check_if_gso (tc1, b[1]);
	      tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
	    }
	  else
	    {
	      b[1]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
	      next[1] = TCP_OUTPUT_NEXT_DROP;
	    }
	}

      b += 2;
      next += 2;
      n_left_from -= 2;
    }
  while (n_left_from > 0)
    {
      tcp_connection_t *tc0;

      if (n_left_from > 1)
	{
	  vlib_prefetch_buffer_header (b[1], STORE);
	  CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
	}

      tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
				thread_index);

      if (PREDICT_TRUE (tc0 != 0))
	{
	  tcp_output_push_ip (vm, b[0], tc0, is_ip4);
	  tcp_check_if_gso (tc0, b[0]);
	  tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
	}
      else
	{
	  b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
	  next[0] = TCP_OUTPUT_NEXT_DROP;
	}

      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
			       TCP_ERROR_PKTS_SENT, frame->n_vectors);
  return frame->n_vectors;
}

VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
}

VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
{
  return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
}

VLIB_REGISTER_NODE (tcp4_output_node) =
{
  .name = "tcp4-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp4_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};

VLIB_REGISTER_NODE (tcp6_output_node) =
{
  .name = "tcp6-output",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
    foreach_tcp6_output_next
#undef _
  },
  .format_buffer = format_tcp_header,
  .format_trace = format_tcp_tx_trace,
};

typedef enum _tcp_reset_next
{
  TCP_RESET_NEXT_DROP,
  TCP_RESET_NEXT_IP_LOOKUP,
  TCP_RESET_N_NEXT
} tcp_reset_next_t;

#define foreach_tcp4_reset_next		\
  _ (DROP, "error-drop")		\
  _ (IP_LOOKUP, "ip4-lookup")

#define foreach_tcp6_reset_next		\
  _ (DROP, "error-drop")		\
  _ (IP_LOOKUP, "ip6-lookup")

static uword
tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * from_frame, u8 is_ip4)
{
  u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
  u32 n_left_from, next_index, *from, *to_next;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *b0;
	  tcp_tx_trace_t *t0;
	  tcp_header_t *th0;
	  u32 bi0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  tcp_make_reset_in_place (vm, b0, is_ip4);

	  /* Prepare to send to IP lookup */
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

	  b0->error = node->errors[error0];
	  b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      th0 = vlib_buffer_get_current (b0);
	      if (is_ip4)
		th0 = ip4_next_header ((ip4_header_t *) th0);
	      else
		th0 = ip6_next_header ((ip6_header_t *) th0);
	      t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	      clib_memcpy_fast (&t0->tcp_header, th0,
				sizeof (t0->tcp_header));
	    }

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return from_frame->n_vectors;
}

VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 1);
}

VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * from_frame)
{
  return tcp46_send_reset_inline (vm, node, from_frame, 0);
}

VLIB_REGISTER_NODE (tcp4_reset_node) = {
  .name = "tcp4-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp4_reset_next
#undef _
  },
  .format_trace = format_tcp_tx_trace,
};

VLIB_REGISTER_NODE (tcp6_reset_node) = {
  .name = "tcp6-reset",
  .vector_size = sizeof (u32),
  .n_errors = TCP_N_ERROR,
  .error_strings = tcp_error_strings,
  .n_next_nodes = TCP_RESET_N_NEXT,
  .next_nodes = {
#define _(s,n) [TCP_RESET_NEXT_##s] = n,
    foreach_tcp6_reset_next
#undef _
  },
  .format_trace = format_tcp_tx_trace,
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */