2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/tcp/tcp.h>
17 #include <vnet/tcp/tcp_inlines.h>
19 #include <vnet/ip/ip4_inlines.h>
20 #include <vnet/ip/ip6_inlines.h>
22 typedef enum _tcp_output_next
25 TCP_OUTPUT_NEXT_IP_LOOKUP,
26 TCP_OUTPUT_NEXT_IP_REWRITE,
27 TCP_OUTPUT_NEXT_IP_ARP,
31 #define foreach_tcp4_output_next \
32 _ (DROP, "error-drop") \
33 _ (IP_LOOKUP, "ip4-lookup") \
34 _ (IP_REWRITE, "ip4-rewrite") \
37 #define foreach_tcp6_output_next \
38 _ (DROP, "error-drop") \
39 _ (IP_LOOKUP, "ip6-lookup") \
40 _ (IP_REWRITE, "ip6-rewrite") \
41 _ (IP_ARP, "ip6-discover-neighbor")
43 static char *tcp_error_strings[] = {
44 #define tcp_error(n,s) s,
45 #include <vnet/tcp/tcp_error.def>
51 tcp_header_t tcp_header;
52 tcp_connection_t tcp_connection;
56 format_tcp_tx_trace (u8 * s, va_list * args)
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
61 tcp_connection_t *tc = &t->tcp_connection;
62 u32 indent = format_get_indent (s);
64 s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
65 format_tcp_state, tc->state, format_white_space, indent,
66 format_tcp_header, &t->tcp_header, 128);
71 #ifndef CLIB_MARCH_VARIANT
73 tcp_window_compute_scale (u32 window)
76 while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
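  /* e.g., for a 4MB fifo: 4MB >> 6 = 65536 > 65535 but 4MB >> 7 = 32768,
   * so a window scale of 7 is selected */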
82 * TCP's initial window
85 tcp_initial_wnd_unscaled (tcp_connection_t * tc)
87   /* RFC 6928 recommends a larger initial window. However, at the time our connections
88 * are initialized, fifos may not be allocated. Therefore, advertise the
89 * smallest possible unscaled window size and update once fifos are
90 * assigned to the session.
93 tcp_update_rcv_mss (tc);
94 TCP_IW_N_SEGMENTS * tc->mss;
96 return tcp_cfg.min_rx_fifo;
100 * Compute initial window and scale factor. As per RFC1323, window field in
101 * SYN and SYN-ACK segments is never scaled.
104 tcp_initial_window_to_advertise (tcp_connection_t * tc)
106 /* Compute rcv wscale only if peer advertised support for it */
107 if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
108 tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);
110 tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
112 return clib_min (tc->rcv_wnd, TCP_WND_MAX);
116 tcp_update_rcv_wnd (tcp_connection_t * tc)
118 u32 available_space, wnd;
122 * Figure out how much space we have available
124 available_space = transport_max_rx_enqueue (&tc->connection);
127 * Use the above and what we know about what we've previously advertised
128 * to compute the new window
130 observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
132 /* Check if we are about to retract the window. Do the comparison before
133 * rounding to avoid errors. Per RFC7323 sec. 2.4 we could remove this */
134 if (PREDICT_FALSE ((i32) available_space < observed_wnd))
136 wnd = round_down_pow2 (clib_max (observed_wnd, 0), 1 << tc->rcv_wscale);
137 TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
141 /* Make sure we have a multiple of 1 << rcv_wscale. We round down to
142 * avoid advertising a window larger than what can be buffered */
143 wnd = round_down_pow2 (available_space, 1 << tc->rcv_wscale);
146 if (PREDICT_FALSE (wnd < tc->rcv_opts.mss))
149 tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
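  /* e.g., with rcv_wscale of 7 the advertised window is a multiple of 128
   * bytes and is capped at 65535 << 7, roughly 8MB */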
153 * Compute and return window to advertise, scaled as per RFC1323
156 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
158 if (state < TCP_STATE_ESTABLISHED)
159 return tcp_initial_window_to_advertise (tc);
161 tcp_update_rcv_wnd (tc);
162 return tc->rcv_wnd >> tc->rcv_wscale;
166 tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
170 opts->flags |= TCP_OPTS_FLAG_MSS;
172 len += TCP_OPTION_LEN_MSS;
174 opts->flags |= TCP_OPTS_FLAG_WSCALE;
175 opts->wscale = tc->rcv_wscale;
176 len += TCP_OPTION_LEN_WINDOW_SCALE;
178 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
179 opts->tsval = tcp_time_tstamp (tc->c_thread_index);
181 len += TCP_OPTION_LEN_TIMESTAMP;
185 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
186 len += TCP_OPTION_LEN_SACK_PERMITTED;
189 /* Align to needed boundary */
190 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
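  /* e.g., MSS (4) + wscale (3) + timestamp (10) + sack-permitted (2) = 19
   * bytes of options, padded to 20 to keep the header 32-bit aligned */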
195 tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
199 opts->flags |= TCP_OPTS_FLAG_MSS;
201 len += TCP_OPTION_LEN_MSS;
203 if (tcp_opts_wscale (&tc->rcv_opts))
205 opts->flags |= TCP_OPTS_FLAG_WSCALE;
206 opts->wscale = tc->rcv_wscale;
207 len += TCP_OPTION_LEN_WINDOW_SCALE;
210 if (tcp_opts_tstamp (&tc->rcv_opts))
212 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
213 opts->tsval = tcp_time_tstamp (tc->c_thread_index);
214 opts->tsecr = tc->tsval_recent;
215 len += TCP_OPTION_LEN_TIMESTAMP;
218 if (tcp_opts_sack_permitted (&tc->rcv_opts))
220 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
221 len += TCP_OPTION_LEN_SACK_PERMITTED;
224 /* Align to needed boundary */
225 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
230 tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
236 if (tcp_opts_tstamp (&tc->rcv_opts))
238 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
239 opts->tsval = tcp_tstamp (tc);
240 opts->tsecr = tc->tsval_recent;
241 len += TCP_OPTION_LEN_TIMESTAMP;
243 if (tcp_opts_sack_permitted (&tc->rcv_opts))
245 if (vec_len (tc->snd_sacks))
247 opts->flags |= TCP_OPTS_FLAG_SACK;
248 if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
249 tc->snd_sack_pos = 0;
250 opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
251 opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
252 opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
253 TCP_OPTS_MAX_SACK_BLOCKS);
254 tc->snd_sack_pos += opts->n_sack_blocks;
255 len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
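	  /* A sack block is 8 bytes plus 2 bytes of kind/length; with a
	   * timestamp option present at most 3 blocks fit in the 40 bytes of
	   * option space, hence the clamp to TCP_OPTS_MAX_SACK_BLOCKS above */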
259 /* Align to needed boundary */
260 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
265 tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
270 case TCP_STATE_ESTABLISHED:
271 case TCP_STATE_CLOSE_WAIT:
272 case TCP_STATE_FIN_WAIT_1:
273 case TCP_STATE_LAST_ACK:
274 case TCP_STATE_CLOSING:
275 case TCP_STATE_FIN_WAIT_2:
276 case TCP_STATE_TIME_WAIT:
277 case TCP_STATE_CLOSED:
278 return tcp_make_established_options (tc, opts);
279 case TCP_STATE_SYN_RCVD:
280 return tcp_make_synack_options (tc, opts);
281 case TCP_STATE_SYN_SENT:
282 return tcp_make_syn_options (tc, opts);
284 clib_warning ("State not handled! %d", state);
290 * Update burst send vars
292 * - Updates snd_mss to reflect the effective segment size that we can send
293 * by taking into account all TCP options, including SACKs.
294  * - Caches 'on the wire' options for reuse
295 * - Updates receive window which can be reused for a burst.
297 * This should *only* be called when doing bursts
300 tcp_update_burst_snd_vars (tcp_connection_t * tc)
302 tcp_main_t *tm = &tcp_main;
304 /* Compute options to be used for connection. These may be reused when
305 * sending data or to compute the effective mss (snd_mss) */
306 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
307 TCP_STATE_ESTABLISHED);
309 /* XXX check if MTU has been updated */
310 tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
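  /* e.g., a negotiated mss of 1460 with 12 bytes of timestamp options gives
   * an effective snd_mss of 1448 */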
311 ASSERT (tc->snd_mss > 0);
313 tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
316 tcp_update_rcv_wnd (tc);
318 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
319 tcp_bt_check_app_limited (tc);
321 if (tc->snd_una == tc->snd_nxt)
323 tcp_cc_event (tc, TCP_CC_EVT_START_TX);
324 tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST);
327 if (tc->flags & TCP_CONN_PSH_PENDING)
329 u32 max_deq = transport_max_tx_dequeue (&tc->connection);
330 /* Last byte marked for push */
331 tc->psh_seq = tc->snd_una + max_deq - 1;
335 #endif /* CLIB_MARCH_VARIANT */
338 tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
340 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
341 vlib_buffer_free_one (vm, b->next_buffer);
342 /* Zero all flags but free list index and trace flag */
343 b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
345 b->current_length = 0;
346 b->total_length_not_including_first_buffer = 0;
347 vnet_buffer (b)->tcp.flags = 0;
348 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
349 /* Leave enough space for headers */
350 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
353 #ifndef CLIB_MARCH_VARIANT
355 tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
357 ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
358 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
359 b->total_length_not_including_first_buffer = 0;
361 vnet_buffer (b)->tcp.flags = 0;
362 /* Leave enough space for headers */
363 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
367 /* Compute TCP checksum in software when offloading is disabled for a connection */
369 ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
370 ip46_address_t * src, ip46_address_t * dst)
373 u16 payload_length_host_byte_order;
376 /* Initialize checksum with ip header. */
377 sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) +
378 clib_host_to_net_u16 (IP_PROTOCOL_TCP);
379 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
381 for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++)
383 sum0 = ip_csum_with_carry
384 (sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword));
385 sum0 = ip_csum_with_carry
386 (sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword));
389 return ip_calculate_l4_checksum (vm, p0, sum0,
390 payload_length_host_byte_order, NULL, 0,
395 ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
396 ip46_address_t * src, ip46_address_t * dst)
399 u32 payload_length_host_byte_order;
401 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
403 clib_host_to_net_u32 (payload_length_host_byte_order +
404 (IP_PROTOCOL_TCP << 16));
406 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32));
407 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32));
409 return ip_calculate_l4_checksum (vm, p0, sum0,
410 payload_length_host_byte_order, NULL, 0,
415 tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
418 if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
420 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
421 vlib_main_t *vm = wrk->vm;
424 checksum = ip4_tcp_compute_checksum_custom
425 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
427 checksum = ip6_tcp_compute_checksum_custom
428 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
432 vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
441 tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
444 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
445 u8 tcp_opts_len, tcp_hdr_opts_len;
449 wnd = tcp_window_to_advertise (tc, state);
451 /* Make and write options */
452 tcp_opts_len = tcp_make_established_options (tc, snd_opts);
453 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
455 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
456 tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);
458 tcp_options_write ((u8 *) (th + 1), snd_opts);
460 th->checksum = tcp_compute_checksum (tc, b);
462 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
466 transport_rx_fifo_req_deq_ntf (&tc->connection);
467 tcp_zero_rwnd_sent_on (tc);
470 tcp_zero_rwnd_sent_off (tc);
474 * Convert buffer to ACK
477 tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
479 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
480 TCP_EVT (TCP_EVT_ACK_SENT, tc);
481 tc->rcv_las = tc->rcv_nxt;
485 * Convert buffer to FIN-ACK
488 tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
490 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
494 * Convert buffer to SYN
497 tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
499 u8 tcp_hdr_opts_len, tcp_opts_len;
502 tcp_options_t snd_opts;
504 initial_wnd = tcp_initial_window_to_advertise (tc);
506 /* Make and write options */
507 clib_memset (&snd_opts, 0, sizeof (snd_opts));
508 tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
509 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
511 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
512 tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
514 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
515 tcp_options_write ((u8 *) (th + 1), &snd_opts);
516 th->checksum = tcp_compute_checksum (tc, b);
520 * Convert buffer to SYN-ACK
523 tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
525 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
526 u8 tcp_opts_len, tcp_hdr_opts_len;
530 clib_memset (snd_opts, 0, sizeof (*snd_opts));
531 initial_wnd = tcp_initial_window_to_advertise (tc);
532 tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
533 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
535 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
536 tc->rcv_nxt, tcp_hdr_opts_len,
537 TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
538 tcp_options_write ((u8 *) (th + 1), snd_opts);
540 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
541 th->checksum = tcp_compute_checksum (tc, b);
545 tcp_enqueue_half_open (tcp_worker_ctx_t *wrk, tcp_connection_t *tc,
546 vlib_buffer_t *b, u32 bi)
548 vlib_main_t *vm = wrk->vm;
550 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
553 session_add_pending_tx_buffer (vm->thread_index, bi,
554 wrk->tco_next_node[!tc->c_is_ip4]);
556 if (vm->thread_index == 0 && vlib_num_workers ())
557 session_queue_run_on_main_thread (vm);
561 tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
564 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
567 session_add_pending_tx_buffer (wrk->vm->thread_index, bi,
568 wrk->tco_next_node[!is_ip4]);
571 #endif /* CLIB_MARCH_VARIANT */
574 tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b, u8 is_ip4)
579 ip4_address_t src_ip4, dst_ip4;
580 ip6_address_t src_ip6, dst_ip6;
581 u16 src_port, dst_port;
582 u32 tmp, len, seq, ack;
585 /* Find IP and TCP headers */
586 th = tcp_buffer_hdr (b);
588 /* Save src and dst ip */
591 ih4 = vlib_buffer_get_current (b);
592 ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
593 src_ip4.as_u32 = ih4->src_address.as_u32;
594 dst_ip4.as_u32 = ih4->dst_address.as_u32;
598 ih6 = vlib_buffer_get_current (b);
599 ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
600 clib_memcpy_fast (&src_ip6, &ih6->src_address, sizeof (ip6_address_t));
601 clib_memcpy_fast (&dst_ip6, &ih6->dst_address, sizeof (ip6_address_t));
604 src_port = th->src_port;
605 dst_port = th->dst_port;
606 flags = TCP_FLAG_RST;
609 * RFC 793. If the ACK bit is off, sequence number zero is used,
610 * <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
611 * If the ACK bit is on,
612 * <SEQ=SEG.ACK><CTL=RST>
616 seq = th->ack_number;
621 flags |= TCP_FLAG_ACK;
622 tmp = clib_net_to_host_u32 (th->seq_number);
623 len = vnet_buffer (b)->tcp.data_len + tcp_is_syn (th) + tcp_is_fin (th);
624 ack = clib_host_to_net_u32 (tmp + len);
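      /* SYN and FIN each consume one sequence number, so they are counted in
       * SEG.LEN above when computing the ack carried by the reset */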
628 tcp_reuse_buffer (vm, b);
629 th = vlib_buffer_push_tcp_net_order (b, dst_port, src_port, seq, ack,
630 sizeof (tcp_header_t), flags, 0);
634 ih4 = vlib_buffer_push_ip4 (vm, b, &dst_ip4, &src_ip4,
636 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
641 ih6 = vlib_buffer_push_ip6 (vm, b, &dst_ip6, &src_ip6, IP_PROTOCOL_TCP);
642 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
649 #ifndef CLIB_MARCH_VARIANT
651 * Send reset without reusing existing buffer
653  * It extracts connection info out of the original packet
656 tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
657 u32 thread_index, u8 is_ip4)
659 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
660 vlib_main_t *vm = wrk->vm;
662 u8 tcp_hdr_len, flags = 0;
663 tcp_header_t *th, *pkt_th;
665 ip4_header_t *ih4, *pkt_ih4;
666 ip6_header_t *ih6, *pkt_ih6;
668 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
670 tcp_worker_stats_inc (wrk, no_buffer, 1);
674 b = vlib_get_buffer (vm, bi);
675 tcp_init_buffer (vm, b);
677 /* Make and write options */
678 tcp_hdr_len = sizeof (tcp_header_t);
682 pkt_ih4 = vlib_buffer_get_current (pkt);
683 pkt_th = ip4_next_header (pkt_ih4);
687 pkt_ih6 = vlib_buffer_get_current (pkt);
688 pkt_th = ip6_next_header (pkt_ih6);
691 if (tcp_ack (pkt_th))
693 flags = TCP_FLAG_RST;
694 seq = pkt_th->ack_number;
695 ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
699 flags = TCP_FLAG_RST | TCP_FLAG_ACK;
701 ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
704 th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
705 seq, ack, tcp_hdr_len, flags, 0);
707 /* Swap src and dst ip */
710 ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
711 ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
712 &pkt_ih4->src_address, IP_PROTOCOL_TCP,
713 tcp_csum_offload (tc));
714 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
719 ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
721 ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address,
722 &pkt_ih6->src_address,
724 tc->ipv6_flow_label);
725 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
729 tcp_enqueue_half_open (wrk, tc, b, bi);
730 TCP_EVT (TCP_EVT_RST_SENT, tc);
731 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
732 TCP_ERROR_RST_SENT, 1);
736 * Build and set reset packet for connection
739 tcp_send_reset (tcp_connection_t * tc)
741 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
742 vlib_main_t *vm = wrk->vm;
746 u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
749 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
751 tcp_worker_stats_inc (wrk, no_buffer, 1);
754 b = vlib_get_buffer (vm, bi);
755 tcp_init_buffer (vm, b);
757 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
758 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
759 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
760 flags = TCP_FLAG_RST | TCP_FLAG_ACK;
761 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
762 tc->rcv_nxt, tcp_hdr_opts_len, flags,
764 opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
765 th->checksum = tcp_compute_checksum (tc, b);
766 ASSERT (opts_write_len == tc->snd_opts_len);
767 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
768 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
769 TCP_EVT (TCP_EVT_RST_SENT, tc);
770 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
771 TCP_ERROR_RST_SENT, 1);
777 * Builds a SYN packet for a half-open connection and sends it to tcp-output.
778 * The packet is handled by main thread and because half-open and established
779 * connections use the same pool the connection can be retrieved without
783 tcp_send_syn (tcp_connection_t * tc)
785 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
786 vlib_main_t *vm = wrk->vm;
791 * Setup retransmit and establish timers before requesting buffer
792  * such that we can return if we've run out.
794 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
795 tc->rto * TCP_TO_TIMER_TICK);
797 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
799 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
800 tcp_cfg.alloc_err_timeout);
801 tcp_worker_stats_inc (wrk, no_buffer, 1);
805 b = vlib_get_buffer (vm, bi);
806 tcp_init_buffer (vm, b);
807 tcp_make_syn (tc, b);
809 /* Measure RTT with this */
810 tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
811 tc->rtt_seq = tc->snd_nxt;
814 tcp_enqueue_half_open (wrk, tc, b, bi);
815 TCP_EVT (TCP_EVT_SYN_SENT, tc);
819 tcp_send_synack (tcp_connection_t * tc)
821 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
822 vlib_main_t *vm = wrk->vm;
826 ASSERT (tc->snd_una != tc->snd_nxt);
827 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
829 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
831 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT,
832 tcp_cfg.alloc_err_timeout);
833 tcp_worker_stats_inc (wrk, no_buffer, 1);
837 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
838 b = vlib_get_buffer (vm, bi);
839 tcp_init_buffer (vm, b);
840 tcp_make_synack (tc, b);
841 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
842 TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
849 tcp_send_fin (tcp_connection_t * tc)
851 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
852 vlib_main_t *vm = wrk->vm;
857 fin_snt = tc->flags & TCP_CONN_FINSNT;
861 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
863 /* Out of buffers so program fin retransmit ASAP */
864 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT,
865 tcp_cfg.alloc_err_timeout);
869 /* Make sure retransmit retries a fin not data */
870 tc->flags |= TCP_CONN_FINSNT;
871 tcp_worker_stats_inc (wrk, no_buffer, 1);
875   /* If a non-dup ack was programmed, the FIN's ack makes it redundant */
876 if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
877 tc->flags &= ~TCP_CONN_SNDACK;
879 b = vlib_get_buffer (vm, bi);
880 tcp_init_buffer (vm, b);
881 tcp_make_fin (tc, b);
882 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
883 TCP_EVT (TCP_EVT_FIN_SENT, tc);
884 /* Account for the FIN */
886 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
889 tc->flags |= TCP_CONN_FINSNT;
890 tc->flags &= ~TCP_CONN_FINPNDG;
895 * Push TCP header and update connection variables. Should only be called
896 * for segments with data, not for 'control' packets.
899 tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
900 u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
902 u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
903 u32 advertise_wnd, data_len;
904 tcp_main_t *tm = &tcp_main;
907 data_len = b->current_length;
908 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
909 data_len += b->total_length_not_including_first_buffer;
911 vnet_buffer (b)->tcp.flags = 0;
912 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
915 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
917 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
920 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
922 advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
924 if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
926 if (seq_geq (tc->psh_seq, snd_nxt)
927 && seq_lt (tc->psh_seq, snd_nxt + data_len))
928 flags |= TCP_FLAG_PSH;
930 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
931 tc->rcv_nxt, tcp_hdr_opts_len, flags,
936 clib_memcpy_fast ((u8 *) (th + 1),
937 tm->wrk_ctx[tc->c_thread_index].cached_opts,
942 u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
943 ASSERT (len == tc->snd_opts_len);
947 * Update connection variables
951 tc->snd_nxt += data_len;
952 tc->rcv_las = tc->rcv_nxt;
954 tc->bytes_out += data_len;
955 tc->data_segs_out += 1;
957 th->checksum = tcp_compute_checksum (tc, b);
959 TCP_EVT (TCP_EVT_PKTIZE, tc);
963 tcp_buffer_len (vlib_buffer_t * b)
965 u32 data_len = b->current_length;
966 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
967 data_len += b->total_length_not_including_first_buffer;
972 tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
974 tcp_connection_t *tc = (tcp_connection_t *) tconn;
976 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
977 tcp_bt_track_tx (tc, tcp_buffer_len (b));
979 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
980 /* update_snd_nxt */ 1);
982 tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
983 /* If not tracking an ACK, start tracking */
984 if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
986 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
987 tc->rtt_seq = tc->snd_nxt;
989 if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
991 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
992 tcp_retransmit_timer_set (&wrk->timer_wheel, tc);
999 tcp_send_ack (tcp_connection_t * tc)
1001 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1002 vlib_main_t *vm = wrk->vm;
1006 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1008 tcp_update_rcv_wnd (tc);
1009 tcp_worker_stats_inc (wrk, no_buffer, 1);
1012 b = vlib_get_buffer (vm, bi);
1013 tcp_init_buffer (vm, b);
1014 tcp_make_ack (tc, b);
1015 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1019 tcp_program_ack (tcp_connection_t * tc)
1021 if (!(tc->flags & TCP_CONN_SNDACK))
1023 session_add_self_custom_tx_evt (&tc->connection, 1);
1024 tc->flags |= TCP_CONN_SNDACK;
1029 tcp_program_dupack (tcp_connection_t * tc)
1031 if (!(tc->flags & TCP_CONN_SNDACK))
1033 session_add_self_custom_tx_evt (&tc->connection, 1);
1034 tc->flags |= TCP_CONN_SNDACK;
1036 if (tc->pending_dupacks < 255)
1037 tc->pending_dupacks += 1;
1041 tcp_program_retransmit (tcp_connection_t * tc)
1043 if (!(tc->flags & TCP_CONN_RXT_PENDING))
1045 session_add_self_custom_tx_evt (&tc->connection, 0);
1046 tc->flags |= TCP_CONN_RXT_PENDING;
1051 * Send window update ack
1053 * Ensures that it will be sent only once, after a zero rwnd has been
1054 * advertised in a previous ack, and only if rwnd has grown beyond a
1055 * configurable value.
1058 tcp_send_window_update_ack (tcp_connection_t * tc)
1060 if (tcp_zero_rwnd_sent (tc))
1062 tcp_update_rcv_wnd (tc);
1063 if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
1065 tcp_zero_rwnd_sent_off (tc);
1066 tcp_program_ack (tc);
1072 * Allocate a new buffer and build a new tcp segment
1074 * @param wrk tcp worker
1075 * @param tc connection for which the segment will be allocated
1076 * @param offset offset of the first byte in the tx fifo
1077  * @param max_deq_bytes segment size
1078 * @param[out] b pointer to buffer allocated
1080 * @return the number of bytes in the segment or 0 if buffer cannot be
1081 * allocated or no data available
1084 tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1085 u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
1087 u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
1088 vlib_main_t *vm = wrk->vm;
1093 seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;
1098 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1101 * Allocate and fill in buffer(s)
1104   /* Easy case, segment fits into a single buffer */
1105 if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
1107 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1109 tcp_worker_stats_inc (wrk, no_buffer, 1);
1112 *b = vlib_get_buffer (vm, bi);
1113 data = tcp_init_buffer (vm, *b);
1114 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1116 ASSERT (n_bytes == max_deq_bytes);
1117 b[0]->current_length = n_bytes;
1118 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1119 /* burst */ 0, /* update_snd_nxt */ 0);
1121       /* Split segment into multiple buffers */
1124 u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
1125 u16 n_peeked, len_to_deq;
1126 vlib_buffer_t *chain_b, *prev_b;
1129 /* Make sure we have enough buffers */
1130 n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
1131 vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
1132 CLIB_CACHE_LINE_BYTES);
1133 n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
1134 if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
1137 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1138 tcp_worker_stats_inc (wrk, no_buffer, 1);
1142 *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
1143 data = tcp_init_buffer (vm, *b);
1144 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1146 TRANSPORT_MAX_HDRS_LEN);
1147 b[0]->current_length = n_bytes;
1148 b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1149 b[0]->total_length_not_including_first_buffer = 0;
1150 max_deq_bytes -= n_bytes;
1153 for (i = 1; i < n_bufs_per_seg; i++)
1156 len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
1157 chain_bi = wrk->tx_buffers[--n_bufs];
1158 chain_b = vlib_get_buffer (vm, chain_bi);
1159 chain_b->current_data = 0;
1160 data = vlib_buffer_get_current (chain_b);
1161 n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
1164 ASSERT (n_peeked == len_to_deq);
1165 n_bytes += n_peeked;
1166 chain_b->current_length = n_peeked;
1167 chain_b->next_buffer = 0;
1169 /* update previous buffer */
1170 prev_b->next_buffer = chain_bi;
1171 prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1173 max_deq_bytes -= n_peeked;
1174 b[0]->total_length_not_including_first_buffer += n_peeked;
1177 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1178 /* burst */ 0, /* update_snd_nxt */ 0);
1180 if (PREDICT_FALSE (n_bufs))
1182 clib_warning ("not all buffers consumed");
1183 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1187 ASSERT (n_bytes > 0);
1188 ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
1194 * Build a retransmit segment
1196  * @return the number of bytes in the segment or 0 if there's nothing to retransmit
1200 tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
1201 tcp_connection_t * tc, u32 offset,
1202 u32 max_deq_bytes, vlib_buffer_t ** b)
1204 u32 start, available_bytes;
1207 ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
1208 ASSERT (max_deq_bytes != 0);
1211 * Make sure we can retransmit something
1213 available_bytes = transport_max_tx_dequeue (&tc->connection);
1214 ASSERT (available_bytes >= offset);
1215 available_bytes -= offset;
1216 if (!available_bytes)
1219 max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
1220 max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
1222 start = tc->snd_una + offset;
1223 ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));
1225 n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
1229 tc->snd_rxt_bytes += n_bytes;
1231 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1232 tcp_bt_track_rxt (tc, start, start + n_bytes);
1234 tc->bytes_retrans += n_bytes;
1235 tc->segs_retrans += 1;
1236 tcp_worker_stats_inc (wrk, rxt_segs, 1);
1237 TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);
1243 tcp_check_sack_reneging (tcp_connection_t * tc)
1245 sack_scoreboard_t *sb = &tc->sack_sb;
1246 sack_scoreboard_hole_t *hole;
1248 hole = scoreboard_first_hole (sb);
1249 if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
1252 scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
1256 * Reset congestion control, switch cwnd to loss window and try again.
1259 tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
1261 TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
1263 tc->prev_ssthresh = tc->ssthresh;
1264 tc->prev_cwnd = tc->cwnd;
1266   /* If we entered loss without fast recovery, notify cc algo of the
1267 * congestion event such that it can update ssthresh and its state */
1268 if (!tcp_in_fastrecovery (tc))
1269 tcp_cc_congestion (tc);
1271 /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
1275 tc->cwnd_acc_bytes = 0;
1276 tc->tr_occurences += 1;
1277 tc->sack_sb.reorder = TCP_DUPACK_THRESHOLD;
1278 tcp_recovery_on (tc);
1282 tcp_timer_retransmit_handler (tcp_connection_t * tc)
1284 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1285 vlib_main_t *vm = wrk->vm;
1286 vlib_buffer_t *b = 0;
1289 tcp_worker_stats_inc (wrk, tr_events, 1);
1291 /* Should be handled by a different handler */
1292 if (PREDICT_FALSE (tc->state == TCP_STATE_SYN_SENT))
1295 /* Wait-close and retransmit could pop at the same time */
1296 if (tc->state == TCP_STATE_CLOSED)
1299 if (tc->state >= TCP_STATE_ESTABLISHED)
1301 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1303 /* Lost FIN, retransmit and return */
1304 if (tc->flags & TCP_CONN_FINSNT)
1308 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1312 /* Shouldn't be here */
1313 if (tc->snd_una == tc->snd_nxt)
1315 ASSERT (!tcp_in_recovery (tc));
1320       /* We're not in recovery so make sure rto_boff is 0. Can be non-zero due
1321 * to persist timer timeout */
1322 if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
1325 tcp_update_rto (tc);
1328 /* Peer is dead or network connectivity is lost. Close connection.
1329 * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
1330 * a min rto of 0.2s we need to retry about 8 times. */
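      /* rto doubles on every retry (capped at TCP_RTO_MAX), so the cumulative
       * wait after n retries is roughly min_rto * (2^n - 1) */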
1331 if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
1333 tcp_send_reset (tc);
1334 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1335 session_transport_closing_notify (&tc->connection);
1336 session_transport_closed_notify (&tc->connection);
1337 tcp_connection_timers_reset (tc);
1338 tcp_program_cleanup (wrk, tc);
1339 tcp_worker_stats_inc (wrk, tr_abort, 1);
1343 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1344 tcp_check_sack_reneging (tc);
1346 /* Update send congestion to make sure that rxt has data to send */
1347 tc->snd_congestion = tc->snd_nxt;
1349 /* Send the first unacked segment. If we're short on buffers, return
1350 * as soon as possible */
1351 n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
1352 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
1355 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT,
1356 tcp_cfg.alloc_err_timeout);
1360 bi = vlib_get_buffer_index (vm, b);
1361 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1363 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1364 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
1367 if (tc->rto_boff == 1)
1369 tcp_cc_init_rxt_timeout (tc);
1370 /* Record timestamp. Eifel detection algorithm RFC3522 */
1371 tc->snd_rxt_ts = tcp_tstamp (tc);
1374 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1375 scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);
1377 tcp_program_retransmit (tc);
1379 /* Retransmit SYN-ACK */
1380 else if (tc->state == TCP_STATE_SYN_RCVD)
1382 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1386 /* Passive open establish timeout */
1387 if (tc->rto > TCP_ESTABLISH_TIME >> 1)
1389 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1390 tcp_connection_timers_reset (tc);
1391 tcp_program_cleanup (wrk, tc);
1392 tcp_worker_stats_inc (wrk, tr_abort, 1);
1396 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1398 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT,
1399 tcp_cfg.alloc_err_timeout);
1400 tcp_worker_stats_inc (wrk, no_buffer, 1);
1405 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1406 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1408 ASSERT (tc->snd_una != tc->snd_nxt);
1409 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
1411 b = vlib_get_buffer (vm, bi);
1412 tcp_init_buffer (vm, b);
1413 tcp_make_synack (tc, b);
1414 TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);
1416 /* Retransmit timer already updated, just enqueue to output */
1417 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1421 ASSERT (tc->state == TCP_STATE_CLOSED);
1427 * SYN retransmit timer handler. Active open only.
1430 tcp_timer_retransmit_syn_handler (tcp_connection_t * tc)
1432 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1433 vlib_main_t *vm = wrk->vm;
1434 vlib_buffer_t *b = 0;
1437 /* Note: the connection may have transitioned to ESTABLISHED... */
1438 if (PREDICT_FALSE (tc->state != TCP_STATE_SYN_SENT))
1441 /* Half-open connection actually moved to established but we were
1442    * waiting for syn retransmit to pop to call cleanup from the right thread */
1444 if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
1446 if (tcp_half_open_connection_cleanup (tc))
1447 TCP_DBG ("could not remove half-open connection");
1451 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1454 /* Active open establish timeout */
1455 if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
1457 session_stream_connect_notify (&tc->connection, SESSION_E_TIMEDOUT);
1458 tcp_connection_cleanup (tc);
1462 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1464 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
1465 tcp_cfg.alloc_err_timeout);
1466 tcp_worker_stats_inc (wrk, no_buffer, 1);
1470 /* Try without increasing RTO a number of times. If this fails,
1471 * start growing RTO exponentially */
1473 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1474 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1476 b = vlib_get_buffer (vm, bi);
1477 tcp_init_buffer (vm, b);
1478 tcp_make_syn (tc, b);
1480 TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);
1482 tcp_enqueue_half_open (wrk, tc, b, bi);
1484 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
1485 tc->rto * TCP_TO_TIMER_TICK);
1489 * Got 0 snd_wnd from peer, try to do something about it.
1493 tcp_timer_persist_handler (tcp_connection_t * tc)
1495 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1496 u32 bi, max_snd_bytes, available_bytes, offset;
1497 tcp_main_t *tm = vnet_get_tcp_main ();
1498 vlib_main_t *vm = wrk->vm;
1503 /* Problem already solved or worse */
1504 if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
1505 || (tc->flags & TCP_CONN_FINSNT))
1506 goto update_scheduler;
1508 available_bytes = transport_max_tx_dequeue (&tc->connection);
1509 offset = tc->snd_nxt - tc->snd_una;
1511 /* Reprogram persist if no new bytes available to send. We may have data
1513 if (!available_bytes)
1515 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1519 if (available_bytes <= offset)
1520 goto update_scheduler;
1522 /* Increment RTO backoff */
1524 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1527 * Try to force the first unsent segment (or buffer)
1529 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1531 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1532 tcp_worker_stats_inc (wrk, no_buffer, 1);
1536 b = vlib_get_buffer (vm, bi);
1537 data = tcp_init_buffer (vm, b);
1539 tcp_validate_txf_size (tc, offset);
1540 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1541 max_snd_bytes = clib_min (tc->snd_mss,
1542 tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
1543 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1545 b->current_length = n_bytes;
1546 ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
1547 || tc->snd_una == tc->snd_nxt
1548 || tc->rto_boff > 1));
1550 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1552 tcp_bt_check_app_limited (tc);
1553 tcp_bt_track_tx (tc, n_bytes);
1556 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
1557 /* burst */ 0, /* update_snd_nxt */ 1);
1558 tcp_validate_txf_size (tc, tc->snd_nxt - tc->snd_una);
1559 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1561 /* Just sent new data, enable retransmit */
1562 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
1568 if (tcp_is_descheduled (tc))
1569 transport_connection_reschedule (&tc->connection);
1573 * Retransmit first unacked segment
1576 tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1578 vlib_main_t *vm = wrk->vm;
1582 TCP_EVT (TCP_EVT_CC_EVT, tc, 1);
1584 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
1588 bi = vlib_get_buffer_index (vm, b);
1589 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1595 tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1598 u32 offset, n_segs = 0, n_written, bi, available_wnd;
1599 vlib_main_t *vm = wrk->vm;
1600 vlib_buffer_t *b = 0;
1602 offset = tc->snd_nxt - tc->snd_una;
1603 available_wnd = tc->snd_wnd - offset;
1604 burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
1606 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1607 tcp_bt_check_app_limited (tc);
1609 while (n_segs < burst_size)
1611 n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
1615 bi = vlib_get_buffer_index (vm, b);
1616 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1617 offset += n_written;
1620 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1621 tcp_bt_track_tx (tc, n_written);
1623 tc->snd_nxt += n_written;
1631 * Estimate send space using proportional rate reduction (RFC6937)
1634 tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
1639 pipe = tcp_flight_size (tc);
1640 prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);
1642 if (pipe > tc->ssthresh)
1644 space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
1650 limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
1651 space = clib_min (tc->ssthresh - pipe, limit);
1653 space = clib_max (space, prr_out ? 0 : tc->snd_mss);
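  /* This follows RFC 6937: when pipe > ssthresh,
   * sndcnt = CEIL(prr_delivered * ssthresh / RecoverFS) - prr_out, with
   * RecoverFS taken to be prev_cwnd here; otherwise
   * sndcnt = MIN(ssthresh - pipe, MAX(prr_delivered - prr_out, 0) + MSS) */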
1658 tcp_retransmit_should_retry_head (tcp_connection_t * tc,
1659 sack_scoreboard_t * sb)
1661 u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
1662 f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;
1664 if (tcp_fastrecovery_first (tc))
1667 return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
1671 tcp_max_tx_deq (tcp_connection_t * tc)
1673 return (transport_max_tx_dequeue (&tc->connection)
1674 - (tc->snd_nxt - tc->snd_una));
1677 #define scoreboard_rescue_rxt_valid(_sb, _tc) \
1678 (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
1679 && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
1682 * Do retransmit with SACKs
1685 tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1688 u32 n_written = 0, offset, max_bytes, n_segs = 0;
1689 u8 snd_limited = 0, can_rescue = 0;
1690 u32 bi, max_deq, burst_bytes;
1691 sack_scoreboard_hole_t *hole;
1692 vlib_main_t *vm = wrk->vm;
1693 vlib_buffer_t *b = 0;
1694 sack_scoreboard_t *sb;
1697 ASSERT (tcp_in_cong_recovery (tc));
1699 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1700 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1703 tcp_program_retransmit (tc);
1707 if (tcp_in_recovery (tc))
1708 snd_space = tcp_available_cc_snd_space (tc);
1710 snd_space = tcp_fastrecovery_prr_snd_space (tc);
1712 if (snd_space < tc->snd_mss)
1717 /* Check if snd_una is a lost retransmit */
1718 if (pool_elts (sb->holes)
1719 && seq_gt (sb->high_sacked, tc->snd_congestion)
1720 && tc->rxt_head != tc->snd_una
1721 && tcp_retransmit_should_retry_head (tc, sb))
1723 max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
1724 n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b);
1727 tcp_program_retransmit (tc);
1730 bi = vlib_get_buffer_index (vm, b);
1731 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1734 tc->rxt_head = tc->snd_una;
1735 tc->rxt_delivered += n_written;
1736 tc->prr_delivered += n_written;
1737 ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
1740 tcp_fastrecovery_first_off (tc);
1742 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1743 hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
1745 max_deq = transport_max_tx_dequeue (&tc->connection);
1746 max_deq -= tc->snd_nxt - tc->snd_una;
1748 while (snd_space > 0 && n_segs < burst_size)
1750 hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
1754 /* We are out of lost holes to retransmit so send some new data. */
1755 if (max_deq > tc->snd_mss)
1760 /* Make sure we don't exceed available window and leave space
1761 * for one more packet, to avoid zero window acks */
1762 av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
1763 av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
1764 snd_space = clib_min (snd_space, av_wnd);
1765 snd_space = clib_min (max_deq, snd_space);
1766 burst_size = clib_min (burst_size - n_segs,
1767 snd_space / tc->snd_mss);
1768 burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
1769 n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
1770 if (max_deq > n_segs_new * tc->snd_mss)
1771 tcp_program_retransmit (tc);
1773 n_segs += n_segs_new;
1777 if (tcp_in_recovery (tc) || !can_rescue
1778 || scoreboard_rescue_rxt_valid (sb, tc))
1781 /* If rescue rxt undefined or less than snd_una then one segment of
1782 * up to SMSS octets that MUST include the highest outstanding
1783 * unSACKed sequence number SHOULD be returned, and RescueRxt set to
1784 * RecoveryPoint. HighRxt MUST NOT be updated.
1786 hole = scoreboard_last_hole (sb);
1787 max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
1788 max_bytes = clib_min (max_bytes, snd_space);
1789 offset = hole->end - tc->snd_una - max_bytes;
1790 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
1795 sb->rescue_rxt = tc->snd_congestion;
1796 bi = vlib_get_buffer_index (vm, b);
1797 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1802 max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
1803 max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
1807 offset = sb->high_rxt - tc->snd_una;
1808 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1810 ASSERT (n_written <= snd_space);
1812 /* Nothing left to retransmit */
1816 bi = vlib_get_buffer_index (vm, b);
1817 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1819 sb->high_rxt += n_written;
1820 ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));
1822 snd_space -= n_written;
1827 tcp_program_retransmit (tc);
1831 transport_connection_tx_pacer_reset_bucket (&tc->connection, 0);
1836 * Fast retransmit without SACK info
1839 tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1842 u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
1843 u32 burst_bytes, sent_bytes;
1844 vlib_main_t *vm = wrk->vm;
1845 int snd_space, n_segs = 0;
1849 ASSERT (tcp_in_cong_recovery (tc));
1850 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1852 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1853 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1856 tcp_program_retransmit (tc);
1860 snd_space = tcp_available_cc_snd_space (tc);
1861 cc_limited = snd_space < burst_bytes;
1863 if (!tcp_fastrecovery_first (tc))
1866   /* RFC 6582: [If a partial ack], retransmit the first unacknowledged segment */
1868 while (snd_space > 0 && n_segs < burst_size)
1870 max_bytes = clib_min (tc->snd_mss,
1871 tc->snd_congestion - tc->snd_una - offset);
1874 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1877 /* Nothing left to retransmit */
1881 bi = vlib_get_buffer_index (vm, b);
1882 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1883 snd_space -= n_written;
1884 offset += n_written;
1888 if (n_segs == burst_size)
1893 /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
1894 if (snd_space < tc->snd_mss || tc->snd_mss == 0)
1897 max_deq = transport_max_tx_dequeue (&tc->connection);
1898 max_deq -= tc->snd_nxt - tc->snd_una;
1901 snd_space = clib_min (max_deq, snd_space);
1902 burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
1903 n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
1904 if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
1905 tcp_program_retransmit (tc);
1906 n_segs += n_segs_now;
1910 tcp_fastrecovery_first_off (tc);
1912 sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
1913 sent_bytes = cc_limited ? burst_bytes : sent_bytes;
1914 transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
1920 tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
1924 if (!tc->pending_dupacks)
1926 if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
1927 || tc->state != TCP_STATE_ESTABLISHED)
1935 /* If we're supposed to send dupacks but have no ooo data
1936 * send only one ack */
1937 if (!vec_len (tc->snd_sacks))
1940 tc->dupacks_out += 1;
1941 tc->pending_dupacks = 0;
1945 /* Start with first sack block */
1946 tc->snd_sack_pos = 0;
1948 /* Generate enough dupacks to cover all sack blocks. Do not generate
1949 * more sacks than the number of packets received. But do generate at
1950 * least 3, i.e., the number needed to signal congestion, if needed. */
1951 n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
1952 n_acks = clib_min (n_acks, tc->pending_dupacks);
1953 n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
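  /* e.g., with 3 sack blocks per ack, 7 blocks and 5 pending dupacks yield
   * 7/3 = 2 acks, raised to 3 so the peer still sees enough dupacks to
   * trigger fast retransmit */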
1954 for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
1957 if (n_acks < max_burst_size)
1959 tc->pending_dupacks = 0;
1960 tc->snd_sack_pos = 0;
1961 tc->dupacks_out += n_acks;
1966 TCP_DBG ("constrained by burst size");
1967 tc->pending_dupacks = n_acks - max_burst_size;
1968 tc->dupacks_out += max_burst_size;
1969 tcp_program_dupack (tc);
1970 return max_burst_size;
1975 tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
1977 tcp_worker_ctx_t *wrk;
1980 if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
1983 wrk = tcp_get_worker (tc->c_thread_index);
1985 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1986 n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
1988 n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);
1994 tcp_session_custom_tx (void *conn, transport_send_params_t * sp)
1996 tcp_connection_t *tc = (tcp_connection_t *) conn;
1999 if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
2001 tc->flags &= ~TCP_CONN_RXT_PENDING;
2002 n_segs = tcp_do_retransmit (tc, sp->max_burst_size);
2005 if (!(tc->flags & TCP_CONN_SNDACK))
2008 tc->flags &= ~TCP_CONN_SNDACK;
2010 /* We have retransmitted packets and no dupack */
2011 if (n_segs && !tc->pending_dupacks)
2014 if (sp->max_burst_size <= n_segs)
2016 tcp_program_ack (tc);
2020 n_segs += tcp_send_acks (tc, sp->max_burst_size - n_segs);
2024 #endif /* CLIB_MARCH_VARIANT */
2027 tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
2028 u16 * next0, u32 * error0)
2030 ip_adjacency_t *adj;
2033   /* Not thread safe but as long as the connection exists the adj should remain valid */
2035 ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
2037 if (ai == ADJ_INDEX_INVALID)
2039 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2040 *next0 = TCP_OUTPUT_NEXT_DROP;
2041 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2046 if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
2047 *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
2048 else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
2049 *next0 = TCP_OUTPUT_NEXT_IP_ARP;
2052 *next0 = TCP_OUTPUT_NEXT_DROP;
2053 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2055 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
2059 tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2060 u32 * to_next, u32 n_bufs)
2062 tcp_connection_t *tc;
2068 for (i = 0; i < n_bufs; i++)
2070 b = vlib_get_buffer (vm, to_next[i]);
2071 if (!(b->flags & VLIB_BUFFER_IS_TRACED))
2073 th = vlib_buffer_get_current (b);
2074 tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2076 t = vlib_add_trace (vm, node, b, sizeof (*t));
2077 clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
2078 clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
2083 tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
2084 tcp_connection_t * tc0, u8 is_ip4)
2086 TCP_EVT (TCP_EVT_OUTPUT, tc0,
2087 ((tcp_header_t *) vlib_buffer_get_current (b0))->flags,
2088 b0->current_length);
2091 vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
2092 IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
2094 vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
2095 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
2099 tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
2101 if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
2104 u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;
2106 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
2107 data_len += b->total_length_not_including_first_buffer;
2109 if (PREDICT_TRUE (data_len <= tc->snd_mss))
2113 ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
2114 ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
2115 b->flags |= VNET_BUFFER_F_GSO;
2116 vnet_buffer2 (b)->gso_l4_hdr_sz =
2117 sizeof (tcp_header_t) + tc->snd_opts_len;
2118 vnet_buffer2 (b)->gso_size = tc->snd_mss;
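  /* Segments larger than snd_mss are marked for GSO so the NIC, or a
   * software fallback, performs the final segmentation using gso_size */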
2123 tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
2124 vlib_node_runtime_t * error_node, u16 * next0,
2127 /* If next_index is not drop use it */
2128 if (tc0->next_node_index)
2130 *next0 = tc0->next_node_index;
2131 vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
2135 *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
2138 vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
2139 vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
2145 if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
2146 tcp_output_handle_link_local (tc0, b0, next0, &error0);
2148 if (PREDICT_FALSE (error0))
2150 b0->error = error_node->errors[error0];
2159 tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2160 vlib_frame_t * frame, int is_ip4)
2162 u32 n_left_from, *from, thread_index = vm->thread_index;
2163 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2164 u16 nexts[VLIB_FRAME_SIZE], *next;
2166 from = vlib_frame_vector_args (frame);
2167 n_left_from = frame->n_vectors;
2168 tcp_update_time_now (tcp_get_worker (thread_index));
2170 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2171 tcp46_output_trace_frame (vm, node, from, n_left_from);
2173 vlib_get_buffers (vm, from, bufs, n_left_from);
2177 while (n_left_from >= 4)
2179 tcp_connection_t *tc0, *tc1;
2182 vlib_prefetch_buffer_header (b[2], STORE);
2183 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2185 vlib_prefetch_buffer_header (b[3], STORE);
2186 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2189 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2191 tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
2194 if (PREDICT_TRUE (!tc0 + !tc1 == 0))
2196 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2197 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2199 tcp_check_if_gso (tc0, b[0]);
2200 tcp_check_if_gso (tc1, b[1]);
2202 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2203 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2209 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2210 tcp_check_if_gso (tc0, b[0]);
2211 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2215 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2216 next[0] = TCP_OUTPUT_NEXT_DROP;
2220 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2221 tcp_check_if_gso (tc1, b[1]);
2222 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2226 b[1]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2227 next[1] = TCP_OUTPUT_NEXT_DROP;
2235 while (n_left_from > 0)
2237 tcp_connection_t *tc0;
2239 if (n_left_from > 1)
2241 vlib_prefetch_buffer_header (b[1], STORE);
2242 CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2245 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2248 if (PREDICT_TRUE (tc0 != 0))
2250 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2251 tcp_check_if_gso (tc0, b[0]);
2252 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2256 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2257 next[0] = TCP_OUTPUT_NEXT_DROP;
2265 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2266 vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
2267 TCP_ERROR_PKTS_SENT, frame->n_vectors);
2268 return frame->n_vectors;
2271 VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2272 vlib_frame_t * from_frame)
2274 return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2277 VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2278 vlib_frame_t * from_frame)
2280 return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2284 VLIB_REGISTER_NODE (tcp4_output_node) =
2286 .name = "tcp4-output",
2287 /* Takes a vector of packets. */
2288 .vector_size = sizeof (u32),
2289 .n_errors = TCP_N_ERROR,
2290 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2291 .error_strings = tcp_error_strings,
2292 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2294 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2295 foreach_tcp4_output_next
2298 .format_buffer = format_tcp_header,
2299 .format_trace = format_tcp_tx_trace,
2304 VLIB_REGISTER_NODE (tcp6_output_node) =
2306 .name = "tcp6-output",
2307 /* Takes a vector of packets. */
2308 .vector_size = sizeof (u32),
2309 .n_errors = TCP_N_ERROR,
2310 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2311 .error_strings = tcp_error_strings,
2312 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2314 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2315 foreach_tcp6_output_next
2318 .format_buffer = format_tcp_header,
2319 .format_trace = format_tcp_tx_trace,
2323 typedef enum _tcp_reset_next
2325 TCP_RESET_NEXT_DROP,
2326 TCP_RESET_NEXT_IP_LOOKUP,
2330 #define foreach_tcp4_reset_next \
2331 _(DROP, "error-drop") \
2332 _(IP_LOOKUP, "ip4-lookup")
2334 #define foreach_tcp6_reset_next \
2335 _(DROP, "error-drop") \
2336 _(IP_LOOKUP, "ip6-lookup")
2339 tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2340 vlib_frame_t * from_frame, u8 is_ip4)
2342 u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
2343 u32 n_left_from, next_index, *from, *to_next;
2345 from = vlib_frame_vector_args (from_frame);
2346 n_left_from = from_frame->n_vectors;
2348 next_index = node->cached_next_index;
2350 while (n_left_from > 0)
2354 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2356 while (n_left_from > 0 && n_left_to_next > 0)
2368 n_left_to_next -= 1;
2370 b0 = vlib_get_buffer (vm, bi0);
2371 tcp_make_reset_in_place (vm, b0, is_ip4);
2373 /* Prepare to send to IP lookup */
2374 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2376 b0->error = node->errors[error0];
2377 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
2378 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2380 th0 = vlib_buffer_get_current (b0);
2382 th0 = ip4_next_header ((ip4_header_t *) th0);
2384 th0 = ip6_next_header ((ip6_header_t *) th0);
2385 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2386 clib_memcpy_fast (&t0->tcp_header, th0,
2387 sizeof (t0->tcp_header));
2390 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2391 n_left_to_next, bi0, next0);
2393 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2395 return from_frame->n_vectors;
2398 VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2399 vlib_frame_t * from_frame)
2401 return tcp46_send_reset_inline (vm, node, from_frame, 1);
2404 VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2405 vlib_frame_t * from_frame)
2407 return tcp46_send_reset_inline (vm, node, from_frame, 0);
2411 VLIB_REGISTER_NODE (tcp4_reset_node) = {
2412 .name = "tcp4-reset",
2413 .vector_size = sizeof (u32),
2414 .n_errors = TCP_N_ERROR,
2415 .error_strings = tcp_error_strings,
2416 .n_next_nodes = TCP_RESET_N_NEXT,
2418 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2419 foreach_tcp4_reset_next
2422 .format_trace = format_tcp_tx_trace,
2427 VLIB_REGISTER_NODE (tcp6_reset_node) = {
2428 .name = "tcp6-reset",
2429 .vector_size = sizeof (u32),
2430 .n_errors = TCP_N_ERROR,
2431 .error_strings = tcp_error_strings,
2432 .n_next_nodes = TCP_RESET_N_NEXT,
2434 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2435 foreach_tcp6_reset_next
2438 .format_trace = format_tcp_tx_trace,
2443 * fd.io coding-style-patch-verification: ON
2446 * eval: (c-set-style "gnu")