2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/tcp/tcp.h>
17 #include <vnet/tcp/tcp_inlines.h>
20 typedef enum _tcp_output_next
23 TCP_OUTPUT_NEXT_IP_LOOKUP,
24 TCP_OUTPUT_NEXT_IP_REWRITE,
25 TCP_OUTPUT_NEXT_IP_ARP,
29 #define foreach_tcp4_output_next \
30 _ (DROP, "error-drop") \
31 _ (IP_LOOKUP, "ip4-lookup") \
32 _ (IP_REWRITE, "ip4-rewrite") \
35 #define foreach_tcp6_output_next \
36 _ (DROP, "error-drop") \
37 _ (IP_LOOKUP, "ip6-lookup") \
38 _ (IP_REWRITE, "ip6-rewrite") \
39 _ (IP_ARP, "ip6-discover-neighbor")
41 static char *tcp_error_strings[] = {
42 #define tcp_error(n,s) s,
43 #include <vnet/tcp/tcp_error.def>
49 tcp_header_t tcp_header;
50 tcp_connection_t tcp_connection;
54 format_tcp_tx_trace (u8 * s, va_list * args)
56 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
57 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
58 tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
59 tcp_connection_t *tc = &t->tcp_connection;
60 u32 indent = format_get_indent (s);
62 s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
63 format_tcp_state, tc->state, format_white_space, indent,
64 format_tcp_header, &t->tcp_header, 128);
69 #ifndef CLIB_MARCH_VARIANT
71 tcp_window_compute_scale (u32 window)
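  /* Pick the smallest wnd_scale for which the scaled window
   * (window >> wnd_scale) fits the 16-bit header field, i.e., is at most
   * TCP_WND_MAX, without exceeding TCP_MAX_WND_SCALE */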
74 while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
80 * TCP's initial window
83 tcp_initial_wnd_unscaled (tcp_connection_t * tc)
85 /* RFC 6928 recommends the value below. However, at the time our connections
86 * are initialized, fifos may not be allocated. Therefore, advertise the
87 * smallest possible unscaled window size and update it once fifos are
88 * assigned to the session.
91 /* tcp_update_rcv_mss (tc);
92 TCP_IW_N_SEGMENTS * tc->mss; */
94 return tcp_cfg.min_rx_fifo;
98 * Compute initial window and scale factor. As per RFC1323, window field in
99 * SYN and SYN-ACK segments is never scaled.
102 tcp_initial_window_to_advertise (tcp_connection_t * tc)
104 /* Compute rcv wscale only if peer advertised support for it */
105 if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
106 tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);
108 tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
110 return clib_min (tc->rcv_wnd, TCP_WND_MAX);
114 tcp_update_rcv_wnd (tcp_connection_t * tc)
116 u32 available_space, wnd;
120 * Figure out how much space we have available
122 available_space = transport_max_rx_enqueue (&tc->connection);
125 * Use the above and what we know about what we've previously advertised
126 * to compute the new window
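  /* observed_wnd: what the peer still believes is open, i.e., the window we
   * last advertised (at rcv_las) minus the bytes received since then */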
128 observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
130 /* Check if we are about to retract the window. Do the comparison before
131 * rounding to avoid errors. Per RFC7323 sec. 2.4 we could remove this */
132 if (PREDICT_FALSE ((i32) available_space < observed_wnd))
134 wnd = round_down_pow2 (clib_max (observed_wnd, 0), 1 << tc->rcv_wscale);
135 TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
139 /* Make sure we have a multiple of 1 << rcv_wscale. We round down to
140 * avoid advertising a window larger than what can be buffered */
141 wnd = round_down_pow2 (available_space, 1 << tc->rcv_wscale);
144 if (PREDICT_FALSE (wnd < tc->rcv_opts.mss))
147 tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
151 * Compute and return window to advertise, scaled as per RFC1323
154 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
156 if (state < TCP_STATE_ESTABLISHED)
157 return tcp_initial_window_to_advertise (tc);
159 tcp_update_rcv_wnd (tc);
160 return tc->rcv_wnd >> tc->rcv_wscale;
164 tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
168 opts->flags |= TCP_OPTS_FLAG_MSS;
170 len += TCP_OPTION_LEN_MSS;
172 opts->flags |= TCP_OPTS_FLAG_WSCALE;
173 opts->wscale = tc->rcv_wscale;
174 len += TCP_OPTION_LEN_WINDOW_SCALE;
176 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
177 opts->tsval = tcp_time_now ();
179 len += TCP_OPTION_LEN_TIMESTAMP;
183 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
184 len += TCP_OPTION_LEN_SACK_PERMITTED;
187 /* Align to needed boundary */
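  /* i.e., round len up to the next multiple of TCP_OPTS_ALIGN (4 bytes);
   * e.g., 18 option bytes are padded out to 20 */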
188 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
193 tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
197 opts->flags |= TCP_OPTS_FLAG_MSS;
199 len += TCP_OPTION_LEN_MSS;
201 if (tcp_opts_wscale (&tc->rcv_opts))
203 opts->flags |= TCP_OPTS_FLAG_WSCALE;
204 opts->wscale = tc->rcv_wscale;
205 len += TCP_OPTION_LEN_WINDOW_SCALE;
208 if (tcp_opts_tstamp (&tc->rcv_opts))
210 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
211 opts->tsval = tcp_time_now ();
212 opts->tsecr = tc->tsval_recent;
213 len += TCP_OPTION_LEN_TIMESTAMP;
216 if (tcp_opts_sack_permitted (&tc->rcv_opts))
218 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
219 len += TCP_OPTION_LEN_SACK_PERMITTED;
222 /* Align to needed boundary */
223 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
228 tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
234 if (tcp_opts_tstamp (&tc->rcv_opts))
236 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
237 opts->tsval = tcp_tstamp (tc);
238 opts->tsecr = tc->tsval_recent;
239 len += TCP_OPTION_LEN_TIMESTAMP;
241 if (tcp_opts_sack_permitted (&tc->rcv_opts))
243 if (vec_len (tc->snd_sacks))
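	  /* Rotate through the pending sack blocks across acks: only
	   * TCP_OPTS_MAX_SACK_BLOCKS fit in one header, so snd_sack_pos
	   * remembers where the next ack should resume */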
245 opts->flags |= TCP_OPTS_FLAG_SACK;
246 if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
247 tc->snd_sack_pos = 0;
248 opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
249 opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
250 opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
251 TCP_OPTS_MAX_SACK_BLOCKS);
252 tc->snd_sack_pos += opts->n_sack_blocks;
253 len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
257 /* Align to needed boundary */
258 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
263 tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
268 case TCP_STATE_ESTABLISHED:
269 case TCP_STATE_CLOSE_WAIT:
270 case TCP_STATE_FIN_WAIT_1:
271 case TCP_STATE_LAST_ACK:
272 case TCP_STATE_CLOSING:
273 case TCP_STATE_FIN_WAIT_2:
274 case TCP_STATE_TIME_WAIT:
275 case TCP_STATE_CLOSED:
276 return tcp_make_established_options (tc, opts);
277 case TCP_STATE_SYN_RCVD:
278 return tcp_make_synack_options (tc, opts);
279 case TCP_STATE_SYN_SENT:
280 return tcp_make_syn_options (tc, opts);
282 clib_warning ("State not handled! %d", state);
288 * Update burst send vars
290 * - Updates snd_mss to reflect the effective segment size that we can send
291 * by taking into account all TCP options, including SACKs.
292 * - Caches 'on the wire' options for reuse
293 * - Updates the receive window, which can be reused for a burst.
295 * This should *only* be called when doing bursts
298 tcp_update_burst_snd_vars (tcp_connection_t * tc)
300 tcp_main_t *tm = &tcp_main;
302 /* Compute options to be used for connection. These may be reused when
303 * sending data or to compute the effective mss (snd_mss) */
304 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
305 TCP_STATE_ESTABLISHED);
307 /* XXX check if MTU has been updated */
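  /* Effective send mss: the smaller of our mss and the peer's advertised
   * mss, minus the bytes consumed by TCP options */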
308 tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
309 ASSERT (tc->snd_mss > 0);
311 tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
314 tcp_update_rcv_wnd (tc);
316 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
317 tcp_bt_check_app_limited (tc);
319 if (tc->snd_una == tc->snd_nxt)
321 tcp_cc_event (tc, TCP_CC_EVT_START_TX);
322 tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST);
326 #endif /* CLIB_MARCH_VARIANT */
329 tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
331 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
332 vlib_buffer_free_one (vm, b->next_buffer);
333 /* Zero all flags but free list index and trace flag */
334 b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
336 b->current_length = 0;
337 b->total_length_not_including_first_buffer = 0;
338 vnet_buffer (b)->tcp.flags = 0;
340 /* Leave enough space for headers */
341 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
344 #ifndef CLIB_MARCH_VARIANT
346 tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
348 ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
349 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
350 b->total_length_not_including_first_buffer = 0;
352 vnet_buffer (b)->tcp.flags = 0;
353 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
354 /* Leave enough space for headers */
355 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
359 /* Compute TCP checksum in software when offloading is disabled for a connection */
361 ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
362 ip46_address_t * src, ip46_address_t * dst)
365 u16 payload_length_host_byte_order;
368 /* Initialize checksum with the IP pseudo-header (payload length, protocol and addresses). */
369 sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) +
370 clib_host_to_net_u16 (IP_PROTOCOL_TCP);
371 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
373 for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++)
375 sum0 = ip_csum_with_carry
376 (sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword));
377 sum0 = ip_csum_with_carry
378 (sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword));
381 return ip_calculate_l4_checksum (vm, p0, sum0,
382 payload_length_host_byte_order, NULL, 0,
387 ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
388 ip46_address_t * src, ip46_address_t * dst)
391 u32 payload_length_host_byte_order;
393 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
395 clib_host_to_net_u32 (payload_length_host_byte_order +
396 (IP_PROTOCOL_TCP << 16));
398 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32));
399 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32));
401 return ip_calculate_l4_checksum (vm, p0, sum0,
402 payload_length_host_byte_order, NULL, 0,
407 tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
410 if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
412 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
413 vlib_main_t *vm = wrk->vm;
416 checksum = ip4_tcp_compute_checksum_custom
417 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
419 checksum = ip6_tcp_compute_checksum_custom
420 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
424 b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
433 tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
436 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
437 u8 tcp_opts_len, tcp_hdr_opts_len;
441 wnd = tcp_window_to_advertise (tc, state);
443 /* Make and write options */
444 tcp_opts_len = tcp_make_established_options (tc, snd_opts);
445 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
447 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
448 tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);
450 tcp_options_write ((u8 *) (th + 1), snd_opts);
452 th->checksum = tcp_compute_checksum (tc, b);
454 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
457 tcp_zero_rwnd_sent_on (tc);
459 tcp_zero_rwnd_sent_off (tc);
463 * Convert buffer to ACK
466 tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
468 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
469 TCP_EVT (TCP_EVT_ACK_SENT, tc);
470 tc->rcv_las = tc->rcv_nxt;
474 * Convert buffer to FIN-ACK
477 tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
479 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
483 * Convert buffer to SYN
486 tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
488 u8 tcp_hdr_opts_len, tcp_opts_len;
491 tcp_options_t snd_opts;
493 initial_wnd = tcp_initial_window_to_advertise (tc);
495 /* Make and write options */
496 clib_memset (&snd_opts, 0, sizeof (snd_opts));
497 tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
498 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
500 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
501 tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
503 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
504 tcp_options_write ((u8 *) (th + 1), &snd_opts);
505 th->checksum = tcp_compute_checksum (tc, b);
509 * Convert buffer to SYN-ACK
512 tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
514 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
515 u8 tcp_opts_len, tcp_hdr_opts_len;
519 clib_memset (snd_opts, 0, sizeof (*snd_opts));
520 initial_wnd = tcp_initial_window_to_advertise (tc);
521 tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
522 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
524 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
525 tc->rcv_nxt, tcp_hdr_opts_len,
526 TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
527 tcp_options_write ((u8 *) (th + 1), snd_opts);
529 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
530 th->checksum = tcp_compute_checksum (tc, b);
534 tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
535 u8 is_ip4, u32 fib_index)
537 tcp_main_t *tm = &tcp_main;
538 vlib_main_t *vm = wrk->vm;
540 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
543 vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
544 vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
546 tcp_trajectory_add_start (b, 1);
548 session_add_pending_tx_buffer (vm->thread_index, bi,
549 tm->ipl_next_node[!is_ip4]);
551 if (vm->thread_index == 0 && vlib_num_workers ())
552 session_queue_run_on_main_thread (wrk->vm);
556 tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
559 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
562 session_add_pending_tx_buffer (wrk->vm->thread_index, bi,
563 wrk->tco_next_node[!is_ip4]);
566 #endif /* CLIB_MARCH_VARIANT */
569 tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b, u8 is_ip4)
574 ip4_address_t src_ip4, dst_ip4;
575 ip6_address_t src_ip6, dst_ip6;
576 u16 src_port, dst_port;
577 u32 tmp, len, seq, ack;
580 /* Find IP and TCP headers */
581 th = tcp_buffer_hdr (b);
583 /* Save src and dst ip */
586 ih4 = vlib_buffer_get_current (b);
587 ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
588 src_ip4.as_u32 = ih4->src_address.as_u32;
589 dst_ip4.as_u32 = ih4->dst_address.as_u32;
593 ih6 = vlib_buffer_get_current (b);
594 ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
595 clib_memcpy_fast (&src_ip6, &ih6->src_address, sizeof (ip6_address_t));
596 clib_memcpy_fast (&dst_ip6, &ih6->dst_address, sizeof (ip6_address_t));
599 src_port = th->src_port;
600 dst_port = th->dst_port;
601 flags = TCP_FLAG_RST;
604 * RFC 793. If the ACK bit is off, sequence number zero is used,
605 * <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
606 * If the ACK bit is on,
607 * <SEQ=SEG.ACK><CTL=RST>
611 seq = th->ack_number;
616 flags |= TCP_FLAG_ACK;
617 tmp = clib_net_to_host_u32 (th->seq_number);
618 len = vnet_buffer (b)->tcp.data_len + tcp_is_syn (th) + tcp_is_fin (th);
619 ack = clib_host_to_net_u32 (tmp + len);
623 tcp_reuse_buffer (vm, b);
624 tcp_trajectory_add_start (b, 4);
625 th = vlib_buffer_push_tcp_net_order (b, dst_port, src_port, seq, ack,
626 sizeof (tcp_header_t), flags, 0);
630 ih4 = vlib_buffer_push_ip4 (vm, b, &dst_ip4, &src_ip4,
632 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
637 ih6 = vlib_buffer_push_ip6 (vm, b, &dst_ip6, &src_ip6, IP_PROTOCOL_TCP);
638 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
645 #ifndef CLIB_MARCH_VARIANT
647 * Send reset without reusing existing buffer
649 * It extracts connection info out of the original packet
652 tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
653 u32 thread_index, u8 is_ip4)
655 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
656 vlib_main_t *vm = wrk->vm;
658 u32 bi, sw_if_index, fib_index;
659 u8 tcp_hdr_len, flags = 0;
660 tcp_header_t *th, *pkt_th;
662 ip4_header_t *ih4, *pkt_ih4;
663 ip6_header_t *ih6, *pkt_ih6;
664 fib_protocol_t fib_proto;
666 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
668 tcp_worker_stats_inc (wrk, no_buffer, 1);
672 b = vlib_get_buffer (vm, bi);
673 sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
674 fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
675 fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
676 tcp_init_buffer (vm, b);
678 /* Make and write options */
679 tcp_hdr_len = sizeof (tcp_header_t);
683 pkt_ih4 = vlib_buffer_get_current (pkt);
684 pkt_th = ip4_next_header (pkt_ih4);
688 pkt_ih6 = vlib_buffer_get_current (pkt);
689 pkt_th = ip6_next_header (pkt_ih6);
692 if (tcp_ack (pkt_th))
694 flags = TCP_FLAG_RST;
695 seq = pkt_th->ack_number;
696 ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
700 flags = TCP_FLAG_RST | TCP_FLAG_ACK;
702 ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
705 th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
706 seq, ack, tcp_hdr_len, flags, 0);
708 /* Swap src and dst ip */
711 ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
712 ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
713 &pkt_ih4->src_address, IP_PROTOCOL_TCP,
714 tcp_csum_offload (tc));
715 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
720 ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
722 ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address,
723 &pkt_ih6->src_address,
725 tc->ipv6_flow_label);
726 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
730 tcp_enqueue_to_ip_lookup (wrk, b, bi, is_ip4, fib_index);
731 TCP_EVT (TCP_EVT_RST_SENT, tc);
732 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
733 TCP_ERROR_RST_SENT, 1);
737 * Build and send a reset packet for the connection
740 tcp_send_reset (tcp_connection_t * tc)
742 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
743 vlib_main_t *vm = wrk->vm;
747 u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
750 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
752 tcp_worker_stats_inc (wrk, no_buffer, 1);
755 b = vlib_get_buffer (vm, bi);
756 tcp_init_buffer (vm, b);
758 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
759 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
760 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
761 flags = TCP_FLAG_RST;
762 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
763 tc->rcv_nxt, tcp_hdr_opts_len, flags,
765 opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
766 th->checksum = tcp_compute_checksum (tc, b);
767 ASSERT (opts_write_len == tc->snd_opts_len);
768 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
769 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
770 TCP_EVT (TCP_EVT_RST_SENT, tc);
771 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
772 TCP_ERROR_RST_SENT, 1);
776 tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
781 vlib_buffer_push_ip4 (wrk->vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4,
782 IP_PROTOCOL_TCP, tcp_csum_offload (tc));
786 vlib_buffer_push_ip6_custom (wrk->vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6,
787 IP_PROTOCOL_TCP, tc->ipv6_flow_label);
794 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
795 * The packet is not forwarded through tcpx_output to avoid doing lookups
796 * in the half_open pool.
799 tcp_send_syn (tcp_connection_t * tc)
801 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
802 vlib_main_t *vm = wrk->vm;
807 * Set up retransmit and establish timers before requesting a buffer
808 * such that we can return if we've run out.
810 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
811 tc->rto * TCP_TO_TIMER_TICK);
813 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
815 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
816 tcp_worker_stats_inc (wrk, no_buffer, 1);
820 b = vlib_get_buffer (vm, bi);
821 tcp_init_buffer (vm, b);
822 tcp_make_syn (tc, b);
824 /* Measure RTT with this */
825 tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
826 tc->rtt_seq = tc->snd_nxt;
829 tcp_push_ip_hdr (wrk, tc, b);
830 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
831 TCP_EVT (TCP_EVT_SYN_SENT, tc);
835 tcp_send_synack (tcp_connection_t * tc)
837 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
838 vlib_main_t *vm = wrk->vm;
842 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
844 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
846 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
847 tcp_worker_stats_inc (wrk, no_buffer, 1);
851 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
852 b = vlib_get_buffer (vm, bi);
853 tcp_init_buffer (vm, b);
854 tcp_make_synack (tc, b);
855 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
856 TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
863 tcp_send_fin (tcp_connection_t * tc)
865 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
866 vlib_main_t *vm = wrk->vm;
871 fin_snt = tc->flags & TCP_CONN_FINSNT;
875 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
877 /* Out of buffers so program fin retransmit ASAP */
878 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
882 /* Make sure retransmit retries a FIN, not data */
883 tc->flags |= TCP_CONN_FINSNT;
884 tcp_worker_stats_inc (wrk, no_buffer, 1);
888 /* If we have non-dupacks programmed, no need to send them */
889 if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
890 tc->flags &= ~TCP_CONN_SNDACK;
892 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
893 b = vlib_get_buffer (vm, bi);
894 tcp_init_buffer (vm, b);
895 tcp_make_fin (tc, b);
896 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
897 TCP_EVT (TCP_EVT_FIN_SENT, tc);
898 /* Account for the FIN */
902 tc->flags |= TCP_CONN_FINSNT;
903 tc->flags &= ~TCP_CONN_FINPNDG;
904 tc->snd_una_max = seq_max (tc->snd_una_max, tc->snd_nxt);
909 * Push TCP header and update connection variables. Should only be called
910 * for segments with data, not for 'control' packets.
913 tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
914 u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
916 u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
917 u32 advertise_wnd, data_len;
918 tcp_main_t *tm = &tcp_main;
921 data_len = b->current_length;
922 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
923 data_len += b->total_length_not_including_first_buffer;
925 vnet_buffer (b)->tcp.flags = 0;
926 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
929 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
931 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
934 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
936 advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
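  /* Set PSH only on the segment that carries the sequence number the app
   * marked for push (psh_seq within [snd_nxt, snd_nxt + data_len)) */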
938 if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
940 if (seq_geq (tc->psh_seq, snd_nxt)
941 && seq_lt (tc->psh_seq, snd_nxt + data_len))
942 flags |= TCP_FLAG_PSH;
944 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
945 tc->rcv_nxt, tcp_hdr_opts_len, flags,
950 clib_memcpy_fast ((u8 *) (th + 1),
951 tm->wrk_ctx[tc->c_thread_index].cached_opts,
956 u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
957 ASSERT (len == tc->snd_opts_len);
961 * Update connection variables
965 tc->snd_nxt += data_len;
966 tc->rcv_las = tc->rcv_nxt;
968 tc->bytes_out += data_len;
969 tc->data_segs_out += 1;
971 th->checksum = tcp_compute_checksum (tc, b);
973 TCP_EVT (TCP_EVT_PKTIZE, tc);
977 tcp_buffer_len (vlib_buffer_t * b)
979 u32 data_len = b->current_length;
980 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
981 data_len += b->total_length_not_including_first_buffer;
986 tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
988 tcp_connection_t *tc = (tcp_connection_t *) tconn;
990 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
991 tcp_bt_track_tx (tc, tcp_buffer_len (b));
993 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
994 /* update_snd_nxt */ 1);
996 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
997 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
998 /* If not tracking an ACK, start tracking */
999 if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
1001 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
1002 tc->rtt_seq = tc->snd_nxt;
1004 if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
1006 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1007 tcp_retransmit_timer_set (&wrk->timer_wheel, tc);
1010 tcp_trajectory_add_start (b, 3);
1015 tcp_send_ack (tcp_connection_t * tc)
1017 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1018 vlib_main_t *vm = wrk->vm;
1022 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1024 tcp_update_rcv_wnd (tc);
1025 tcp_worker_stats_inc (wrk, no_buffer, 1);
1028 b = vlib_get_buffer (vm, bi);
1029 tcp_init_buffer (vm, b);
1030 tcp_make_ack (tc, b);
1031 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1035 tcp_program_ack (tcp_connection_t * tc)
1037 if (!(tc->flags & TCP_CONN_SNDACK))
1039 session_add_self_custom_tx_evt (&tc->connection, 1);
1040 tc->flags |= TCP_CONN_SNDACK;
1045 tcp_program_dupack (tcp_connection_t * tc)
1047 if (!(tc->flags & TCP_CONN_SNDACK))
1049 session_add_self_custom_tx_evt (&tc->connection, 1);
1050 tc->flags |= TCP_CONN_SNDACK;
1052 if (tc->pending_dupacks < 255)
1053 tc->pending_dupacks += 1;
1057 tcp_program_retransmit (tcp_connection_t * tc)
1059 if (!(tc->flags & TCP_CONN_RXT_PENDING))
1061 session_add_self_custom_tx_evt (&tc->connection, 0);
1062 tc->flags |= TCP_CONN_RXT_PENDING;
1067 * Send window update ack
1069 * Ensures that it will be sent only once, after a zero rwnd has been
1070 * advertised in a previous ack, and only if rwnd has grown beyond a
1071 * configurable value.
1074 tcp_send_window_update_ack (tcp_connection_t * tc)
1076 if (tcp_zero_rwnd_sent (tc))
1078 tcp_update_rcv_wnd (tc);
1079 if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
1081 tcp_zero_rwnd_sent_off (tc);
1082 tcp_program_ack (tc);
1088 * Allocate a new buffer and build a new tcp segment
1090 * @param wrk tcp worker
1091 * @param tc connection for which the segment will be allocated
1092 * @param offset offset of the first byte in the tx fifo
1093 * @param max_deq_bytes segment size
1094 * @param[out] b pointer to buffer allocated
1096 * @return the number of bytes in the segment or 0 if buffer cannot be
1097 * allocated or no data available
1100 tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1101 u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
1103 u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
1104 vlib_main_t *vm = wrk->vm;
1109 seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;
1114 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1117 * Allocate and fill in buffer(s)
1120 /* Easy case, buffer size greater than mss */
1121 if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
1123 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1125 tcp_worker_stats_inc (wrk, no_buffer, 1);
1128 *b = vlib_get_buffer (vm, bi);
1129 data = tcp_init_buffer (vm, *b);
1130 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1132 ASSERT (n_bytes == max_deq_bytes);
1133 b[0]->current_length = n_bytes;
1134 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1135 /* burst */ 0, /* update_snd_nxt */ 0);
1137 /* Split mss into multiple buffers */
1140 u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
1141 u16 n_peeked, len_to_deq;
1142 vlib_buffer_t *chain_b, *prev_b;
1145 /* Make sure we have enough buffers */
1146 n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
1147 vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
1148 CLIB_CACHE_LINE_BYTES);
1149 n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
1150 if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
1153 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1154 tcp_worker_stats_inc (wrk, no_buffer, 1);
1158 *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
1159 data = tcp_init_buffer (vm, *b);
1160 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1162 TRANSPORT_MAX_HDRS_LEN);
1163 b[0]->current_length = n_bytes;
1164 b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1165 b[0]->total_length_not_including_first_buffer = 0;
1166 max_deq_bytes -= n_bytes;
1169 for (i = 1; i < n_bufs_per_seg; i++)
1172 len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
1173 chain_bi = wrk->tx_buffers[--n_bufs];
1174 chain_b = vlib_get_buffer (vm, chain_bi);
1175 chain_b->current_data = 0;
1176 data = vlib_buffer_get_current (chain_b);
1177 n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
1180 ASSERT (n_peeked == len_to_deq);
1181 n_bytes += n_peeked;
1182 chain_b->current_length = n_peeked;
1183 chain_b->next_buffer = 0;
1185 /* update previous buffer */
1186 prev_b->next_buffer = chain_bi;
1187 prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1189 max_deq_bytes -= n_peeked;
1190 b[0]->total_length_not_including_first_buffer += n_peeked;
1193 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1194 /* burst */ 0, /* update_snd_nxt */ 0);
1196 if (PREDICT_FALSE (n_bufs))
1198 clib_warning ("not all buffers consumed");
1199 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1203 ASSERT (n_bytes > 0);
1204 ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
1210 * Build a retransmit segment
1212 * @return the number of bytes in the segment or 0 if there's nothing to retransmit
1216 tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
1217 tcp_connection_t * tc, u32 offset,
1218 u32 max_deq_bytes, vlib_buffer_t ** b)
1220 u32 start, available_bytes;
1223 ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
1224 ASSERT (max_deq_bytes != 0);
1227 * Make sure we can retransmit something
1229 available_bytes = transport_max_tx_dequeue (&tc->connection);
1230 ASSERT (available_bytes >= offset);
1231 available_bytes -= offset;
1232 if (!available_bytes)
1235 max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
1236 max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
1238 start = tc->snd_una + offset;
1239 ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));
1241 n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
1245 tc->snd_rxt_bytes += n_bytes;
1247 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1248 tcp_bt_track_rxt (tc, start, start + n_bytes);
1250 tc->bytes_retrans += n_bytes;
1251 tc->segs_retrans += 1;
1252 tcp_worker_stats_inc (wrk, rxt_segs, 1);
1253 TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);
1259 tcp_check_sack_reneging (tcp_connection_t * tc)
1261 sack_scoreboard_t *sb = &tc->sack_sb;
1262 sack_scoreboard_hole_t *hole;
1264 hole = scoreboard_first_hole (sb);
1265 if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
1268 scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
1272 * Reset congestion control, switch cwnd to loss window and try again.
1275 tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
1277 TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
1279 tc->prev_ssthresh = tc->ssthresh;
1280 tc->prev_cwnd = tc->cwnd;
1282 /* If we entered loss without fast recovery, notify cc algo of the
1283 * congestion event such that it can update ssthresh and its state */
1284 if (!tcp_in_fastrecovery (tc))
1285 tcp_cc_congestion (tc);
1287 /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
1291 tc->cwnd_acc_bytes = 0;
1292 tc->tr_occurences += 1;
1293 tcp_recovery_on (tc);
1297 tcp_timer_retransmit_handler (tcp_connection_t * tc)
1299 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1300 vlib_main_t *vm = wrk->vm;
1301 vlib_buffer_t *b = 0;
1304 tcp_worker_stats_inc (wrk, tr_events, 1);
1306 /* Should be handled by a different handler */
1307 if (PREDICT_FALSE (tc->state == TCP_STATE_SYN_SENT))
1310 /* Wait-close and retransmit could pop at the same time */
1311 if (tc->state == TCP_STATE_CLOSED)
1314 if (tc->state >= TCP_STATE_ESTABLISHED)
1316 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1318 /* Lost FIN, retransmit and return */
1319 if (tc->flags & TCP_CONN_FINSNT)
1323 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1327 /* Shouldn't be here. This condition is tricky because it has to take
1328 * into account boff > 0 due to persist timeout. */
1329 if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
1330 || (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
1331 && !tcp_flight_size (tc)))
1333 ASSERT (!tcp_in_recovery (tc));
1338 /* We're not in recovery so make sure rto_boff is 0. It can be non-zero
1339 * due to persist timer timeout */
1340 if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
1343 tcp_update_rto (tc);
1346 /* Peer is dead or network connectivity is lost. Close connection.
1347 * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
1348 * a min rto of 0.2s we need to retry about 8 times. */
1349 if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
1351 tcp_send_reset (tc);
1352 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1353 session_transport_closing_notify (&tc->connection);
1354 session_transport_closed_notify (&tc->connection);
1355 tcp_connection_timers_reset (tc);
1356 tcp_program_cleanup (wrk, tc);
1357 tcp_worker_stats_inc (wrk, tr_abort, 1);
1361 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1362 tcp_check_sack_reneging (tc);
1364 /* Update send congestion to make sure that rxt has data to send */
1365 tc->snd_congestion = tc->snd_nxt;
1367 /* Send the first unacked segment. If we're short on buffers, return
1368 * as soon as possible */
1369 n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
1370 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
1373 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
1377 bi = vlib_get_buffer_index (vm, b);
1378 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1380 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1381 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
1384 if (tc->rto_boff == 1)
1386 tcp_cc_init_rxt_timeout (tc);
1387 /* Record timestamp. Eifel detection algorithm RFC3522 */
1388 tc->snd_rxt_ts = tcp_tstamp (tc);
1391 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1392 scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);
1394 tcp_program_retransmit (tc);
1396 /* Retransmit SYN-ACK */
1397 else if (tc->state == TCP_STATE_SYN_RCVD)
1399 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1403 /* Passive open establish timeout */
1404 if (tc->rto > TCP_ESTABLISH_TIME >> 1)
1406 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1407 tcp_connection_timers_reset (tc);
1408 tcp_program_cleanup (wrk, tc);
1409 tcp_worker_stats_inc (wrk, tr_abort, 1);
1413 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1415 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
1416 tcp_worker_stats_inc (wrk, no_buffer, 1);
1421 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1422 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1424 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
1426 b = vlib_get_buffer (vm, bi);
1427 tcp_init_buffer (vm, b);
1428 tcp_make_synack (tc, b);
1429 TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);
1431 /* Retransmit timer already updated, just enqueue to output */
1432 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1436 ASSERT (tc->state == TCP_STATE_CLOSED);
1442 * SYN retransmit timer handler. Active open only.
1445 tcp_timer_retransmit_syn_handler (tcp_connection_t * tc)
1447 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1448 vlib_main_t *vm = wrk->vm;
1449 vlib_buffer_t *b = 0;
1452 /* Note: the connection may have transitioned to ESTABLISHED... */
1453 if (PREDICT_FALSE (tc->state != TCP_STATE_SYN_SENT))
1456 /* Half-open connection actually moved to established but we were
1457 * waiting for syn retransmit to pop to call cleanup from the right thread */
1459 if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
1461 if (tcp_half_open_connection_cleanup (tc))
1462 TCP_DBG ("could not remove half-open connection");
1466 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1469 /* Active open establish timeout */
1470 if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
1472 session_stream_connect_notify (&tc->connection, SESSION_E_TIMEDOUT);
1473 tcp_connection_cleanup (tc);
1477 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1479 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
1480 tcp_worker_stats_inc (wrk, no_buffer, 1);
1484 /* Try without increasing RTO a number of times. If this fails,
1485 * start growing RTO exponentially */
1487 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1488 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1490 b = vlib_get_buffer (vm, bi);
1491 tcp_init_buffer (vm, b);
1492 tcp_make_syn (tc, b);
1494 TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);
1496 /* This goes straight to ipx_lookup */
1497 tcp_push_ip_hdr (wrk, tc, b);
1498 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
1500 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
1501 tc->rto * TCP_TO_TIMER_TICK);
1505 * Got 0 snd_wnd from peer, try to do something about it.
1509 tcp_timer_persist_handler (tcp_connection_t * tc)
1511 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1512 u32 bi, max_snd_bytes, available_bytes, offset;
1513 tcp_main_t *tm = vnet_get_tcp_main ();
1514 vlib_main_t *vm = wrk->vm;
1519 /* Problem already solved or worse */
1520 if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
1521 || (tc->flags & TCP_CONN_FINSNT))
1522 goto update_scheduler;
1524 available_bytes = transport_max_tx_dequeue (&tc->connection);
1525 offset = tc->snd_nxt - tc->snd_una;
1527 /* Reprogram persist if no new bytes available to send. We may have data
1529 if (!available_bytes)
1531 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1535 if (available_bytes <= offset)
1536 goto update_scheduler;
1538 /* Increment RTO backoff */
1540 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1543 * Try to force the first unsent segment (or buffer)
1545 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1547 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1548 tcp_worker_stats_inc (wrk, no_buffer, 1);
1552 b = vlib_get_buffer (vm, bi);
1553 data = tcp_init_buffer (vm, b);
1555 tcp_validate_txf_size (tc, offset);
1556 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1557 max_snd_bytes = clib_min (tc->snd_mss,
1558 tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
1559 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1561 b->current_length = n_bytes;
1562 ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
1563 || tc->snd_nxt == tc->snd_una_max
1564 || tc->rto_boff > 1));
1566 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1568 tcp_bt_check_app_limited (tc);
1569 tcp_bt_track_tx (tc, n_bytes);
1572 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
1573 /* burst */ 0, /* update_snd_nxt */ 1);
1574 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1575 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
1576 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1578 /* Just sent new data, enable retransmit */
1579 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
1585 if (tcp_is_descheduled (tc))
1586 transport_connection_reschedule (&tc->connection);
1590 * Retransmit first unacked segment
1593 tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1595 vlib_main_t *vm = wrk->vm;
1599 TCP_EVT (TCP_EVT_CC_EVT, tc, 1);
1601 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
1605 bi = vlib_get_buffer_index (vm, b);
1606 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1612 tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1615 u32 offset, n_segs = 0, n_written, bi, available_wnd;
1616 vlib_main_t *vm = wrk->vm;
1617 vlib_buffer_t *b = 0;
1619 offset = tc->snd_nxt - tc->snd_una;
1620 available_wnd = tc->snd_wnd - offset;
1621 burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
1623 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1624 tcp_bt_check_app_limited (tc);
1626 while (n_segs < burst_size)
1628 n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
1632 bi = vlib_get_buffer_index (vm, b);
1633 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1634 offset += n_written;
1637 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1638 tcp_bt_track_tx (tc, n_written);
1640 tc->snd_nxt += n_written;
1641 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1649 * Estimate send space using proportional rate reduction (RFC6937)
1652 tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
1657 pipe = tcp_flight_size (tc);
1658 prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);
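  /* RFC 6937 proportional rate reduction: prr_out is what was sent during
   * recovery (retransmissions plus new data past snd_congestion). While
   * pipe > ssthresh, send in proportion to newly delivered bytes scaled by
   * ssthresh / prev_cwnd; otherwise cap sending by the slow-start reduction
   * bound computed below */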
1660 if (pipe > tc->ssthresh)
1662 space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
1668 limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
1669 space = clib_min (tc->ssthresh - pipe, limit);
1671 space = clib_max (space, prr_out ? 0 : tc->snd_mss);
1676 tcp_retransmit_should_retry_head (tcp_connection_t * tc,
1677 sack_scoreboard_t * sb)
1679 u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
1680 f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;
1682 if (tcp_fastrecovery_first (tc))
1685 return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
1689 tcp_max_tx_deq (tcp_connection_t * tc)
1691 return (transport_max_tx_dequeue (&tc->connection)
1692 - (tc->snd_nxt - tc->snd_una));
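/* Rescue retransmit state is only valid while rescue_rxt lies between
 * snd_una and snd_congestion, i.e., within the current recovery episode */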
1695 #define scoreboard_rescue_rxt_valid(_sb, _tc) \
1696 (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
1697 && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
1700 * Do retransmit with SACKs
1703 tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1706 u32 n_written = 0, offset, max_bytes, n_segs = 0;
1707 u8 snd_limited = 0, can_rescue = 0;
1708 u32 bi, max_deq, burst_bytes;
1709 sack_scoreboard_hole_t *hole;
1710 vlib_main_t *vm = wrk->vm;
1711 vlib_buffer_t *b = 0;
1712 sack_scoreboard_t *sb;
1715 ASSERT (tcp_in_cong_recovery (tc));
1717 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1718 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1721 tcp_program_retransmit (tc);
1725 if (tcp_in_recovery (tc))
1726 snd_space = tcp_available_cc_snd_space (tc);
1728 snd_space = tcp_fastrecovery_prr_snd_space (tc);
1730 if (snd_space < tc->snd_mss)
1735 /* Check if snd_una is a lost retransmit */
1736 if (pool_elts (sb->holes)
1737 && seq_gt (sb->high_sacked, tc->snd_congestion)
1738 && tc->rxt_head != tc->snd_una
1739 && tcp_retransmit_should_retry_head (tc, sb))
1741 max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
1742 n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b);
1745 tcp_program_retransmit (tc);
1748 bi = vlib_get_buffer_index (vm, b);
1749 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1752 tc->rxt_head = tc->snd_una;
1753 tc->rxt_delivered += n_written;
1754 tc->prr_delivered += n_written;
1755 ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
1758 tcp_fastrecovery_first_off (tc);
1760 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1761 hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
1763 max_deq = transport_max_tx_dequeue (&tc->connection);
1764 max_deq -= tc->snd_nxt - tc->snd_una;
1766 while (snd_space > 0 && n_segs < burst_size)
1768 hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
1772 /* We are out of lost holes to retransmit so send some new data. */
1773 if (max_deq > tc->snd_mss)
1778 /* Make sure we don't exceed available window and leave space
1779 * for one more packet, to avoid zero window acks */
1780 av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
1781 av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
1782 snd_space = clib_min (snd_space, av_wnd);
1783 snd_space = clib_min (max_deq, snd_space);
1784 burst_size = clib_min (burst_size - n_segs,
1785 snd_space / tc->snd_mss);
1786 burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
1787 n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
1788 if (max_deq > n_segs_new * tc->snd_mss)
1789 tcp_program_retransmit (tc);
1791 n_segs += n_segs_new;
1795 if (tcp_in_recovery (tc) || !can_rescue
1796 || scoreboard_rescue_rxt_valid (sb, tc))
1799 /* If rescue rxt undefined or less than snd_una then one segment of
1800 * up to SMSS octets that MUST include the highest outstanding
1801 * unSACKed sequence number SHOULD be returned, and RescueRxt set to
1802 * RecoveryPoint. HighRxt MUST NOT be updated.
1804 hole = scoreboard_last_hole (sb);
1805 max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
1806 max_bytes = clib_min (max_bytes, snd_space);
1807 offset = hole->end - tc->snd_una - max_bytes;
1808 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
1813 sb->rescue_rxt = tc->snd_congestion;
1814 bi = vlib_get_buffer_index (vm, b);
1815 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1820 max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
1821 max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
1825 offset = sb->high_rxt - tc->snd_una;
1826 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1828 ASSERT (n_written <= snd_space);
1830 /* Nothing left to retransmit */
1834 bi = vlib_get_buffer_index (vm, b);
1835 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1837 sb->high_rxt += n_written;
1838 ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));
1840 snd_space -= n_written;
1845 tcp_program_retransmit (tc);
1849 transport_connection_tx_pacer_reset_bucket (&tc->connection, 0);
1854 * Fast retransmit without SACK info
1857 tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1860 u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
1861 u32 burst_bytes, sent_bytes;
1862 vlib_main_t *vm = wrk->vm;
1863 int snd_space, n_segs = 0;
1867 ASSERT (tcp_in_cong_recovery (tc));
1868 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1870 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1871 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1874 tcp_program_retransmit (tc);
1878 snd_space = tcp_available_cc_snd_space (tc);
1879 cc_limited = snd_space < burst_bytes;
1881 if (!tcp_fastrecovery_first (tc))
1884 /* RFC 6582: [If a partial ack], retransmit the first unacknowledged segment */
1886 while (snd_space > 0 && n_segs < burst_size)
1888 max_bytes = clib_min (tc->snd_mss,
1889 tc->snd_congestion - tc->snd_una - offset);
1892 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1895 /* Nothing left to retransmit */
1899 bi = vlib_get_buffer_index (vm, b);
1900 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1901 snd_space -= n_written;
1902 offset += n_written;
1906 if (n_segs == burst_size)
1911 /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
1912 if (snd_space < tc->snd_mss || tc->snd_mss == 0)
1915 max_deq = transport_max_tx_dequeue (&tc->connection);
1916 max_deq -= tc->snd_nxt - tc->snd_una;
1919 snd_space = clib_min (max_deq, snd_space);
1920 burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
1921 n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
1922 if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
1923 tcp_program_retransmit (tc);
1924 n_segs += n_segs_now;
1928 tcp_fastrecovery_first_off (tc);
1930 sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
1931 sent_bytes = cc_limited ? burst_bytes : sent_bytes;
1932 transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
1938 tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
1942 if (!tc->pending_dupacks)
1944 if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
1945 || tc->state != TCP_STATE_ESTABLISHED)
1953 /* If we're supposed to send dupacks but have no ooo data
1954 * send only one ack */
1955 if (!vec_len (tc->snd_sacks))
1958 tc->dupacks_out += 1;
1959 tc->pending_dupacks = 0;
1963 /* Start with first sack block */
1964 tc->snd_sack_pos = 0;
1966 /* Generate enough dupacks to cover all sack blocks. Do not generate
1967 * more sacks than the number of packets received. But do generate at
1968 * least 3, i.e., the number needed to signal congestion, if needed. */
1969 n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
1970 n_acks = clib_min (n_acks, tc->pending_dupacks);
1971 n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
1972 for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
1975 if (n_acks < max_burst_size)
1977 tc->pending_dupacks = 0;
1978 tc->snd_sack_pos = 0;
1979 tc->dupacks_out += n_acks;
1984 TCP_DBG ("constrained by burst size");
1985 tc->pending_dupacks = n_acks - max_burst_size;
1986 tc->dupacks_out += max_burst_size;
1987 tcp_program_dupack (tc);
1988 return max_burst_size;
1993 tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
1995 tcp_worker_ctx_t *wrk;
1998 if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
2001 wrk = tcp_get_worker (tc->c_thread_index);
2003 if (tcp_opts_sack_permitted (&tc->rcv_opts))
2004 n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
2006 n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);
2012 tcp_session_custom_tx (void *conn, transport_send_params_t * sp)
2014 tcp_connection_t *tc = (tcp_connection_t *) conn;
2017 if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
2019 tc->flags &= ~TCP_CONN_RXT_PENDING;
2020 n_segs = tcp_do_retransmit (tc, sp->max_burst_size);
2023 if (!(tc->flags & TCP_CONN_SNDACK))
2026 tc->flags &= ~TCP_CONN_SNDACK;
2028 /* We have retransmitted packets and no dupack */
2029 if (n_segs && !tc->pending_dupacks)
2032 if (sp->max_burst_size <= n_segs)
2034 tcp_program_ack (tc);
2038 n_segs += tcp_send_acks (tc, sp->max_burst_size - n_segs);
2042 #endif /* CLIB_MARCH_VARIANT */
2045 tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
2046 u16 * next0, u32 * error0)
2048 ip_adjacency_t *adj;
2051 /* Not thread safe but as long as the connection exists the adj should not be removed */
2053 ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
2055 if (ai == ADJ_INDEX_INVALID)
2057 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2058 *next0 = TCP_OUTPUT_NEXT_DROP;
2059 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2064 if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
2065 *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
2066 else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
2067 *next0 = TCP_OUTPUT_NEXT_IP_ARP;
2070 *next0 = TCP_OUTPUT_NEXT_DROP;
2071 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2073 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
2077 tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2078 u32 * to_next, u32 n_bufs)
2080 tcp_connection_t *tc;
2086 for (i = 0; i < n_bufs; i++)
2088 b = vlib_get_buffer (vm, to_next[i]);
2089 if (!(b->flags & VLIB_BUFFER_IS_TRACED))
2091 th = vlib_buffer_get_current (b);
2092 tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2094 t = vlib_add_trace (vm, node, b, sizeof (*t));
2095 clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
2096 clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
2101 tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
2102 tcp_connection_t * tc0, u8 is_ip4)
2104 TCP_EVT (TCP_EVT_OUTPUT, tc0,
2105 ((tcp_header_t *) vlib_buffer_get_current (b0))->flags,
2106 b0->current_length);
2109 vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
2110 IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
2112 vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
2113 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
2117 tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
2119 if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
2122 u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;
2124 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
2125 data_len += b->total_length_not_including_first_buffer;
2127 if (PREDICT_TRUE (data_len <= tc->snd_mss))
2131 ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
2132 ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
2133 b->flags |= VNET_BUFFER_F_GSO;
2134 vnet_buffer2 (b)->gso_l4_hdr_sz =
2135 sizeof (tcp_header_t) + tc->snd_opts_len;
2136 vnet_buffer2 (b)->gso_size = tc->snd_mss;
2141 tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
2142 vlib_node_runtime_t * error_node, u16 * next0,
2145 /* If next_index is not drop use it */
2146 if (tc0->next_node_index)
2148 *next0 = tc0->next_node_index;
2149 vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
2153 *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
2156 vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
2157 vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
2163 if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
2164 tcp_output_handle_link_local (tc0, b0, next0, &error0);
2166 if (PREDICT_FALSE (error0))
2168 b0->error = error_node->errors[error0];
2177 tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2178 vlib_frame_t * frame, int is_ip4)
2180 u32 n_left_from, *from, thread_index = vm->thread_index;
2181 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2182 u16 nexts[VLIB_FRAME_SIZE], *next;
2184 from = vlib_frame_vector_args (frame);
2185 n_left_from = frame->n_vectors;
2186 tcp_set_time_now (tcp_get_worker (thread_index));
2188 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2189 tcp46_output_trace_frame (vm, node, from, n_left_from);
2191 vlib_get_buffers (vm, from, bufs, n_left_from);
2195 while (n_left_from >= 4)
2197 tcp_connection_t *tc0, *tc1;
2200 vlib_prefetch_buffer_header (b[2], STORE);
2201 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2203 vlib_prefetch_buffer_header (b[3], STORE);
2204 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2207 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2209 tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
2212 if (PREDICT_TRUE (!tc0 + !tc1 == 0))
2214 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2215 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2217 tcp_check_if_gso (tc0, b[0]);
2218 tcp_check_if_gso (tc1, b[1]);
2220 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2221 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2227 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2228 tcp_check_if_gso (tc0, b[0]);
2229 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2233 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2234 next[0] = TCP_OUTPUT_NEXT_DROP;
2238 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2239 tcp_check_if_gso (tc1, b[1]);
2240 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2244 b[1]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2245 next[1] = TCP_OUTPUT_NEXT_DROP;
2253 while (n_left_from > 0)
2255 tcp_connection_t *tc0;
2257 if (n_left_from > 1)
2259 vlib_prefetch_buffer_header (b[1], STORE);
2260 CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2263 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2266 if (PREDICT_TRUE (tc0 != 0))
2268 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2269 tcp_check_if_gso (tc0, b[0]);
2270 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2274 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2275 next[0] = TCP_OUTPUT_NEXT_DROP;
2283 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2284 vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
2285 TCP_ERROR_PKTS_SENT, frame->n_vectors);
2286 return frame->n_vectors;
2289 VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2290 vlib_frame_t * from_frame)
2292 return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2295 VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2296 vlib_frame_t * from_frame)
2298 return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2302 VLIB_REGISTER_NODE (tcp4_output_node) =
2304 .name = "tcp4-output",
2305 /* Takes a vector of packets. */
2306 .vector_size = sizeof (u32),
2307 .n_errors = TCP_N_ERROR,
2308 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2309 .error_strings = tcp_error_strings,
2310 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2312 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2313 foreach_tcp4_output_next
2316 .format_buffer = format_tcp_header,
2317 .format_trace = format_tcp_tx_trace,
2322 VLIB_REGISTER_NODE (tcp6_output_node) =
2324 .name = "tcp6-output",
2325 /* Takes a vector of packets. */
2326 .vector_size = sizeof (u32),
2327 .n_errors = TCP_N_ERROR,
2328 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2329 .error_strings = tcp_error_strings,
2330 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2332 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2333 foreach_tcp6_output_next
2336 .format_buffer = format_tcp_header,
2337 .format_trace = format_tcp_tx_trace,
2341 typedef enum _tcp_reset_next
2343 TCP_RESET_NEXT_DROP,
2344 TCP_RESET_NEXT_IP_LOOKUP,
2348 #define foreach_tcp4_reset_next \
2349 _(DROP, "error-drop") \
2350 _(IP_LOOKUP, "ip4-lookup")
2352 #define foreach_tcp6_reset_next \
2353 _(DROP, "error-drop") \
2354 _(IP_LOOKUP, "ip6-lookup")
2357 tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2358 vlib_frame_t * from_frame, u8 is_ip4)
2360 u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
2361 u32 n_left_from, next_index, *from, *to_next;
2363 from = vlib_frame_vector_args (from_frame);
2364 n_left_from = from_frame->n_vectors;
2366 next_index = node->cached_next_index;
2368 while (n_left_from > 0)
2372 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2374 while (n_left_from > 0 && n_left_to_next > 0)
2386 n_left_to_next -= 1;
2388 b0 = vlib_get_buffer (vm, bi0);
2389 tcp_make_reset_in_place (vm, b0, is_ip4);
2391 /* Prepare to send to IP lookup */
2392 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2394 b0->error = node->errors[error0];
2395 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
2396 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2398 th0 = vlib_buffer_get_current (b0);
2400 th0 = ip4_next_header ((ip4_header_t *) th0);
2402 th0 = ip6_next_header ((ip6_header_t *) th0);
2403 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2404 clib_memcpy_fast (&t0->tcp_header, th0,
2405 sizeof (t0->tcp_header));
2408 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2409 n_left_to_next, bi0, next0);
2411 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2413 return from_frame->n_vectors;
2416 VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2417 vlib_frame_t * from_frame)
2419 return tcp46_send_reset_inline (vm, node, from_frame, 1);
2422 VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2423 vlib_frame_t * from_frame)
2425 return tcp46_send_reset_inline (vm, node, from_frame, 0);
2429 VLIB_REGISTER_NODE (tcp4_reset_node) = {
2430 .name = "tcp4-reset",
2431 .vector_size = sizeof (u32),
2432 .n_errors = TCP_N_ERROR,
2433 .error_strings = tcp_error_strings,
2434 .n_next_nodes = TCP_RESET_N_NEXT,
2436 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2437 foreach_tcp4_reset_next
2440 .format_trace = format_tcp_tx_trace,
2445 VLIB_REGISTER_NODE (tcp6_reset_node) = {
2446 .name = "tcp6-reset",
2447 .vector_size = sizeof (u32),
2448 .n_errors = TCP_N_ERROR,
2449 .error_strings = tcp_error_strings,
2450 .n_next_nodes = TCP_RESET_N_NEXT,
2452 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2453 foreach_tcp6_reset_next
2456 .format_trace = format_tcp_tx_trace,
2461 * fd.io coding-style-patch-verification: ON
2464 * eval: (c-set-style "gnu")