2 * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/tcp/tcp.h>
17 #include <vnet/tcp/tcp_inlines.h>
20 typedef enum _tcp_output_next
23 TCP_OUTPUT_NEXT_IP_LOOKUP,
24 TCP_OUTPUT_NEXT_IP_REWRITE,
25 TCP_OUTPUT_NEXT_IP_ARP,
29 #define foreach_tcp4_output_next \
30 _ (DROP, "error-drop") \
31 _ (IP_LOOKUP, "ip4-lookup") \
32 _ (IP_REWRITE, "ip4-rewrite") \
35 #define foreach_tcp6_output_next \
36 _ (DROP, "error-drop") \
37 _ (IP_LOOKUP, "ip6-lookup") \
38 _ (IP_REWRITE, "ip6-rewrite") \
39 _ (IP_ARP, "ip6-discover-neighbor")
41 static char *tcp_error_strings[] = {
42 #define tcp_error(n,s) s,
43 #include <vnet/tcp/tcp_error.def>
49 tcp_header_t tcp_header;
50 tcp_connection_t tcp_connection;
54 format_tcp_tx_trace (u8 * s, va_list * args)
56 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
57 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
58 tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *);
59 tcp_connection_t *tc = &t->tcp_connection;
60 u32 indent = format_get_indent (s);
62 s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc,
63 format_tcp_state, tc->state, format_white_space, indent,
64 format_tcp_header, &t->tcp_header, 128);
69 #ifndef CLIB_MARCH_VARIANT
71 tcp_window_compute_scale (u32 window)
74 while (wnd_scale < TCP_MAX_WND_SCALE && (window >> wnd_scale) > TCP_WND_MAX)
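/* Worked example: with TCP_WND_MAX = 65535, a 4 MB window needs
 * wnd_scale = 7, since 4194304 >> 6 = 65536 still exceeds TCP_WND_MAX
 * while 4194304 >> 7 = 32768 does not. Windows are then advertised in
 * units of 1 << 7 = 128 bytes. */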
80 * TCP's initial window
83 tcp_initial_wnd_unscaled (tcp_connection_t * tc)
85 /* RFC 6928 recommends the value below. However, at the time our connections
86 * are initialized, fifos may not be allocated. Therefore, advertise the
87 * smallest possible unscaled window size and update once fifos are
88 * assigned to the session.
91 /* tcp_update_rcv_mss (tc);
92 TCP_IW_N_SEGMENTS * tc->mss; */
94 return tcp_cfg.min_rx_fifo;
98 * Compute initial window and scale factor. As per RFC1323, the window field in
99 * SYN and SYN-ACK segments is never scaled.
102 tcp_initial_window_to_advertise (tcp_connection_t * tc)
104 /* Compute rcv wscale only if peer advertised support for it */
105 if (tc->state != TCP_STATE_SYN_RCVD || tcp_opts_wscale (&tc->rcv_opts))
106 tc->rcv_wscale = tcp_window_compute_scale (tcp_cfg.max_rx_fifo);
108 tc->rcv_wnd = tcp_initial_wnd_unscaled (tc);
110 return clib_min (tc->rcv_wnd, TCP_WND_MAX);
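/* E.g., with a 4 KB min_rx_fifo the SYN advertises 4096; even a multi-MB
 * fifo would be clamped to TCP_WND_MAX (65535) here, because the window
 * field in a SYN is never scaled. */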
114 tcp_update_rcv_wnd (tcp_connection_t * tc)
116 u32 available_space, wnd;
120 * Figure out how much space we have available
122 available_space = transport_max_rx_enqueue (&tc->connection);
124 /* Make sure we have a multiple of 1 << rcv_wscale. We round down to
125 * avoid advertising a window larger than what can be buffered */
126 available_space = round_down_pow2 (available_space, 1 << tc->rcv_wscale);
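/* Example: with rcv_wscale = 7 the window granularity is 128 bytes, so
 * 100000 available bytes are rounded down to 99968 (781 * 128). */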
128 if (PREDICT_FALSE (available_space < tc->rcv_opts.mss))
135 * Use the above and what we know about what we've previously advertised
136 * to compute the new window
138 observed_wnd = (i32) tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las);
140 /* Bad. Thou shalt not shrink */
141 if (PREDICT_FALSE ((i32) available_space < observed_wnd))
143 wnd = round_pow2 (clib_max (observed_wnd, 0), 1 << tc->rcv_wscale);
144 TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space);
148 wnd = available_space;
151 tc->rcv_wnd = clib_min (wnd, TCP_WND_MAX << tc->rcv_wscale);
155 * Compute and return window to advertise, scaled as per RFC1323
158 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
160 if (state < TCP_STATE_ESTABLISHED)
161 return tcp_initial_window_to_advertise (tc);
163 tcp_update_rcv_wnd (tc);
164 return tc->rcv_wnd >> tc->rcv_wscale;
168 tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts)
172 opts->flags |= TCP_OPTS_FLAG_MSS;
174 len += TCP_OPTION_LEN_MSS;
176 opts->flags |= TCP_OPTS_FLAG_WSCALE;
177 opts->wscale = tc->rcv_wscale;
178 len += TCP_OPTION_LEN_WINDOW_SCALE;
180 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
181 opts->tsval = tcp_time_now ();
183 len += TCP_OPTION_LEN_TIMESTAMP;
187 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
188 len += TCP_OPTION_LEN_SACK_PERMITTED;
191 /* Align to needed boundary */
192 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
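/* Example: MSS (4) + window scale (3) + timestamp (10) + SACK permitted (2)
 * = 19 bytes of options, padded to 20 by the formula above since
 * TCP_OPTS_ALIGN is 4. */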
197 tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
201 opts->flags |= TCP_OPTS_FLAG_MSS;
203 len += TCP_OPTION_LEN_MSS;
205 if (tcp_opts_wscale (&tc->rcv_opts))
207 opts->flags |= TCP_OPTS_FLAG_WSCALE;
208 opts->wscale = tc->rcv_wscale;
209 len += TCP_OPTION_LEN_WINDOW_SCALE;
212 if (tcp_opts_tstamp (&tc->rcv_opts))
214 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
215 opts->tsval = tcp_time_now ();
216 opts->tsecr = tc->tsval_recent;
217 len += TCP_OPTION_LEN_TIMESTAMP;
220 if (tcp_opts_sack_permitted (&tc->rcv_opts))
222 opts->flags |= TCP_OPTS_FLAG_SACK_PERMITTED;
223 len += TCP_OPTION_LEN_SACK_PERMITTED;
226 /* Align to needed boundary */
227 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
232 tcp_make_established_options (tcp_connection_t * tc, tcp_options_t * opts)
238 if (tcp_opts_tstamp (&tc->rcv_opts))
240 opts->flags |= TCP_OPTS_FLAG_TSTAMP;
241 opts->tsval = tcp_tstamp (tc);
242 opts->tsecr = tc->tsval_recent;
243 len += TCP_OPTION_LEN_TIMESTAMP;
245 if (tcp_opts_sack_permitted (&tc->rcv_opts))
247 if (vec_len (tc->snd_sacks))
249 opts->flags |= TCP_OPTS_FLAG_SACK;
250 if (tc->snd_sack_pos >= vec_len (tc->snd_sacks))
251 tc->snd_sack_pos = 0;
252 opts->sacks = &tc->snd_sacks[tc->snd_sack_pos];
253 opts->n_sack_blocks = vec_len (tc->snd_sacks) - tc->snd_sack_pos;
254 opts->n_sack_blocks = clib_min (opts->n_sack_blocks,
255 TCP_OPTS_MAX_SACK_BLOCKS);
256 tc->snd_sack_pos += opts->n_sack_blocks;
257 len += 2 + TCP_OPTION_LEN_SACK_BLOCK * opts->n_sack_blocks;
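/* With timestamps in use, at most 3 SACK blocks fit in the 40-byte TCP
 * option space (10 + 2 + 3 * 8 = 36), which is what
 * TCP_OPTS_MAX_SACK_BLOCKS caps above. */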
261 /* Align to needed boundary */
262 len += (TCP_OPTS_ALIGN - len % TCP_OPTS_ALIGN) % TCP_OPTS_ALIGN;
267 tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
272 case TCP_STATE_ESTABLISHED:
273 case TCP_STATE_CLOSE_WAIT:
274 case TCP_STATE_FIN_WAIT_1:
275 case TCP_STATE_LAST_ACK:
276 case TCP_STATE_CLOSING:
277 case TCP_STATE_FIN_WAIT_2:
278 case TCP_STATE_TIME_WAIT:
279 case TCP_STATE_CLOSED:
280 return tcp_make_established_options (tc, opts);
281 case TCP_STATE_SYN_RCVD:
282 return tcp_make_synack_options (tc, opts);
283 case TCP_STATE_SYN_SENT:
284 return tcp_make_syn_options (tc, opts);
286 clib_warning ("State not handled! %d", state);
292 * Update burst send vars
294 * - Updates snd_mss to reflect the effective segment size that we can send
295 * by taking into account all TCP options, including SACKs.
296 * - Caches 'on the wire' options for reuse
297 * - Updates the receive window, which can be reused for a burst.
299 * This should *only* be called when doing bursts
302 tcp_update_burst_snd_vars (tcp_connection_t * tc)
304 tcp_main_t *tm = &tcp_main;
306 /* Compute options to be used for connection. These may be reused when
307 * sending data or to compute the effective mss (snd_mss) */
308 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts,
309 TCP_STATE_ESTABLISHED);
311 /* XXX check if MTU has been updated */
312 tc->snd_mss = clib_min (tc->mss, tc->rcv_opts.mss) - tc->snd_opts_len;
313 ASSERT (tc->snd_mss > 0);
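/* E.g., with a peer MSS of 1460 and timestamps negotiated (12 bytes of
 * options after padding), snd_mss ends up at 1448 payload bytes per
 * segment. */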
315 tcp_options_write (tm->wrk_ctx[tc->c_thread_index].cached_opts,
318 tcp_update_rcv_wnd (tc);
320 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
321 tcp_bt_check_app_limited (tc);
323 if (tc->snd_una == tc->snd_nxt)
325 tcp_cc_event (tc, TCP_CC_EVT_START_TX);
326 tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST);
330 #endif /* CLIB_MARCH_VARIANT */
333 tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
335 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
336 vlib_buffer_free_one (vm, b->next_buffer);
337 /* Zero all flags but free list index and trace flag */
338 b->flags &= VLIB_BUFFER_NEXT_PRESENT - 1;
340 b->current_length = 0;
341 b->total_length_not_including_first_buffer = 0;
342 vnet_buffer (b)->tcp.flags = 0;
344 /* Leave enough space for headers */
345 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
348 #ifndef CLIB_MARCH_VARIANT
350 tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b)
352 ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
353 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
354 b->total_length_not_including_first_buffer = 0;
356 vnet_buffer (b)->tcp.flags = 0;
357 VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
358 /* Leave enough space for headers */
359 return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN);
363 /* Compute TCP checksum in software when offloading is disabled for a connection */
365 ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
366 ip46_address_t * src, ip46_address_t * dst)
369 u16 payload_length_host_byte_order;
372 /* Initialize checksum with ip header. */
373 sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) +
374 clib_host_to_net_u16 (IP_PROTOCOL_TCP);
375 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
377 for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++)
379 sum0 = ip_csum_with_carry
380 (sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword));
381 sum0 = ip_csum_with_carry
382 (sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword));
385 return ip_calculate_l4_checksum (vm, p0, sum0,
386 payload_length_host_byte_order, NULL, 0,
391 ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0,
392 ip46_address_t * src, ip46_address_t * dst)
395 u32 payload_length_host_byte_order;
397 payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0);
399 clib_host_to_net_u32 (payload_length_host_byte_order +
400 (IP_PROTOCOL_TCP << 16));
402 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32));
403 sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32));
405 return ip_calculate_l4_checksum (vm, p0, sum0,
406 payload_length_host_byte_order, NULL, 0,
411 tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b)
414 if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
416 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
417 vlib_main_t *vm = wrk->vm;
420 checksum = ip4_tcp_compute_checksum_custom
421 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
423 checksum = ip6_tcp_compute_checksum_custom
424 (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip);
428 b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
437 tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state,
440 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
441 u8 tcp_opts_len, tcp_hdr_opts_len;
445 wnd = tcp_window_to_advertise (tc, state);
447 /* Make and write options */
448 tcp_opts_len = tcp_make_established_options (tc, snd_opts);
449 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
451 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
452 tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd);
454 tcp_options_write ((u8 *) (th + 1), snd_opts);
456 th->checksum = tcp_compute_checksum (tc, b);
458 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
461 tcp_zero_rwnd_sent_on (tc);
463 tcp_zero_rwnd_sent_off (tc);
467 * Convert buffer to ACK
470 tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b)
472 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK);
473 TCP_EVT (TCP_EVT_ACK_SENT, tc);
474 tc->rcv_las = tc->rcv_nxt;
478 * Convert buffer to FIN-ACK
481 tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b)
483 tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK);
487 * Convert buffer to SYN
490 tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b)
492 u8 tcp_hdr_opts_len, tcp_opts_len;
495 tcp_options_t snd_opts;
497 initial_wnd = tcp_initial_window_to_advertise (tc);
499 /* Make and write options */
500 clib_memset (&snd_opts, 0, sizeof (snd_opts));
501 tcp_opts_len = tcp_make_syn_options (tc, &snd_opts);
502 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
504 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
505 tc->rcv_nxt, tcp_hdr_opts_len, TCP_FLAG_SYN,
507 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
508 tcp_options_write ((u8 *) (th + 1), &snd_opts);
509 th->checksum = tcp_compute_checksum (tc, b);
513 * Convert buffer to SYN-ACK
516 tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
518 tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
519 u8 tcp_opts_len, tcp_hdr_opts_len;
523 clib_memset (snd_opts, 0, sizeof (*snd_opts));
524 initial_wnd = tcp_initial_window_to_advertise (tc);
525 tcp_opts_len = tcp_make_synack_options (tc, snd_opts);
526 tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
528 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss,
529 tc->rcv_nxt, tcp_hdr_opts_len,
530 TCP_FLAG_SYN | TCP_FLAG_ACK, initial_wnd);
531 tcp_options_write ((u8 *) (th + 1), snd_opts);
533 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
534 th->checksum = tcp_compute_checksum (tc, b);
538 tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
539 u8 is_ip4, u32 fib_index)
541 tcp_main_t *tm = &tcp_main;
542 vlib_main_t *vm = wrk->vm;
544 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
547 vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
548 vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
550 tcp_trajectory_add_start (b, 1);
552 session_add_pending_tx_buffer (vm->thread_index, bi,
553 tm->ipl_next_node[!is_ip4]);
555 if (vm->thread_index == 0 && vlib_num_workers ())
556 session_queue_run_on_main_thread (wrk->vm);
560 tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi,
563 b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
566 session_add_pending_tx_buffer (wrk->vm->thread_index, bi,
567 wrk->tco_next_node[!is_ip4]);
570 #endif /* CLIB_MARCH_VARIANT */
573 tcp_make_reset_in_place (vlib_main_t * vm, vlib_buffer_t * b, u8 is_ip4)
578 ip4_address_t src_ip4, dst_ip4;
579 ip6_address_t src_ip6, dst_ip6;
580 u16 src_port, dst_port;
581 u32 tmp, len, seq, ack;
584 /* Find IP and TCP headers */
585 th = tcp_buffer_hdr (b);
587 /* Save src and dst ip */
590 ih4 = vlib_buffer_get_current (b);
591 ASSERT ((ih4->ip_version_and_header_length & 0xF0) == 0x40);
592 src_ip4.as_u32 = ih4->src_address.as_u32;
593 dst_ip4.as_u32 = ih4->dst_address.as_u32;
597 ih6 = vlib_buffer_get_current (b);
598 ASSERT ((ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60);
599 clib_memcpy_fast (&src_ip6, &ih6->src_address, sizeof (ip6_address_t));
600 clib_memcpy_fast (&dst_ip6, &ih6->dst_address, sizeof (ip6_address_t));
603 src_port = th->src_port;
604 dst_port = th->dst_port;
605 flags = TCP_FLAG_RST;
608 * RFC 793. If the ACK bit is off, sequence number zero is used,
609 * <SEQ=0><ACK=SEG.SEQ+SEG.LEN><CTL=RST,ACK>
610 * If the ACK bit is on,
611 * <SEQ=SEG.ACK><CTL=RST>
615 seq = th->ack_number;
620 flags |= TCP_FLAG_ACK;
621 tmp = clib_net_to_host_u32 (th->seq_number);
622 len = vnet_buffer (b)->tcp.data_len + tcp_is_syn (th) + tcp_is_fin (th);
623 ack = clib_host_to_net_u32 (tmp + len);
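/* Example: an incoming SYN with SEQ = 1000 and no ACK yields a reset with
 * SEQ = 0, ACK = 1001 and the RST+ACK flags set; the SYN counts as one
 * octet of SEG.LEN per the rule above. */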
627 tcp_reuse_buffer (vm, b);
628 tcp_trajectory_add_start (b, 4);
629 th = vlib_buffer_push_tcp_net_order (b, dst_port, src_port, seq, ack,
630 sizeof (tcp_header_t), flags, 0);
634 ih4 = vlib_buffer_push_ip4 (vm, b, &dst_ip4, &src_ip4,
636 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
641 ih6 = vlib_buffer_push_ip6 (vm, b, &dst_ip6, &src_ip6, IP_PROTOCOL_TCP);
642 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
649 #ifndef CLIB_MARCH_VARIANT
651 * Send reset without reusing existing buffer
653 * Extracts connection info from the original packet
656 tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt,
657 u32 thread_index, u8 is_ip4)
659 tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
660 vlib_main_t *vm = wrk->vm;
662 u32 bi, sw_if_index, fib_index;
663 u8 tcp_hdr_len, flags = 0;
664 tcp_header_t *th, *pkt_th;
666 ip4_header_t *ih4, *pkt_ih4;
667 ip6_header_t *ih6, *pkt_ih6;
668 fib_protocol_t fib_proto;
670 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
672 tcp_worker_stats_inc (wrk, no_buffer, 1);
676 b = vlib_get_buffer (vm, bi);
677 sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
678 fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
679 fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
680 tcp_init_buffer (vm, b);
682 /* Make and write options */
683 tcp_hdr_len = sizeof (tcp_header_t);
687 pkt_ih4 = vlib_buffer_get_current (pkt);
688 pkt_th = ip4_next_header (pkt_ih4);
692 pkt_ih6 = vlib_buffer_get_current (pkt);
693 pkt_th = ip6_next_header (pkt_ih6);
696 if (tcp_ack (pkt_th))
698 flags = TCP_FLAG_RST;
699 seq = pkt_th->ack_number;
700 ack = (tc->state >= TCP_STATE_SYN_RCVD) ? tc->rcv_nxt : 0;
704 flags = TCP_FLAG_RST | TCP_FLAG_ACK;
706 ack = clib_host_to_net_u32 (vnet_buffer (pkt)->tcp.seq_end);
709 th = vlib_buffer_push_tcp_net_order (b, pkt_th->dst_port, pkt_th->src_port,
710 seq, ack, tcp_hdr_len, flags, 0);
712 /* Swap src and dst ip */
715 ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40);
716 ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address,
717 &pkt_ih4->src_address, IP_PROTOCOL_TCP,
718 tcp_csum_offload (tc));
719 th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4);
724 ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) ==
726 ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address,
727 &pkt_ih6->src_address,
729 tc->ipv6_flow_label);
730 th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
734 tcp_enqueue_to_ip_lookup (wrk, b, bi, is_ip4, fib_index);
735 TCP_EVT (TCP_EVT_RST_SENT, tc);
736 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
737 TCP_ERROR_RST_SENT, 1);
741 * Build and send a reset packet for the connection
744 tcp_send_reset (tcp_connection_t * tc)
746 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
747 vlib_main_t *vm = wrk->vm;
751 u16 tcp_hdr_opts_len, advertise_wnd, opts_write_len;
754 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
756 tcp_worker_stats_inc (wrk, no_buffer, 1);
759 b = vlib_get_buffer (vm, bi);
760 tcp_init_buffer (vm, b);
762 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
763 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
764 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
765 flags = TCP_FLAG_RST;
766 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
767 tc->rcv_nxt, tcp_hdr_opts_len, flags,
769 opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
770 th->checksum = tcp_compute_checksum (tc, b);
771 ASSERT (opts_write_len == tc->snd_opts_len);
772 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
773 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
774 TCP_EVT (TCP_EVT_RST_SENT, tc);
775 vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4),
776 TCP_ERROR_RST_SENT, 1);
780 tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
785 vlib_buffer_push_ip4 (wrk->vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4,
786 IP_PROTOCOL_TCP, tcp_csum_offload (tc));
790 vlib_buffer_push_ip6_custom (wrk->vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6,
791 IP_PROTOCOL_TCP, tc->ipv6_flow_label);
798 * Builds a SYN packet for a half-open connection and sends it to ipx_lookup.
799 * The packet is not forwarded through tcpx_output to avoid doing lookups
800 * in the half_open pool.
803 tcp_send_syn (tcp_connection_t * tc)
805 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
806 vlib_main_t *vm = wrk->vm;
811 * Setup retransmit and establish timers before requesting buffer
812 * such that we can return if we've run out.
814 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
815 tc->rto * TCP_TO_TIMER_TICK);
817 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
819 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
820 tcp_worker_stats_inc (wrk, no_buffer, 1);
824 b = vlib_get_buffer (vm, bi);
825 tcp_init_buffer (vm, b);
826 tcp_make_syn (tc, b);
828 /* Measure RTT with this */
829 tc->rtt_ts = tcp_time_now_us (vlib_num_workers ()? 1 : 0);
830 tc->rtt_seq = tc->snd_nxt;
833 tcp_push_ip_hdr (wrk, tc, b);
834 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
835 TCP_EVT (TCP_EVT_SYN_SENT, tc);
839 tcp_send_synack (tcp_connection_t * tc)
841 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
842 vlib_main_t *vm = wrk->vm;
846 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
848 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
850 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
851 tcp_worker_stats_inc (wrk, no_buffer, 1);
855 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
856 b = vlib_get_buffer (vm, bi);
857 tcp_init_buffer (vm, b);
858 tcp_make_synack (tc, b);
859 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
860 TCP_EVT (TCP_EVT_SYNACK_SENT, tc);
867 tcp_send_fin (tcp_connection_t * tc)
869 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
870 vlib_main_t *vm = wrk->vm;
875 fin_snt = tc->flags & TCP_CONN_FINSNT;
879 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
881 /* Out of buffers so program fin retransmit ASAP */
882 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
886 /* Make sure retransmit retries a FIN, not data */
887 tc->flags |= TCP_CONN_FINSNT;
888 tcp_worker_stats_inc (wrk, no_buffer, 1);
892 /* If we have a non-dupack ack programmed, no need to send it; the FIN-ACK covers it */
893 if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks)
894 tc->flags &= ~TCP_CONN_SNDACK;
896 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
897 b = vlib_get_buffer (vm, bi);
898 tcp_init_buffer (vm, b);
899 tcp_make_fin (tc, b);
900 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
901 TCP_EVT (TCP_EVT_FIN_SENT, tc);
902 /* Account for the FIN */
906 tc->flags |= TCP_CONN_FINSNT;
907 tc->flags &= ~TCP_CONN_FINPNDG;
908 tc->snd_una_max = seq_max (tc->snd_una_max, tc->snd_nxt);
913 * Push TCP header and update connection variables. Should only be called
914 * for segments with data, not for 'control' packets.
917 tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
918 u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
920 u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
921 u32 advertise_wnd, data_len;
922 tcp_main_t *tm = &tcp_main;
925 data_len = b->current_length;
926 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
927 data_len += b->total_length_not_including_first_buffer;
929 vnet_buffer (b)->tcp.flags = 0;
930 vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
933 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
935 tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
938 advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
940 advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
942 if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
944 if (seq_geq (tc->psh_seq, snd_nxt)
945 && seq_lt (tc->psh_seq, snd_nxt + data_len))
946 flags |= TCP_FLAG_PSH;
948 th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
949 tc->rcv_nxt, tcp_hdr_opts_len, flags,
954 clib_memcpy_fast ((u8 *) (th + 1),
955 tm->wrk_ctx[tc->c_thread_index].cached_opts,
960 u8 len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
961 ASSERT (len == tc->snd_opts_len);
965 * Update connection variables
969 tc->snd_nxt += data_len;
970 tc->rcv_las = tc->rcv_nxt;
972 tc->bytes_out += data_len;
973 tc->data_segs_out += 1;
975 th->checksum = tcp_compute_checksum (tc, b);
977 TCP_EVT (TCP_EVT_PKTIZE, tc);
981 tcp_buffer_len (vlib_buffer_t * b)
983 u32 data_len = b->current_length;
984 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
985 data_len += b->total_length_not_including_first_buffer;
990 tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
992 tcp_connection_t *tc = (tcp_connection_t *) tconn;
994 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
995 tcp_bt_track_tx (tc, tcp_buffer_len (b));
997 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
998 /* update_snd_nxt */ 1);
1000 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1001 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
1002 /* If not tracking an ACK, start tracking */
1003 if (tc->rtt_ts == 0 && !tcp_in_cong_recovery (tc))
1005 tc->rtt_ts = tcp_time_now_us (tc->c_thread_index);
1006 tc->rtt_seq = tc->snd_nxt;
1008 if (PREDICT_FALSE (!tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)))
1010 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1011 tcp_retransmit_timer_set (&wrk->timer_wheel, tc);
1014 tcp_trajectory_add_start (b, 3);
1019 tcp_send_ack (tcp_connection_t * tc)
1021 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1022 vlib_main_t *vm = wrk->vm;
1026 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1028 tcp_update_rcv_wnd (tc);
1029 tcp_worker_stats_inc (wrk, no_buffer, 1);
1032 b = vlib_get_buffer (vm, bi);
1033 tcp_init_buffer (vm, b);
1034 tcp_make_ack (tc, b);
1035 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1039 tcp_program_ack (tcp_connection_t * tc)
1041 if (!(tc->flags & TCP_CONN_SNDACK))
1043 session_add_self_custom_tx_evt (&tc->connection, 1);
1044 tc->flags |= TCP_CONN_SNDACK;
1049 tcp_program_dupack (tcp_connection_t * tc)
1051 if (!(tc->flags & TCP_CONN_SNDACK))
1053 session_add_self_custom_tx_evt (&tc->connection, 1);
1054 tc->flags |= TCP_CONN_SNDACK;
1056 if (tc->pending_dupacks < 255)
1057 tc->pending_dupacks += 1;
1061 tcp_program_retransmit (tcp_connection_t * tc)
1063 if (!(tc->flags & TCP_CONN_RXT_PENDING))
1065 session_add_self_custom_tx_evt (&tc->connection, 0);
1066 tc->flags |= TCP_CONN_RXT_PENDING;
1071 * Delayed ack timer handler
1073 * Sends delayed ACK when timer expires
1076 tcp_timer_delack_handler (tcp_connection_t * tc)
1082 * Send window update ack
1084 * Ensures that it will be sent only once, after a zero rwnd has been
1085 * advertised in a previous ack, and only if rwnd has grown beyond a
1086 * configurable value.
1089 tcp_send_window_update_ack (tcp_connection_t * tc)
1091 if (tcp_zero_rwnd_sent (tc))
1093 tcp_update_rcv_wnd (tc);
1094 if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss)
1096 tcp_zero_rwnd_sent_off (tc);
1097 tcp_program_ack (tc);
1103 * Allocate a new buffer and build a new tcp segment
1105 * @param wrk tcp worker
1106 * @param tc connection for which the segment will be allocated
1107 * @param offset offset of the first byte in the tx fifo
1108 * @param max_deq_bytes segment size
1109 * @param[out] b pointer to buffer allocated
1111 * @return the number of bytes in the segment or 0 if buffer cannot be
1112 * allocated or no data available
1115 tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1116 u32 offset, u32 max_deq_bytes, vlib_buffer_t ** b)
1118 u32 bytes_per_buffer = vnet_get_tcp_main ()->bytes_per_buffer;
1119 vlib_main_t *vm = wrk->vm;
1124 seg_size = max_deq_bytes + TRANSPORT_MAX_HDRS_LEN;
1129 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1132 * Allocate and fill in buffer(s)
1135 /* Easy case, buffer size greater than mss */
1136 if (PREDICT_TRUE (seg_size <= bytes_per_buffer))
1138 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1140 tcp_worker_stats_inc (wrk, no_buffer, 1);
1143 *b = vlib_get_buffer (vm, bi);
1144 data = tcp_init_buffer (vm, *b);
1145 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1147 ASSERT (n_bytes == max_deq_bytes);
1148 b[0]->current_length = n_bytes;
1149 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1150 /* burst */ 0, /* update_snd_nxt */ 0);
1152 /* Split mss into multiple buffers */
1155 u32 chain_bi = ~0, n_bufs_per_seg, n_bufs;
1156 u16 n_peeked, len_to_deq;
1157 vlib_buffer_t *chain_b, *prev_b;
1160 /* Make sure we have enough buffers */
1161 n_bufs_per_seg = ceil ((double) seg_size / bytes_per_buffer);
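/* Example (assuming 2048-byte buffers): a seg_size of ~4.2 KB gives
 * n_bufs_per_seg = ceil (4200 / 2048) = 3; the first buffer keeps room
 * for headers and the rest is peeked into chained buffers below. */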
1162 vec_validate_aligned (wrk->tx_buffers, n_bufs_per_seg - 1,
1163 CLIB_CACHE_LINE_BYTES);
1164 n_bufs = vlib_buffer_alloc (vm, wrk->tx_buffers, n_bufs_per_seg);
1165 if (PREDICT_FALSE (n_bufs != n_bufs_per_seg))
1168 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1169 tcp_worker_stats_inc (wrk, no_buffer, 1);
1173 *b = vlib_get_buffer (vm, wrk->tx_buffers[--n_bufs]);
1174 data = tcp_init_buffer (vm, *b);
1175 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1177 TRANSPORT_MAX_HDRS_LEN);
1178 b[0]->current_length = n_bytes;
1179 b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
1180 b[0]->total_length_not_including_first_buffer = 0;
1181 max_deq_bytes -= n_bytes;
1184 for (i = 1; i < n_bufs_per_seg; i++)
1187 len_to_deq = clib_min (max_deq_bytes, bytes_per_buffer);
1188 chain_bi = wrk->tx_buffers[--n_bufs];
1189 chain_b = vlib_get_buffer (vm, chain_bi);
1190 chain_b->current_data = 0;
1191 data = vlib_buffer_get_current (chain_b);
1192 n_peeked = session_tx_fifo_peek_bytes (&tc->connection, data,
1195 ASSERT (n_peeked == len_to_deq);
1196 n_bytes += n_peeked;
1197 chain_b->current_length = n_peeked;
1198 chain_b->next_buffer = 0;
1200 /* update previous buffer */
1201 prev_b->next_buffer = chain_bi;
1202 prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
1204 max_deq_bytes -= n_peeked;
1205 b[0]->total_length_not_including_first_buffer += n_peeked;
1208 tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
1209 /* burst */ 0, /* update_snd_nxt */ 0);
1211 if (PREDICT_FALSE (n_bufs))
1213 clib_warning ("not all buffers consumed");
1214 vlib_buffer_free (vm, wrk->tx_buffers, n_bufs);
1218 ASSERT (n_bytes > 0);
1219 ASSERT (((*b)->current_data + (*b)->current_length) <= bytes_per_buffer);
1225 * Build a retransmit segment
1227 * @return the number of bytes in the segment or 0 if there's nothing to retransmit
1231 tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk,
1232 tcp_connection_t * tc, u32 offset,
1233 u32 max_deq_bytes, vlib_buffer_t ** b)
1235 u32 start, available_bytes;
1238 ASSERT (tc->state >= TCP_STATE_ESTABLISHED);
1239 ASSERT (max_deq_bytes != 0);
1242 * Make sure we can retransmit something
1244 available_bytes = transport_max_tx_dequeue (&tc->connection);
1245 ASSERT (available_bytes >= offset);
1246 available_bytes -= offset;
1247 if (!available_bytes)
1250 max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes);
1251 max_deq_bytes = clib_min (available_bytes, max_deq_bytes);
1253 start = tc->snd_una + offset;
1254 ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt));
1256 n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b);
1260 tc->snd_rxt_bytes += n_bytes;
1262 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1263 tcp_bt_track_rxt (tc, start, start + n_bytes);
1265 tc->bytes_retrans += n_bytes;
1266 tc->segs_retrans += 1;
1267 tcp_worker_stats_inc (wrk, rxt_segs, 1);
1268 TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes);
1274 tcp_check_sack_reneging (tcp_connection_t * tc)
1276 sack_scoreboard_t *sb = &tc->sack_sb;
1277 sack_scoreboard_hole_t *hole;
1279 hole = scoreboard_first_hole (sb);
1280 if (!sb->is_reneging && (!hole || hole->start == tc->snd_una))
1283 scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt);
1287 * Reset congestion control, switch cwnd to loss window and try again.
1290 tcp_cc_init_rxt_timeout (tcp_connection_t * tc)
1292 TCP_EVT (TCP_EVT_CC_EVT, tc, 6);
1294 tc->prev_ssthresh = tc->ssthresh;
1295 tc->prev_cwnd = tc->cwnd;
1297 /* If we entered loss without fast recovery, notify cc algo of the
1298 * congestion event such that it can update ssthresh and its state */
1299 if (!tcp_in_fastrecovery (tc))
1300 tcp_cc_congestion (tc);
1302 /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */
1306 tc->cwnd_acc_bytes = 0;
1307 tc->tr_occurences += 1;
1308 tcp_recovery_on (tc);
1312 tcp_timer_retransmit_handler (tcp_connection_t * tc)
1314 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1315 vlib_main_t *vm = wrk->vm;
1316 vlib_buffer_t *b = 0;
1319 tcp_worker_stats_inc (wrk, tr_events, 1);
1321 /* Should be handled by a different handler */
1322 if (PREDICT_FALSE (tc->state == TCP_STATE_SYN_SENT))
1325 /* Wait-close and retransmit could pop at the same time */
1326 if (tc->state == TCP_STATE_CLOSED)
1329 if (tc->state >= TCP_STATE_ESTABLISHED)
1331 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1333 /* Lost FIN, retransmit and return */
1334 if (tc->flags & TCP_CONN_FINSNT)
1338 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1342 /* Shouldn't be here. This condition is tricky because it has to take
1343 * into account boff > 0 due to persist timeout. */
1344 if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
1345 || (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
1346 && !tcp_flight_size (tc)))
1348 ASSERT (!tcp_in_recovery (tc));
1353 /* We're not in recovery so make sure rto_boff is 0. Can be non-zero due
1354 * to persist timer timeout */
1355 if (!tcp_in_recovery (tc) && tc->rto_boff > 0)
1358 tcp_update_rto (tc);
1361 /* Peer is dead or network connectivity is lost. Close connection.
1362 * RFC 1122 section 4.2.3.5 recommends a value of at least 100s. For
1363 * a min rto of 0.2s we need to retry about 8 times. */
1364 if (tc->rto_boff >= TCP_RTO_BOFF_MAX)
1366 tcp_send_reset (tc);
1367 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1368 session_transport_closing_notify (&tc->connection);
1369 session_transport_closed_notify (&tc->connection);
1370 tcp_connection_timers_reset (tc);
1371 tcp_program_cleanup (wrk, tc);
1372 tcp_worker_stats_inc (wrk, tr_abort, 1);
1376 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1377 tcp_check_sack_reneging (tc);
1379 /* Update send congestion to make sure that rxt has data to send */
1380 tc->snd_congestion = tc->snd_nxt;
1382 /* Send the first unacked segment. If we're short on buffers, return
1383 * as soon as possible */
1384 n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una);
1385 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b);
1388 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
1392 bi = vlib_get_buffer_index (vm, b);
1393 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1395 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1396 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
1399 if (tc->rto_boff == 1)
1401 tcp_cc_init_rxt_timeout (tc);
1402 /* Record timestamp. Eifel detection algorithm RFC3522 */
1403 tc->snd_rxt_ts = tcp_tstamp (tc);
1406 if (tcp_opts_sack_permitted (&tc->rcv_opts))
1407 scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes);
1409 tcp_program_retransmit (tc);
1411 /* Retransmit SYN-ACK */
1412 else if (tc->state == TCP_STATE_SYN_RCVD)
1414 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1418 /* Passive open establish timeout */
1419 if (tc->rto > TCP_ESTABLISH_TIME >> 1)
1421 tcp_connection_set_state (tc, TCP_STATE_CLOSED);
1422 tcp_connection_timers_reset (tc);
1423 tcp_program_cleanup (wrk, tc);
1424 tcp_worker_stats_inc (wrk, tr_abort, 1);
1428 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1430 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT, 1);
1431 tcp_worker_stats_inc (wrk, no_buffer, 1);
1436 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1437 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1439 tcp_retransmit_timer_force_update (&wrk->timer_wheel, tc);
1441 b = vlib_get_buffer (vm, bi);
1442 tcp_init_buffer (vm, b);
1443 tcp_make_synack (tc, b);
1444 TCP_EVT (TCP_EVT_SYN_RXT, tc, 1);
1446 /* Retransmit timer already updated, just enqueue to output */
1447 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1451 ASSERT (tc->state == TCP_STATE_CLOSED);
1457 * SYN retransmit timer handler. Active open only.
1460 tcp_timer_retransmit_syn_handler (tcp_connection_t * tc)
1462 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1463 vlib_main_t *vm = wrk->vm;
1464 vlib_buffer_t *b = 0;
1467 /* Note: the connection may have transitioned to ESTABLISHED... */
1468 if (PREDICT_FALSE (tc->state != TCP_STATE_SYN_SENT))
1471 /* Half-open connection actually moved to established but we were
1472 * waiting for syn retransmit to pop to call cleanup from the right
1474 if (tc->flags & TCP_CONN_HALF_OPEN_DONE)
1476 if (tcp_half_open_connection_cleanup (tc))
1477 TCP_DBG ("could not remove half-open connection");
1481 TCP_EVT (TCP_EVT_CC_EVT, tc, 2);
1484 /* Active open establish timeout */
1485 if (tc->rto >= TCP_ESTABLISH_TIME >> 1)
1487 session_stream_connect_notify (&tc->connection, SESSION_E_TIMEDOUT);
1488 tcp_connection_cleanup (tc);
1492 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1494 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN, 1);
1495 tcp_worker_stats_inc (wrk, no_buffer, 1);
1499 /* Try without increasing RTO a number of times. If this fails,
1500 * start growing RTO exponentially */
1502 if (tc->rto_boff > TCP_RTO_SYN_RETRIES)
1503 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1505 b = vlib_get_buffer (vm, bi);
1506 tcp_init_buffer (vm, b);
1507 tcp_make_syn (tc, b);
1509 TCP_EVT (TCP_EVT_SYN_RXT, tc, 0);
1511 /* This goes straight to ipx_lookup */
1512 tcp_push_ip_hdr (wrk, tc, b);
1513 tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index);
1515 tcp_timer_update (&wrk->timer_wheel, tc, TCP_TIMER_RETRANSMIT_SYN,
1516 tc->rto * TCP_TO_TIMER_TICK);
1520 * Got 0 snd_wnd from peer, try to do something about it.
1524 tcp_timer_persist_handler (tcp_connection_t * tc)
1526 tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index);
1527 u32 bi, max_snd_bytes, available_bytes, offset;
1528 tcp_main_t *tm = vnet_get_tcp_main ();
1529 vlib_main_t *vm = wrk->vm;
1534 /* Problem already solved or worse */
1535 if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss
1536 || (tc->flags & TCP_CONN_FINSNT))
1537 goto update_scheduler;
1539 available_bytes = transport_max_tx_dequeue (&tc->connection);
1540 offset = tc->snd_nxt - tc->snd_una;
1542 /* Reprogram persist if no new bytes available to send. We may have data to send later */
1544 if (!available_bytes)
1546 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1550 if (available_bytes <= offset)
1551 goto update_scheduler;
1553 /* Increment RTO backoff */
1555 tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
1558 * Try to force the first unsent segment (or buffer)
1560 if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
1562 tcp_persist_timer_set (&wrk->timer_wheel, tc);
1563 tcp_worker_stats_inc (wrk, no_buffer, 1);
1567 b = vlib_get_buffer (vm, bi);
1568 data = tcp_init_buffer (vm, b);
1570 tcp_validate_txf_size (tc, offset);
1571 tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
1572 max_snd_bytes = clib_min (tc->snd_mss,
1573 tm->bytes_per_buffer - TRANSPORT_MAX_HDRS_LEN);
1574 n_bytes = session_tx_fifo_peek_bytes (&tc->connection, data, offset,
1576 b->current_length = n_bytes;
1577 ASSERT (n_bytes != 0 && (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)
1578 || tc->snd_nxt == tc->snd_una_max
1579 || tc->rto_boff > 1));
1581 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1583 tcp_bt_check_app_limited (tc);
1584 tcp_bt_track_tx (tc, n_bytes);
1587 tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
1588 /* burst */ 0, /* update_snd_nxt */ 1);
1589 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1590 tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
1591 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1593 /* Just sent new data, enable retransmit */
1594 tcp_retransmit_timer_update (&wrk->timer_wheel, tc);
1600 if (tcp_is_descheduled (tc))
1601 transport_connection_reschedule (&tc->connection);
1605 * Retransmit first unacked segment
1608 tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
1610 vlib_main_t *vm = wrk->vm;
1614 TCP_EVT (TCP_EVT_CC_EVT, tc, 1);
1616 n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b);
1620 bi = vlib_get_buffer_index (vm, b);
1621 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1627 tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1630 u32 offset, n_segs = 0, n_written, bi, available_wnd;
1631 vlib_main_t *vm = wrk->vm;
1632 vlib_buffer_t *b = 0;
1634 offset = tc->snd_nxt - tc->snd_una;
1635 available_wnd = tc->snd_wnd - offset;
1636 burst_size = clib_min (burst_size, available_wnd / tc->snd_mss);
1638 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1639 tcp_bt_check_app_limited (tc);
1641 while (n_segs < burst_size)
1643 n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
1647 bi = vlib_get_buffer_index (vm, b);
1648 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1649 offset += n_written;
1652 if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
1653 tcp_bt_track_tx (tc, n_written);
1655 tc->snd_nxt += n_written;
1656 tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
1664 * Estimate send space using proportional rate reduction (RFC6937)
1667 tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc)
1672 pipe = tcp_flight_size (tc);
1673 prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion);
1675 if (pipe > tc->ssthresh)
1677 space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / tc->prev_cwnd))
1683 limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss;
1684 space = clib_min (tc->ssthresh - pipe, limit);
1686 space = clib_max (space, prr_out ? 0 : tc->snd_mss);
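/* Example: with prev_cwnd = 10 segments, ssthresh = 5 and pipe still above
 * ssthresh, having delivered 4 segments (prr_delivered) while sending 1
 * (prr_out) allows roughly 4 * 0.5 - 1 = 1 more segment, i.e., the sending
 * rate is reduced proportionally as per RFC 6937. */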
1691 tcp_retransmit_should_retry_head (tcp_connection_t * tc,
1692 sack_scoreboard_t * sb)
1694 u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion;
1695 f64 rr = (f64) tc->ssthresh / tc->prev_cwnd;
1697 if (tcp_fastrecovery_first (tc))
1700 return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr);
1704 tcp_max_tx_deq (tcp_connection_t * tc)
1706 return (transport_max_tx_dequeue (&tc->connection)
1707 - (tc->snd_nxt - tc->snd_una));
1710 #define scoreboard_rescue_rxt_valid(_sb, _tc) \
1711 (seq_geq (_sb->rescue_rxt, _tc->snd_una) \
1712 && seq_leq (_sb->rescue_rxt, _tc->snd_congestion))
1715 * Do retransmit with SACKs
1718 tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1721 u32 n_written = 0, offset, max_bytes, n_segs = 0;
1722 u8 snd_limited = 0, can_rescue = 0;
1723 u32 bi, max_deq, burst_bytes;
1724 sack_scoreboard_hole_t *hole;
1725 vlib_main_t *vm = wrk->vm;
1726 vlib_buffer_t *b = 0;
1727 sack_scoreboard_t *sb;
1730 ASSERT (tcp_in_cong_recovery (tc));
1732 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1733 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1736 tcp_program_retransmit (tc);
1740 if (tcp_in_recovery (tc))
1741 snd_space = tcp_available_cc_snd_space (tc);
1743 snd_space = tcp_fastrecovery_prr_snd_space (tc);
1745 if (snd_space < tc->snd_mss)
1750 /* Check if snd_una is a lost retransmit */
1751 if (pool_elts (sb->holes)
1752 && seq_gt (sb->high_sacked, tc->snd_congestion)
1753 && tc->rxt_head != tc->snd_una
1754 && tcp_retransmit_should_retry_head (tc, sb))
1756 max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una);
1757 n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b);
1760 tcp_program_retransmit (tc);
1763 bi = vlib_get_buffer_index (vm, b);
1764 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1767 tc->rxt_head = tc->snd_una;
1768 tc->rxt_delivered += n_written;
1769 tc->prr_delivered += n_written;
1770 ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes);
1773 tcp_fastrecovery_first_off (tc);
1775 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1776 hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
1778 max_deq = transport_max_tx_dequeue (&tc->connection);
1779 max_deq -= tc->snd_nxt - tc->snd_una;
1781 while (snd_space > 0 && n_segs < burst_size)
1783 hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue,
1787 /* We are out of lost holes to retransmit so send some new data. */
1788 if (max_deq > tc->snd_mss)
1793 /* Make sure we don't exceed available window and leave space
1794 * for one more packet, to avoid zero window acks */
1795 av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una);
1796 av_wnd = clib_max (av_wnd - tc->snd_mss, 0);
1797 snd_space = clib_min (snd_space, av_wnd);
1798 snd_space = clib_min (max_deq, snd_space);
1799 burst_size = clib_min (burst_size - n_segs,
1800 snd_space / tc->snd_mss);
1801 burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST);
1802 n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size);
1803 if (max_deq > n_segs_new * tc->snd_mss)
1804 tcp_program_retransmit (tc);
1806 n_segs += n_segs_new;
1810 if (tcp_in_recovery (tc) || !can_rescue
1811 || scoreboard_rescue_rxt_valid (sb, tc))
1814 /* If rescue rxt undefined or less than snd_una then one segment of
1815 * up to SMSS octets that MUST include the highest outstanding
1816 * unSACKed sequence number SHOULD be returned, and RescueRxt set to
1817 * RecoveryPoint. HighRxt MUST NOT be updated.
1819 hole = scoreboard_last_hole (sb);
1820 max_bytes = clib_min (tc->snd_mss, hole->end - hole->start);
1821 max_bytes = clib_min (max_bytes, snd_space);
1822 offset = hole->end - tc->snd_una - max_bytes;
1823 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
1828 sb->rescue_rxt = tc->snd_congestion;
1829 bi = vlib_get_buffer_index (vm, b);
1830 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1835 max_bytes = clib_min (hole->end - sb->high_rxt, snd_space);
1836 max_bytes = snd_limited ? clib_min (max_bytes, tc->snd_mss) : max_bytes;
1840 offset = sb->high_rxt - tc->snd_una;
1841 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1843 ASSERT (n_written <= snd_space);
1845 /* Nothing left to retransmit */
1849 bi = vlib_get_buffer_index (vm, b);
1850 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1852 sb->high_rxt += n_written;
1853 ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt));
1855 snd_space -= n_written;
1860 tcp_program_retransmit (tc);
1864 transport_connection_tx_pacer_reset_bucket (&tc->connection, 0);
1869 * Fast retransmit without SACK info
1872 tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
1875 u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes;
1876 u32 burst_bytes, sent_bytes;
1877 vlib_main_t *vm = wrk->vm;
1878 int snd_space, n_segs = 0;
1882 ASSERT (tcp_in_cong_recovery (tc));
1883 TCP_EVT (TCP_EVT_CC_EVT, tc, 0);
1885 burst_bytes = transport_connection_tx_pacer_burst (&tc->connection);
1886 burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss);
1889 tcp_program_retransmit (tc);
1893 snd_space = tcp_available_cc_snd_space (tc);
1894 cc_limited = snd_space < burst_bytes;
1896 if (!tcp_fastrecovery_first (tc))
1899 /* RFC 6582: [If a partial ack], retransmit the first unacknowledged segment */
1901 while (snd_space > 0 && n_segs < burst_size)
1903 max_bytes = clib_min (tc->snd_mss,
1904 tc->snd_congestion - tc->snd_una - offset);
1907 n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
1910 /* Nothing left to retransmit */
1914 bi = vlib_get_buffer_index (vm, b);
1915 tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
1916 snd_space -= n_written;
1917 offset += n_written;
1921 if (n_segs == burst_size)
1926 /* RFC 6582: Send a new segment if permitted by the new value of cwnd. */
1927 if (snd_space < tc->snd_mss || tc->snd_mss == 0)
1930 max_deq = transport_max_tx_dequeue (&tc->connection);
1931 max_deq -= tc->snd_nxt - tc->snd_una;
1934 snd_space = clib_min (max_deq, snd_space);
1935 burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss);
1936 n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size);
1937 if (n_segs_now && max_deq > n_segs_now * tc->snd_mss)
1938 tcp_program_retransmit (tc);
1939 n_segs += n_segs_now;
1943 tcp_fastrecovery_first_off (tc);
1945 sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes);
1946 sent_bytes = cc_limited ? burst_bytes : sent_bytes;
1947 transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes);
1953 tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size)
1957 if (!tc->pending_dupacks)
1959 if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc)
1960 || tc->state != TCP_STATE_ESTABLISHED)
1968 /* If we're supposed to send dupacks but have no out-of-order (ooo) data,
1969 * send only one ack */
1970 if (!vec_len (tc->snd_sacks))
1973 tc->dupacks_out += 1;
1974 tc->pending_dupacks = 0;
1978 /* Start with first sack block */
1979 tc->snd_sack_pos = 0;
1981 /* Generate enough dupacks to cover all sack blocks. Do not generate
1982 * more acks than the number of packets received. But do generate at
1983 * least 3, i.e., the number needed to signal congestion, if needed. */
1984 n_acks = vec_len (tc->snd_sacks) / TCP_OPTS_MAX_SACK_BLOCKS;
1985 n_acks = clib_min (n_acks, tc->pending_dupacks);
1986 n_acks = clib_max (n_acks, clib_min (tc->pending_dupacks, 3));
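/* Example: 7 sack blocks and 5 pending dupacks give n_acks = min (7 / 3, 5)
 * = 2, then raised to 3 by the clib_max above so the duplicate-ack
 * threshold can still be signaled. */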
1987 for (j = 0; j < clib_min (n_acks, max_burst_size); j++)
1990 if (n_acks < max_burst_size)
1992 tc->pending_dupacks = 0;
1993 tc->snd_sack_pos = 0;
1994 tc->dupacks_out += n_acks;
1999 TCP_DBG ("constrained by burst size");
2000 tc->pending_dupacks = n_acks - max_burst_size;
2001 tc->dupacks_out += max_burst_size;
2002 tcp_program_dupack (tc);
2003 return max_burst_size;
2008 tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size)
2010 tcp_worker_ctx_t *wrk;
2013 if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED))
2016 wrk = tcp_get_worker (tc->c_thread_index);
2018 if (tcp_opts_sack_permitted (&tc->rcv_opts))
2019 n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size);
2021 n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size);
2027 tcp_session_custom_tx (void *conn, transport_send_params_t * sp)
2029 tcp_connection_t *tc = (tcp_connection_t *) conn;
2032 if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING))
2034 tc->flags &= ~TCP_CONN_RXT_PENDING;
2035 n_segs = tcp_do_retransmit (tc, sp->max_burst_size);
2038 if (!(tc->flags & TCP_CONN_SNDACK))
2041 tc->flags &= ~TCP_CONN_SNDACK;
2043 /* We have retransmitted packets and no dupack */
2044 if (n_segs && !tc->pending_dupacks)
2047 if (sp->max_burst_size <= n_segs)
2049 tcp_program_ack (tc);
2053 n_segs += tcp_send_acks (tc, sp->max_burst_size - n_segs);
2057 #endif /* CLIB_MARCH_VARIANT */
2060 tcp_output_handle_link_local (tcp_connection_t * tc0, vlib_buffer_t * b0,
2061 u16 * next0, u32 * error0)
2063 ip_adjacency_t *adj;
2066 /* Not thread safe but as long as the connection exists the adj should not be removed */
2068 ai = adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &tc0->c_rmt_ip,
2070 if (ai == ADJ_INDEX_INVALID)
2072 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2073 *next0 = TCP_OUTPUT_NEXT_DROP;
2074 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2079 if (PREDICT_TRUE (adj->lookup_next_index == IP_LOOKUP_NEXT_REWRITE))
2080 *next0 = TCP_OUTPUT_NEXT_IP_REWRITE;
2081 else if (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP)
2082 *next0 = TCP_OUTPUT_NEXT_IP_ARP;
2085 *next0 = TCP_OUTPUT_NEXT_DROP;
2086 *error0 = TCP_ERROR_LINK_LOCAL_RW;
2088 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ai;
2092 tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
2093 u32 * to_next, u32 n_bufs)
2095 tcp_connection_t *tc;
2101 for (i = 0; i < n_bufs; i++)
2103 b = vlib_get_buffer (vm, to_next[i]);
2104 if (!(b->flags & VLIB_BUFFER_IS_TRACED))
2106 th = vlib_buffer_get_current (b);
2107 tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index,
2109 t = vlib_add_trace (vm, node, b, sizeof (*t));
2110 clib_memcpy_fast (&t->tcp_header, th, sizeof (t->tcp_header));
2111 clib_memcpy_fast (&t->tcp_connection, tc, sizeof (t->tcp_connection));
2116 tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0,
2117 tcp_connection_t * tc0, u8 is_ip4)
2119 TCP_EVT (TCP_EVT_OUTPUT, tc0,
2120 ((tcp_header_t *) vlib_buffer_get_current (b0))->flags,
2121 b0->current_length);
2124 vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
2125 IP_PROTOCOL_TCP, tcp_csum_offload (tc0));
2127 vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6,
2128 IP_PROTOCOL_TCP, tc0->ipv6_flow_label);
2132 tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b)
2134 if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO)))
2137 u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len;
2139 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID))
2140 data_len += b->total_length_not_including_first_buffer;
2142 if (PREDICT_TRUE (data_len <= tc->snd_mss))
2146 ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0);
2147 ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0);
2148 b->flags |= VNET_BUFFER_F_GSO;
2149 vnet_buffer2 (b)->gso_l4_hdr_sz =
2150 sizeof (tcp_header_t) + tc->snd_opts_len;
2151 vnet_buffer2 (b)->gso_size = tc->snd_mss;
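/* E.g., a 10 KB chained buffer with snd_mss = 1448 is handed down as a
 * single GSO buffer with gso_size 1448; the NIC, or a downstream GSO
 * node, splits it into MSS-sized segments on the wire. */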
2156 tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0,
2157 vlib_node_runtime_t * error_node, u16 * next0,
2160 /* If next_index is not drop use it */
2161 if (tc0->next_node_index)
2163 *next0 = tc0->next_node_index;
2164 vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque;
2168 *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP;
2171 vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
2172 vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
2178 if (PREDICT_FALSE (ip6_address_is_link_local_unicast (&tc0->c_rmt_ip6)))
2179 tcp_output_handle_link_local (tc0, b0, next0, &error0);
2181 if (PREDICT_FALSE (error0))
2183 b0->error = error_node->errors[error0];
2192 tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2193 vlib_frame_t * frame, int is_ip4)
2195 u32 n_left_from, *from, thread_index = vm->thread_index;
2196 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
2197 u16 nexts[VLIB_FRAME_SIZE], *next;
2199 from = vlib_frame_vector_args (frame);
2200 n_left_from = frame->n_vectors;
2201 tcp_set_time_now (tcp_get_worker (thread_index));
2203 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
2204 tcp46_output_trace_frame (vm, node, from, n_left_from);
2206 vlib_get_buffers (vm, from, bufs, n_left_from);
2210 while (n_left_from >= 4)
2212 tcp_connection_t *tc0, *tc1;
2215 vlib_prefetch_buffer_header (b[2], STORE);
2216 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2218 vlib_prefetch_buffer_header (b[3], STORE);
2219 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2222 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2224 tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index,
2227 if (PREDICT_TRUE (!tc0 + !tc1 == 0))
2229 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2230 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2232 tcp_check_if_gso (tc0, b[0]);
2233 tcp_check_if_gso (tc1, b[1]);
2235 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2236 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2242 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2243 tcp_check_if_gso (tc0, b[0]);
2244 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2248 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2249 next[0] = TCP_OUTPUT_NEXT_DROP;
2253 tcp_output_push_ip (vm, b[1], tc1, is_ip4);
2254 tcp_check_if_gso (tc1, b[1]);
2255 tcp_output_handle_packet (tc1, b[1], node, &next[1], is_ip4);
2259 b[1]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2260 next[1] = TCP_OUTPUT_NEXT_DROP;
2268 while (n_left_from > 0)
2270 tcp_connection_t *tc0;
2272 if (n_left_from > 1)
2274 vlib_prefetch_buffer_header (b[1], STORE);
2275 CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE);
2278 tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index,
2281 if (PREDICT_TRUE (tc0 != 0))
2283 tcp_output_push_ip (vm, b[0], tc0, is_ip4);
2284 tcp_check_if_gso (tc0, b[0]);
2285 tcp_output_handle_packet (tc0, b[0], node, &next[0], is_ip4);
2289 b[0]->error = node->errors[TCP_ERROR_INVALID_CONNECTION];
2290 next[0] = TCP_OUTPUT_NEXT_DROP;
2298 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
2299 vlib_node_increment_counter (vm, tcp_node_index (output, is_ip4),
2300 TCP_ERROR_PKTS_SENT, frame->n_vectors);
2301 return frame->n_vectors;
2304 VLIB_NODE_FN (tcp4_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2305 vlib_frame_t * from_frame)
2307 return tcp46_output_inline (vm, node, from_frame, 1 /* is_ip4 */ );
2310 VLIB_NODE_FN (tcp6_output_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2311 vlib_frame_t * from_frame)
2313 return tcp46_output_inline (vm, node, from_frame, 0 /* is_ip4 */ );
2317 VLIB_REGISTER_NODE (tcp4_output_node) =
2319 .name = "tcp4-output",
2320 /* Takes a vector of packets. */
2321 .vector_size = sizeof (u32),
2322 .n_errors = TCP_N_ERROR,
2323 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2324 .error_strings = tcp_error_strings,
2325 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2327 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2328 foreach_tcp4_output_next
2331 .format_buffer = format_tcp_header,
2332 .format_trace = format_tcp_tx_trace,
2337 VLIB_REGISTER_NODE (tcp6_output_node) =
2339 .name = "tcp6-output",
2340 /* Takes a vector of packets. */
2341 .vector_size = sizeof (u32),
2342 .n_errors = TCP_N_ERROR,
2343 .protocol_hint = VLIB_NODE_PROTO_HINT_TCP,
2344 .error_strings = tcp_error_strings,
2345 .n_next_nodes = TCP_OUTPUT_N_NEXT,
2347 #define _(s,n) [TCP_OUTPUT_NEXT_##s] = n,
2348 foreach_tcp6_output_next
2351 .format_buffer = format_tcp_header,
2352 .format_trace = format_tcp_tx_trace,
2356 typedef enum _tcp_reset_next
2358 TCP_RESET_NEXT_DROP,
2359 TCP_RESET_NEXT_IP_LOOKUP,
2363 #define foreach_tcp4_reset_next \
2364 _(DROP, "error-drop") \
2365 _(IP_LOOKUP, "ip4-lookup")
2367 #define foreach_tcp6_reset_next \
2368 _(DROP, "error-drop") \
2369 _(IP_LOOKUP, "ip6-lookup")
2372 tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
2373 vlib_frame_t * from_frame, u8 is_ip4)
2375 u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP;
2376 u32 n_left_from, next_index, *from, *to_next;
2378 from = vlib_frame_vector_args (from_frame);
2379 n_left_from = from_frame->n_vectors;
2381 next_index = node->cached_next_index;
2383 while (n_left_from > 0)
2387 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
2389 while (n_left_from > 0 && n_left_to_next > 0)
2401 n_left_to_next -= 1;
2403 b0 = vlib_get_buffer (vm, bi0);
2404 tcp_make_reset_in_place (vm, b0, is_ip4);
2406 /* Prepare to send to IP lookup */
2407 vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
2409 b0->error = node->errors[error0];
2410 b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
2411 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
2413 th0 = vlib_buffer_get_current (b0);
2415 th0 = ip4_next_header ((ip4_header_t *) th0);
2417 th0 = ip6_next_header ((ip6_header_t *) th0);
2418 t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
2419 clib_memcpy_fast (&t0->tcp_header, th0,
2420 sizeof (t0->tcp_header));
2423 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
2424 n_left_to_next, bi0, next0);
2426 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
2428 return from_frame->n_vectors;
2431 VLIB_NODE_FN (tcp4_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2432 vlib_frame_t * from_frame)
2434 return tcp46_send_reset_inline (vm, node, from_frame, 1);
2437 VLIB_NODE_FN (tcp6_reset_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
2438 vlib_frame_t * from_frame)
2440 return tcp46_send_reset_inline (vm, node, from_frame, 0);
2444 VLIB_REGISTER_NODE (tcp4_reset_node) = {
2445 .name = "tcp4-reset",
2446 .vector_size = sizeof (u32),
2447 .n_errors = TCP_N_ERROR,
2448 .error_strings = tcp_error_strings,
2449 .n_next_nodes = TCP_RESET_N_NEXT,
2451 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2452 foreach_tcp4_reset_next
2455 .format_trace = format_tcp_tx_trace,
2460 VLIB_REGISTER_NODE (tcp6_reset_node) = {
2461 .name = "tcp6-reset",
2462 .vector_size = sizeof (u32),
2463 .n_errors = TCP_N_ERROR,
2464 .error_strings = tcp_error_strings,
2465 .n_next_nodes = TCP_RESET_N_NEXT,
2467 #define _(s,n) [TCP_RESET_NEXT_##s] = n,
2468 foreach_tcp6_reset_next
2471 .format_trace = format_tcp_tx_trace,
2476 * fd.io coding-style-patch-verification: ON
2479 * eval: (c-set-style "gnu")