X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Ftcp%2Ftcp_output.c;h=1b9530b4e1a5e73efed3c15fb3135e9957984eef;hb=5e6305fb02ccdfd38c8c5e369a960deaa7602eba;hp=220076057510818478a291578f7a8c6294704aaf;hpb=df36f4963f1a590ce9a02f048507c3d4590580ae;p=vpp.git diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 22007605751..1b9530b4e1a 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -55,12 +55,12 @@ format_tcp_tx_trace (u8 * s, va_list * args) CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); tcp_tx_trace_t *t = va_arg (*args, tcp_tx_trace_t *); + tcp_connection_t *tc = &t->tcp_connection; u32 indent = format_get_indent (s); - s = format (s, "%U\n%U%U", - format_tcp_header, &t->tcp_header, 128, - format_white_space, indent, - format_tcp_connection, &t->tcp_connection, 1); + s = format (s, "%U state %U\n%U%U", format_tcp_connection_id, tc, + format_tcp_state, tc->state, format_white_space, indent, + format_tcp_header, &t->tcp_header, 128); return s; } @@ -75,19 +75,6 @@ tcp_window_compute_scale (u32 window) return wnd_scale; } -/** - * Update max segment size we're able to process. - * - * The value is constrained by our interface's MTU and IP options. It is - * also what we advertise to our peer. - */ -void -tcp_update_rcv_mss (tcp_connection_t * tc) -{ - /* TODO find our iface MTU */ - tc->mss = tcp_cfg.default_mtu - sizeof (tcp_header_t); -} - /** * TCP's initial window */ @@ -135,7 +122,10 @@ tcp_update_rcv_wnd (tcp_connection_t * tc) */ available_space = transport_max_rx_enqueue (&tc->connection); if (PREDICT_FALSE (available_space < tc->rcv_opts.mss)) - available_space = 0; + { + tc->rcv_wnd = 0; + return; + } /* * Use the above and what we know about what we've previously advertised @@ -147,7 +137,7 @@ tcp_update_rcv_wnd (tcp_connection_t * tc) if (PREDICT_FALSE ((i32) available_space < observed_wnd)) { wnd = clib_max (observed_wnd, 0); - TCP_EVT_DBG (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space); + TCP_EVT (TCP_EVT_RCV_WND_SHRUNK, tc, observed_wnd, available_space); } else { @@ -263,16 +253,16 @@ tcp_options_write (u8 * data, tcp_options_t * opts) } static int -tcp_make_syn_options (tcp_options_t * opts, u8 wnd_scale) +tcp_make_syn_options (tcp_connection_t * tc, tcp_options_t * opts) { u8 len = 0; opts->flags |= TCP_OPTS_FLAG_MSS; - opts->mss = tcp_cfg.default_mtu; /*XXX discover that */ + opts->mss = tc->mss; len += TCP_OPTION_LEN_MSS; opts->flags |= TCP_OPTS_FLAG_WSCALE; - opts->wscale = wnd_scale; + opts->wscale = tc->rcv_wscale; len += TCP_OPTION_LEN_WINDOW_SCALE; opts->flags |= TCP_OPTS_FLAG_TSTAMP; @@ -379,7 +369,7 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts, case TCP_STATE_SYN_RCVD: return tcp_make_synack_options (tc, opts); case TCP_STATE_SYN_SENT: - return tcp_make_syn_options (opts, tc->rcv_wscale); + return tcp_make_syn_options (tc, opts); default: clib_warning ("State not handled! 
%d", state); return 0; @@ -415,36 +405,16 @@ tcp_update_burst_snd_vars (tcp_connection_t * tc) tcp_update_rcv_wnd (tc); - if (tc->flags & TCP_CONN_RATE_SAMPLE) - tc->flags |= TCP_CONN_TRACK_BURST; + if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE) + tcp_bt_check_app_limited (tc); if (tc->snd_una == tc->snd_nxt) - tcp_cc_event (tc, TCP_CC_EVT_START_TX); -} - -void -tcp_init_mss (tcp_connection_t * tc) -{ - u16 default_min_mss = 536; - tcp_update_rcv_mss (tc); - - /* TODO cache mss and consider PMTU discovery */ - tc->snd_mss = clib_min (tc->rcv_opts.mss, tc->mss); - - if (tc->snd_mss < 45) { - /* Assume that at least the min default mss works */ - tc->snd_mss = default_min_mss; - tc->rcv_opts.mss = default_min_mss; + tcp_cc_event (tc, TCP_CC_EVT_START_TX); + tcp_connection_tx_pacer_reset (tc, tc->cwnd, TRANSPORT_PACER_MIN_BURST); } - - /* We should have enough space for 40 bytes of options */ - ASSERT (tc->snd_mss > 45); - - /* If we use timestamp option, account for it */ - if (tcp_opts_tstamp (&tc->rcv_opts)) - tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP; } + #endif /* CLIB_MARCH_VARIANT */ static void * @@ -477,6 +447,77 @@ tcp_init_buffer (vlib_main_t * vm, vlib_buffer_t * b) return vlib_buffer_make_headroom (b, TRANSPORT_MAX_HDRS_LEN); } + +/* Compute TCP checksum in software when offloading is disabled for a connection */ +u16 +ip6_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0, + ip46_address_t * src, ip46_address_t * dst) +{ + ip_csum_t sum0; + u16 payload_length_host_byte_order; + u32 i; + + /* Initialize checksum with ip header. */ + sum0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, p0)) + + clib_host_to_net_u16 (IP_PROTOCOL_TCP); + payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0); + + for (i = 0; i < ARRAY_LEN (src->ip6.as_uword); i++) + { + sum0 = ip_csum_with_carry + (sum0, clib_mem_unaligned (&src->ip6.as_uword[i], uword)); + sum0 = ip_csum_with_carry + (sum0, clib_mem_unaligned (&dst->ip6.as_uword[i], uword)); + } + + return ip_calculate_l4_checksum (vm, p0, sum0, + payload_length_host_byte_order, NULL, 0, + NULL); +} + +u16 +ip4_tcp_compute_checksum_custom (vlib_main_t * vm, vlib_buffer_t * p0, + ip46_address_t * src, ip46_address_t * dst) +{ + ip_csum_t sum0; + u32 payload_length_host_byte_order; + + payload_length_host_byte_order = vlib_buffer_length_in_chain (vm, p0); + sum0 = + clib_host_to_net_u32 (payload_length_host_byte_order + + (IP_PROTOCOL_TCP << 16)); + + sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&src->ip4, u32)); + sum0 = ip_csum_with_carry (sum0, clib_mem_unaligned (&dst->ip4, u32)); + + return ip_calculate_l4_checksum (vm, p0, sum0, + payload_length_host_byte_order, NULL, 0, + NULL); +} + +static inline u16 +tcp_compute_checksum (tcp_connection_t * tc, vlib_buffer_t * b) +{ + u16 checksum = 0; + if (PREDICT_FALSE (tc->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD)) + { + tcp_worker_ctx_t *wrk = tcp_get_worker (tc->c_thread_index); + vlib_main_t *vm = wrk->vm; + + if (tc->c_is_ip4) + checksum = ip4_tcp_compute_checksum_custom + (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip); + else + checksum = ip6_tcp_compute_checksum_custom + (vm, b, &tc->c_lcl_ip, &tc->c_rmt_ip); + } + else + { + b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM; + } + return checksum; +} + /** * Prepare ACK */ @@ -499,6 +540,9 @@ tcp_make_ack_i (tcp_connection_t * tc, vlib_buffer_t * b, tcp_state_t state, tc->rcv_nxt, tcp_hdr_opts_len, flags, wnd); tcp_options_write ((u8 *) (th + 1), snd_opts); + + th->checksum = tcp_compute_checksum (tc, b); + vnet_buffer 
(b)->tcp.connection_index = tc->c_c_index; if (wnd == 0) @@ -514,7 +558,7 @@ static inline void tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b) { tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK); - TCP_EVT_DBG (TCP_EVT_ACK_SENT, tc); + TCP_EVT (TCP_EVT_ACK_SENT, tc); tc->rcv_las = tc->rcv_nxt; } @@ -542,7 +586,7 @@ tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b) /* Make and write options */ clib_memset (&snd_opts, 0, sizeof (snd_opts)); - tcp_opts_len = tcp_make_syn_options (&snd_opts, tc->rcv_wscale); + tcp_opts_len = tcp_make_syn_options (tc, &snd_opts); tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t); th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss, @@ -550,6 +594,7 @@ tcp_make_syn (tcp_connection_t * tc, vlib_buffer_t * b) initial_wnd); vnet_buffer (b)->tcp.connection_index = tc->c_c_index; tcp_options_write ((u8 *) (th + 1), &snd_opts); + th->checksum = tcp_compute_checksum (tc, b); } /** @@ -574,6 +619,7 @@ tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b) tcp_options_write ((u8 *) (th + 1), snd_opts); vnet_buffer (b)->tcp.connection_index = tc->c_c_index; + th->checksum = tcp_compute_checksum (tc, b); } always_inline void @@ -628,51 +674,19 @@ tcp_enqueue_to_ip_lookup (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi, session_flush_frames_main_thread (wrk->vm); } -always_inline void -tcp_enqueue_to_output_i (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi, - u8 is_ip4, u8 flush) +static void +tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi, + u8 is_ip4) { - u32 *to_next, next_index; - vlib_frame_t *f; + session_type_t st; b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED; b->error = 0; - /* Decide where to send the packet */ - next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index; - tcp_trajectory_add_start (b, 2); - - /* Get frame to v4/6 output node */ - f = wrk->tx_frames[!is_ip4]; - if (!f) - { - f = vlib_get_frame_to_node (wrk->vm, next_index); - ASSERT (f); - wrk->tx_frames[!is_ip4] = f; - } - to_next = vlib_frame_vector_args (f); - to_next[f->n_vectors] = bi; - f->n_vectors += 1; - if (flush || f->n_vectors == VLIB_FRAME_SIZE) - { - vlib_put_frame_to_node (wrk->vm, next_index, f); - wrk->tx_frames[!is_ip4] = 0; - } -} - -static void -tcp_enqueue_to_output (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi, - u8 is_ip4) -{ - tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 0); + st = session_type_from_proto_and_ip (TRANSPORT_PROTO_TCP, is_ip4); + session_add_pending_tx_buffer (st, wrk->vm->thread_index, bi); } -static void -tcp_enqueue_to_output_now (tcp_worker_ctx_t * wrk, vlib_buffer_t * b, u32 bi, - u8 is_ip4) -{ - tcp_enqueue_to_output_i (wrk, b, bi, is_ip4, 1); -} #endif /* CLIB_MARCH_VARIANT */ static int @@ -819,7 +833,8 @@ tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, { ASSERT ((pkt_ih4->ip_version_and_header_length & 0xF0) == 0x40); ih4 = vlib_buffer_push_ip4 (vm, b, &pkt_ih4->dst_address, - &pkt_ih4->src_address, IP_PROTOCOL_TCP, 1); + &pkt_ih4->src_address, IP_PROTOCOL_TCP, + tcp_csum_offload (tc)); th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih4); } else @@ -827,14 +842,16 @@ tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, int bogus = ~0; ASSERT ((pkt_ih6->ip_version_traffic_class_and_flow_label & 0xF0) == 0x60); - ih6 = vlib_buffer_push_ip6 (vm, b, &pkt_ih6->dst_address, - &pkt_ih6->src_address, IP_PROTOCOL_TCP); + ih6 = vlib_buffer_push_ip6_custom (vm, b, &pkt_ih6->dst_address, + &pkt_ih6->src_address, + 
IP_PROTOCOL_TCP, + tc->ipv6_flow_label); th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus); ASSERT (!bogus); } tcp_enqueue_to_ip_lookup_now (wrk, b, bi, is_ip4, fib_index); - TCP_EVT_DBG (TCP_EVT_RST_SENT, tc); + TCP_EVT (TCP_EVT_RST_SENT, tc); vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4), TCP_ERROR_RST_SENT, 1); } @@ -866,10 +883,11 @@ tcp_send_reset (tcp_connection_t * tc) tc->rcv_nxt, tcp_hdr_opts_len, flags, advertise_wnd); opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts); + th->checksum = tcp_compute_checksum (tc, b); ASSERT (opts_write_len == tc->snd_opts_len); vnet_buffer (b)->tcp.connection_index = tc->c_c_index; tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4); - TCP_EVT_DBG (TCP_EVT_RST_SENT, tc); + TCP_EVT (TCP_EVT_RST_SENT, tc); vlib_node_increment_counter (vm, tcp_node_index (output, tc->c_is_ip4), TCP_ERROR_RST_SENT, 1); } @@ -878,24 +896,15 @@ static void tcp_push_ip_hdr (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b) { - tcp_header_t *th = vlib_buffer_get_current (b); - vlib_main_t *vm = wrk->vm; if (tc->c_is_ip4) { - ip4_header_t *ih; - ih = vlib_buffer_push_ip4 (vm, b, &tc->c_lcl_ip4, - &tc->c_rmt_ip4, IP_PROTOCOL_TCP, 1); - th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih); + vlib_buffer_push_ip4 (wrk->vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4, + IP_PROTOCOL_TCP, tcp_csum_offload (tc)); } else { - ip6_header_t *ih; - int bogus = ~0; - - ih = vlib_buffer_push_ip6 (vm, b, &tc->c_lcl_ip6, - &tc->c_rmt_ip6, IP_PROTOCOL_TCP); - th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih, &bogus); - ASSERT (!bogus); + vlib_buffer_push_ip6_custom (wrk->vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6, + IP_PROTOCOL_TCP, tc->ipv6_flow_label); } } @@ -938,7 +947,7 @@ tcp_send_syn (tcp_connection_t * tc) tcp_push_ip_hdr (wrk, tc, b); tcp_enqueue_to_ip_lookup (wrk, b, bi, tc->c_is_ip4, tc->c_fib_index); - TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc); + TCP_EVT (TCP_EVT_SYN_SENT, tc); } void @@ -962,22 +971,7 @@ tcp_send_synack (tcp_connection_t * tc) tcp_init_buffer (vm, b); tcp_make_synack (tc, b); tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4); - TCP_EVT_DBG (TCP_EVT_SYNACK_SENT, tc); -} - -/** - * Flush tx frame populated by retransmits and timer pops - */ -void -tcp_flush_frame_to_output (tcp_worker_ctx_t * wrk, u8 is_ip4) -{ - if (wrk->tx_frames[!is_ip4]) - { - u32 next_index; - next_index = is_ip4 ? 
tcp4_output_node.index : tcp6_output_node.index; - vlib_put_frame_to_node (wrk->vm, next_index, wrk->tx_frames[!is_ip4]); - wrk->tx_frames[!is_ip4] = 0; - } + TCP_EVT (TCP_EVT_SYNACK_SENT, tc); } /** @@ -1002,8 +996,6 @@ tcp_flush_frame_to_ip_lookup (tcp_worker_ctx_t * wrk, u8 is_ip4) void tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk) { - tcp_flush_frame_to_output (wrk, 1); - tcp_flush_frame_to_output (wrk, 0); tcp_flush_frame_to_ip_lookup (wrk, 1); tcp_flush_frame_to_ip_lookup (wrk, 0); } @@ -1036,12 +1028,16 @@ tcp_send_fin (tcp_connection_t * tc) return; } + /* If we have non-dupacks programmed, no need to send them */ + if ((tc->flags & TCP_CONN_SNDACK) && !tc->pending_dupacks) + tc->flags &= ~TCP_CONN_SNDACK; + tcp_retransmit_timer_force_update (tc); b = vlib_get_buffer (vm, bi); tcp_init_buffer (vm, b); tcp_make_fin (tc, b); - tcp_enqueue_to_output_now (wrk, b, bi, tc->c_is_ip4); - TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc); + tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4); + TCP_EVT (TCP_EVT_FIN_SENT, tc); /* Account for the FIN */ tc->snd_nxt += 1; if (!fin_snt) @@ -1115,7 +1111,18 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt, tc->bytes_out += data_len; tc->data_segs_out += 1; - TCP_EVT_DBG (TCP_EVT_PKTIZE, tc); + th->checksum = tcp_compute_checksum (tc, b); + + TCP_EVT (TCP_EVT_PKTIZE, tc); +} + +always_inline u32 +tcp_buffer_len (vlib_buffer_t * b) +{ + u32 data_len = b->current_length; + if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT)) + data_len += b->total_length_not_including_first_buffer; + return data_len; } u32 @@ -1123,12 +1130,8 @@ tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b) { tcp_connection_t *tc = (tcp_connection_t *) tconn; - if (tc->flags & TCP_CONN_TRACK_BURST) - { - tcp_bt_check_app_limited (tc); - tcp_bt_track_tx (tc); - tc->flags &= ~TCP_CONN_TRACK_BURST; - } + if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE) + tcp_bt_track_tx (tc, tcp_buffer_len (b)); tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1, /* update_snd_nxt */ 1); @@ -1192,12 +1195,12 @@ tcp_program_dupack (tcp_connection_t * tc) } void -tcp_program_fastretransmit (tcp_connection_t * tc) +tcp_program_retransmit (tcp_connection_t * tc) { - if (!(tc->flags & TCP_CONN_FRXT_PENDING)) + if (!(tc->flags & TCP_CONN_RXT_PENDING)) { session_add_self_custom_tx_evt (&tc->connection, 0); - tc->flags |= TCP_CONN_FRXT_PENDING; + tc->flags |= TCP_CONN_RXT_PENDING; } } @@ -1207,30 +1210,28 @@ tcp_program_fastretransmit (tcp_connection_t * tc) * Sends delayed ACK when timer expires */ void -tcp_timer_delack_handler (u32 index) +tcp_timer_delack_handler (u32 index, u32 thread_index) { - u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; tc = tcp_connection_get (index, thread_index); - tc->timers[TCP_TIMER_DELACK] = TCP_TIMER_HANDLE_INVALID; tcp_send_ack (tc); } /** - * Send Window Update ACK, - * ensuring that it will be sent once, if RWND became non-zero, - * after zero RWND has been advertised in ACK before + * Send window update ack + * + * Ensures that it will be sent only once, after a zero rwnd has been + * advertised in a previous ack, and only if rwnd has grown beyond a + * configurable value. 
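 * In the code below that value is tcp_cfg.rwnd_min_update_ack full-sized
 * segments, i.e. rcv_wnd must cover at least rwnd_min_update_ack * snd_mss
 * bytes before the update ack is programmed.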
*/ void tcp_send_window_update_ack (tcp_connection_t * tc) { - u32 win; - if (tcp_zero_rwnd_sent (tc)) { - win = tcp_window_to_advertise (tc, tc->state); - if (win > 0) + tcp_update_rcv_wnd (tc); + if (tc->rcv_wnd >= tcp_cfg.rwnd_min_update_ack * tc->snd_mss) { tcp_zero_rwnd_sent_off (tc); tcp_program_ack (tc); @@ -1385,88 +1386,87 @@ tcp_prepare_retransmit_segment (tcp_worker_ctx_t * wrk, max_deq_bytes = clib_min (tc->snd_mss, max_deq_bytes); max_deq_bytes = clib_min (available_bytes, max_deq_bytes); - /* Start is beyond snd_congestion */ start = tc->snd_una + offset; - if (seq_geq (start, tc->snd_congestion)) - return 0; - - /* Don't overshoot snd_congestion */ - if (seq_gt (start + max_deq_bytes, tc->snd_congestion)) - { - max_deq_bytes = tc->snd_congestion - start; - if (max_deq_bytes == 0) - return 0; - } + ASSERT (seq_leq (start + max_deq_bytes, tc->snd_nxt)); n_bytes = tcp_prepare_segment (wrk, tc, offset, max_deq_bytes, b); if (!n_bytes) return 0; - if (tcp_in_fastrecovery (tc)) - { - tc->snd_rxt_bytes += n_bytes; - if (tc->flags & TCP_CONN_RATE_SAMPLE) - tcp_bt_track_rxt (tc, start, start + n_bytes); - } + tc->snd_rxt_bytes += n_bytes; + + if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE) + tcp_bt_track_rxt (tc, start, start + n_bytes); tc->bytes_retrans += n_bytes; tc->segs_retrans += 1; - TCP_EVT_DBG (TCP_EVT_CC_RTX, tc, offset, n_bytes); + tcp_workerp_stats_inc (wrk, rxt_segs, 1); + TCP_EVT (TCP_EVT_CC_RTX, tc, offset, n_bytes); + return n_bytes; } +static void +tcp_check_sack_reneging (tcp_connection_t * tc) +{ + sack_scoreboard_t *sb = &tc->sack_sb; + sack_scoreboard_hole_t *hole; + + hole = scoreboard_first_hole (sb); + if (!sb->is_reneging && (!hole || hole->start == tc->snd_una)) + return; + + scoreboard_clear_reneging (sb, tc->snd_una, tc->snd_nxt); +} + /** * Reset congestion control, switch cwnd to loss window and try again. 
*/ static void tcp_cc_init_rxt_timeout (tcp_connection_t * tc) { - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 6); + TCP_EVT (TCP_EVT_CC_EVT, tc, 6); + tc->prev_ssthresh = tc->ssthresh; tc->prev_cwnd = tc->cwnd; - /* Clear fast recovery state if needed */ - if (tcp_in_fastrecovery (tc)) - tcp_cc_fastrecovery_clear (tc); + /* If we entrered loss without fast recovery, notify cc algo of the + * congestion event such that it can update ssthresh and its state */ + if (!tcp_in_fastrecovery (tc)) + tcp_cc_congestion (tc); - /* Let cc algo decide loss cwnd and ssthresh */ + /* Let cc algo decide loss cwnd and ssthresh post unrecovered loss */ tcp_cc_loss (tc); - /* Start again from the beginning */ - tc->snd_congestion = tc->snd_nxt; - tc->rcv_dupacks = 0; tc->rtt_ts = 0; tc->cwnd_acc_bytes = 0; tc->tr_occurences += 1; - tcp_connection_tx_pacer_reset (tc, tc->cwnd, 2 * tc->snd_mss); tcp_recovery_on (tc); } void -tcp_timer_retransmit_handler (u32 tc_index) +tcp_timer_retransmit_handler (u32 tc_index, u32 thread_index) { - u32 thread_index = vlib_get_thread_index (); tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index); vlib_main_t *vm = wrk->vm; tcp_connection_t *tc; vlib_buffer_t *b = 0; u32 bi, n_bytes; + tcp_workerp_stats_inc (wrk, tr_events, 1); tc = tcp_connection_get (tc_index, thread_index); /* Note: the connection may have been closed and pool_put */ if (PREDICT_FALSE (tc == 0 || tc->state == TCP_STATE_SYN_SENT)) return; - tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID; - /* Wait-close and retransmit could pop at the same time */ if (tc->state == TCP_STATE_CLOSED) return; if (tc->state >= TCP_STATE_ESTABLISHED) { - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2); + TCP_EVT (TCP_EVT_CC_EVT, tc, 2); /* Lost FIN, retransmit and return */ if (tc->flags & TCP_CONN_FINSNT) @@ -1504,35 +1504,23 @@ tcp_timer_retransmit_handler (u32 tc_index) tcp_send_reset (tc); tcp_connection_set_state (tc, TCP_STATE_CLOSED); session_transport_closing_notify (&tc->connection); + session_transport_closed_notify (&tc->connection); tcp_connection_timers_reset (tc); tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.closewait_time); + tcp_workerp_stats_inc (wrk, tr_abort, 1); return; } - /* Increment RTO backoff (also equal to number of retries) and go back - * to first un-acked byte */ - tc->rto_boff += 1; + if (tcp_opts_sack_permitted (&tc->rcv_opts)) + tcp_check_sack_reneging (tc); - /* TODO be less aggressive about clearing scoreboard */ - scoreboard_clear (&tc->sack_sb); + /* Update send congestion to make sure that rxt has data to send */ + tc->snd_congestion = tc->snd_nxt; - /* First retransmit timeout */ - if (tc->rto_boff == 1) - { - tcp_cc_init_rxt_timeout (tc); - /* Record timestamp. Eifel detection algorithm RFC3522 */ - tc->snd_rxt_ts = tcp_tstamp (tc); - } - - if (tc->flags & TCP_CONN_RATE_SAMPLE) - tcp_bt_flush_samples (tc); - - /* If we've sent beyond snd_congestion, update it */ - tc->snd_congestion = seq_max (tc->snd_nxt, tc->snd_congestion); - tc->snd_nxt = tc->snd_una; - - /* Send one segment. n_bytes may be zero due to buffer shortfall */ - n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b); + /* Send the first unacked segment. 
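 * At most one snd_mss worth of data starting at snd_una is prepared here.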
If we're short on buffers, return + * as soon as possible */ + n_bytes = clib_min (tc->snd_mss, tc->snd_nxt - tc->snd_una); + n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, n_bytes, &b); if (!n_bytes) { tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1); @@ -1544,11 +1532,24 @@ tcp_timer_retransmit_handler (u32 tc_index) tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); tcp_retransmit_timer_force_update (tc); + + tc->rto_boff += 1; + if (tc->rto_boff == 1) + { + tcp_cc_init_rxt_timeout (tc); + /* Record timestamp. Eifel detection algorithm RFC3522 */ + tc->snd_rxt_ts = tcp_tstamp (tc); + } + + if (tcp_opts_sack_permitted (&tc->rcv_opts)) + scoreboard_init_rxt (&tc->sack_sb, tc->snd_una + n_bytes); + + tcp_program_retransmit (tc); } /* Retransmit SYN-ACK */ else if (tc->state == TCP_STATE_SYN_RCVD) { - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2); + TCP_EVT (TCP_EVT_CC_EVT, tc, 2); tc->rtt_ts = 0; @@ -1558,6 +1559,7 @@ tcp_timer_retransmit_handler (u32 tc_index) tcp_connection_set_state (tc, TCP_STATE_CLOSED); tcp_connection_timers_reset (tc); tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, tcp_cfg.cleanup_time); + tcp_workerp_stats_inc (wrk, tr_abort, 1); return; } @@ -1576,7 +1578,7 @@ tcp_timer_retransmit_handler (u32 tc_index) b = vlib_get_buffer (vm, bi); tcp_init_buffer (vm, b); tcp_make_synack (tc, b); - TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 1); + TCP_EVT (TCP_EVT_SYN_RXT, tc, 1); /* Retransmit timer already updated, just enqueue to output */ tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4); @@ -1592,9 +1594,8 @@ tcp_timer_retransmit_handler (u32 tc_index) * SYN retransmit timer handler. Active open only. */ void -tcp_timer_retransmit_syn_handler (u32 tc_index) +tcp_timer_retransmit_syn_handler (u32 tc_index, u32 thread_index) { - u32 thread_index = vlib_get_thread_index (); tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index); vlib_main_t *vm = wrk->vm; tcp_connection_t *tc; @@ -1607,8 +1608,6 @@ tcp_timer_retransmit_syn_handler (u32 tc_index) if (PREDICT_FALSE (tc == 0 || tc->state != TCP_STATE_SYN_SENT)) return; - tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID; - /* Half-open connection actually moved to established but we were * waiting for syn retransmit to pop to call cleanup from the right * thread. 
*/ @@ -1619,7 +1618,7 @@ tcp_timer_retransmit_syn_handler (u32 tc_index) return; } - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2); + TCP_EVT (TCP_EVT_CC_EVT, tc, 2); tc->rtt_ts = 0; /* Active open establish timeout */ @@ -1646,7 +1645,7 @@ tcp_timer_retransmit_syn_handler (u32 tc_index) tcp_init_buffer (vm, b); tcp_make_syn (tc, b); - TCP_EVT_DBG (TCP_EVT_SYN_RXT, tc, 0); + TCP_EVT (TCP_EVT_SYN_RXT, tc, 0); /* This goes straight to ipx_lookup */ tcp_push_ip_hdr (wrk, tc, b); @@ -1661,9 +1660,8 @@ tcp_timer_retransmit_syn_handler (u32 tc_index) * */ void -tcp_timer_persist_handler (u32 index) +tcp_timer_persist_handler (u32 index, u32 thread_index) { - u32 thread_index = vlib_get_thread_index (); tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index); u32 bi, max_snd_bytes, available_bytes, offset; tcp_main_t *tm = vnet_get_tcp_main (); @@ -1677,9 +1675,6 @@ tcp_timer_persist_handler (u32 index) if (!tc) return; - /* Make sure timer handle is set to invalid */ - tc->timers[TCP_TIMER_PERSIST] = TCP_TIMER_HANDLE_INVALID; - /* Problem already solved or worse */ if (tc->state == TCP_STATE_CLOSED || tc->snd_wnd > tc->snd_mss || (tc->flags & TCP_CONN_FINSNT)) @@ -1697,10 +1692,7 @@ tcp_timer_persist_handler (u32 index) } if (available_bytes <= offset) - { - ASSERT (tcp_timer_is_active (tc, TCP_TIMER_RETRANSMIT)); - return; - } + return; /* Increment RTO backoff */ tc->rto_boff += 1; @@ -1728,6 +1720,12 @@ tcp_timer_persist_handler (u32 index) || tc->snd_nxt == tc->snd_una_max || tc->rto_boff > 1)); + if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE) + { + tcp_bt_check_app_limited (tc); + tcp_bt_track_tx (tc, n_bytes); + } + tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 0, /* update_snd_nxt */ 1); tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max); @@ -1748,7 +1746,7 @@ tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc) vlib_buffer_t *b; u32 bi, n_bytes; - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1); + TCP_EVT (TCP_EVT_CC_EVT, tc, 1); n_bytes = tcp_prepare_retransmit_segment (wrk, tc, 0, tc->snd_mss, &b); if (!n_bytes) @@ -1761,8 +1759,8 @@ tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc) } static int -tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, - u32 burst_size) +tcp_transmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, + u32 burst_size) { u32 offset, n_segs = 0, n_written, bi, available_wnd; vlib_main_t *vm = wrk->vm; @@ -1772,6 +1770,9 @@ tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, available_wnd = tc->snd_wnd - offset; burst_size = clib_min (burst_size, available_wnd / tc->snd_mss); + if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE) + tcp_bt_check_app_limited (tc); + while (n_segs < burst_size) { n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b); @@ -1783,6 +1784,9 @@ tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, offset += n_written; n_segs += 1; + if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE) + tcp_bt_track_tx (tc, n_written); + tc->snd_nxt += n_written; tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max); } @@ -1791,37 +1795,119 @@ done: return n_segs; } +/** + * Estimate send space using proportional rate reduction (RFC6937) + */ +int +tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc) +{ + u32 pipe, prr_out; + int space; + + pipe = tcp_flight_size (tc); + prr_out = tc->snd_rxt_bytes + (tc->snd_nxt - tc->snd_congestion); + + if (pipe > tc->ssthresh) + { + space = ((int) tc->prr_delivered * ((f64) tc->ssthresh / 
tc->prev_cwnd)) + - prr_out; + } + else + { + int limit; + limit = clib_max ((int) (tc->prr_delivered - prr_out), 0) + tc->snd_mss; + space = clib_min (tc->ssthresh - pipe, limit); + } + space = clib_max (space, prr_out ? 0 : tc->snd_mss); + return space; +} + +static inline u8 +tcp_retransmit_should_retry_head (tcp_connection_t * tc, + sack_scoreboard_t * sb) +{ + u32 tx_adv_sack = sb->high_sacked - tc->snd_congestion; + f64 rr = (f64) tc->ssthresh / tc->prev_cwnd; + + if (tcp_fastrecovery_first (tc)) + return 1; + + return (tx_adv_sack > (tc->snd_una - tc->prr_start) * rr); +} + +static inline u8 +tcp_max_tx_deq (tcp_connection_t * tc) +{ + return (transport_max_tx_dequeue (&tc->connection) + - (tc->snd_nxt - tc->snd_una)); +} + #define scoreboard_rescue_rxt_valid(_sb, _tc) \ (seq_geq (_sb->rescue_rxt, _tc->snd_una) \ && seq_leq (_sb->rescue_rxt, _tc->snd_congestion)) /** - * Do fast retransmit with SACKs + * Do retransmit with SACKs */ -int -tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, - u32 burst_size) +static int +tcp_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, + u32 burst_size) { - u32 n_written = 0, offset, max_bytes, n_segs = 0, n_segs_now; + u32 n_written = 0, offset, max_bytes, n_segs = 0; + u8 snd_limited = 0, can_rescue = 0; + u32 bi, max_deq, burst_bytes; sack_scoreboard_hole_t *hole; vlib_main_t *vm = wrk->vm; vlib_buffer_t *b = 0; sack_scoreboard_t *sb; - u32 bi, max_deq; int snd_space; - u8 snd_limited = 0, can_rescue = 0; - ASSERT (tcp_in_fastrecovery (tc)); + ASSERT (tcp_in_cong_recovery (tc)); - snd_space = tcp_available_cc_snd_space (tc); - if (snd_space < tc->snd_mss) + burst_bytes = transport_connection_tx_pacer_burst (&tc->connection); + burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss); + if (!burst_size) { - tcp_program_fastretransmit (tc); + tcp_program_retransmit (tc); return 0; } - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0); + if (tcp_in_recovery (tc)) + snd_space = tcp_available_cc_snd_space (tc); + else + snd_space = tcp_fastrecovery_prr_snd_space (tc); + + if (snd_space < tc->snd_mss) + goto done; + sb = &tc->sack_sb; + + /* Check if snd_una is a lost retransmit */ + if (pool_elts (sb->holes) + && seq_gt (sb->high_sacked, tc->snd_congestion) + && tc->rxt_head != tc->snd_una + && tcp_retransmit_should_retry_head (tc, sb)) + { + max_bytes = clib_min (tc->snd_mss, tc->snd_congestion - tc->snd_una); + n_written = tcp_prepare_retransmit_segment (wrk, tc, 0, max_bytes, &b); + if (!n_written) + { + tcp_program_retransmit (tc); + goto done; + } + bi = vlib_get_buffer_index (vm, b); + tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4); + n_segs = 1; + + tc->rxt_head = tc->snd_una; + tc->rxt_delivered += n_written; + tc->prr_delivered += n_written; + ASSERT (tc->rxt_delivered <= tc->snd_rxt_bytes); + } + + tcp_fastrecovery_first_off (tc); + + TCP_EVT (TCP_EVT_CC_EVT, tc, 0); hole = scoreboard_get_hole (sb, sb->cur_rxt_hole); max_deq = transport_max_tx_dequeue (&tc->connection); @@ -1829,23 +1915,35 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, while (snd_space > 0 && n_segs < burst_size) { - hole = scoreboard_next_rxt_hole (sb, hole, max_deq, &can_rescue, + hole = scoreboard_next_rxt_hole (sb, hole, max_deq != 0, &can_rescue, &snd_limited); if (!hole) { - if (max_deq) + /* We are out of lost holes to retransmit so send some new data. 
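 * Note that new data goes out only if more than one mss worth of it is queued.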
*/ + if (max_deq > tc->snd_mss) { + u32 n_segs_new; + int av_wnd; + + /* Make sure we don't exceed available window and leave space + * for one more packet, to avoid zero window acks */ + av_wnd = (int) tc->snd_wnd - (tc->snd_nxt - tc->snd_una); + av_wnd = clib_max (av_wnd - tc->snd_mss, 0); + snd_space = clib_min (snd_space, av_wnd); snd_space = clib_min (max_deq, snd_space); burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss); - n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size); - if (max_deq > n_segs_now * tc->snd_mss) - tcp_program_fastretransmit (tc); - n_segs += n_segs_now; + burst_size = clib_min (burst_size, TCP_RXT_MAX_BURST); + n_segs_new = tcp_transmit_unsent (wrk, tc, burst_size); + if (max_deq > n_segs_new * tc->snd_mss) + tcp_program_retransmit (tc); + + n_segs += n_segs_new; goto done; } - if (!can_rescue || scoreboard_rescue_rxt_valid (sb, tc)) + if (tcp_in_recovery (tc) || !can_rescue + || scoreboard_rescue_rxt_valid (sb, tc)) break; /* If rescue rxt undefined or less than snd_una then one segment of @@ -1853,16 +1951,16 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, * unSACKed sequence number SHOULD be returned, and RescueRxt set to * RecoveryPoint. HighRxt MUST NOT be updated. */ - max_bytes = clib_min (tc->snd_mss, - tc->snd_congestion - tc->snd_una); + hole = scoreboard_last_hole (sb); + max_bytes = clib_min (tc->snd_mss, hole->end - hole->start); max_bytes = clib_min (max_bytes, snd_space); - offset = tc->snd_congestion - tc->snd_una - max_bytes; - sb->rescue_rxt = tc->snd_congestion; + offset = hole->end - tc->snd_una - max_bytes; n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes, &b); if (!n_written) goto done; + sb->rescue_rxt = tc->snd_congestion; bi = vlib_get_buffer_index (vm, b); tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4); n_segs += 1; @@ -1887,33 +1985,48 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4); sb->high_rxt += n_written; + ASSERT (seq_leq (sb->high_rxt, tc->snd_nxt)); + snd_space -= n_written; n_segs += 1; } if (hole) - tcp_program_fastretransmit (tc); + tcp_program_retransmit (tc); done: + + transport_connection_tx_pacer_reset_bucket (&tc->connection, 0); return n_segs; } /** * Fast retransmit without SACK info */ -int -tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, - u32 burst_size) +static int +tcp_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, + u32 burst_size) { - u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now; + u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now, max_bytes; + u32 burst_bytes, sent_bytes; vlib_main_t *vm = wrk->vm; int snd_space, n_segs = 0; + u8 cc_limited = 0; vlib_buffer_t *b; - ASSERT (tcp_in_fastrecovery (tc)); - TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0); + ASSERT (tcp_in_cong_recovery (tc)); + TCP_EVT (TCP_EVT_CC_EVT, tc, 0); + + burst_bytes = transport_connection_tx_pacer_burst (&tc->connection); + burst_size = clib_min (burst_size, burst_bytes / tc->snd_mss); + if (!burst_size) + { + tcp_program_retransmit (tc); + return 0; + } snd_space = tcp_available_cc_snd_space (tc); + cc_limited = snd_space < burst_bytes; if (!tcp_fastrecovery_first (tc)) goto send_unsent; @@ -1922,8 +2035,12 @@ tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, * segment. 
*/ while (snd_space > 0 && n_segs < burst_size) { - n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, - tc->snd_mss, &b); + max_bytes = clib_min (tc->snd_mss, + tc->snd_congestion - tc->snd_una - offset); + if (!max_bytes) + break; + n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes, + &b); /* Nothing left to retransmit */ if (n_written == 0) @@ -1951,28 +2068,20 @@ send_unsent: { snd_space = clib_min (max_deq, snd_space); burst_size = clib_min (burst_size - n_segs, snd_space / tc->snd_mss); - n_segs_now = tcp_fast_retransmit_unsent (wrk, tc, burst_size); - if (max_deq > n_segs_now * tc->snd_mss) - tcp_program_fastretransmit (tc); + n_segs_now = tcp_transmit_unsent (wrk, tc, burst_size); + if (n_segs_now && max_deq > n_segs_now * tc->snd_mss) + tcp_program_retransmit (tc); n_segs += n_segs_now; } done: tcp_fastrecovery_first_off (tc); - return n_segs; -} -/** - * Do fast retransmit - */ -int -tcp_fast_retransmit (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, - u32 burst_size) -{ - if (tcp_opts_sack_permitted (&tc->rcv_opts)) - return tcp_fast_retransmit_sack (wrk, tc, burst_size); - else - return tcp_fast_retransmit_no_sack (wrk, tc, burst_size); + sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes); + sent_bytes = cc_limited ? burst_bytes : sent_bytes; + transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes); + + return n_segs; } static int @@ -1982,8 +2091,13 @@ tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size) if (!tc->pending_dupacks) { - tcp_send_ack (tc); - return 1; + if (tcp_in_cong_recovery (tc) || !tcp_max_tx_deq (tc) + || tc->state != TCP_STATE_ESTABLISHED) + { + tcp_send_ack (tc); + return 1; + } + return 0; } /* If we're supposed to send dupacks but have no ooo data @@ -1991,6 +2105,7 @@ tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size) if (!vec_len (tc->snd_sacks)) { tcp_send_ack (tc); + tc->pending_dupacks = 0; return 1; } @@ -2024,25 +2139,21 @@ tcp_send_acks (tcp_connection_t * tc, u32 max_burst_size) } static int -tcp_do_fastretransmit (tcp_connection_t * tc, u32 max_burst_size) +tcp_do_retransmit (tcp_connection_t * tc, u32 max_burst_size) { - u32 n_segs = 0, burst_size, sent_bytes, burst_bytes; tcp_worker_ctx_t *wrk; + u32 n_segs; + + if (PREDICT_FALSE (tc->state == TCP_STATE_CLOSED)) + return 0; wrk = tcp_get_worker (tc->c_thread_index); - burst_bytes = transport_connection_tx_pacer_burst (&tc->connection, - wrk->vm-> - clib_time.last_cpu_time); - burst_size = clib_min (max_burst_size, burst_bytes / tc->snd_mss); - if (!burst_size) - { - tcp_program_fastretransmit (tc); - return 0; - } - n_segs = tcp_fast_retransmit (wrk, tc, burst_size); - sent_bytes = clib_min (n_segs * tc->snd_mss, burst_bytes); - transport_connection_tx_pacer_update_bytes (&tc->connection, sent_bytes); + if (tcp_opts_sack_permitted (&tc->rcv_opts)) + n_segs = tcp_retransmit_sack (wrk, tc, max_burst_size); + else + n_segs = tcp_retransmit_no_sack (wrk, tc, max_burst_size); + return n_segs; } @@ -2052,10 +2163,10 @@ tcp_session_custom_tx (void *conn, u32 max_burst_size) tcp_connection_t *tc = (tcp_connection_t *) conn; u32 n_segs = 0; - if (tcp_in_fastrecovery (tc) && (tc->flags & TCP_CONN_FRXT_PENDING)) + if (tcp_in_cong_recovery (tc) && (tc->flags & TCP_CONN_RXT_PENDING)) { - tc->flags &= ~TCP_CONN_FRXT_PENDING; - n_segs = tcp_do_fastretransmit (tc, max_burst_size); + tc->flags &= ~TCP_CONN_RXT_PENDING; + n_segs = tcp_do_retransmit (tc, max_burst_size); max_burst_size -= n_segs; } @@ -2116,16 +2227,17 @@ static void 
tcp46_output_trace_frame (vlib_main_t * vm, vlib_node_runtime_t * node, u32 * to_next, u32 n_bufs) { - u32 n_trace = vlib_get_trace_count (vm, node); tcp_connection_t *tc; tcp_tx_trace_t *t; vlib_buffer_t *b; tcp_header_t *th; int i; - for (i = 0; i < clib_min (n_trace, n_bufs); i++) + for (i = 0; i < n_bufs; i++) { b = vlib_get_buffer (vm, to_next[i]); + if (!(b->flags & VLIB_BUFFER_IS_TRACED)) + continue; th = vlib_buffer_get_current (b); tc = tcp_connection_get (vnet_buffer (b)->tcp.connection_index, vm->thread_index); @@ -2139,27 +2251,39 @@ always_inline void tcp_output_push_ip (vlib_main_t * vm, vlib_buffer_t * b0, tcp_connection_t * tc0, u8 is_ip4) { - tcp_header_t *th0 = 0; + TCP_EVT (TCP_EVT_OUTPUT, tc0, + ((tcp_header_t *) vlib_buffer_get_current (b0))->flags, + b0->current_length); - th0 = vlib_buffer_get_current (b0); - TCP_EVT_DBG (TCP_EVT_OUTPUT, tc0, th0->flags, b0->current_length); if (is_ip4) - { - vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4, - IP_PROTOCOL_TCP, 1); - b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM; - vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data; - th0->checksum = 0; - } + vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4, + IP_PROTOCOL_TCP, tcp_csum_offload (tc0)); + else + vlib_buffer_push_ip6_custom (vm, b0, &tc0->c_lcl_ip6, &tc0->c_rmt_ip6, + IP_PROTOCOL_TCP, tc0->ipv6_flow_label); +} + +always_inline void +tcp_check_if_gso (tcp_connection_t * tc, vlib_buffer_t * b) +{ + if (PREDICT_TRUE (!(tc->cfg_flags & TCP_CFG_F_TSO))) + return; + + u16 data_len = b->current_length - sizeof (tcp_header_t) - tc->snd_opts_len; + + if (PREDICT_FALSE (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)) + data_len += b->total_length_not_including_first_buffer; + + if (PREDICT_TRUE (data_len <= tc->snd_mss)) + return; else { - ip6_header_t *ih0; - ih0 = vlib_buffer_push_ip6 (vm, b0, &tc0->c_lcl_ip6, - &tc0->c_rmt_ip6, IP_PROTOCOL_TCP); - b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM; - vnet_buffer (b0)->l3_hdr_offset = (u8 *) ih0 - b0->data; - vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data; - th0->checksum = 0; + ASSERT ((b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID) != 0); + ASSERT ((b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID) != 0); + b->flags |= VNET_BUFFER_F_GSO; + vnet_buffer2 (b)->gso_l4_hdr_sz = + sizeof (tcp_header_t) + tc->snd_opts_len; + vnet_buffer2 (b)->gso_size = tc->snd_mss; } } @@ -2174,6 +2298,10 @@ tcp_output_handle_packet (tcp_connection_t * tc0, vlib_buffer_t * b0, *next0 = tc0->next_node_index; vnet_buffer (b0)->tcp.next_node_opaque = tc0->next_node_opaque; } + else + { + *next0 = TCP_OUTPUT_NEXT_IP_LOOKUP; + } vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index; vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; @@ -2232,18 +2360,49 @@ tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node, CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE); } - next[0] = next[1] = TCP_OUTPUT_NEXT_IP_LOOKUP; - tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index, thread_index); tc1 = tcp_connection_get (vnet_buffer (b[1])->tcp.connection_index, thread_index); - tcp_output_push_ip (vm, b[0], tc0, is_ip4); - tcp_output_push_ip (vm, b[1], tc1, is_ip4); + if (PREDICT_TRUE (!tc0 + !tc1 == 0)) + { + tcp_output_push_ip (vm, b[0], tc0, is_ip4); + tcp_output_push_ip (vm, b[1], tc1, is_ip4); - tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4); - tcp_output_handle_packet (tc1, b[1], error_node, &next[1], is_ip4); + tcp_check_if_gso (tc0, b[0]); + tcp_check_if_gso (tc1, b[1]); + + 
tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4); + tcp_output_handle_packet (tc1, b[1], error_node, &next[1], is_ip4); + } + else + { + if (tc0 != 0) + { + tcp_output_push_ip (vm, b[0], tc0, is_ip4); + tcp_check_if_gso (tc0, b[0]); + tcp_output_handle_packet (tc0, b[0], error_node, &next[0], + is_ip4); + } + else + { + b[0]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION]; + next[0] = TCP_OUTPUT_NEXT_DROP; + } + if (tc1 != 0) + { + tcp_output_push_ip (vm, b[1], tc1, is_ip4); + tcp_check_if_gso (tc1, b[1]); + tcp_output_handle_packet (tc1, b[1], error_node, &next[1], + is_ip4); + } + else + { + b[1]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION]; + next[1] = TCP_OUTPUT_NEXT_DROP; + } + } b += 2; next += 2; @@ -2259,12 +2418,20 @@ tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node, CLIB_PREFETCH (b[1]->data, 2 * CLIB_CACHE_LINE_BYTES, STORE); } - next[0] = TCP_OUTPUT_NEXT_IP_LOOKUP; tc0 = tcp_connection_get (vnet_buffer (b[0])->tcp.connection_index, thread_index); - tcp_output_push_ip (vm, b[0], tc0, is_ip4); - tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4); + if (PREDICT_TRUE (tc0 != 0)) + { + tcp_output_push_ip (vm, b[0], tc0, is_ip4); + tcp_check_if_gso (tc0, b[0]); + tcp_output_handle_packet (tc0, b[0], error_node, &next[0], is_ip4); + } + else + { + b[0]->error = error_node->errors[TCP_ERROR_INVALID_CONNECTION]; + next[0] = TCP_OUTPUT_NEXT_DROP; + } b += 1; next += 1;
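
Two of the additions in this patch lean on fairly dense arithmetic, so a couple of standalone sketches (plain C, not VPP code) may help illustrate them.

The first shows the kind of result the software checksum fallback (tcp_compute_checksum calling ip4_tcp_compute_checksum_custom when TCP_CFG_F_NO_CSUM_OFFLOAD is set) has to produce: the one's-complement sum over the IPv4 pseudo-header and the TCP segment. The helper name and the host-order address convention are assumptions made for the sketch.

#include <stddef.h>
#include <stdint.h>

/* One's-complement TCP/IPv4 checksum over the pseudo-header and the
 * segment (header + payload), with the checksum field already zeroed.
 * src/dst are the addresses as native integers, e.g. 192.168.0.1 is
 * 0xC0A80001. Hypothetical helper, not part of the patch. */
static uint16_t
tcp4_checksum_sw (uint32_t src, uint32_t dst, const uint8_t * tcp, size_t len)
{
  uint64_t sum = 0;
  size_t i;

  /* Pseudo-header: source, destination, protocol 6, TCP length */
  sum += (src >> 16) + (src & 0xffff);
  sum += (dst >> 16) + (dst & 0xffff);
  sum += 6 + (uint64_t) len;

  /* Segment, 16 bits at a time, odd trailing byte padded with zero */
  for (i = 0; i + 1 < len; i += 2)
    sum += ((uint16_t) tcp[i] << 8) | tcp[i + 1];
  if (len & 1)
    sum += (uint16_t) tcp[len - 1] << 8;

  /* Fold the carries back in and complement */
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

The second mirrors the proportional rate reduction estimate (RFC 6937) added as tcp_fastrecovery_prr_snd_space, using plain ints in place of the connection fields and assuming prev_cwnd stands in for RecoverFS, together with a small worked example.

#include <stdio.h>

/* PRR send-space estimate with plain integers; mirrors the logic of
 * tcp_fastrecovery_prr_snd_space above. Not VPP code. */
static int
prr_snd_space (int pipe, int ssthresh, int prev_cwnd, int prr_delivered,
	       int prr_out, int snd_mss)
{
  int space;

  if (pipe > ssthresh)
    /* Proportional part: send ssthresh/prev_cwnd of what the peer has
     * newly delivered, minus what recovery already put on the wire */
    space = (int) (prr_delivered * ((double) ssthresh / prev_cwnd)) - prr_out;
  else
    {
      /* Slow-start reduction bound part of PRR-SSRB */
      int limit = (prr_delivered > prr_out ? prr_delivered - prr_out : 0)
		  + snd_mss;
      space = ssthresh - pipe < limit ? ssthresh - pipe : limit;
    }
  /* Allow at least one segment if nothing was sent in recovery yet */
  if (space < (prr_out ? 0 : snd_mss))
    space = prr_out ? 0 : snd_mss;
  return space;
}

int
main (void)
{
  /* cwnd was 10 segments of 1460B, ssthresh halved to 7300B, 5840B
   * delivered since recovery started, 1460B already retransmitted and
   * 11680B still in flight: PRR allows one more segment (1460) */
  printf ("prr snd space: %d\n",
	  prr_snd_space (11680, 7300, 14600, 5840, 1460, 1460));
  return 0;
}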