X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Ftcp%2Ftcp.h;h=361abe2572938be813e94a19ada8462b2bc4a967;hb=db88ffba2a713442cb632382889c768e682fd47f;hp=955b2dde8fe550526009fcc807cdc1a25285352a;hpb=d6ae4bf13a7819d64d128d196d491af4700fa948;p=vpp.git

diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h
index 955b2dde8fe..361abe25729 100644
--- a/src/vnet/tcp/tcp.h
+++ b/src/vnet/tcp/tcp.h
@@ -82,13 +82,6 @@ typedef enum _tcp_timers
   TCP_N_TIMERS
 } tcp_timers_e;
 
-typedef void (timer_expiration_handler) (u32 index);
-
-extern timer_expiration_handler tcp_timer_delack_handler;
-extern timer_expiration_handler tcp_timer_retransmit_handler;
-extern timer_expiration_handler tcp_timer_persist_handler;
-extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
-
 #define TCP_TIMER_HANDLE_INVALID ((u32) ~0)
 
 #define TCP_TIMER_TICK 0.1		/**< Timer tick in seconds */
@@ -109,6 +102,7 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
   _(NO_CSUM_OFFLOAD, "No csum offload")		\
   _(NO_TSO, "TSO off")				\
   _(TSO, "TSO")					\
+  _(NO_ENDPOINT,"No endpoint")			\
 
 typedef enum tcp_cfg_flag_bits_
 {
@@ -256,6 +250,7 @@ typedef enum tcp_bts_flags_
   TCP_BTS_IS_RXT = 1,
   TCP_BTS_IS_APP_LIMITED = 1 << 1,
   TCP_BTS_IS_SACKED = 1 << 2,
+  TCP_BTS_IS_RXT_LOST = 1 << 3,
 } __clib_packed tcp_bts_flags_t;
 
 typedef struct tcp_bt_sample_
@@ -268,6 +263,8 @@ typedef struct tcp_bt_sample_
   f64 delivered_time;		/**< Delivered time when sample taken */
   f64 tx_time;			/**< Transmit time for the burst */
   f64 first_tx_time;		/**< Connection first tx time at tx */
+  u64 tx_in_flight;		/**< In flight at tx time */
+  u64 tx_lost;			/**< Lost at tx time */
   tcp_bts_flags_t flags;	/**< Sample flag */
 } tcp_bt_sample_t;
 
@@ -278,9 +275,12 @@ typedef struct tcp_rate_sample_
   f64 prior_time;		/**< Delivered time of sample used for rate */
   f64 interval_time;		/**< Time to ack the bytes delivered */
   f64 rtt_time;			/**< RTT for sample */
+  u64 tx_in_flight;		/**< In flight at (re)transmit time */
+  u64 tx_lost;			/**< Lost over interval */
   u32 delivered;		/**< Bytes delivered in interval_time */
   u32 acked_and_sacked;		/**< Bytes acked + sacked now */
-  u32 lost;			/**< Bytes lost now */
+  u32 last_lost;		/**< Bytes lost now */
+  u32 lost;			/**< Number of bytes lost over interval */
   tcp_bts_flags_t flags;	/**< Rate sample flags from bt sample */
 } tcp_rate_sample_t;
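
The byte-tracker and rate samples above now carry transmit-time snapshots
(tx_in_flight, tx_lost) and split loss accounting into last_lost (bytes lost
now) and lost (bytes lost over the sample interval). As a purely illustrative
sketch based only on the field comments above, a consumer of a rate sample
might derive an interval loss fraction like this; example_loss_fraction is
hypothetical and not part of this header:

/* Hypothetical helper, illustration only: reads the interval counters that
 * tcp_rate_sample_t exposes above. */
static inline f64
example_loss_fraction (const tcp_rate_sample_t * rs)
{
  u64 total = (u64) rs->delivered + rs->lost;	/* delivered + lost in interval */
  return total ? (f64) rs->lost / (f64) total : 0.0;
}
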
@@ -393,7 +393,6 @@ typedef struct _tcp_connection
   u32 prr_start;		/**< snd_una when prr starts */
   u32 rxt_delivered;		/**< Rxt bytes delivered during current cc event */
   u32 rxt_head;			/**< snd_una last time we re rxted the head */
-  u32 prev_dsegs_out;		/**< Number of dsegs after last ack */
   u32 tsecr_last_ack;		/**< Timestamp echoed to us in last healthy ACK */
   u32 snd_congestion;		/**< snd_una_max when congestion is detected */
   u32 tx_fifo_size;		/**< Tx fifo size. Used to constrain cwnd */
@@ -426,6 +425,7 @@ typedef struct _tcp_connection
   u64 app_limited;		/**< Delivered when app-limited detected */
   f64 delivered_time;		/**< Time last bytes were acked */
   f64 first_tx_time;		/**< Send time for recently delivered/sent */
+  u64 lost;			/**< Total bytes lost */
   tcp_byte_tracker_t *bt;	/**< Tx byte tracker */
 
   tcp_errors_t errors;		/**< Soft connection errors */
@@ -434,6 +434,9 @@ typedef struct _tcp_connection
   u32 last_fib_check;		/**< Last time we checked fib route for peer */
   u16 mss;			/**< Our max seg size that includes options */
   u32 timestamp_delta;		/**< Offset for timestamp */
+  u32 ipv6_flow_label;		/**< flow label for ipv6 header */
+
+#define rst_state snd_wl1
 } tcp_connection_t;
 
 /* *INDENT-OFF* */
@@ -474,6 +477,13 @@ struct _tcp_cc_algorithm
 #define tcp_csum_offload(tc) (!((tc)->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
 
+typedef void (timer_expiration_handler) (tcp_connection_t * tc);
+
+extern timer_expiration_handler tcp_timer_delack_handler;
+extern timer_expiration_handler tcp_timer_retransmit_handler;
+extern timer_expiration_handler tcp_timer_persist_handler;
+extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
+
 always_inline void
 tcp_cong_recovery_off (tcp_connection_t * tc)
 {
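
With the typedef relocated below tcp_connection_t, timer expiration handlers
now take the connection itself instead of a u32 index. A minimal, hypothetical
handler written against the new signature (example_timer_expired is not part
of this header; the real handlers are defined elsewhere in the TCP layer):

/* Hypothetical handler, illustration only: it merely demonstrates the new
 * (tcp_connection_t *) parameter. */
static void
example_timer_expired (tcp_connection_t * tc)
{
  /* connection state such as tc->rto is reachable directly; no lookup
   * by index is needed anymore */
  (void) tc->rto;
}
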
@@ -498,20 +508,38 @@ typedef struct _tcp_lookup_dispatch
   u8 next, error;
 } tcp_lookup_dispatch_t;
 
+#define foreach_tcp_wrk_stat					\
+  _(timer_expirations, u64, "timer expirations")		\
+  _(rxt_segs, u64, "segments retransmitted")			\
+  _(tr_events, u32, "timer retransmit events")			\
+  _(to_closewait, u32, "timeout close-wait")			\
+  _(to_closewait2, u32, "timeout close-wait w/data")		\
+  _(to_finwait1, u32, "timeout fin-wait-1")			\
+  _(to_finwait2, u32, "timeout fin-wait-2")			\
+  _(to_lastack, u32, "timeout last-ack")			\
+  _(to_closing, u32, "timeout closing")				\
+  _(tr_abort, u32, "timer retransmit abort")			\
+  _(rst_unread, u32, "reset on close due to unread data")	\
+
+typedef struct tcp_wrk_stats_
+{
+#define _(name, type, str) type name;
+  foreach_tcp_wrk_stat
+#undef _
+} tcp_wrk_stats_t;
+
+typedef struct tcp_free_req_
+{
+  clib_time_type_t free_time;
+  u32 connection_index;
+} tcp_cleanup_req_t;
+
 typedef struct tcp_worker_ctx_
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
 
-  /** worker time */
-  u32 time_now;
-  /** worker timer wheel */
-  tw_timer_wheel_16t_2w_512sl_t timer_wheel;
-
-  /** tx buffer free list */
-  u32 *tx_buffers;
-
-  /** tx frames for ip 4/6 lookup nodes */
-  vlib_frame_t *ip_lookup_tx_frames[2];
+  /** worker's pool of connections */
+  tcp_connection_t *connections;
 
   /** vector of pending ack dequeues */
   u32 *pending_deq_acked;
@@ -519,16 +547,49 @@ typedef struct tcp_worker_ctx_
   /** vector of pending disconnect notifications */
   u32 *pending_disconnects;
 
+  /** vector of pending reset notifications */
+  u32 *pending_resets;
+
   /** convenience pointer to this thread's vlib main */
   vlib_main_t *vm;
 
+  /** worker time */
+  u32 time_now;
+
+  /* Max timers to be handled per dispatch loop */
+  u32 max_timers_per_loop;
+
+  /** tx frames for ip 4/6 lookup nodes */
+  vlib_frame_t *ip_lookup_tx_frames[2];
+
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
 
   /** cached 'on the wire' options for bursts */
   u8 cached_opts[40];
 
+  /** tx buffer free list */
+  u32 *tx_buffers;
+
+  /* Fifo of pending timer expirations */
+  u32 *pending_timers;
+
+  /* fifo of pending free requests */
+  tcp_cleanup_req_t *pending_cleanups;
+
+  /** worker timer wheel */
+  tw_timer_wheel_16t_2w_512sl_t timer_wheel;
+
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
+
+  tcp_wrk_stats_t stats;
 } tcp_worker_ctx_t;
 
+#define tcp_worker_stats_inc(_ti,_stat,_val) 		\
+  tcp_main.wrk_ctx[_ti].stats._stat += _val
+
+#define tcp_workerp_stats_inc(_wrk,_stat,_val) 		\
+  _wrk->stats._stat += _val
+
 typedef struct tcp_iss_seed_
 {
   u64 first;
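
The reworked worker context keeps the per-thread connection pool next to the
rest of the per-worker state and adds a stats block generated from
foreach_tcp_wrk_stat, together with two increment macros: one keyed by thread
index, one by an already-resolved context pointer. A small hypothetical usage
sketch (example_count_rxt_segs is not part of this header):

/* Hypothetical helper, illustration only: bumps the per-worker rxt_segs
 * counter declared via foreach_tcp_wrk_stat. */
static inline void
example_count_rxt_segs (u32 thread_index, u32 n_segs)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  tcp_workerp_stats_inc (wrk, rxt_segs, n_segs);
  /* equivalent, without the explicit lookup:
     tcp_worker_stats_inc (thread_index, rxt_segs, n_segs); */
}
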
@@ -557,6 +618,9 @@ typedef struct tcp_configuration_
   /** Allow use of TSO whenever available */
   u8 allow_tso;
 
+  /** Set if csum offloading is enabled */
+  u8 csum_offload;
+
   /** Default congestion control algorithm type */
   tcp_cc_algorithm_type_e cc_algo;
 
@@ -585,8 +649,8 @@ typedef struct tcp_configuration_
   /** Timer ticks to wait in closing for fin ack */
   u16 closing_time;
 
-  /** Timer ticks to wait before cleaning up the connection */
-  u16 cleanup_time;
+  /** Time to wait (sec) before cleaning up the connection */
+  f32 cleanup_time;
 
   /** Number of preallocated connections */
   u32 preallocated_connections;
@@ -604,27 +668,24 @@ typedef struct _tcp_main
 {
-  /* Per-worker thread tcp connection pools */
-  tcp_connection_t **connections;
+  /** per-worker context */
+  tcp_worker_ctx_t *wrk_ctx;
 
   /* Pool of listeners. */
   tcp_connection_t *listener_pool;
 
+  f64 tstamp_ticks_per_clock;
+
+  /** vlib buffer size */
+  u32 bytes_per_buffer;
+
   /** Dispatch table by state and flags */
   tcp_lookup_dispatch_t dispatch_table[TCP_N_STATES][64];
 
-  u8 log2_tstamp_clocks_per_tick;
-  f64 tstamp_ticks_per_clock;
-
-  /** per-worker context */
-  tcp_worker_ctx_t *wrk_ctx;
+  clib_spinlock_t half_open_lock;
 
   /** Pool of half-open connections on which we've sent a SYN */
   tcp_connection_t *half_open_connections;
-  clib_spinlock_t half_open_lock;
-
-  /** vlib buffer size */
-  u32 bytes_per_buffer;
 
   /** Seed used to generate random iss */
   tcp_iss_seed_t iss_seed;
@@ -684,6 +745,7 @@ vnet_get_tcp_main ()
 always_inline tcp_worker_ctx_t *
 tcp_get_worker (u32 thread_index)
 {
+  ASSERT (thread_index < vec_len (tcp_main.wrk_ctx));
   return &tcp_main.wrk_ctx[thread_index];
 }
 
@@ -711,20 +773,22 @@ void tcp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add);
 always_inline tcp_connection_t *
 tcp_connection_get (u32 conn_index, u32 thread_index)
 {
-  if (PREDICT_FALSE
-      (pool_is_free_index (tcp_main.connections[thread_index], conn_index)))
+  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+  if (PREDICT_FALSE (pool_is_free_index (wrk->connections, conn_index)))
     return 0;
-  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
+  return pool_elt_at_index (wrk->connections, conn_index);
 }
 
 always_inline tcp_connection_t *
 tcp_connection_get_if_valid (u32 conn_index, u32 thread_index)
 {
-  if (tcp_main.connections[thread_index] == 0)
+  tcp_worker_ctx_t *wrk;
+  if (thread_index >= vec_len (tcp_main.wrk_ctx))
     return 0;
-  if (pool_is_free_index (tcp_main.connections[thread_index], conn_index))
+  wrk = tcp_get_worker (thread_index);
+  if (pool_is_free_index (wrk->connections, conn_index))
     return 0;
-  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
+  return pool_elt_at_index (wrk->connections, conn_index);
 }
 
 always_inline tcp_connection_t *
@@ -748,7 +812,6 @@ tcp_connection_t *tcp_connection_alloc (u8 thread_index);
 tcp_connection_t *tcp_connection_alloc_w_base (u8 thread_index,
					       tcp_connection_t * base);
 void tcp_connection_free (tcp_connection_t * tc);
-void tcp_connection_reset (tcp_connection_t * tc);
 int tcp_configure_v4_source_address_range (vlib_main_t * vm,
					   ip4_address_t * start,
					   ip4_address_t * end, u32 table_id);
@@ -757,11 +820,15 @@ int tcp_configure_v6_source_address_range (vlib_main_t * vm,
					   ip6_address_t * end, u32 table_id);
 void tcp_api_reference (void);
 u8 *format_tcp_connection (u8 * s, va_list * args);
+u8 *format_tcp_connection_id (u8 * s, va_list * args);
 
 always_inline tcp_connection_t *
 tcp_listener_get (u32 tli)
 {
-  return pool_elt_at_index (tcp_main.listener_pool, tli);
+  tcp_connection_t *tc = 0;
+  if (!pool_is_free_index (tcp_main.listener_pool, tli))
+    tc = pool_elt_at_index (tcp_main.listener_pool, tli);
+  return tc;
 }
 
 always_inline tcp_connection_t *
@@ -786,7 +853,6 @@ void tcp_send_fin (tcp_connection_t * tc);
 void tcp_send_ack (tcp_connection_t * tc);
 void tcp_update_burst_snd_vars (tcp_connection_t * tc);
 void tcp_update_rto (tcp_connection_t * tc);
-void tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk);
 void tcp_send_window_update_ack (tcp_connection_t * tc);
 void tcp_program_ack (tcp_connection_t * tc);
 
@@ -880,7 +946,8 @@ tcp_bytes_out (const tcp_connection_t * tc)
   if (tcp_opts_sack_permitted (&tc->rcv_opts))
     return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes;
   else
-    return tc->rcv_dupacks * tc->snd_mss;
+    return clib_min (tc->rcv_dupacks * tc->snd_mss,
+		     tc->snd_nxt - tc->snd_una);
 }
 
 /**
@@ -975,6 +1042,12 @@ tcp_available_cc_snd_space (const tcp_connection_t * tc)
   return available_wnd - flight_size;
 }
 
+static inline u8
+tcp_is_descheduled (tcp_connection_t * tc)
+{
+  return (transport_connection_is_descheduled (&tc->connection) ? 1 : 0);
+}
+
 always_inline u8
 tcp_is_lost_fin (tcp_connection_t * tc)
 {
@@ -985,6 +1058,7 @@ u32 tcp_snd_space (tcp_connection_t * tc);
 int tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc);
+void tcp_reschedule (tcp_connection_t * tc);
 
 fib_node_index_t tcp_lookup_rmt_in_fib (tcp_connection_t * tc);
 
@@ -1038,6 +1112,7 @@ void tcp_connection_init_vars (tcp_connection_t * tc);
 void tcp_connection_tx_pacer_update (tcp_connection_t * tc);
 void tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
				     u32 start_bucket);
+void tcp_program_cleanup (tcp_worker_ctx_t * wrk, tcp_connection_t * tc);
 
 always_inline void
 tcp_cc_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
 {
@@ -1169,8 +1244,14 @@ tcp_persist_timer_set (tcp_connection_t * tc)
 always_inline void
 tcp_persist_timer_update (tcp_connection_t * tc)
 {
-  tcp_timer_update (tc, TCP_TIMER_PERSIST,
-		    clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
+  u32 interval;
+
+  if (seq_leq (tc->snd_una, tc->snd_congestion + tc->burst_acked))
+    interval = 1;
+  else
+    interval = clib_max (tc->rto * TCP_TO_TIMER_TICK, 1);
+
+  tcp_timer_update (tc, TCP_TIMER_PERSIST, interval);
 }
 
 always_inline void
@@ -1226,6 +1307,14 @@ tcp_cc_data (tcp_connection_t * tc)
 void newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
			    tcp_rate_sample_t * rs);
 
+/**
+ * Initialize connection by gleaning network and rcv params from buffer
+ *
+ * @param tc connection to initialize
+ * @param b buffer whose current data is pointing at ip
+ * @param is_ip4 flag set to 1 if using ip4
+ */
+void tcp_init_w_buffer (tcp_connection_t * tc, vlib_buffer_t * b, u8 is_ip4);
 
 /**
  * Push TCP header to buffer