X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Ftcp%2Ftcp.h;h=998806969371a1fe06267f5a8b8dd06096794d90;hb=27bb99ec804afc5977897fe97043735a508c4a78;hp=4b0a0ddea53df20963af68fb127c486077139156;hpb=2089c69efbc4ae5c64cab8a5418187b21478987a;p=vpp.git

diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h
index 4b0a0ddea53..99880696937 100644
--- a/src/vnet/tcp/tcp.h
+++ b/src/vnet/tcp/tcp.h
@@ -82,13 +82,6 @@ typedef enum _tcp_timers
   TCP_N_TIMERS
 } tcp_timers_e;
 
-typedef void (timer_expiration_handler) (u32 index);
-
-extern timer_expiration_handler tcp_timer_delack_handler;
-extern timer_expiration_handler tcp_timer_retransmit_handler;
-extern timer_expiration_handler tcp_timer_persist_handler;
-extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
-
 #define TCP_TIMER_HANDLE_INVALID ((u32) ~0)
 
 #define TCP_TIMER_TICK		0.1	/**< Timer tick in seconds */
@@ -109,6 +102,7 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
   _(NO_CSUM_OFFLOAD, "No csum offload") \
   _(NO_TSO, "TSO off") \
   _(TSO, "TSO") \
+  _(NO_ENDPOINT,"No endpoint") \
 
 typedef enum tcp_cfg_flag_bits_
 {
@@ -394,7 +388,6 @@ typedef struct _tcp_connection
   u32 prr_start;	/**< snd_una when prr starts */
   u32 rxt_delivered;	/**< Rxt bytes delivered during current cc event */
   u32 rxt_head;		/**< snd_una last time we re rxted the head */
-  u32 prev_dsegs_out;	/**< Number of dsegs after last ack */
   u32 tsecr_last_ack;	/**< Timestamp echoed to us in last healthy ACK */
   u32 snd_congestion;	/**< snd_una_max when congestion is detected */
   u32 tx_fifo_size;	/**< Tx fifo size. Used to constrain cwnd */
@@ -436,6 +429,8 @@ typedef struct _tcp_connection
   u16 mss;		/**< Our max seg size that includes options */
   u32 timestamp_delta;	/**< Offset for timestamp */
   u32 ipv6_flow_label;	/**< flow label for ipv6 header */
+
+#define rst_state snd_wl1
 } tcp_connection_t;
 
 /* *INDENT-OFF* */
@@ -476,6 +471,13 @@ struct _tcp_cc_algorithm
 
 #define tcp_csum_offload(tc) (!((tc)->cfg_flags & TCP_CFG_F_NO_CSUM_OFFLOAD))
 
+typedef void (timer_expiration_handler) (tcp_connection_t * tc);
+
+extern timer_expiration_handler tcp_timer_delack_handler;
+extern timer_expiration_handler tcp_timer_retransmit_handler;
+extern timer_expiration_handler tcp_timer_persist_handler;
+extern timer_expiration_handler tcp_timer_retransmit_syn_handler;
+
 always_inline void
 tcp_cong_recovery_off (tcp_connection_t * tc)
 {
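Note: the hunks above drop the old typedef void (timer_expiration_handler) (u32 index) and re-declare the typedef after tcp_connection_t with a tcp_connection_t * argument, so timer handlers now receive the connection itself instead of an index they must resolve. A standalone sketch of the function-pointer pattern this enables (toy demo_* names, not VPP code):

/* Standalone sketch, not VPP code: the same function-typedef pattern with
 * toy types, showing handlers that take the connection pointer directly. */
#include <stdio.h>

typedef struct { unsigned conn_index; } demo_connection_t;
typedef void (demo_timer_handler) (demo_connection_t * tc);

static void
demo_retransmit_handler (demo_connection_t * tc)
{
  printf ("retransmit timer fired on connection %u\n", tc->conn_index);
}

/* table of handler pointers, indexed by a timer id */
static demo_timer_handler *demo_handlers[] = { demo_retransmit_handler };

static void
demo_timer_expired (demo_connection_t * tc, unsigned timer_id)
{
  if (timer_id < 1 && demo_handlers[timer_id])
    demo_handlers[timer_id] (tc);
}
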
@@ -500,20 +502,38 @@ typedef struct _tcp_lookup_dispatch
   u8 next, error;
 } tcp_lookup_dispatch_t;
 
+#define foreach_tcp_wrk_stat					\
+  _(timer_expirations, u64, "timer expirations")		\
+  _(rxt_segs, u64, "segments retransmitted")			\
+  _(tr_events, u32, "timer retransmit events")			\
+  _(to_closewait, u32, "timeout close-wait")			\
+  _(to_closewait2, u32, "timeout close-wait w/data")		\
+  _(to_finwait1, u32, "timeout fin-wait-1")			\
+  _(to_finwait2, u32, "timeout fin-wait-2")			\
+  _(to_lastack, u32, "timeout last-ack")			\
+  _(to_closing, u32, "timeout closing")				\
+  _(tr_abort, u32, "timer retransmit abort")			\
+  _(rst_unread, u32, "reset on close due to unread data")	\
+
+typedef struct tcp_wrk_stats_
+{
+#define _(name, type, str) type name;
+  foreach_tcp_wrk_stat
+#undef _
+} tcp_wrk_stats_t;
+
+typedef struct tcp_free_req_
+{
+  clib_time_type_t free_time;
+  u32 connection_index;
+} tcp_cleanup_req_t;
+
 typedef struct tcp_worker_ctx_
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  /** worker time */
-  u32 time_now;
-
-  /** worker timer wheel */
-  tw_timer_wheel_16t_2w_512sl_t timer_wheel;
-
-  /** tx buffer free list */
-  u32 *tx_buffers;
-
-  /** tx frames for ip 4/6 lookup nodes */
-  vlib_frame_t *ip_lookup_tx_frames[2];
+
+  /** worker's pool of connections */
+  tcp_connection_t *connections;
 
   /** vector of pending ack dequeues */
   u32 *pending_deq_acked;
@@ -521,16 +541,49 @@ typedef struct tcp_worker_ctx_
   /** vector of pending disconnect notifications */
   u32 *pending_disconnects;
 
+  /** vector of pending reset notifications */
+  u32 *pending_resets;
+
   /** convenience pointer to this thread's vlib main */
   vlib_main_t *vm;
 
+  /** worker time */
+  u32 time_now;
+
+  /* Max timers to be handled per dispatch loop */
+  u32 max_timers_per_loop;
+
+  /** tx frames for ip 4/6 lookup nodes */
+  vlib_frame_t *ip_lookup_tx_frames[2];
+
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
 
   /** cached 'on the wire' options for bursts */
   u8 cached_opts[40];
 
+  /** tx buffer free list */
+  u32 *tx_buffers;
+
+  /* Fifo of pending timer expirations */
+  u32 *pending_timers;
+
+  /* fifo of pending free requests */
+  tcp_cleanup_req_t *pending_cleanups;
+
+  /** worker timer wheel */
+  tw_timer_wheel_16t_2w_512sl_t timer_wheel;
+
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
+
+  tcp_wrk_stats_t stats;
 } tcp_worker_ctx_t;
 
+#define tcp_worker_stats_inc(_ti,_stat,_val) 		\
+  tcp_main.wrk_ctx[_ti].stats._stat += _val
+
+#define tcp_workerp_stats_inc(_wrk,_stat,_val) 		\
+  _wrk->stats._stat += _val
+
 typedef struct tcp_iss_seed_
 {
   u64 first;
@@ -559,6 +612,9 @@ typedef struct tcp_configuration_
   /** Allow use of TSO whenever available */
   u8 allow_tso;
 
+  /** Set if csum offloading is enabled */
+  u8 csum_offload;
+
   /** Default congestion control algorithm type */
   tcp_cc_algorithm_type_e cc_algo;
 
@@ -587,8 +643,8 @@ typedef struct tcp_configuration_
   /** Timer ticks to wait in closing for fin ack */
   u16 closing_time;
 
-  /** Timer ticks to wait before cleaning up the connection */
-  u16 cleanup_time;
+  /** Time to wait (sec) before cleaning up the connection */
+  f32 cleanup_time;
 
   /** Number of preallocated connections */
   u32 preallocated_connections;
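Note: the foreach_tcp_wrk_stat x-macro added above generates both the tcp_wrk_stats_t counter fields and their display strings from one list, and the tcp_worker_stats_inc / tcp_workerp_stats_inc wrappers bump a named counter on a given worker. A standalone sketch of the same x-macro pattern (toy demo_* names and stdint types, not VPP code):

/* Standalone sketch of the x-macro pattern: one list drives both the
 * struct fields and the strings used when the counters are printed. */
#include <stdio.h>
#include <stdint.h>

#define foreach_demo_stat                                  \
  _(timer_expirations, uint64_t, "timer expirations")      \
  _(rxt_segs, uint64_t, "segments retransmitted")

typedef struct
{
#define _(name, type, str) type name;
  foreach_demo_stat
#undef _
} demo_stats_t;

static void
demo_print_stats (const demo_stats_t * s)
{
#define _(name, type, str) \
  printf ("%-25s %llu\n", str, (unsigned long long) s->name);
  foreach_demo_stat
#undef _
}

With the real macros above, worker code bumps a counter in place, e.g. tcp_workerp_stats_inc (wrk, rxt_segs, 1) when a segment is retransmitted.
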
@@ -606,27 +662,24 @@ typedef struct tcp_configuration_
 
 typedef struct _tcp_main
 {
-  /* Per-worker thread tcp connection pools */
-  tcp_connection_t **connections;
+  /** per-worker context */
+  tcp_worker_ctx_t *wrk_ctx;
 
   /* Pool of listeners. */
   tcp_connection_t *listener_pool;
 
+  f64 tstamp_ticks_per_clock;
+
+  /** vlib buffer size */
+  u32 bytes_per_buffer;
+
   /** Dispatch table by state and flags */
   tcp_lookup_dispatch_t dispatch_table[TCP_N_STATES][64];
 
-  u8 log2_tstamp_clocks_per_tick;
-  f64 tstamp_ticks_per_clock;
-
-  /** per-worker context */
-  tcp_worker_ctx_t *wrk_ctx;
+  clib_spinlock_t half_open_lock;
 
   /** Pool of half-open connections on which we've sent a SYN */
   tcp_connection_t *half_open_connections;
-  clib_spinlock_t half_open_lock;
-
-  /** vlib buffer size */
-  u32 bytes_per_buffer;
 
   /** Seed used to generate random iss */
   tcp_iss_seed_t iss_seed;
@@ -686,6 +739,7 @@ vnet_get_tcp_main ()
 always_inline tcp_worker_ctx_t *
 tcp_get_worker (u32 thread_index)
 {
+  ASSERT (thread_index < vec_len (tcp_main.wrk_ctx));
   return &tcp_main.wrk_ctx[thread_index];
 }
 
@@ -713,20 +767,22 @@ void tcp_punt_unknown (vlib_main_t * vm, u8 is_ip4, u8 is_add);
 always_inline tcp_connection_t *
 tcp_connection_get (u32 conn_index, u32 thread_index)
 {
-  if (PREDICT_FALSE
-      (pool_is_free_index (tcp_main.connections[thread_index], conn_index)))
+  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+  if (PREDICT_FALSE (pool_is_free_index (wrk->connections, conn_index)))
     return 0;
-  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
+  return pool_elt_at_index (wrk->connections, conn_index);
 }
 
 always_inline tcp_connection_t *
 tcp_connection_get_if_valid (u32 conn_index, u32 thread_index)
 {
-  if (tcp_main.connections[thread_index] == 0)
+  tcp_worker_ctx_t *wrk;
+  if (thread_index >= vec_len (tcp_main.wrk_ctx))
     return 0;
-  if (pool_is_free_index (tcp_main.connections[thread_index], conn_index))
+  wrk = tcp_get_worker (thread_index);
+  if (pool_is_free_index (wrk->connections, conn_index))
     return 0;
-  return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
+  return pool_elt_at_index (wrk->connections, conn_index);
 }
 
 always_inline tcp_connection_t *
@@ -750,7 +806,6 @@ tcp_connection_t *tcp_connection_alloc (u8 thread_index);
 tcp_connection_t *tcp_connection_alloc_w_base (u8 thread_index,
 					       tcp_connection_t * base);
 void tcp_connection_free (tcp_connection_t * tc);
-void tcp_connection_reset (tcp_connection_t * tc);
 int tcp_configure_v4_source_address_range (vlib_main_t * vm,
 					   ip4_address_t * start,
 					   ip4_address_t * end, u32 table_id);
@@ -759,11 +814,15 @@ int tcp_configure_v6_source_address_range (vlib_main_t * vm,
 					   ip6_address_t * end, u32 table_id);
 void tcp_api_reference (void);
 u8 *format_tcp_connection (u8 * s, va_list * args);
+u8 *format_tcp_connection_id (u8 * s, va_list * args);
 
 always_inline tcp_connection_t *
 tcp_listener_get (u32 tli)
 {
-  return pool_elt_at_index (tcp_main.listener_pool, tli);
+  tcp_connection_t *tc = 0;
+  if (!pool_is_free_index (tcp_main.listener_pool, tli))
+    tc = pool_elt_at_index (tcp_main.listener_pool, tli);
+  return tc;
 }
 
 always_inline tcp_connection_t *
@@ -788,7 +847,6 @@ void tcp_send_fin (tcp_connection_t * tc);
 void tcp_send_ack (tcp_connection_t * tc);
 void tcp_update_burst_snd_vars (tcp_connection_t * tc);
 void tcp_update_rto (tcp_connection_t * tc);
-void tcp_flush_frames_to_output (tcp_worker_ctx_t * wrk);
 void tcp_send_window_update_ack (tcp_connection_t * tc);
 
 void tcp_program_ack (tcp_connection_t * tc);
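Note: with connection pools moved into tcp_worker_ctx_t, a (connection index, thread index) pair is resolved through tcp_get_worker() and the worker's pool; tcp_connection_get_if_valid() additionally tolerates an invalid thread index. A hedged usage sketch (demo_handle_event is a hypothetical caller, assuming the declarations in this header are in scope):

/* Hypothetical caller, not VPP code: looks up a connection in the
 * per-worker pool and treats a null return as "already freed". */
static void
demo_handle_event (u32 conn_index, u32 thread_index)
{
  tcp_connection_t *tc;

  tc = tcp_connection_get_if_valid (conn_index, thread_index);
  if (!tc)
    return;			/* pool slot freed or stale thread index */

  /* ... operate on tc, e.g. tcp_send_ack (tc) ... */
}
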
@@ -882,7 +940,8 @@ tcp_bytes_out (const tcp_connection_t * tc)
   if (tcp_opts_sack_permitted (&tc->rcv_opts))
     return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes;
   else
-    return tc->rcv_dupacks * tc->snd_mss;
+    return clib_min (tc->rcv_dupacks * tc->snd_mss,
+		     tc->snd_nxt - tc->snd_una);
 }
 
 /**
@@ -977,6 +1036,12 @@ tcp_available_cc_snd_space (const tcp_connection_t * tc)
   return available_wnd - flight_size;
 }
 
+static inline u8
+tcp_is_descheduled (tcp_connection_t * tc)
+{
+  return (transport_connection_is_descheduled (&tc->connection) ? 1 : 0);
+}
+
 always_inline u8
 tcp_is_lost_fin (tcp_connection_t * tc)
 {
@@ -987,6 +1052,7 @@ tcp_is_lost_fin (tcp_connection_t * tc)
 
 u32 tcp_snd_space (tcp_connection_t * tc);
 int tcp_fastrecovery_prr_snd_space (tcp_connection_t * tc);
+void tcp_reschedule (tcp_connection_t * tc);
 
 fib_node_index_t tcp_lookup_rmt_in_fib (tcp_connection_t * tc);
 
@@ -1040,6 +1106,7 @@ void tcp_connection_init_vars (tcp_connection_t * tc);
 void tcp_connection_tx_pacer_update (tcp_connection_t * tc);
 void tcp_connection_tx_pacer_reset (tcp_connection_t * tc, u32 window,
 				    u32 start_bucket);
+void tcp_program_cleanup (tcp_worker_ctx_t * wrk, tcp_connection_t * tc);
 
 always_inline void
 tcp_cc_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
@@ -1171,8 +1238,14 @@ tcp_persist_timer_set (tcp_connection_t * tc)
 always_inline void
 tcp_persist_timer_update (tcp_connection_t * tc)
 {
-  tcp_timer_update (tc, TCP_TIMER_PERSIST,
-		    clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
+  u32 interval;
+
+  if (seq_leq (tc->snd_una, tc->snd_congestion + tc->burst_acked))
+    interval = 1;
+  else
+    interval = clib_max (tc->rto * TCP_TO_TIMER_TICK, 1);
+
+  tcp_timer_update (tc, TCP_TIMER_PERSIST, interval);
 }
 
 always_inline void
@@ -1228,6 +1301,14 @@ tcp_cc_data (tcp_connection_t * tc)
 void newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type,
 			   tcp_rate_sample_t * rs);
 
+/**
+ * Initialize connection by gleaning network and rcv params from buffer
+ *
+ * @param tc connection to initialize
+ * @param b buffer whose current data is pointing at ip
+ * @param is_ip4 flag set to 1 if using ip4
+ */
+void tcp_init_w_buffer (tcp_connection_t * tc, vlib_buffer_t * b, u8 is_ip4);
 
 /**
  * Push TCP header to buffer