TCP_N_TIMERS
} tcp_timers_e;
-typedef void (timer_expiration_handler) (u32 index);
+typedef void (timer_expiration_handler) (u32 index, u32 thread_index);
extern timer_expiration_handler tcp_timer_delack_handler;
extern timer_expiration_handler tcp_timer_retransmit_handler;
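/* Illustrative sketch (not part of the patch): with the added thread_index
 * argument, an expiration handler can resolve its connection explicitly
 * instead of relying on per-thread state. Names below are hypothetical. */
static void
example_timer_expiration_handler (u32 conn_index, u32 thread_index)
{
  tcp_connection_t *tc = tcp_connection_get (conn_index, thread_index);
  if (!tc)
    return;			/* connection freed before the timer fired */
  /* ... handle the expired timer for tc ... */
}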
_(NO_CSUM_OFFLOAD, "No csum offload") \
_(NO_TSO, "TSO off") \
_(TSO, "TSO") \
+ _(NO_ENDPOINT,"No endpoint") \
typedef enum tcp_cfg_flag_bits_
{
_(DEQ_PENDING, "Dequeue pending ") \
_(PSH_PENDING, "PSH pending") \
_(FINRCVD, "FIN received") \
- _(TRACK_BURST, "Track burst") \
_(ZERO_RWND_SENT, "Zero RWND sent") \
typedef enum tcp_connection_flag_bits_
{
TCP_BTS_IS_RXT = 1,
TCP_BTS_IS_APP_LIMITED = 1 << 1,
+ TCP_BTS_IS_SACKED = 1 << 2,
+ TCP_BTS_IS_RXT_LOST = 1 << 3,
} __clib_packed tcp_bts_flags_t;
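/* Illustrative sketch (not part of the patch): the two new bits let rate
 * sampling distinguish samples covering bytes that were already sacked or
 * retransmitted after being marked lost. Assumes a sample's flags member
 * carries tcp_bts_flags_t bits. */
static inline int
example_bts_sample_is_clean (tcp_bts_flags_t flags)
{
  return !(flags & (TCP_BTS_IS_SACKED | TCP_BTS_IS_RXT_LOST));
}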
typedef struct tcp_bt_sample_
u32 prr_start; /**< snd_una when prr starts */
u32 rxt_delivered; /**< Rxt bytes delivered during current cc event */
 u32 rxt_head; /**< snd_una last time we re-rxted the head */
- u32 prev_dsegs_out; /**< Number of dsegs after last ack */
u32 tsecr_last_ack; /**< Timestamp echoed to us in last healthy ACK */
u32 snd_congestion; /**< snd_una_max when congestion is detected */
u32 tx_fifo_size; /**< Tx fifo size. Used to constrain cwnd */
u32 last_fib_check; /**< Last time we checked fib route for peer */
u16 mss; /**< Our max seg size that includes options */
u32 timestamp_delta; /**< Offset for timestamp */
+ u32 ipv6_flow_label; /**< flow label for ipv6 header */
+
+#define rst_state snd_wl1
} tcp_connection_t;
/* *INDENT-OFF* */
u8 next, error;
} tcp_lookup_dispatch_t;
+#define foreach_tcp_wrk_stat \
+ _(timer_expirations, u64, "timer expirations") \
+ _(rxt_segs, u64, "segments retransmitted") \
+ _(tr_events, u32, "timer retransmit events") \
+ _(to_closewait, u32, "timeout close-wait") \
+ _(to_finwait1, u32, "timeout fin-wait-1") \
+ _(to_finwait2, u32, "timeout fin-wait-2") \
+ _(to_lastack, u32, "timeout last-ack") \
+ _(to_closing, u32, "timeout closing") \
+ _(tr_abort, u32, "timer retransmit abort") \
+ _(rst_unread, u32, "reset on close due to unread data") \
+
+typedef struct tcp_wrk_stats_
+{
+#define _(name, type, str) type name;
+ foreach_tcp_wrk_stat
+#undef _
+} tcp_wrk_stats_t;
+
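/* Illustrative sketch (not part of the patch): because the counters are
 * generated from foreach_tcp_wrk_stat, the same X-macro can drive a
 * formatter, so a counter added to the list shows up in output
 * automatically. format() is the vppinfra formatting helper. */
static u8 *
example_format_tcp_wrk_stats (u8 * s, tcp_wrk_stats_t * st)
{
#define _(name, type, str) s = format (s, "%s: %lu\n", str, (u64) st->name);
  foreach_tcp_wrk_stat
#undef _
  return s;
}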
typedef struct tcp_worker_ctx_
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- /** worker time */
- u32 time_now;
- /** worker timer wheel */
- tw_timer_wheel_16t_2w_512sl_t timer_wheel;
-
- /** tx buffer free list */
- u32 *tx_buffers;
-
- /** tx frames for ip 4/6 lookup nodes */
- vlib_frame_t *ip_lookup_tx_frames[2];
+ /** worker's pool of connections */
+ tcp_connection_t *connections;
/** vector of pending ack dequeues */
u32 *pending_deq_acked;
/** vector of pending disconnect notifications */
u32 *pending_disconnects;
+ /** vector of pending reset notifications */
+ u32 *pending_resets;
+
/** convenience pointer to this thread's vlib main */
vlib_main_t *vm;
+ /** worker time */
+ u32 time_now;
+
+ /** tx frames for ip 4/6 lookup nodes */
+ vlib_frame_t *ip_lookup_tx_frames[2];
+
CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
/** cached 'on the wire' options for bursts */
u8 cached_opts[40];
+ /** tx buffer free list */
+ u32 *tx_buffers;
+
+ /** worker timer wheel */
+ tw_timer_wheel_16t_2w_512sl_t timer_wheel;
+
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
+
+ tcp_wrk_stats_t stats;
} tcp_worker_ctx_t;
+#define tcp_worker_stats_inc(_ti,_stat,_val) \
+ tcp_main.wrk_ctx[_ti].stats._stat += _val
+
+#define tcp_workerp_stats_inc(_wrk,_stat,_val) \
+ _wrk->stats._stat += _val
+
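/* Illustrative usage sketch (not part of the patch): the first macro is
 * addressed by thread index through tcp_main, the second through a worker
 * context pointer already at hand. */
static inline void
example_count_retransmits (tcp_worker_ctx_t * wrk, u32 thread_index,
			   u32 n_segs)
{
  tcp_worker_stats_inc (thread_index, rxt_segs, n_segs);
  tcp_workerp_stats_inc (wrk, tr_events, 1);
}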
typedef struct tcp_iss_seed_
{
u64 first;
/** Allow use of TSO whenever available */
u8 allow_tso;
+ /** Set if csum offloading is enabled */
+ u8 csum_offload;
+
/** Default congestion control algorithm type */
tcp_cc_algorithm_type_e cc_algo;
typedef struct _tcp_main
{
- /* Per-worker thread tcp connection pools */
- tcp_connection_t **connections;
+ /** per-worker context */
+ tcp_worker_ctx_t *wrk_ctx;
/* Pool of listeners. */
tcp_connection_t *listener_pool;
+ f64 tstamp_ticks_per_clock;
+
+ /** vlib buffer size */
+ u32 bytes_per_buffer;
+
/** Dispatch table by state and flags */
tcp_lookup_dispatch_t dispatch_table[TCP_N_STATES][64];
- u8 log2_tstamp_clocks_per_tick;
- f64 tstamp_ticks_per_clock;
-
- /** per-worker context */
- tcp_worker_ctx_t *wrk_ctx;
+ clib_spinlock_t half_open_lock;
/** Pool of half-open connections on which we've sent a SYN */
tcp_connection_t *half_open_connections;
- clib_spinlock_t half_open_lock;
-
- /** vlib buffer size */
- u32 bytes_per_buffer;
/** Seed used to generate random iss */
tcp_iss_seed_t iss_seed;
always_inline tcp_worker_ctx_t *
tcp_get_worker (u32 thread_index)
{
+ ASSERT (thread_index < vec_len (tcp_main.wrk_ctx));
return &tcp_main.wrk_ctx[thread_index];
}
always_inline tcp_connection_t *
tcp_connection_get (u32 conn_index, u32 thread_index)
{
- if (PREDICT_FALSE
- (pool_is_free_index (tcp_main.connections[thread_index], conn_index)))
+ tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
+ if (PREDICT_FALSE (pool_is_free_index (wrk->connections, conn_index)))
return 0;
- return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
+ return pool_elt_at_index (wrk->connections, conn_index);
}
always_inline tcp_connection_t *
tcp_connection_get_if_valid (u32 conn_index, u32 thread_index)
{
- if (tcp_main.connections[thread_index] == 0)
+ tcp_worker_ctx_t *wrk;
+ if (thread_index >= vec_len (tcp_main.wrk_ctx))
return 0;
- if (pool_is_free_index (tcp_main.connections[thread_index], conn_index))
+ wrk = tcp_get_worker (thread_index);
+ if (pool_is_free_index (wrk->connections, conn_index))
return 0;
- return pool_elt_at_index (tcp_main.connections[thread_index], conn_index);
+ return pool_elt_at_index (wrk->connections, conn_index);
}
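/* Illustrative sketch (not part of the patch): with connections now pooled
 * per worker rather than in tcp_main, per-thread queries go through the
 * worker context. pool_elts() is the vppinfra pool element count. */
static inline u32
example_connections_on_thread (u32 thread_index)
{
  tcp_worker_ctx_t *wrk = tcp_get_worker (thread_index);
  return pool_elts (wrk->connections);
}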
always_inline tcp_connection_t *
tcp_connection_t *tcp_connection_alloc_w_base (u8 thread_index,
tcp_connection_t * base);
void tcp_connection_free (tcp_connection_t * tc);
-void tcp_connection_reset (tcp_connection_t * tc);
int tcp_configure_v4_source_address_range (vlib_main_t * vm,
ip4_address_t * start,
ip4_address_t * end, u32 table_id);
ip6_address_t * end, u32 table_id);
void tcp_api_reference (void);
u8 *format_tcp_connection (u8 * s, va_list * args);
+u8 *format_tcp_connection_id (u8 * s, va_list * args);
always_inline tcp_connection_t *
tcp_listener_get (u32 tli)
{
- return pool_elt_at_index (tcp_main.listener_pool, tli);
+ tcp_connection_t *tc = 0;
+ if (!pool_is_free_index (tcp_main.listener_pool, tli))
+ tc = pool_elt_at_index (tcp_main.listener_pool, tli);
+ return tc;
}
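/* Illustrative sketch (not part of the patch): since tcp_listener_get can
 * now return 0 for a stale index, callers are expected to check the result
 * before dereferencing it. */
static inline u8
example_listener_is_alive (u32 tli)
{
  return tcp_listener_get (tli) != 0;
}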
always_inline tcp_connection_t *
*
* @param tc tcp connection
*/
-void tcp_bt_track_tx (tcp_connection_t * tc);
+void tcp_bt_track_tx (tcp_connection_t * tc, u32 len);
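/* Illustrative sketch (not part of the patch): the byte tracker is now told
 * up front how many bytes the pending burst will put on the wire. Assumes
 * tc->bt is the connection's byte tracker, allocated only when rate
 * sampling is enabled. */
static inline void
example_track_outgoing_burst (tcp_connection_t * tc, u32 n_bytes)
{
  if (tc->bt && n_bytes)
    tcp_bt_track_tx (tc, n_bytes);
}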
/**
* Track a tcp retransmission
*
* @param bt byte tracker
*/
int tcp_bt_is_sane (tcp_byte_tracker_t * bt);
+u8 *format_tcp_bt (u8 * s, va_list * args);
always_inline u32
tcp_end_seq (tcp_header_t * th, u32 len)
if (tcp_opts_sack_permitted (&tc->rcv_opts))
return tc->sack_sb.sacked_bytes + tc->sack_sb.lost_bytes;
else
- return tc->rcv_dupacks * tc->snd_mss;
+ return clib_min (tc->rcv_dupacks * tc->snd_mss,
+ tc->snd_nxt - tc->snd_una);
}
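/* Worked example for the clamp above (illustrative): with snd_mss = 1460
 * and 10 dupacks, the dupack-based estimate is 14600 bytes; if only
 * snd_nxt - snd_una = 8000 bytes are actually outstanding, the clamp
 * returns 8000, so callers computing flight size cannot underflow. */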
/**
always_inline void
tcp_persist_timer_update (tcp_connection_t * tc)
{
- tcp_timer_update (tc, TCP_TIMER_PERSIST,
- clib_max (tc->rto * TCP_TO_TIMER_TICK, 1));
+ u32 interval;
+
+ if (seq_leq (tc->snd_una, tc->snd_congestion + tc->burst_acked))
+ interval = 1;
+ else
+ interval = clib_max (tc->rto * TCP_TO_TIMER_TICK, 1);
+
+ tcp_timer_update (tc, TCP_TIMER_PERSIST, interval);
}
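/* Illustrative reading of the change above (not part of the patch): while
 * snd_una has not yet advanced past the point where congestion was
 * detected, plus the bytes acked in the current burst, the persist timer
 * is re-armed at the minimum interval of one tick so zero-window probing
 * stays aggressive; otherwise it falls back to the RTO-based interval as
 * before. */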
always_inline void