session_t *s;
transport_proto_vft_t *transport_vft;
transport_connection_t *tc;
+ transport_send_params_t sp;
u32 max_dequeue;
- u32 snd_space;
u32 left_to_snd;
- u32 tx_offset;
u32 max_len_to_snd;
u16 deq_per_first_buf;
u16 deq_per_buf;
- u16 snd_mss;
u16 n_segs_per_evt;
u8 n_bufs_per_seg;
CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
session_event_t evt;
} session_evt_elt_t;
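+/** Fixed size data buffer for queued ctrl events */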
+typedef struct session_ctrl_evt_data_
+{
+ u8 data[SESSION_CTRL_MSG_MAX_SIZE];
+} session_evt_ctrl_data_t;
+
typedef struct session_worker_
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
svm_msg_q_t *vpp_event_queue;
/** vlib_time_now last time around the track */
- f64 last_vlib_time;
+ clib_time_type_t last_vlib_time;
+
+ /** vlib_time_now rounded to us precision and as u64 */
+ clib_us_time_t last_vlib_us_time;
/** Convenience pointer to this worker's vlib_main */
vlib_main_t *vm;
/** Pool of session event list elements */
session_evt_elt_t *event_elts;
+ /** Pool of ctrl events data buffers */
+ session_evt_ctrl_data_t *ctrl_evts_data;
+
/** Head of control events list */
clib_llist_index_t ctrl_head;
/** Peekers rw lock */
clib_rwlock_t peekers_rw_locks;
+ /** Vector of buffers to be sent */
+ u32 *pending_tx_buffers;
+
+ /** Vector of nexts for the pending tx buffers */
+ u16 *pending_tx_nexts;
+
#if SESSION_DEBUG
/** last event poll time by thread */
- f64 last_event_poll;
+ clib_time_type_t last_event_poll;
#endif
} session_worker_t;
/** Session manager is enabled */
u8 is_enabled;
+ /** Enable session manager at startup */
+ u8 session_enable_asap;
/** vpp fifo event queue configured length */
u32 configured_event_queue_length;
pool_elt_at_index (wrk->event_elts, wrk->old_head));
}
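+/** Insert element at the head of the old events list */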
+static inline void
+session_evt_add_head_old (session_worker_t * wrk, session_evt_elt_t * elt)
+{
+ clib_llist_add (wrk->event_elts, evt_list, elt,
+ pool_elt_at_index (wrk->event_elts, wrk->old_head));
+}
+
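+/** Allocate a ctrl event data buffer and return its pool index */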
+static inline u32
+session_evt_ctrl_data_alloc (session_worker_t * wrk)
+{
+ session_evt_ctrl_data_t *data;
+ pool_get (wrk->ctrl_evts_data, data);
+ return (data - wrk->ctrl_evts_data);
+}
+
static inline session_evt_elt_t *
session_evt_alloc_ctrl (session_worker_t * wrk)
{
return elt;
}
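+/** Get pointer to a ctrl event element's data buffer */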
+static inline void *
+session_evt_ctrl_data (session_worker_t * wrk, session_evt_elt_t * elt)
+{
+ return (void *) (pool_elt_at_index (wrk->ctrl_evts_data,
+ elt->evt.ctrl_data_index));
+}
+
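+/** Free the data buffer attached to a ctrl event element */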
+static inline void
+session_evt_ctrl_data_free (session_worker_t * wrk, session_evt_elt_t * elt)
+{
+ ASSERT (elt->evt.event_type > SESSION_IO_EVT_BUILTIN_TX);
+ pool_put_index (wrk->ctrl_evts_data, elt->evt.ctrl_data_index);
+}
+
static inline session_evt_elt_t *
session_evt_alloc_new (session_worker_t * wrk)
{
return elt;
}
-always_inline u8
-session_is_valid (u32 si, u8 thread_index)
-{
- session_t *s;
- s = pool_elt_at_index (session_main.wrk[thread_index].sessions, si);
- if (s->session_state == SESSION_STATE_CLOSED)
- return 1;
-
- if (s->thread_index != thread_index || s->session_index != si)
- return 0;
- return 1;
-}
-
session_t *session_alloc (u32 thread_index);
void session_free (session_t * s);
void session_free_w_fifos (session_t * s);
+u8 session_is_valid (u32 si, u8 thread_index);
always_inline session_t *
session_get (u32 si, u32 thread_index)
int session_listen (session_t * s, session_endpoint_cfg_t * sep);
int session_stop_listen (session_t * s);
void session_close (session_t * s);
+void session_reset (session_t * s);
void session_transport_close (session_t * s);
+void session_transport_reset (session_t * s);
void session_transport_cleanup (session_t * s);
int session_send_io_evt_to_thread (svm_fifo_t * f,
session_evt_type_t evt_type);
void *rpc_args);
void session_add_self_custom_tx_evt (transport_connection_t * tc,
u8 has_prio);
+void session_reschedule_tx (transport_connection_t * tc);
transport_connection_t *session_get_transport (session_t * s);
void session_get_endpoint (session_t * s, transport_endpoint_t * tep,
u8 is_lcl);
transport_rx_fifo_size (transport_connection_t * tc)
{
session_t *s = session_get (tc->s_index, tc->thread_index);
- return s->rx_fifo->nitems;
+ return svm_fifo_size (s->rx_fifo);
}
always_inline u32
transport_tx_fifo_size (transport_connection_t * tc)
{
session_t *s = session_get (tc->s_index, tc->thread_index);
- return s->tx_fifo->nitems;
+ return svm_fifo_size (s->tx_fifo);
}
always_inline u8
return svm_fifo_has_ooo_data (s->rx_fifo);
}
-always_inline f64
+always_inline clib_time_type_t
transport_time_now (u32 thread_index)
{
return session_main.wrk[thread_index].last_vlib_time;
}
+always_inline clib_us_time_t
+transport_us_time_now (u32 thread_index)
+{
+ return session_main.wrk[thread_index].last_vlib_us_time;
+}
+
always_inline void
transport_add_tx_event (transport_connection_t * tc)
{
return &session_main.wrk[thread_index];
}
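+/** Like session_main_get_worker, but returns 0 if thread_index is out of range */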
+static inline session_worker_t *
+session_main_get_worker_if_valid (u32 thread_index)
+{
+ if (thread_index >= vec_len (session_main.wrk))
+ return 0;
+ return &session_main.wrk[thread_index];
+}
+
always_inline svm_msg_q_t *
session_main_get_vpp_event_queue (u32 thread_index)
{
#define session_cli_return_if_not_enabled() \
do { \
- if (!session_main.is_enabled) \
- return clib_error_return(0, "session layer is not enabled"); \
+ if (!session_main.is_enabled) \
+ return clib_error_return (0, "session layer is not enabled"); \
} while (0)
int session_main_flush_enqueue_events (u8 proto, u32 thread_index);
int session_main_flush_all_enqueue_events (u8 transport_proto);
void session_flush_frames_main_thread (vlib_main_t * vm);
+
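+/** Add buffer to this worker's pending tx vector, to be enqueued to the session type's output next node */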
+always_inline void
+session_add_pending_tx_buffer (session_type_t st, u32 thread_index, u32 bi)
+{
+ session_worker_t *wrk = session_main_get_worker (thread_index);
+ vec_add1 (wrk->pending_tx_buffers, bi);
+ vec_add1 (wrk->pending_tx_nexts, session_main.session_type_to_next[st]);
+}
+
ssvm_private_t *session_main_get_evt_q_segment (void);
void session_node_enable_disable (u8 is_en);
clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en);
+session_t *session_alloc_for_connection (transport_connection_t * tc);
+
#endif /* __included_session_h__ */
/*