session_event_t evt;
} session_evt_elt_t;
+/** Fixed-size storage for one ctrl event's message payload. Pooled per
+ *  worker (see session_worker_.ctrl_evts_data) and referenced by index. */
+typedef struct session_ctrl_evt_data_
+{
+ u8 data[SESSION_CTRL_MSG_MAX_SIZE];
+} session_evt_ctrl_data_t;
+
typedef struct session_worker_
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/** Pool of session event list elements */
session_evt_elt_t *event_elts;
+ /** Pool of ctrl events data buffers */
+ session_evt_ctrl_data_t *ctrl_evts_data;
+
/** Head of control events list */
clib_llist_index_t ctrl_head;
pool_elt_at_index (wrk->event_elts, wrk->old_head));
}
+/**
+ * Allocate a ctrl event data buffer from the worker's pool
+ *
+ * @param wrk	session worker whose ctrl_evts_data pool is used
+ * @return	pool index of the newly allocated buffer
+ */
+static inline u32
+session_evt_ctrl_data_alloc (session_worker_t * wrk)
+{
+ session_evt_ctrl_data_t *data;
+ pool_get (wrk->ctrl_evts_data, data);
+ /* Pointer difference from pool base is the element's pool index */
+ return (data - wrk->ctrl_evts_data);
+}
+
static inline session_evt_elt_t *
session_evt_alloc_ctrl (session_worker_t * wrk)
{
return elt;
}
+/**
+ * Get pointer to the ctrl data buffer referenced by an event element
+ *
+ * @param wrk	session worker that owns the ctrl_evts_data pool
+ * @param elt	event element; its evt.ctrl_data_index selects the buffer
+ * @return	pointer to the buffer's storage
+ */
+static inline void *
+session_evt_ctrl_data (session_worker_t * wrk, session_evt_elt_t * elt)
+{
+ return (void *) (pool_elt_at_index (wrk->ctrl_evts_data,
+ elt->evt.ctrl_data_index));
+}
+
+/**
+ * Return an event element's ctrl data buffer to the worker's pool
+ *
+ * Does not free the element itself, only its data buffer.
+ *
+ * @param wrk	session worker that owns the ctrl_evts_data pool
+ * @param elt	event element whose evt.ctrl_data_index is released
+ */
+static inline void
+session_evt_ctrl_data_free (session_worker_t * wrk, session_evt_elt_t * elt)
+{
+ /* Guard: presumably only event types above SESSION_IO_EVT_BUILTIN_TX
+  * carry a ctrl_data_index — confirm against the event type enum */
+ ASSERT (elt->evt.event_type > SESSION_IO_EVT_BUILTIN_TX);
+ pool_put_index (wrk->ctrl_evts_data, elt->evt.ctrl_data_index);
+}
+
static inline session_evt_elt_t *
session_evt_alloc_new (session_worker_t * wrk)
{
return &session_main.wrk[thread_index];
}
+/**
+ * Get worker for thread index, or 0 if the index is not valid
+ *
+ * Safe variant of the unchecked worker lookup: checks the wrk pool
+ * before dereferencing.
+ *
+ * @param thread_index	index into session_main.wrk pool
+ * @return	pointer to the worker, or 0 if the index is free/invalid
+ */
+static inline session_worker_t *
+session_main_get_worker_if_valid (u32 thread_index)
+{
+ if (pool_is_free_index (session_main.wrk, thread_index))
+ return 0;
+ return &session_main.wrk[thread_index];
+}
+
always_inline svm_msg_q_t *
session_main_get_vpp_event_queue (u32 thread_index)
{
#define session_cli_return_if_not_enabled() \
do { \
- if (!session_main.is_enabled) \
- return clib_error_return(0, "session layer is not enabled"); \
+ if (!session_main.is_enabled) \
+ return clib_error_return (0, "session layer is not enabled"); \
} while (0)
int session_main_flush_enqueue_events (u8 proto, u32 thread_index);