#include <vpp/api/vpe_msg_enum.h>
#include <vnet/session/application_interface.h>
#include <vcl/vppcom.h>
+#include <vcl/vcl_event.h>
#include <vlib/unix/unix.h>
#include <vppinfra/vec_bootstrap.h>
#include <vppinfra/elog.h>
u64 client_queue_address;
u64 options[16];
elog_track_t elog_track;
+ vce_event_handler_reg_t *poll_reg;
} session_t;
typedef struct vppcom_cfg_t_
{
u8 init;
u32 debug;
- u32 *client_session_index_fifo;
int main_cpu;
+ /* FIFO for accepted connections - used in epoll/select */
+ clib_spinlock_t session_fifo_lockp;
+ u32 *client_session_index_fifo;
+
/* vpp input queue */
svm_queue_t *vl_input_queue;
/* API client handle */
u32 my_client_index;
-
/* Session pool */
clib_spinlock_t sessions_lockp;
session_t *sessions;
vppcom_cfg_t cfg;
- /* Event logging */
+ /* Event thread */
+ vce_event_thread_t event_thread;
+
+ /* IO thread */
+ vppcom_session_io_thread_t session_io_thread;
+
+ /* VPP Event-logger */
elog_main_t elog_main;
elog_track_t elog_track;
static vppcom_main_t *vcm = &_vppcom_main;
-#define VCL_LOCK_AND_GET_SESSION(I, S) \
+#define VCL_SESSION_LOCK_AND_GET(I, S) \
do { \
clib_spinlock_lock (&vcm->sessions_lockp); \
rv = vppcom_session_at_index (I, S); \
} \
} while (0)
+/* Convenience wrappers for the VCL spinlocks.  Lock ordering, where more
+ * than one is held, is: sessions lock -> accept-fifo lock -> events lock.
+ * NOTE(review): ordering inferred from vl_api_accept_session_t_handler;
+ * confirm against other call sites before relying on it. */
+#define VCL_SESSION_LOCK() clib_spinlock_lock (&(vcm->sessions_lockp))
+#define VCL_SESSION_UNLOCK() clib_spinlock_unlock (&(vcm->sessions_lockp))
+
+/* Guards the pool of session indexes serviced by the rx poll/io thread. */
+#define VCL_IO_SESSIONS_LOCK() \
+  clib_spinlock_lock (&(vcm->session_io_thread.io_sessions_lockp))
+#define VCL_IO_SESSIONS_UNLOCK() \
+  clib_spinlock_unlock (&(vcm->session_io_thread.io_sessions_lockp))
+
+/* Guards client_session_index_fifo (accepted connections for epoll/select). */
+#define VCL_ACCEPT_FIFO_LOCK() clib_spinlock_lock (&(vcm->session_fifo_lockp))
+#define VCL_ACCEPT_FIFO_UNLOCK() \
+  clib_spinlock_unlock (&(vcm->session_fifo_lockp))
+
+/* Guards the event thread's vce_events pool. */
+#define VCL_EVENTS_LOCK() \
+  clib_spinlock_lock (&(vcm->event_thread.events_lockp))
+#define VCL_EVENTS_UNLOCK() \
+  clib_spinlock_unlock (&(vcm->event_thread.events_lockp))
+
static const char *
vppcom_app_state_str (app_state_t state)
{
return st;
}
+
/*
* VPPCOM Utility Functions
*/
+
static inline int
vppcom_session_at_index (u32 session_index, session_t * volatile *sess)
{
}
+/* Send an accept_session_reply to VPP over the shared-memory API queue.
+ * handle/context are echoed back verbatim so VPP can correlate the reply
+ * with its accept request; retval is a VNET_API_ERROR_* code (0 = accept
+ * the session) and is converted to network byte order on the wire. */
+static inline void
+vppcom_send_accept_session_reply (u64 handle, u32 context, int retval)
+{
+  vl_api_accept_session_reply_t *rmp;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  memset (rmp, 0, sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY);
+  rmp->retval = htonl (retval);
+  rmp->context = context;
+  rmp->handle = handle;
+  vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
+}
+
static int
vppcom_connect_to_vpp (char *app_name)
{
else
{
vcm->vl_input_queue = am->shmem_hdr->vl_input_queue;
- vcm->my_client_index = am->my_client_index;
+ vcm->my_client_index = (u32) am->my_client_index;
vcm->app_state = STATE_APP_CONN_VPP;
if (VPPCOM_DEBUG > 0)
u32 data;
} *ed;
ed = ELOG_TRACK_DATA (&vcm->elog_main, e, vcm->elog_track);
- ed->data = rv;
+ ed->data = (u32) rv;
/* *INDENT-ON* */
}
return rv;
do
{
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK ();
rv = vppcom_session_at_index (session_index, &session);
if (PREDICT_FALSE (rv))
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
return rv;
}
if (session->state & state)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
return VPPCOM_OK;
}
if (session->state & STATE_FAILED)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
return VPPCOM_ECONNREFUSED;
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
while (clib_time_now (&vcm->clib_time) < timeout);
return VPPCOM_ETIMEDOUT;
}
-static inline int
-vppcom_wait_for_client_session_index (f64 wait_for_time)
-{
- f64 timeout = clib_time_now (&vcm->clib_time) + wait_for_time;
-
- do
- {
- if (clib_fifo_elts (vcm->client_session_index_fifo))
- return VPPCOM_OK;
- }
- while (clib_time_now (&vcm->clib_time) < timeout);
-
- if (wait_for_time == 0)
- return VPPCOM_EAGAIN;
-
- if (VPPCOM_DEBUG > 0)
- clib_warning ("VCL<%d>: timeout waiting for client_session_index",
- getpid ());
-
- if (VPPCOM_DEBUG > 0)
- {
- /* *INDENT-OFF* */
- ELOG_TYPE_DECLARE (e) =
- {
- .format = "ERR: timeout waiting for session index :%d",
- .format_args = "i4",
- };
- struct
- {
- u32 data;
- } *ed;
-
- ed = ELOG_TRACK_DATA (&vcm->elog_main, e, vcm->elog_track);
-
- ed->data = getpid();
- /* *INDENT-ON* */
- }
-
- return VPPCOM_ETIMEDOUT;
-}
-
/*
* VPP-API message functions
*/
(vcm->cfg.app_scope_global ? APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE : 0) |
(app_is_proxy ? APP_OPTIONS_FLAGS_IS_PROXY : 0);
bmp->options[APP_OPTIONS_PROXY_TRANSPORT] =
- (vcm->cfg.app_proxy_transport_tcp ? 1 << TRANSPORT_PROTO_TCP : 0) |
- (vcm->cfg.app_proxy_transport_udp ? 1 << TRANSPORT_PROTO_UDP : 0);
+ (u64) ((vcm->cfg.app_proxy_transport_tcp ? 1 << TRANSPORT_PROTO_TCP : 0) |
+ (vcm->cfg.app_proxy_transport_udp ? 1 << TRANSPORT_PROTO_UDP : 0));
bmp->options[APP_OPTIONS_SEGMENT_SIZE] = vcm->cfg.segment_size;
bmp->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = vcm->cfg.add_segment_size;
bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = vcm->cfg.rx_fifo_size;
getpid (), rv, vppcom_retval_str (rv));
return rv;
}
+
return VPPCOM_OK;
}
session_t *session = 0;
u32 session_index = p[0];
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
session->state = STATE_CLOSE_ON_EMPTY;
if (VPPCOM_DEBUG > 1)
"setting state to 0x%x (%s)",
getpid (), mp->handle, session_index, session->state,
vppcom_session_state_str (session->state));
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
return;
done:
if (p)
{
int rval;
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK ();
rval = vppcom_session_at_index (p[0], &session);
if (PREDICT_FALSE (rval))
{
mp->handle, p[0], session->state,
vppcom_session_state_str (session->state));
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
else
{
int rv = VPPCOM_OK;
session_index = mp->context;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
done:
if (mp->retval)
{
/*
* Setup session
*/
+ if (vcm->session_io_thread.io_sessions_lockp)
+ {
+	  /* Add this connection to the active io sessions list */
+ VCL_IO_SESSIONS_LOCK ();
+ u32 *active_session_index;
+ pool_get (vcm->session_io_thread.active_session_indexes,
+ active_session_index);
+ *active_session_index = session_index;
+ VCL_IO_SESSIONS_UNLOCK ();
+ }
session->vpp_event_queue = uword_to_pointer (mp->vpp_event_queue_address,
svm_queue_t *);
session->rx_fifo->refcnt,
session->tx_fifo, session->tx_fifo->refcnt);
done_unlock:
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
static void
u32 session_index = mp->context;
int rv;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
done:
if (mp->retval)
{
session->vpp_handle = mp->handle;
session->lcl_addr.is_ip4 = mp->lcl_is_ip4;
- clib_memcpy (&session->lcl_addr.ip46, mp->lcl_ip,
- sizeof (session->peer_addr.ip46));
+ session->lcl_addr.ip46 = to_ip46 (!mp->lcl_is_ip4, mp->lcl_ip);
session->lcl_port = mp->lcl_port;
vppcom_session_table_add_listener (mp->handle, session_index);
session->state = STATE_LISTEN;
clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: bind succeeded!",
getpid (), mp->handle, mp->context);
done_unlock:
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
static void
format (s, "%U", format_ip6_address, &ip46->ip6);
}
-static inline void
-vppcom_send_accept_session_reply (u64 handle, u32 context, int retval)
-{
- vl_api_accept_session_reply_t *rmp;
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- memset (rmp, 0, sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY);
- rmp->retval = htonl (retval);
- rmp->context = context;
- rmp->handle = handle;
- vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
-}
-
static void
vl_api_accept_session_t_handler (vl_api_accept_session_t * mp)
{
svm_fifo_t *rx_fifo, *tx_fifo;
session_t *session, *listen_session;
u32 session_index;
+ vce_event_connect_request_t *ecr;
+ vce_event_t *ev;
+ int rv;
+ u32 ev_idx;
+ uword elts = 0;
+
+ VCL_SESSION_LOCK ();
+
+ VCL_ACCEPT_FIFO_LOCK ();
+ elts = clib_fifo_free_elts (vcm->client_session_index_fifo);
+ VCL_ACCEPT_FIFO_UNLOCK ();
- clib_spinlock_lock (&vcm->sessions_lockp);
- if (!clib_fifo_free_elts (vcm->client_session_index_fifo))
+ if (!elts)
{
clib_warning ("VCL<%d>: client session queue is full!", getpid ());
vppcom_send_accept_session_reply (mp->handle, mp->context,
VNET_API_ERROR_QUEUE_FULL);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
return;
}
clib_warning ("VCL<%d>: ERROR: couldn't find listen session: "
"unknown vpp listener handle %llx",
getpid (), mp->listener_handle);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ vppcom_send_accept_session_reply (mp->handle, mp->context,
+ VNET_API_ERROR_INVALID_ARGUMENT);
+ VCL_SESSION_UNLOCK ();
return;
}
+ /* TODO check listener depth and update */
+ /* TODO on "child" fd close, update listener depth */
+
/* Allocate local session and set it up */
pool_get (vcm->sessions, session);
memset (session, 0, sizeof (*session));
- session_index = session - vcm->sessions;
+ session_index = (u32) (session - vcm->sessions);
rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
rx_fifo->client_session_index = session_index;
session->state = STATE_ACCEPT;
session->peer_port = mp->port;
session->peer_addr.is_ip4 = mp->is_ip4;
- clib_memcpy (&session->peer_addr.ip46, mp->ip,
- sizeof (session->peer_addr.ip46));
+ session->peer_addr.ip46 = to_ip46 (!mp->is_ip4, mp->ip);
/* Add it to lookup table */
hash_set (vcm->session_index_by_vpp_handles, mp->handle, session_index);
session->lcl_port = listen_session->lcl_port;
session->lcl_addr = listen_session->lcl_addr;
- /* TBD: move client_session_index_fifo into listener session */
- clib_fifo_add1 (vcm->client_session_index_fifo, session_index);
+ /* Create an event for handlers */
+
+ VCL_EVENTS_LOCK ();
+
+ pool_get (vcm->event_thread.vce_events, ev);
+ ev_idx = (u32) (ev - vcm->event_thread.vce_events);
+ ecr = vce_get_event_data (ev, sizeof (*ecr));
+ ev->evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
+ listen_session = vppcom_session_table_lookup_listener (mp->listener_handle);
+ ev->evk.session_index = (u32) (listen_session - vcm->sessions);
+ ecr->accepted_session_index = session_index;
+
+ VCL_EVENTS_UNLOCK ();
+
+ rv = vce_generate_event (&vcm->event_thread, ev_idx);
+ ASSERT (rv == 0);
if (VPPCOM_DEBUG > 1)
clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: client accept "
"request from %s address %U port %d queue %p!", getpid (),
mp->handle, session_index, mp->is_ip4 ? "IPv4" : "IPv6",
- format_ip46_address, &mp->ip, mp->is_ip4,
+ format_ip46_address, &mp->ip,
+ mp->is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (mp->port), session->vpp_event_queue);
if (VPPCOM_DEBUG > 0)
clib_warning ("ip6");
}
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
-
-}
-
-static void
-vppcom_send_connect_session_reply (session_t * session, u32 session_index,
- u64 vpp_handle, u32 context, int retval)
-{
- vl_api_connect_session_reply_t *rmp;
- u32 len;
- svm_queue_t *client_q;
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- memset (rmp, 0, sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_CONNECT_SESSION_REPLY);
-
- if (!session)
- {
- rmp->context = context;
- rmp->handle = vpp_handle;
- rmp->retval = htonl (retval);
- vl_msg_api_send_shmem (vcm->vl_input_queue, (u8 *) & rmp);
- return;
- }
-
- rmp->context = session->client_context;
- rmp->retval = htonl (retval);
- rmp->handle = session->vpp_handle;
- rmp->server_rx_fifo = pointer_to_uword (session->rx_fifo);
- rmp->server_tx_fifo = pointer_to_uword (session->tx_fifo);
- rmp->vpp_event_queue_address = pointer_to_uword (session->vpp_event_queue);
- rmp->segment_size = vcm->cfg.segment_size;
- len = vec_len (session->segment_name);
- rmp->segment_name_length = clib_min (len, sizeof (rmp->segment_name));
- clib_memcpy (rmp->segment_name, session->segment_name,
- rmp->segment_name_length - 1);
- clib_memcpy (rmp->lcl_ip, session->peer_addr.ip46.as_u8,
- sizeof (rmp->lcl_ip));
- rmp->is_ip4 = session->peer_addr.is_ip4;
- rmp->lcl_port = session->peer_port;
- client_q = uword_to_pointer (session->client_queue_address, svm_queue_t *);
- ASSERT (client_q);
- vl_msg_api_send_shmem (client_q, (u8 *) & rmp);
-}
-
-/*
- * Acting as server for redirected connect requests
- */
-static void
-vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp)
-{
- u32 session_index;
- session_t *session = 0;
-
- clib_spinlock_lock (&vcm->sessions_lockp);
- if (!clib_fifo_free_elts (vcm->client_session_index_fifo))
- {
- clib_spinlock_unlock (&vcm->sessions_lockp);
-
- if (VPPCOM_DEBUG > 1)
- clib_warning ("VCL<%d>: client session queue is full!", getpid ());
-
- /* TBD: Fix api to include vpp handle */
- vppcom_send_connect_session_reply (0 /* session */ , 0 /* sid */ ,
- 0 /* handle */ , mp->context,
- VNET_API_ERROR_QUEUE_FULL);
- return;
- }
-
- pool_get (vcm->sessions, session);
- memset (session, 0, sizeof (*session));
- session_index = session - vcm->sessions;
-
- session->client_context = mp->context;
- session->vpp_handle = session_index;
- session->client_queue_address = mp->client_queue_address;
- session->lcl_port = mp->port;
- session->lcl_addr.is_ip4 = mp->is_ip4;
- clib_memcpy (&session->lcl_addr.ip46, mp->ip,
- sizeof (session->lcl_addr.ip46));
- /* TBD: missing peer info in api msg.
- */
- session->peer_addr.is_ip4 = mp->is_ip4;
- ASSERT (session->lcl_addr.is_ip4 == session->peer_addr.is_ip4);
-
- session->state = STATE_ACCEPT;
- clib_fifo_add1 (vcm->client_session_index_fifo, session_index);
- if (VPPCOM_DEBUG > 1)
- clib_warning ("VCL<%d>: sid %u: Got a cut-thru connect request! "
- "clib_fifo_elts %u!\n", getpid (), session_index,
- clib_fifo_elts (vcm->client_session_index_fifo));
-
- if (VPPCOM_DEBUG > 0)
- {
- session->elog_track.name =
- (char *) format (0, "C:%d:S:%d%c", vcm->my_client_index,
- session_index, 0);
- elog_track_register (&vcm->elog_main, &session->elog_track);
-
- /* *INDENT-OFF* */
- ELOG_TYPE_DECLARE (e) =
- {
- .format = "cut-thru-connect:S:%d clib_fifo_elts:%d",
- .format_args = "i4i4",
- };
+ VCL_SESSION_UNLOCK ();
- struct
- {
- u32 data[2];
- } *ed;
-
- ed = ELOG_TRACK_DATA (&vcm->elog_main, e, session->elog_track);
-
- ed->data[0] = session_index;
- ed->data[1] = clib_fifo_elts (vcm->client_session_index_fifo);
- /* *INDENT-ON* */
- }
-
- clib_spinlock_unlock (&vcm->sessions_lockp);
}
+/* VPP combines bind and listen as one operation. VCL manages the separation
+ * of bind and listen locally via vppcom_session_bind() and
+ * vppcom_session_listen() */
static void
vppcom_send_bind_sock (session_t * session, u32 session_index)
{
u64 vpp_handle;
elog_track_t session_elog_track;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
vpp_handle = session->vpp_handle;
vppcom_session_table_del_listener (vpp_handle);
session->state = STATE_DISCONNECT;
session_elog_track = session->elog_track;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 1)
clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: "
u64 vpp_handle;
session_state_t state;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
vpp_handle = session->vpp_handle;
state = session->state;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 1)
{
_(BIND_SOCK_REPLY, bind_sock_reply) \
_(UNBIND_SOCK_REPLY, unbind_sock_reply) \
_(ACCEPT_SESSION, accept_session) \
-_(CONNECT_SOCK, connect_sock) \
_(CONNECT_SESSION_REPLY, connect_session_reply) \
_(DISCONNECT_SESSION, disconnect_session) \
_(DISCONNECT_SESSION_REPLY, disconnect_session_reply) \
conf_fname = VPPCOM_CONF_DEFAULT;
vppcom_cfg_heapsize (conf_fname);
vcl_cfg = &vcm->cfg;
+ clib_spinlock_init (&vcm->session_fifo_lockp);
clib_fifo_validate (vcm->client_session_index_fifo,
vcm->cfg.listen_queue_size);
vppcom_cfg_read (conf_fname);
+
env_var_str = getenv (VPPCOM_ENV_API_PREFIX);
if (env_var_str)
{
"filename (%s) from " VPPCOM_ENV_API_PREFIX "!",
getpid (), env_var_str, vcl_cfg->vpp_api_filename);
}
-
- env_var_str = getenv (VPPCOM_ENV_APP_NAMESPACE_SECRET);
- if (env_var_str)
- {
- u64 tmp;
- if (sscanf (env_var_str, "%lu", &tmp) != 1)
- clib_warning ("VCL<%d>: WARNING: Invalid namespace secret "
- "specified in the environment variable "
- VPPCOM_ENV_APP_NAMESPACE_SECRET
- " (%s)!\n", getpid (), env_var_str);
- else
- {
- vcm->cfg.namespace_secret = tmp;
- if (VPPCOM_DEBUG > 0)
- clib_warning ("VCL<%d>: configured namespace secret "
- "(%lu) from " VPPCOM_ENV_APP_NAMESPACE_ID "!",
- getpid (), vcm->cfg.namespace_secret);
- }
- }
env_var_str = getenv (VPPCOM_ENV_APP_NAMESPACE_ID);
if (env_var_str)
{
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: configured namespace secret "
"(%lu) from "
- VPPCOM_ENV_APP_NAMESPACE_ID
+ VPPCOM_ENV_APP_NAMESPACE_SECRET
"!", getpid (), vcm->cfg.namespace_secret);
}
}
if (vcm->my_client_index == ~0)
{
+
+ /* API hookup and connect to VPP */
vppcom_api_hookup ();
vcm->app_state = STATE_APP_START;
rv = vppcom_connect_to_vpp (app_name);
return rv;
}
+ /* State event handling thread */
+
+ rv = vce_start_event_thread (&(vcm->event_thread), 20);
+
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: sending session enable", getpid ());
vppcom_app_destroy (void)
{
int rv;
+ f64 orig_app_timeout;
if (vcm->my_client_index == ~0)
return;
}
vppcom_app_detach ();
+ orig_app_timeout = vcm->cfg.app_timeout;
+ vcm->cfg.app_timeout = 2.0;
rv = vppcom_wait_for_app_state_change (STATE_APP_ENABLED);
+ vcm->cfg.app_timeout = orig_app_timeout;
if (PREDICT_FALSE (rv))
{
if (VPPCOM_DEBUG > 0)
session_state_t state;
elog_track_t session_elog_track;
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK ();
pool_get (vcm->sessions, session);
memset (session, 0, sizeof (*session));
session_index = session - vcm->sessions;
session_elog_track = session->elog_track;
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: sid %u", getpid (), session_index);
session_state_t state;
elog_track_t session_elog_track;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
is_vep = session->is_vep;
is_vep_session = session->is_vep_session;
next_sid = session->vep.next_sid;
vep_idx = session->vep.vep_idx;
state = session->state;
vpp_handle = session->vpp_handle;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
/*
* Why two if(VPPCOM_DEBUG) checks?
getpid (), vpp_handle, next_sid, vep_idx,
rv, vppcom_retval_str (rv));
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
next_sid = session->vep.next_sid;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
}
else
}
}
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
vpp_handle = session->vpp_handle;
if (vpp_handle != ~0)
{
}
pool_put_index (vcm->sessions, session_index);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 0)
{
if (!ep || !ep->ip)
return VPPCOM_EINVAL;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
if (session->is_vep)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
clib_warning ("VCL<%d>: ERROR: sid %u: cannot "
"bind to an epoll session!", getpid (), session_index);
rv = VPPCOM_EBADFD;
"port %u, proto %s", getpid (), session_index,
session->lcl_addr.is_ip4 ? "IPv4" : "IPv6",
format_ip46_address, &session->lcl_addr.ip46,
- session->lcl_addr.is_ip4,
+ session->lcl_addr.is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (session->lcl_port),
session->proto ? "UDP" : "TCP");
{
if (session->lcl_addr.is_ip4)
{
- /* *INDENT-OFF* */
+ /* *INDENT-OFF* */
ELOG_TYPE_DECLARE (e) =
{
.format = "bind local:%s:%d.%d.%d.%d:%d ",
u8 proto;
u8 addr[4];
u16 port;
- }) * ed;
+ }) *ed;
ed = ELOG_TRACK_DATA (&vcm->elog_main, e, session->elog_track);
ed->proto = session->proto;
}
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
done:
return rv;
}
if (q_len == 0 || q_len == ~0)
q_len = vcm->cfg.listen_queue_size;
- VCL_LOCK_AND_GET_SESSION (listen_session_index, &listen_session);
+ VCL_SESSION_LOCK_AND_GET (listen_session_index, &listen_session);
if (listen_session->is_vep)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
clib_warning ("VCL<%d>: ERROR: sid %u: cannot listen on an "
"epoll session!", getpid (), listen_session_index);
rv = VPPCOM_EBADFD;
listen_vpp_handle = listen_session->vpp_handle;
if (listen_session->state & STATE_LISTEN)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: "
"already in listen state!",
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: vpp handle 0x%llx, "
- "sid %u: sending bind request...",
+ "sid %u: sending VPP bind+listen request...",
getpid (), listen_vpp_handle, listen_session_index);
vppcom_send_bind_sock (listen_session, listen_session_index);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
retval =
vppcom_wait_for_session_state_change (listen_session_index, STATE_LISTEN,
vcm->cfg.session_timeout);
- VCL_LOCK_AND_GET_SESSION (listen_session_index, &listen_session);
+ VCL_SESSION_LOCK_AND_GET (listen_session_index, &listen_session);
if (PREDICT_FALSE (retval))
{
if (VPPCOM_DEBUG > 0)
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: bind failed! "
- "returning %d (%s)", getpid (),
- listen_session->vpp_handle, listen_session_index,
- retval, vppcom_retval_str (retval));
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ clib_warning
+ ("VCL<%d>: vpp handle 0x%llx, sid %u: bind+listen failed! "
+ "returning %d (%s)", getpid (), listen_session->vpp_handle,
+ listen_session_index, retval, vppcom_retval_str (retval));
+ VCL_SESSION_UNLOCK ();
rv = retval;
goto done;
}
+ VCL_ACCEPT_FIFO_LOCK ();
clib_fifo_validate (vcm->client_session_index_fifo, q_len);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_ACCEPT_FIFO_UNLOCK ();
+
+ VCL_SESSION_UNLOCK ();
+
done:
return rv;
}
int
-vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
- uint32_t flags)
+validate_args_session_accept_ (session_t * listen_session)
{
- session_t *listen_session = 0;
- session_t *client_session = 0;
- u32 client_session_index = ~0;
- int rv;
- f64 wait_for;
- u64 listen_vpp_handle;
-
- VCL_LOCK_AND_GET_SESSION (listen_session_index, &listen_session);
+ u32 listen_session_index = listen_session - vcm->sessions;
+ /* Input validation - expects spinlock on sessions_lockp */
if (listen_session->is_vep)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
clib_warning ("VCL<%d>: ERROR: sid %u: cannot accept on an "
"epoll session!", getpid (), listen_session_index);
- rv = VPPCOM_EBADFD;
- goto done;
+ return VPPCOM_EBADFD;
}
- listen_vpp_handle = listen_session->vpp_handle;
if (listen_session->state != STATE_LISTEN)
{
clib_warning ("VCL<%d>: ERROR: vpp handle 0x%llx, sid %u: "
"not in listen state! state 0x%x (%s)", getpid (),
- listen_vpp_handle, listen_session_index,
+ listen_session->vpp_handle, listen_session_index,
listen_session->state,
vppcom_session_state_str (listen_session->state));
- clib_spinlock_unlock (&vcm->sessions_lockp);
- rv = VPPCOM_EBADFD;
- goto done;
+ return VPPCOM_EBADFD;
}
- wait_for = (VCL_SESS_ATTR_TEST (listen_session->attr,
- VCL_SESS_ATTR_NONBLOCK))
- ? 0 : vcm->cfg.accept_timeout;
+ return VPPCOM_OK;
+}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+int
+vppcom_session_accept (uint32_t listen_session_index, vppcom_endpt_t * ep,
+ uint32_t flags)
+{
+ session_t *listen_session = 0;
+ session_t *client_session = 0;
+ u32 client_session_index = ~0;
+ int rv;
+ u64 listen_vpp_handle;
+ vce_event_handler_reg_t *reg;
+ vce_event_t *ev;
+ vce_event_connect_request_t *result;
+ struct timespec ts;
+ struct timeval tv;
+ int millisecond_timeout = 1;
+ int hours_timeout = 20 * 60 * 60;
+
+ VCL_SESSION_LOCK_AND_GET (listen_session_index, &listen_session);
+  listen_vpp_handle = listen_session->vpp_handle;	/* for debug output */
+
+ rv = validate_args_session_accept_ (listen_session);
+ if (rv)
+ {
+ VCL_SESSION_UNLOCK ();
+ goto done;
+ }
- while (1)
+ /* Using an aggressive timer of 1ms and a generous timer of
+ * 20 hours, we can implement a blocking and non-blocking listener
+ * as both event and time driven */
+ gettimeofday (&tv, NULL);
+ ts.tv_nsec = (tv.tv_usec * 1000) + (1000 * millisecond_timeout);
+ ts.tv_sec = tv.tv_sec;
+
+ /* Predict that the Listener is blocking more often than not */
+ if (PREDICT_TRUE (!VCL_SESS_ATTR_TEST (listen_session->attr,
+ VCL_SESS_ATTR_NONBLOCK)))
+ ts.tv_sec += hours_timeout;
+
+ VCL_SESSION_UNLOCK ();
+
+ /* Register handler for connect_request event on listen_session_index */
+ vce_event_key_t evk;
+ evk.session_index = listen_session_index;
+ evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
+ reg = vce_register_handler (&vcm->event_thread, &evk,
+ vce_connect_request_handler_fn, 0);
+ VCL_EVENTS_LOCK ();
+ ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
+ pthread_mutex_lock (®->handler_lock);
+ while (!ev)
{
- rv = vppcom_wait_for_client_session_index (wait_for);
- if (rv)
+ VCL_EVENTS_UNLOCK ();
+ rv = pthread_cond_timedwait (®->handler_cond,
+ ®->handler_lock, &ts);
+ if (rv == ETIMEDOUT)
{
- if ((VPPCOM_DEBUG > 0))
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: "
- "accept failed! returning %d (%s)", getpid (),
- listen_vpp_handle, listen_session_index,
- rv, vppcom_retval_str (rv));
- if (wait_for == 0)
- goto done;
+ rv = VPPCOM_EAGAIN;
+ goto cleanup;
}
- else
- break;
+ VCL_EVENTS_LOCK ();
+ ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
}
+ result = vce_get_event_data (ev, sizeof (*result));
+ client_session_index = result->accepted_session_index;
+ VCL_EVENTS_UNLOCK ();
+
+ /* Remove from the FIFO used to service epoll */
+ VCL_ACCEPT_FIFO_LOCK ();
+ if (clib_fifo_elts (vcm->client_session_index_fifo))
+ {
+ u32 tmp_client_session_index;
+ clib_fifo_sub1 (vcm->client_session_index_fifo,
+ tmp_client_session_index);
+ /* It wasn't ours... put it back ... */
+ if (tmp_client_session_index != client_session_index)
+ clib_fifo_add1 (vcm->client_session_index_fifo,
+ tmp_client_session_index);
+ }
+ VCL_ACCEPT_FIFO_UNLOCK ();
+
+ VCL_SESSION_LOCK ();
- clib_spinlock_lock (&vcm->sessions_lockp);
- clib_fifo_sub1 (vcm->client_session_index_fifo, client_session_index);
rv = vppcom_session_at_index (client_session_index, &client_session);
if (PREDICT_FALSE (rv))
{
"lookup failed! returning %d (%s)", getpid (),
listen_vpp_handle, listen_session_index,
client_session_index, rv, vppcom_retval_str (rv));
- goto done;
+ goto cleanup;
}
if (flags & O_NONBLOCK)
client_session_index,
client_session->lcl_addr.is_ip4 ? "IPv4" : "IPv6",
format_ip46_address, &client_session->lcl_addr.ip46,
- client_session->lcl_addr.is_ip4,
+ client_session->lcl_addr.is_ip4 ?
+ IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (client_session->lcl_port));
if (VPPCOM_DEBUG > 0)
u32 session;
u8 addr[4];
u16 port;
- }) * ed2;
+ }) *ed2;
ed2 =
ELOG_TRACK_DATA (&vcm->elog_main, e2, client_session->elog_track);
}
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
+
rv = (int) client_session_index;
+ vce_clear_event (&vcm->event_thread, reg->ev_idx);
+ if (vcm->session_io_thread.io_sessions_lockp)
+ {
+ /* Throw this new accepted session index into the rx poll thread pool */
+ VCL_IO_SESSIONS_LOCK ();
+ u32 *active_session_index;
+ pool_get (vcm->session_io_thread.active_session_indexes,
+ active_session_index);
+ *active_session_index = client_session_index;
+ VCL_IO_SESSIONS_UNLOCK ();
+ }
+cleanup:
+ vce_unregister_handler (&vcm->event_thread, reg);
+ pthread_mutex_unlock (®->handler_lock);
+
done:
return rv;
}
u64 vpp_handle = 0;
int rv, retval = VPPCOM_OK;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
if (PREDICT_FALSE (session->is_vep))
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
clib_warning ("VCL<%d>: ERROR: sid %u: cannot "
"connect on an epoll session!", getpid (), session_index);
rv = VPPCOM_EBADFD;
getpid (), session->vpp_handle, session_index,
session->peer_addr.is_ip4 ? "IPv4" : "IPv6",
format_ip46_address,
- &session->peer_addr.ip46, session->peer_addr.is_ip4,
+ &session->peer_addr.ip46, session->peer_addr.is_ip4 ?
+ IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (session->peer_port),
session->proto ? "UDP" : "TCP", session->state,
vppcom_session_state_str (session->state));
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
goto done;
}
getpid (), session->vpp_handle, session_index,
session->peer_addr.is_ip4 ? "IPv4" : "IPv6",
format_ip46_address,
- &session->peer_addr.ip46, session->peer_addr.is_ip4,
+ &session->peer_addr.ip46, session->peer_addr.is_ip4 ?
+ IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (session->peer_port),
session->proto ? "UDP" : "TCP");
vppcom_send_connect_sock (session, session_index);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
retval =
vppcom_wait_for_session_state_change (session_index, STATE_CONNECT,
vcm->cfg.session_timeout);
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
vpp_handle = session->vpp_handle;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
done:
if (PREDICT_FALSE (retval))
ASSERT (buf);
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
is_nonblocking = VCL_SESS_ATTR_TEST (session->attr, VCL_SESS_ATTR_NONBLOCK);
rx_fifo = session->rx_fifo;
if (PREDICT_FALSE (session->is_vep))
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
clib_warning ("VCL<%d>: ERROR: sid %u: cannot "
"read from an epoll session!", getpid (), session_index);
rv = VPPCOM_EBADFD;
if (PREDICT_FALSE (!(state & (SERVER_STATE_OPEN | CLIENT_STATE_OPEN))))
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
if (VPPCOM_DEBUG > 0)
goto done;
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
do
{
if (n_read <= 0)
{
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
poll_et = (((EPOLLET | EPOLLIN) & session->vep.ev.events) ==
(EPOLLET | EPOLLIN));
else
rv = VPPCOM_EAGAIN;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
else
rv = n_read;
}
if (session->state & STATE_LISTEN)
- ready = clib_fifo_elts (vcm->client_session_index_fifo);
+ {
+ VCL_ACCEPT_FIFO_LOCK ();
+ ready = clib_fifo_elts (vcm->client_session_index_fifo);
+ VCL_ACCEPT_FIFO_UNLOCK ();
+ }
else
{
if (!(state & (SERVER_STATE_OPEN | CLIENT_STATE_OPEN | STATE_LISTEN)))
ASSERT (buf);
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
tx_fifo = session->tx_fifo;
is_nonblocking = VCL_SESS_ATTR_TEST (session->attr, VCL_SESS_ATTR_NONBLOCK);
if (PREDICT_FALSE (session->is_vep))
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
clib_warning ("VCL<%d>: ERROR: vpp handle 0x%llx, sid %u: "
"cannot write to an epoll session!",
getpid (), vpp_handle, session_index);
((session->state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET :
VPPCOM_ENOTCONN);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 1)
clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: "
"session is not open! state 0x%x (%s)",
goto done;
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
do
{
}
while (!is_nonblocking && (n_write <= 0));
- /* If event wasn't set, add one */
+ /* If event wasn't set, add one
+ *
+ * To reduce context switching, can check if an
+ * event is already there for this event_key, but for now
+ * this will suffice. */
+
if ((n_write > 0) && svm_fifo_set_event (tx_fifo))
{
/* Fabricate TX event, send to vpp */
evt.fifo = tx_fifo;
evt.event_type = FIFO_EVENT_APP_TX;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
q = session->vpp_event_queue;
ASSERT (q);
svm_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ );
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 1)
clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: "
"added FIFO_EVENT_APP_TX to "
if (n_write <= 0)
{
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
poll_et = (((EPOLLET | EPOLLOUT) & session->vep.ev.events) ==
(EPOLLET | EPOLLOUT));
else
rv = VPPCOM_EAGAIN;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
else
rv = n_write;
{
clib_bitmap_foreach (session_index, vcm->rd_bitmap,
({
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK();
rv = vppcom_session_at_index (session_index, &session);
if (rv < 0)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK();
if (VPPCOM_DEBUG > 1)
clib_warning ("VCL<%d>: session %d specified in "
"read_map is closed.", getpid (),
bits_set = VPPCOM_EBADFD;
goto select_done;
}
-
- rv = vppcom_session_read_ready (session, session_index);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ if (session->state & STATE_LISTEN)
+ {
+ vce_event_handler_reg_t *reg = 0;
+ vce_event_key_t evk;
+
+ /* Check if handler already registered for this
+ * event.
+ * If not, register handler for connect_request event
+ * on listen_session_index
+ */
+ evk.session_index = session_index;
+ evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
+ reg = vce_get_event_handler (&vcm->event_thread, &evk);
+ if (!reg)
+ reg = vce_register_handler (&vcm->event_thread, &evk,
+ vce_poll_wait_connect_request_handler_fn,
+ 0 /* No callback args */);
+ rv = vppcom_session_read_ready (session, session_index);
+ if (rv > 0)
+ {
+ vce_unregister_handler (&vcm->event_thread, reg);
+ }
+ }
+ else
+ rv = vppcom_session_read_ready (session, session_index);
+ VCL_SESSION_UNLOCK();
if (except_map && vcm->ex_bitmap &&
clib_bitmap_get (vcm->ex_bitmap, session_index) &&
(rv < 0))
{
clib_bitmap_foreach (session_index, vcm->wr_bitmap,
({
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK();
rv = vppcom_session_at_index (session_index, &session);
if (rv < 0)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK();
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: session %d specified in "
"write_map is closed.", getpid (),
}
rv = vppcom_session_write_ready (session, session_index);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK();
if (write_map && (rv > 0))
{
clib_bitmap_set_no_check (write_map, session_index, 1);
{
clib_bitmap_foreach (session_index, vcm->ex_bitmap,
({
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK();
rv = vppcom_session_at_index (session_index, &session);
if (rv < 0)
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK();
if (VPPCOM_DEBUG > 1)
clib_warning ("VCL<%d>: session %d specified in "
"except_map is closed.", getpid (),
}
rv = vppcom_session_read_ready (session, session_index);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK();
if (rv < 0)
{
clib_bitmap_set_no_check (except_map, session_index, 1);
u32 vep_idx;
elog_track_t vep_elog_track;
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK ();
pool_get (vcm->sessions, vep_session);
memset (vep_session, 0, sizeof (*vep_session));
vep_idx = vep_session - vcm->sessions;
vep_session->vep.prev_sid = ~0;
vep_session->wait_cont_idx = ~0;
vep_session->vpp_handle = ~0;
+ vep_session->poll_reg = 0;
if (VPPCOM_DEBUG > 0)
{
vep_elog_track = vep_session->elog_track;
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: Created vep_idx %u / sid %u!",
return VPPCOM_EINVAL;
}
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK ();
rv = vppcom_session_at_index (vep_idx, &vep_session);
if (PREDICT_FALSE (rv))
{
session->is_vep = 0;
session->is_vep_session = 1;
vep_session->vep.next_sid = session_index;
+
+ /* VCL Event Register handler */
+ if (session->state & STATE_LISTEN)
+ {
+ /* Register handler for connect_request event on listen_session_index */
+ vce_event_key_t evk;
+ evk.session_index = session_index;
+ evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
+ vep_session->poll_reg =
+ vce_register_handler (&vcm->event_thread, &evk,
+ vce_poll_wait_connect_request_handler_fn,
+ 0 /* No callback args */ );
+ }
if (VPPCOM_DEBUG > 1)
clib_warning ("VCL<%d>: EPOLL_CTL_ADD: vep_idx %u, "
"sid %u, events 0x%x, data 0x%llx!",
goto done;
}
+ /* VCL Event Un-register handler */
+ if ((session->state & STATE_LISTEN) && vep_session->poll_reg)
+ {
+ (void) vce_unregister_handler (&vcm->event_thread,
+ vep_session->poll_reg);
+ }
+
vep_session->wait_cont_idx =
(vep_session->wait_cont_idx == session_index) ?
session->vep.next_sid : vep_session->wait_cont_idx;
vep_verify_epoll_chain (vep_idx);
done:
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
return rv;
}
}
memset (events, 0, sizeof (*events) * maxevents);
- VCL_LOCK_AND_GET_SESSION (vep_idx, &vep_session);
+ VCL_SESSION_LOCK_AND_GET (vep_idx, &vep_session);
vep_next_sid = vep_session->vep.next_sid;
is_vep = vep_session->is_vep;
wait_cont_idx = vep_session->wait_cont_idx;
vep_elog_track = vep_session->elog_track;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (PREDICT_FALSE (!is_vep))
{
int ready;
u64 session_ev_data;
- VCL_LOCK_AND_GET_SESSION (sid, &session);
+ VCL_SESSION_LOCK_AND_GET (sid, &session);
next_sid = session->vep.next_sid;
session_events = session->vep.ev.events;
et_mask = session->vep.et_mask;
session_elog_track = session->elog_track;
}
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (PREDICT_FALSE (is_vep))
{
if (EPOLLIN & session_events)
{
- VCL_LOCK_AND_GET_SESSION (sid, &session);
+ VCL_SESSION_LOCK_AND_GET (sid, &session);
ready = vppcom_session_read_ready (session, sid);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if ((ready > 0) && (EPOLLIN & et_mask))
{
add_event = 1;
if (EPOLLOUT & session_events)
{
- VCL_LOCK_AND_GET_SESSION (sid, &session);
+ VCL_SESSION_LOCK_AND_GET (sid, &session);
ready = vppcom_session_write_ready (session, sid);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if ((ready > 0) && (EPOLLOUT & et_mask))
{
add_event = 1;
events[num_ev].data.u64 = session_ev_data;
if (EPOLLONESHOT & session_events)
{
- VCL_LOCK_AND_GET_SESSION (sid, &session);
+ VCL_SESSION_LOCK_AND_GET (sid, &session);
session->vep.ev.events = 0;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
num_ev++;
if (num_ev == maxevents)
{
- VCL_LOCK_AND_GET_SESSION (vep_idx, &vep_session);
+ VCL_SESSION_LOCK_AND_GET (vep_idx, &vep_session);
vep_session->wait_cont_idx = next_sid;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
goto done;
}
}
if (wait_cont_idx != ~0)
{
- VCL_LOCK_AND_GET_SESSION (vep_idx, &vep_session);
+ VCL_SESSION_LOCK_AND_GET (vep_idx, &vep_session);
vep_session->wait_cont_idx = ~0;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
done:
return (rv != VPPCOM_OK) ? rv : num_ev;
u32 *flags = buffer;
vppcom_endpt_t *ep = buffer;
- VCL_LOCK_AND_GET_SESSION (session_index, &session);
+ VCL_SESSION_LOCK_AND_GET (session_index, &session);
ASSERT (session);
clib_warning ("VCL<%d>: VPPCOM_ATTR_GET_PEER_ADDR: sid %u, "
"is_ip4 = %u, addr = %U, port %u", getpid (),
session_index, ep->is_ip4, format_ip46_address,
- &session->peer_addr.ip46, ep->is_ip4,
+ &session->peer_addr.ip46,
+ ep->is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (ep->port));
if (VPPCOM_DEBUG > 0)
{
clib_warning ("VCL<%d>: VPPCOM_ATTR_GET_LCL_ADDR: sid %u, "
"is_ip4 = %u, addr = %U port %d", getpid (),
session_index, ep->is_ip4, format_ip46_address,
- &session->lcl_addr.ip46, ep->is_ip4,
+ &session->lcl_addr.ip46,
+ ep->is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
clib_net_to_host_u16 (ep->port));
if (VPPCOM_DEBUG > 0)
{
};
CLIB_PACKED (struct {
i32 data;
- }) * ed;
+ }) *ed;
ed = ELOG_TRACK_DATA (&vcm->elog_main, e, session->elog_track);
ed->data = session->libc_epfd;
}
done:
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
return rv;
}
if (ep)
{
- clib_spinlock_lock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK ();
rv = vppcom_session_at_index (session_index, &session);
if (PREDICT_FALSE (rv))
{
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (VPPCOM_DEBUG > 0)
clib_warning ("VCL<%d>: invalid session, "
"sid (%u) has been closed!",
/* *INDENT-ON* */
}
rv = VPPCOM_EBADFD;
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
goto done;
}
ep->is_ip4 = session->peer_addr.is_ip4;
else
clib_memcpy (ep->ip, &session->peer_addr.ip46.ip6,
sizeof (ip6_address_t));
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
}
if (flags == 0)
{
ASSERT (vp[i].revents);
- VCL_LOCK_AND_GET_SESSION (vp[i].sid, &session);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_LOCK_AND_GET (vp[i].sid, &session);
+ VCL_SESSION_UNLOCK ();
if (*vp[i].revents)
*vp[i].revents = 0;
if (POLLIN & vp[i].events)
{
- VCL_LOCK_AND_GET_SESSION (vp[i].sid, &session);
+ VCL_SESSION_LOCK_AND_GET (vp[i].sid, &session);
rv = vppcom_session_read_ready (session, vp[i].sid);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (rv > 0)
{
*vp[i].revents |= POLLIN;
if (POLLOUT & vp[i].events)
{
- VCL_LOCK_AND_GET_SESSION (vp[i].sid, &session);
+ VCL_SESSION_LOCK_AND_GET (vp[i].sid, &session);
rv = vppcom_session_write_ready (session, vp[i].sid);
- clib_spinlock_unlock (&vcm->sessions_lockp);
+ VCL_SESSION_UNLOCK ();
if (rv > 0)
{
*vp[i].revents |= POLLOUT;
}
}
- if (0) // Note "done:" label used by VCL_LOCK_AND_GET_SESSION()
+ if (0) // Note "done:" label used by VCL_SESSION_LOCK_AND_GET()
{
done:
*vp[i].revents = POLLNVAL;
return num_ev;
}
+/*
+ * VPPCOM Event Functions
+ */
+
+void *
+vppcom_session_io_thread_fn (void *arg)
+{
+ vppcom_session_io_thread_t *evt = (vppcom_session_io_thread_t *) arg;
+ u32 *session_indexes = 0, *session_index;
+ int i, rv;
+ u32 bytes = 0;
+ session_t *session;
+
+ while (1)
+ {
+ vec_reset_length (session_indexes);
+ VCE_IO_SESSIONS_LOCK ();
+ pool_foreach (session_index, evt->active_session_indexes, (
+ {
+ vec_add1
+ (session_indexes,
+ *session_index);
+ }
+ ));
+ VCE_IO_SESSIONS_UNLOCK ();
+ if (session_indexes)
+ {
+ for (i = 0; i < vec_len (session_indexes); ++i)
+ {
+ VCL_SESSION_LOCK_AND_GET (session_indexes[i], &session);
+ bytes = svm_fifo_max_dequeue (session->rx_fifo);
+ VCL_SESSION_UNLOCK ();
+
+ if (bytes)
+ {
+ vppcom_ioevent_t *eio;
+ vce_event_t *ev;
+ u32 ev_idx;
+
+ VCL_EVENTS_LOCK ();
+
+ pool_get (vcm->event_thread.vce_events, ev);
+ ev_idx = (u32) (ev - vcm->event_thread.vce_events);
+ eio = vce_get_event_data (ev, sizeof (*eio));
+ ev->evk.eid = VCL_EVENT_IOEVENT_RX_FIFO;
+ ev->evk.session_index = session_indexes[i];
+ eio->bytes = bytes;
+ eio->session_index = session_indexes[i];
+
+ VCL_EVENTS_UNLOCK ();
+
+ rv = vce_generate_event (&vcm->event_thread, ev_idx);
+ }
+ }
+ }
+ struct timespec ts;
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1000000; /* 1 millisecond */
+ nanosleep (&ts, NULL);
+ }
+done:
+ VCL_SESSION_UNLOCK ();
+ return NULL;
+}
+
+int
+vppcom_start_io_event_thread (vppcom_session_io_thread_t * evt,
+ u8 max_sessions)
+{
+ pthread_cond_init (&(evt->vce_io_cond), NULL);
+ pthread_mutex_init (&(evt->vce_io_lock), NULL);
+
+ clib_spinlock_init (&(evt->io_sessions_lockp));
+
+ return pthread_create (&(evt->thread), NULL /* attr */ ,
+ vppcom_session_io_thread_fn, evt);
+}
+
+void
+vce_registered_ioevent_handler_fn (void *arg)
+{
+ vce_event_handler_reg_t *reg = (vce_event_handler_reg_t *) arg;
+ vppcom_ioevent_t *eio;
+ vce_event_t *ev;
+ u32 ioevt_ndx = (u64) (reg->handler_fn_args);
+ vppcom_session_ioevent_t *ioevent, ioevent_;
+
+ VCL_EVENTS_LOCK ();
+ ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
+ eio = vce_get_event_data (ev, sizeof (*eio));
+ VCL_EVENTS_UNLOCK ();
+
+ VCL_IO_SESSIONS_LOCK ();
+ ioevent = pool_elt_at_index (vcm->session_io_thread.ioevents, ioevt_ndx);
+ ioevent_ = *ioevent;
+ VCL_IO_SESSIONS_UNLOCK ();
+ (ioevent_.user_cb) (eio, ioevent_.user_cb_data);
+ vce_clear_event (&vcm->event_thread, reg->ev_idx);
+ return;
+
+ /*TODO - Unregister check in close for this listener */
+
+}
+
+void
+vce_registered_listener_connect_handler_fn (void *arg)
+{
+ vce_event_handler_reg_t *reg = (vce_event_handler_reg_t *) arg;
+ vce_event_connect_request_t *ecr;
+ vce_event_t *ev;
+ vppcom_endpt_t ep;
+
+ session_t *new_session;
+ int rv;
+
+ vppcom_session_listener_t *session_listener =
+ (vppcom_session_listener_t *) reg->handler_fn_args;
+
+ VCL_EVENTS_LOCK ();
+ ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
+ ecr = vce_get_event_data (ev, sizeof (*ecr));
+ VCL_EVENTS_UNLOCK ();
+ VCL_SESSION_LOCK_AND_GET (ecr->accepted_session_index, &new_session);
+
+ ep.is_ip4 = new_session->peer_addr.is_ip4;
+ ep.port = new_session->peer_port;
+ if (new_session->peer_addr.is_ip4)
+ clib_memcpy (&ep.ip, &new_session->peer_addr.ip46.ip4,
+ sizeof (ip4_address_t));
+ else
+ clib_memcpy (&ep.ip, &new_session->peer_addr.ip46.ip6,
+ sizeof (ip6_address_t));
+
+ vppcom_send_accept_session_reply (new_session->vpp_handle,
+ new_session->client_context,
+ 0 /* retval OK */ );
+ VCL_SESSION_UNLOCK ();
+
+ (session_listener->user_cb) (ecr->accepted_session_index, &ep,
+ session_listener->user_cb_data);
+
+ if (vcm->session_io_thread.io_sessions_lockp)
+ {
+ /* Throw this new accepted session index into the rx poll thread pool */
+ VCL_IO_SESSIONS_LOCK ();
+ u32 *active_session_index;
+ pool_get (vcm->session_io_thread.active_session_indexes,
+ active_session_index);
+ *active_session_index = ecr->accepted_session_index;
+ VCL_IO_SESSIONS_UNLOCK ();
+ }
+
+ /*TODO - Unregister check in close for this listener */
+ return;
+
+done:
+ ASSERT (0); // If we can't get a lock or accepted session fails, lets blow up.
+}
+
+/**
+ * @brief vce_poll_wait_connect_request_handler_fn
+ * - used by vppcom_epoll_xxxx() for listener sessions
+ * - when a vl_api_accept_session_t_handler() generates an event
+ * this callback is alerted and sets the fields that vppcom_epoll_wait()
+ * expects to see.
+ *
+ * @param arg - void* to be cast to vce_event_handler_reg_t*
+ */
+void
+vce_poll_wait_connect_request_handler_fn (void *arg)
+{
+ vce_event_handler_reg_t *reg = (vce_event_handler_reg_t *) arg;
+ vce_event_t *ev;
+ /* Retrieve the VCL_EVENT_CONNECT_REQ_ACCEPTED event */
+ ev = vce_get_event_from_index (&vcm->event_thread, reg->ev_idx);
+ vce_event_connect_request_t *ecr = vce_get_event_data (ev, sizeof (*ecr));
+
+ /* Add the accepted_session_index to the FIFO */
+ VCL_ACCEPT_FIFO_LOCK ();
+ clib_fifo_add1 (vcm->client_session_index_fifo,
+ ecr->accepted_session_index);
+ VCL_ACCEPT_FIFO_UNLOCK ();
+
+ /* Recycling the event. */
+ VCL_EVENTS_LOCK ();
+ ev->recycle = 1;
+ clib_fifo_add1 (vcm->event_thread.event_index_fifo, reg->ev_idx);
+ VCL_EVENTS_UNLOCK ();
+}
+
+int
+vppcom_session_register_ioevent_cb (uint32_t session_index,
+ vppcom_session_ioevent_cb cb,
+ uint8_t rx, void *ptr)
+{
+ int rv = VPPCOM_OK;
+ vce_event_key_t evk;
+ vppcom_session_ioevent_t *ioevent;
+
+ if (!vcm->session_io_thread.io_sessions_lockp)
+ rv = vppcom_start_io_event_thread (&vcm->session_io_thread, 100 /* DAW_TODO: ??? hard-coded value */
+ );
+
+ if (rv == VPPCOM_OK)
+ {
+ void *io_evt_ndx;
+
+ /* Register handler for ioevent on session_index */
+ VCL_IO_SESSIONS_LOCK ();
+ pool_get (vcm->session_io_thread.ioevents, ioevent);
+ io_evt_ndx = (void *) (ioevent - vcm->session_io_thread.ioevents);
+ ioevent->user_cb = cb;
+ ioevent->user_cb_data = ptr;
+ VCL_IO_SESSIONS_UNLOCK ();
+
+ evk.session_index = session_index;
+ evk.eid = rx ? VCL_EVENT_IOEVENT_RX_FIFO : VCL_EVENT_IOEVENT_TX_FIFO;
+
+ (void) vce_register_handler (&vcm->event_thread, &evk,
+ vce_registered_ioevent_handler_fn,
+ io_evt_ndx);
+ }
+ return rv;
+}
+
+int
+vppcom_session_register_listener (uint32_t session_index,
+ vppcom_session_listener_cb cb,
+ vppcom_session_listener_errcb
+ errcb, uint8_t flags, int q_len, void *ptr)
+{
+ int rv = VPPCOM_OK;
+ vce_event_key_t evk;
+ vppcom_session_listener_t *listener_args;
+
+ if (!vcm->session_io_thread.io_sessions_lockp)
+ rv = vppcom_start_io_event_thread (&vcm->session_io_thread, 100 /* DAW_TODO: ??? hard-coded value */
+ );
+ if (rv)
+ {
+ goto done;
+ }
+ rv = vppcom_session_listen (session_index, q_len);
+ if (rv)
+ {
+ goto done;
+ }
+
+ /* Register handler for connect_request event on listen_session_index */
+ listener_args = clib_mem_alloc (sizeof (vppcom_session_listener_t)); // DAW_TODO: Use a pool instead of thrashing the memory allocator!
+ listener_args->user_cb = cb;
+ listener_args->user_cb_data = ptr;
+ listener_args->user_errcb = errcb;
+
+ evk.session_index = session_index;
+ evk.eid = VCL_EVENT_CONNECT_REQ_ACCEPTED;
+ (void) vce_register_handler (&vcm->event_thread, &evk,
+ vce_registered_listener_connect_handler_fn,
+ listener_args);
+
+done:
+ return rv;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*