#include <stdlib.h>
#include <svm/svm_fifo_segment.h>
#include <vcl/vppcom.h>
-#include <vcl/vcl_event.h>
#include <vcl/vcl_debug.h>
#include <vcl/vcl_private.h>
__thread uword __vcl_worker_index = ~0;
-static u8 not_ready;
-void
-sigsegv_signal (int signum)
+static int
+vcl_wait_for_segment (u64 segment_handle)
{
- not_ready = 1;
-}
+ vcl_worker_t *wrk = vcl_worker_get_current ();
+ u32 wait_for_seconds = 10, segment_index;
+ f64 timeout;
-static void
-vcl_wait_for_memory (void *mem)
-{
- u8 __clib_unused test;
- if (vcm->mounting_segment)
- {
- while (vcm->mounting_segment)
- ;
- return;
- }
- if (1 || vcm->debug)
- {
- usleep (1e5);
- return;
- }
- if (signal (SIGSEGV, sigsegv_signal))
- {
- perror ("signal()");
- return;
- }
- not_ready = 0;
+ if (segment_handle == VCL_INVALID_SEGMENT_HANDLE)
+ return 1;
-again:
- test = *(u8 *) mem;
- if (not_ready)
+ timeout = clib_time_now (&wrk->clib_time) + wait_for_seconds;
+ while (clib_time_now (&wrk->clib_time) < timeout)
{
- not_ready = 0;
- usleep (1);
- goto again;
+ segment_index = vcl_segment_table_lookup (segment_handle);
+ if (segment_index != VCL_INVALID_SEGMENT_INDEX)
+ return 0;
+ usleep (10);
}
-
- signal (SIGSEGV, SIG_DFL);
+ return 1;
}
const char *
svm_msg_q_t *);
session->our_evt_q = uword_to_pointer (mp->server_event_queue_address,
svm_msg_q_t *);
- vcl_wait_for_memory (session->vpp_evt_q);
+ if (vcl_wait_for_segment (mp->segment_handle))
+ {
+ clib_warning ("segment for session %u couldn't be mounted!",
+ session->session_index);
+ return VCL_INVALID_SESSION_INDEX;
+ }
rx_fifo->master_session_index = session->session_index;
tx_fifo->master_session_index = session->session_index;
+ rx_fifo->master_thread_index = vcl_get_worker_index ();
+ tx_fifo->master_thread_index = vcl_get_worker_index ();
vec_validate (wrk->vpp_event_queues, 0);
evt_q = uword_to_pointer (mp->vpp_event_queue_address, svm_msg_q_t *);
wrk->vpp_event_queues[0] = evt_q;
svm_msg_q_t *);
rx_fifo->client_session_index = session->session_index;
tx_fifo->client_session_index = session->session_index;
-
+ rx_fifo->client_thread_index = vcl_get_worker_index ();
+ tx_fifo->client_thread_index = vcl_get_worker_index ();
vpp_wrk_index = tx_fifo->master_thread_index;
vec_validate (wrk->vpp_event_queues, vpp_wrk_index);
wrk->vpp_event_queues[vpp_wrk_index] = session->vpp_evt_q;
session->session_state = STATE_ACCEPT;
session->transport.rmt_port = mp->port;
session->transport.is_ip4 = mp->is_ip4;
- clib_memcpy (&session->transport.rmt_ip, mp->ip, sizeof (ip46_address_t));
+ clib_memcpy_fast (&session->transport.rmt_ip, mp->ip,
+ sizeof (ip46_address_t));
vcl_session_table_add_vpp_handle (wrk, mp->handle, session->session_index);
session->transport.lcl_port = listen_session->transport.lcl_port;
if (mp->retval)
{
clib_warning ("VCL<%d>: ERROR: sid %u: connect failed! %U", getpid (),
- mp->handle, session_index, format_api_error,
- ntohl (mp->retval));
+ session_index, format_api_error, ntohl (mp->retval));
session->session_state = STATE_FAILED;
session->vpp_handle = mp->handle;
return session_index;
rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *);
- vcl_wait_for_memory (rx_fifo);
+ if (vcl_wait_for_segment (mp->segment_handle))
+ {
+ clib_warning ("segment for session %u couldn't be mounted!",
+ session->session_index);
+ return VCL_INVALID_SESSION_INDEX;
+ }
+
rx_fifo->client_session_index = session_index;
tx_fifo->client_session_index = session_index;
+ rx_fifo->client_thread_index = vcl_get_worker_index ();
+ tx_fifo->client_thread_index = vcl_get_worker_index ();
if (mp->client_event_queue_address)
{
session->tx_fifo = tx_fifo;
session->vpp_handle = mp->handle;
session->transport.is_ip4 = mp->is_ip4;
- clib_memcpy (&session->transport.lcl_ip, mp->lcl_ip,
- sizeof (session->transport.lcl_ip));
+ clib_memcpy_fast (&session->transport.lcl_ip, mp->lcl_ip,
+ sizeof (session->transport.lcl_ip));
session->transport.lcl_port = mp->lcl_port;
session->session_state = STATE_CONNECT;
session->session_state = STATE_CLOSE_ON_EMPTY;
VDBG (0, "reset handle 0x%llx, sid %u ", reset_msg->handle, sid);
vcl_send_session_reset_reply (vcl_session_vpp_evt_q (wrk, session),
- vcm->my_client_index, reset_msg->handle, 0);
+ wrk->my_client_index, reset_msg->handle, 0);
return sid;
}
session = vcl_session_get (wrk, sid);
if (mp->retval)
{
- VDBG (0, "VCL<%d>: ERROR: vpp handle 0x%llx, sid %u: bind failed: %U",
- getpid (), mp->handle, sid, format_api_error, ntohl (mp->retval));
+ VERR ("vpp handle 0x%llx, sid %u: bind failed: %U", mp->handle, sid,
+ format_api_error, mp->retval);
if (session)
{
session->session_state = STATE_FAILED;
session->vpp_handle = mp->handle;
session->transport.is_ip4 = mp->lcl_is_ip4;
- clib_memcpy (&session->transport.lcl_ip, mp->lcl_ip,
- sizeof (ip46_address_t));
+ clib_memcpy_fast (&session->transport.lcl_ip, mp->lcl_ip,
+ sizeof (ip46_address_t));
session->transport.lcl_port = mp->lcl_port;
vcl_session_table_add_listener (wrk, mp->handle, sid);
session->session_state = STATE_LISTEN;
session->tx_fifo = tx_fifo;
}
- VDBG (1, "VCL<%d>: vpp handle 0x%llx, sid %u: bind succeeded!",
+ VDBG (0, "VCL<%d>: vpp handle 0x%llx, sid %u: bind succeeded!",
getpid (), mp->handle, sid);
return sid;
}
-int
-vcl_handle_mq_ctrl_event (vcl_worker_t * wrk, session_event_t * e)
+static int
+vcl_handle_mq_event (vcl_worker_t * wrk, session_event_t * e)
{
session_accepted_msg_t *accepted_msg;
session_disconnected_msg_t *disconnected_msg;
switch (e->event_type)
{
case FIFO_EVENT_APP_RX:
- clib_warning ("unhandled rx: sid %u (0x%x)",
- e->fifo->client_session_index,
- e->fifo->client_session_index);
+ case FIFO_EVENT_APP_TX:
+ case SESSION_IO_EVT_CT_RX:
+ case SESSION_IO_EVT_CT_TX:
+ vec_add1 (wrk->unhandled_evts_vector, *e);
break;
case SESSION_CTRL_EVT_ACCEPTED:
accepted_msg = (session_accepted_msg_t *) e->data;
break;
}
session->session_state = STATE_DISCONNECT;
- VDBG (0, "disconnected handle 0xllx, sid %u", disconnected_msg->handle,
+ VDBG (0, "disconnected handle 0x%llx, sid %u", disconnected_msg->handle,
sid);
break;
case SESSION_CTRL_EVT_RESET:
}
if (svm_msg_q_sub (wrk->app_event_queue, &msg, SVM_Q_NOWAIT, 0))
- continue;
+ {
+ usleep (100);
+ continue;
+ }
e = svm_msg_q_msg_data (wrk->app_event_queue, &msg);
- vcl_handle_mq_ctrl_event (wrk, e);
+ vcl_handle_mq_event (wrk, e);
svm_msg_q_free_msg (wrk->app_event_queue, &msg);
}
while (clib_time_now (&wrk->clib_time) < timeout);
u64 vpp_handle;
session = vcl_session_get_w_handle (wrk, session_handle);
+ if (!session)
+ return VPPCOM_EBADFD;
+
vpp_handle = session->vpp_handle;
state = session->session_state;
if (state & STATE_CLOSE_ON_EMPTY)
{
vpp_evt_q = vcl_session_vpp_evt_q (wrk, session);
- vcl_send_session_disconnected_reply (vpp_evt_q, vcm->my_client_index,
+ vcl_send_session_disconnected_reply (vpp_evt_q, wrk->my_client_index,
vpp_handle, 0);
VDBG (1, "VCL<%d>: vpp handle 0x%llx, sid %u: sending disconnect "
"REPLY...", getpid (), vpp_handle, session_handle);
return VPPCOM_OK;
}
+/* Reset binary-API client state so this process can re-attach to VPP
+ * from scratch. Used after fork(): the child inherits the parent's
+ * registration, input queue and mapped API segment, none of which are
+ * valid for a new client. */
+static void
+vcl_cleanup_bapi (void)
+{
+  socket_client_main_t *scm = &socket_client_main;
+  api_main_t *am = &api_main;
+
+  /* Drop inherited registration/queue pointers; they belong to the parent. */
+  am->my_client_index = ~0;
+  am->my_registration = 0;
+  am->vl_input_queue = 0;
+  am->msg_index_by_name_and_crc = 0;
+  /* NOTE(review): socket_fd is cleared, not closed — presumably the fd is
+   * shared with the parent and must not be closed here; confirm. */
+  scm->socket_fd = 0;
+
+  /* Unmap the inherited API shared-memory segment. */
+  vl_client_api_unmap ();
+}
+
+/* Clean up the worker state left behind by a forked child process that
+ * has exited. If the child itself had forked (grandchild), give that
+ * process up to ~50ms to go away (probed with kill(pid, 0)) and clean it
+ * up recursively before releasing this child's worker. */
+static void
+vcl_cleanup_forked_child (vcl_worker_t * wrk, vcl_worker_t * child_wrk)
+{
+  vcl_worker_t *sub_child;
+  int tries = 0;
+
+  if (child_wrk->forked_child != ~0)
+    {
+      sub_child = vcl_worker_get_if_valid (child_wrk->forked_child);
+      if (sub_child)
+	{
+	  /* Wait a bit, maybe the process is going away */
+	  while (kill (sub_child->current_pid, 0) >= 0 && tries++ < 50)
+	    usleep (1e3);
+	  /* Only recurse once the grandchild is actually gone. */
+	  if (kill (sub_child->current_pid, 0) < 0)
+	    vcl_cleanup_forked_child (child_wrk, sub_child);
+	}
+    }
+  /* Free the child's worker and tell vpp it is gone. */
+  vcl_worker_cleanup (child_wrk, 1 /* notify vpp */ );
+  VDBG (0, "Cleaned up wrk %u", child_wrk->wrk_index);
+  wrk->forked_child = ~0;
+}
+
+/* Previous SIGCHLD disposition, saved by vcl_incercept_sigchld() so it can
+ * be restored and chained to from the handler below. */
+static struct sigaction old_sa;
+
+/* SIGCHLD handler installed around fork(): restores the application's
+ * original handler, cleans up the worker belonging to the exited forked
+ * child (matched by si->si_pid), then chains to the original handler if
+ * one was installed. */
+static void
+vcl_intercept_sigchld_handler (int signum, siginfo_t * si, void *uc)
+{
+  vcl_worker_t *wrk, *child_wrk;
+
+  /* No VCL worker on this thread — nothing of ours to clean up. */
+  if (vcl_get_worker_index () == ~0)
+    return;
+
+  /* One-shot: put the application's handler back immediately. */
+  sigaction (SIGCHLD, &old_sa, 0);
+
+  wrk = vcl_worker_get_current ();
+  if (wrk->forked_child == ~0)
+    return;
+
+  child_wrk = vcl_worker_get_if_valid (wrk->forked_child);
+  if (!child_wrk)
+    goto done;
+
+  /* SIGCHLD for some other process — not the child we forked. */
+  if (si && si->si_pid != child_wrk->current_pid)
+    {
+      VDBG (0, "unexpected child pid %u", si->si_pid);
+      goto done;
+    }
+  vcl_cleanup_forked_child (wrk, child_wrk);
+
+done:
+  /* Chain to whatever handler the application had installed, honoring
+   * its SA_SIGINFO flag. */
+  if (old_sa.sa_flags & SA_SIGINFO)
+    {
+      void (*fn) (int, siginfo_t *, void *) = old_sa.sa_sigaction;
+      fn (signum, si, uc);
+    }
+  else
+    {
+      void (*fn) (int) = old_sa.sa_handler;
+      if (fn)
+	fn (signum);
+    }
+}
+
+/* Install vcl_intercept_sigchld_handler for SIGCHLD, saving the previous
+ * disposition in old_sa so the handler can restore and chain to it.
+ * Exits the process if sigaction() fails.
+ * NOTE(review): "incercept" is a typo for "intercept"; renaming would
+ * also require updating the caller (vcl_app_pre_fork). */
+static void
+vcl_incercept_sigchld ()
+{
+  struct sigaction sa;
+  clib_memset (&sa, 0, sizeof (sa));
+  sa.sa_sigaction = vcl_intercept_sigchld_handler;
+  /* SA_SIGINFO so the handler receives siginfo_t and can match si_pid. */
+  sa.sa_flags = SA_SIGINFO;
+  if (sigaction (SIGCHLD, &sa, &old_sa))
+    {
+      VERR ("couldn't intercept sigchld");
+      exit (-1);
+    }
+}
+
+/* pthread_atfork() pre-fork hook: arm the SIGCHLD interceptor so the
+ * parent can reap the forked child's VCL worker when it exits. */
+static void
+vcl_app_pre_fork (void)
+{
+  vcl_incercept_sigchld ();
+}
+
+/* pthread_atfork() child hook: give the forked child its own VCL worker,
+ * a fresh binary-api connection to VPP (under a derived app name), and a
+ * share of the parent worker's sessions. Clearing vcm->forking at the end
+ * releases the parent, which spins in vcl_app_fork_parent_handler(). */
+static void
+vcl_app_fork_child_handler (void)
+{
+  int rv, parent_wrk_index;
+  vcl_worker_t *parent_wrk;
+  u8 *child_name;
+
+  parent_wrk_index = vcl_get_worker_index ();
+  VDBG (0, "initializing forked child with parent wrk %u", parent_wrk_index);
+
+  /*
+   * Allocate worker
+   */
+  vcl_set_worker_index (~0);
+  if (!vcl_worker_alloc_and_init ())
+    VERR ("couldn't allocate new worker");
+
+  /*
+   * Attach to binary api
+   */
+  child_name = format (0, "%v-child-%u%c", vcm->app_name, getpid (), 0);
+  vcl_cleanup_bapi ();
+  vppcom_api_hookup ();
+  vcm->app_state = STATE_APP_START;
+  rv = vppcom_connect_to_vpp ((char *) child_name);
+  vec_free (child_name);
+  if (rv)
+    {
+      VERR ("couldn't connect to VPP!");
+      /* NOTE(review): returning here leaves vcm->forking set, so the
+       * parent's spin-wait in vcl_app_fork_parent_handler() never ends —
+       * confirm whether the flag should be cleared on this path too. */
+      return;
+    }
+
+  /*
+   * Register worker with vpp and share sessions
+   */
+  vcl_worker_register_with_vpp ();
+  parent_wrk = vcl_worker_get (parent_wrk_index);
+  vcl_worker_share_sessions (parent_wrk);
+  parent_wrk->forked_child = vcl_get_worker_index ();
+
+  VDBG (0, "forked child main worker initialized");
+  vcm->forking = 0;
+}
+
+/* pthread_atfork() parent hook: raise vcm->forking and busy-wait until
+ * the child's handler (vcl_app_fork_child_handler) clears it, i.e. until
+ * the child has finished attaching to VPP and sharing sessions.
+ * NOTE(review): bare spin on a plain flag — assumes vcm->forking is
+ * declared volatile (or otherwise immune to the compiler hoisting the
+ * load out of the loop); confirm its declaration. */
+static void
+vcl_app_fork_parent_handler (void)
+{
+  vcm->forking = 1;
+  while (vcm->forking)
+    ;
+}
+
+/**
+ * Handle app exit
+ *
+ * Notify vpp of the disconnect and mark the worker as free. If we're the
+ * last worker, do a full cleanup otherwise, since we're probably a forked
+ * child, avoid syscalls as much as possible. We might've lost privileges.
+ */
+void
+vppcom_app_exit (void)
+{
+  /* Nothing to do if no workers were ever allocated / all cleaned up. */
+  if (!pool_elts (vcm->workers))
+    return;
+  vcl_worker_cleanup (vcl_worker_get_current (), 1 /* notify vpp */ );
+  vcl_set_worker_index (~0);
+  vcl_elog_stop (vcm);
+  /* NOTE(review): pool_elts() above but vec_len() here on the same pool —
+   * vec_len counts freed slots too; confirm this distinguishes "last
+   * worker" as intended. */
+  if (vec_len (vcm->workers) == 1)
+    vl_client_disconnect_from_vlib ();
+  else
+    vl_client_send_disconnect (1 /* vpp should cleanup */ );
+}
+
/*
* VPPCOM Public API functions
*/
vppcom_cfg_t *vcl_cfg = &vcm->cfg;
int rv;
- if (!vcm->is_init)
+ if (vcm->is_init)
{
- vcm->is_init = 1;
- vppcom_cfg (&vcm->cfg);
- vcl_cfg = &vcm->cfg;
-
- vcm->main_cpu = pthread_self ();
- vppcom_init_error_string_table ();
- svm_fifo_segment_main_init (vcl_cfg->segment_baseva,
- 20 /* timeout in secs */ );
- pool_init_fixed (vcm->workers, vcl_cfg->max_workers);
- vcl_worker_alloc_and_init ();
+ VDBG (1, "already initialized");
+ return VPPCOM_EEXIST;
}
- if (vcm->my_client_index == ~0)
- {
- /* API hookup and connect to VPP */
- vppcom_api_hookup ();
- vcl_elog_init (vcm);
- vcm->app_state = STATE_APP_START;
- rv = vppcom_connect_to_vpp (app_name);
- if (rv)
- {
- clib_warning ("VCL<%d>: ERROR: couldn't connect to VPP!",
- getpid ());
- return rv;
- }
+ vcm->is_init = 1;
+ vppcom_cfg (&vcm->cfg);
+ vcl_cfg = &vcm->cfg;
- VDBG (0, "VCL<%d>: sending session enable", getpid ());
- rv = vppcom_app_session_enable ();
- if (rv)
- {
- clib_warning ("VCL<%d>: ERROR: vppcom_app_session_enable() "
- "failed!", getpid ());
- return rv;
- }
+ vcm->main_cpu = pthread_self ();
+ vcm->main_pid = getpid ();
+ vcm->app_name = format (0, "%s", app_name);
+ vppcom_init_error_string_table ();
+ svm_fifo_segment_main_init (&vcm->segment_main, vcl_cfg->segment_baseva,
+ 20 /* timeout in secs */ );
+ pool_alloc (vcm->workers, vcl_cfg->max_workers);
+ clib_spinlock_init (&vcm->workers_lock);
+ clib_rwlock_init (&vcm->segment_table_lock);
+ pthread_atfork (vcl_app_pre_fork, vcl_app_fork_parent_handler,
+ vcl_app_fork_child_handler);
+ atexit (vppcom_app_exit);
- VDBG (0, "VCL<%d>: sending app attach", getpid ());
- rv = vppcom_app_attach ();
- if (rv)
- {
- clib_warning ("VCL<%d>: ERROR: vppcom_app_attach() failed!",
- getpid ());
- return rv;
- }
+ /* Allocate default worker */
+ vcl_worker_alloc_and_init ();
+
+ /* API hookup and connect to VPP */
+ vppcom_api_hookup ();
+ vcl_elog_init (vcm);
+ vcm->app_state = STATE_APP_START;
+ rv = vppcom_connect_to_vpp (app_name);
+ if (rv)
+ {
+ VERR ("couldn't connect to VPP!");
+ return rv;
+ }
+ VDBG (0, "sending session enable");
+ rv = vppcom_app_session_enable ();
+ if (rv)
+ {
+ VERR ("vppcom_app_session_enable() failed!");
+ return rv;
+ }
- VDBG (0, "VCL<%d>: app_name '%s', my_client_index %d (0x%x)",
- getpid (), app_name, vcm->my_client_index, vcm->my_client_index);
+ VDBG (0, "sending app attach");
+ rv = vppcom_app_attach ();
+ if (rv)
+ {
+ VERR ("vppcom_app_attach() failed!");
+ return rv;
}
+ VDBG (0, "app_name '%s', my_client_index %d (0x%x)", app_name,
+ vcm->workers[0].my_client_index, vcm->workers[0].my_client_index);
+
return VPPCOM_OK;
}
int rv;
f64 orig_app_timeout;
- if (vcm->my_client_index == ~0)
+ if (!pool_elts (vcm->workers))
return;
- VDBG (0, "VCL<%d>: detaching from VPP, my_client_index %d (0x%x)",
- getpid (), vcm->my_client_index, vcm->my_client_index);
vcl_evt (VCL_EVT_DETACH, vcm);
- vppcom_app_send_detach ();
- orig_app_timeout = vcm->cfg.app_timeout;
- vcm->cfg.app_timeout = 2.0;
- rv = vcl_wait_for_app_state_change (STATE_APP_ENABLED);
- vcm->cfg.app_timeout = orig_app_timeout;
- if (PREDICT_FALSE (rv))
- VDBG (0, "VCL<%d>: application detach timed out! returning %d (%s)",
- getpid (), rv, vppcom_retval_str (rv));
+ if (pool_elts (vcm->workers) == 1)
+ {
+ vppcom_app_send_detach ();
+ orig_app_timeout = vcm->cfg.app_timeout;
+ vcm->cfg.app_timeout = 2.0;
+ rv = vcl_wait_for_app_state_change (STATE_APP_ENABLED);
+ vcm->cfg.app_timeout = orig_app_timeout;
+ if (PREDICT_FALSE (rv))
+ VDBG (0, "application detach timed out! returning %d (%s)", rv,
+ vppcom_retval_str (rv));
+ vec_free (vcm->app_name);
+ vcl_worker_cleanup (vcl_worker_get_current (), 0 /* notify vpp */ );
+ }
+ else
+ {
+ vcl_worker_cleanup (vcl_worker_get_current (), 1 /* notify vpp */ );
+ }
+ vcl_set_worker_index (~0);
vcl_elog_stop (vcm);
vl_client_disconnect_from_vlib ();
- vcm->my_client_index = ~0;
- vcm->app_state = STATE_APP_START;
}
int
vcl_evt (VCL_EVT_CREATE, session, session_type, session->session_state,
is_nonblocking, session_index);
- VDBG (0, "VCL<%d>: sid %u", getpid (), session->session_index);
+ VDBG (0, "created sid %u", session->session_index);
return vcl_session_handle (session);
}
vppcom_session_close (uint32_t session_handle)
{
vcl_worker_t *wrk = vcl_worker_get_current ();
+ u8 is_vep, do_disconnect = 1;
vcl_session_t *session = 0;
- u8 is_vep, is_vep_session;
session_state_t state;
u32 next_sh, vep_sh;
int rv = VPPCOM_OK;
if (!session)
return VPPCOM_EBADFD;
+ if (session->shared_index != ~0)
+ do_disconnect = vcl_worker_unshare_session (wrk, session);
+
is_vep = session->is_vep;
- is_vep_session = session->is_vep_session;
next_sh = session->vep.next_sh;
vep_sh = session->vep.vep_sh;
state = session->session_state;
vpp_handle = session->vpp_handle;
- if (VPPCOM_DEBUG > 0)
- {
- if (is_vep)
- clib_warning ("VCL<%d>: vep_idx %u / sid %u: "
- "closing epoll session...",
- getpid (), session_handle, session_handle);
- else
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %d: "
- "closing session...",
- getpid (), vpp_handle, session_handle);
- }
+ VDBG (0, "Closing session handle %u vpp handle %u", session_handle,
+ vpp_handle);
if (is_vep)
{
{
rv = vppcom_epoll_ctl (session_handle, EPOLL_CTL_DEL, next_sh, 0);
if (PREDICT_FALSE (rv < 0))
- VDBG (0, "VCL<%d>: vpp handle 0x%llx, sid %u: EPOLL_CTL_DEL "
- "vep_idx %u failed! rv %d (%s)",
- getpid (), vpp_handle, next_sh, vep_sh,
- rv, vppcom_retval_str (rv));
+ VDBG (0, "vpp handle 0x%llx, sid %u: EPOLL_CTL_DEL vep_idx %u"
+ " failed! rv %d (%s)", vpp_handle, next_sh, vep_sh, rv,
+ vppcom_retval_str (rv));
next_sh = session->vep.next_sh;
}
}
else
{
- if (is_vep_session)
+ if (session->is_vep_session)
{
rv = vppcom_epoll_ctl (vep_sh, EPOLL_CTL_DEL, session_handle, 0);
if (rv < 0)
- VDBG (0, "VCL<%d>: vpp handle 0x%llx, sid %u: EPOLL_CTL_DEL "
- "vep_idx %u failed! rv %d (%s)",
- getpid (), vpp_handle, session_handle,
- vep_sh, rv, vppcom_retval_str (rv));
+ VDBG (0, "vpp handle 0x%llx, sid %u: EPOLL_CTL_DEL vep_idx %u "
+ "failed! rv %d (%s)", vpp_handle, session_handle, vep_sh,
+ rv, vppcom_retval_str (rv));
}
+ if (!do_disconnect)
+ goto cleanup;
+
if (state & STATE_LISTEN)
{
rv = vppcom_session_unbind (session_handle);
if (PREDICT_FALSE (rv < 0))
- VDBG (0, "VCL<%d>: vpp handle 0x%llx, sid %u: listener unbind "
- "failed! rv %d (%s)",
- getpid (), vpp_handle, session_handle,
- rv, vppcom_retval_str (rv));
+ VDBG (0, "vpp handle 0x%llx, sid %u: listener unbind failed! "
+ "rv %d (%s)", vpp_handle, session_handle, rv,
+ vppcom_retval_str (rv));
}
else if (state & STATE_OPEN)
{
}
}
+cleanup:
+
if (vcl_session_is_ct (session))
{
vcl_cut_through_registration_t *ctr;
}
vcl_session_free (wrk, session);
- if (VPPCOM_DEBUG > 0)
- {
- if (is_vep)
- clib_warning ("VCL<%d>: vep_idx %u / sid %u: epoll session removed.",
- getpid (), session_handle, session_handle);
- else
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: session removed.",
- getpid (), vpp_handle, session_handle);
- }
+ VDBG (0, "session handle %u vpp handle %u removed", session_handle,
+ vpp_handle);
vcl_evt (VCL_EVT_CLOSE, session, rv);
session->transport.is_ip4 = ep->is_ip4;
if (ep->is_ip4)
- clib_memcpy (&session->transport.lcl_ip.ip4, ep->ip,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (&session->transport.lcl_ip.ip4, ep->ip,
+ sizeof (ip4_address_t));
else
- clib_memcpy (&session->transport.lcl_ip.ip6, ep->ip,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (&session->transport.lcl_ip.ip6, ep->ip,
+ sizeof (ip6_address_t));
session->transport.lcl_port = ep->port;
VDBG (0, "VCL<%d>: sid %u: binding to local %s address %U port %u, "
svm_msg_q_free_msg (wrk->app_event_queue, &msg);
continue;
}
- clib_memcpy (&accepted_msg, e->data, sizeof (accepted_msg));
+ clib_memcpy_fast (&accepted_msg, e->data, sizeof (accepted_msg));
svm_msg_q_free_msg (wrk->app_event_queue, &msg);
break;
}
VCL_SESS_ATTR_SET (client_session->attr, VCL_SESS_ATTR_NONBLOCK);
listen_vpp_handle = listen_session->vpp_handle;
- VDBG (0, "VCL<%d>: vpp handle 0x%llx, sid %u: Got a client request! "
+ VDBG (0, "vpp handle 0x%llx, sid %u: Got a client request! "
"vpp handle 0x%llx, sid %u, flags %d, is_nonblocking %u",
- getpid (), listen_vpp_handle, listen_session_handle,
+ listen_vpp_handle, listen_session_handle,
client_session->vpp_handle, client_session_index,
flags, VCL_SESS_ATTR_TEST (client_session->attr,
VCL_SESS_ATTR_NONBLOCK));
ep->is_ip4 = client_session->transport.is_ip4;
ep->port = client_session->transport.rmt_port;
if (client_session->transport.is_ip4)
- clib_memcpy (ep->ip, &client_session->transport.rmt_ip.ip4,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (ep->ip, &client_session->transport.rmt_ip.ip4,
+ sizeof (ip4_address_t));
else
- clib_memcpy (ep->ip, &client_session->transport.rmt_ip.ip6,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (ep->ip, &client_session->transport.rmt_ip.ip6,
+ sizeof (ip6_address_t));
}
if (accepted_msg.server_event_queue_address)
vcl_send_session_accepted_reply (vpp_evt_q, client_session->client_context,
client_session->vpp_handle, 0);
- VDBG (0, "VCL<%d>: vpp handle 0x%llx, sid %u: accepted vpp handle 0x%llx, "
+ VDBG (0, "vpp handle 0x%llx, sid %u: accepted vpp handle 0x%llx, "
"sid %u connection from peer %s address %U port %u to local %s "
- "address %U port %u", getpid (), listen_vpp_handle,
+ "address %U port %u", listen_vpp_handle,
listen_session_handle, client_session->vpp_handle,
client_session_index,
client_session->transport.is_ip4 ? "IPv4" : "IPv6",
session->transport.is_ip4 = server_ep->is_ip4;
if (session->transport.is_ip4)
- clib_memcpy (&session->transport.rmt_ip.ip4, server_ep->ip,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (&session->transport.rmt_ip.ip4, server_ep->ip,
+ sizeof (ip4_address_t));
else
- clib_memcpy (&session->transport.rmt_ip.ip6, server_ep->ip,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (&session->transport.rmt_ip.ip6, server_ep->ip,
+ sizeof (ip6_address_t));
session->transport.rmt_port = server_ep->port;
VDBG (0, "VCL<%d>: vpp handle 0x%llx, sid %u: connecting to server %s %U "
svm_msg_q_msg_t msg;
session_event_t *e;
svm_msg_q_t *mq;
- u8 is_full;
+ u8 is_ct;
if (PREDICT_FALSE (!buf))
return VPPCOM_EINVAL;
s = vcl_session_get_w_handle (wrk, session_handle);
- if (PREDICT_FALSE (!s))
+ if (PREDICT_FALSE (!s || s->is_vep))
return VPPCOM_EBADFD;
- if (PREDICT_FALSE (s->is_vep))
- {
- clib_warning ("VCL<%d>: ERROR: sid %u: cannot "
- "read from an epoll session!", getpid (), session_handle);
- return VPPCOM_EBADFD;
- }
-
- is_nonblocking = VCL_SESS_ATTR_TEST (s->attr, VCL_SESS_ATTR_NONBLOCK);
- rx_fifo = s->rx_fifo;
-
if (PREDICT_FALSE (!vcl_session_is_readable (s)))
{
session_state_t state = s->session_state;
return rv;
}
- mq = vcl_session_is_ct (s) ? s->our_evt_q : wrk->app_event_queue;
- svm_fifo_unset_event (rx_fifo);
- is_full = svm_fifo_is_full (rx_fifo);
+ is_nonblocking = VCL_SESS_ATTR_TEST (s->attr, VCL_SESS_ATTR_NONBLOCK);
+ is_ct = vcl_session_is_ct (s);
+ mq = is_ct ? s->our_evt_q : wrk->app_event_queue;
+ rx_fifo = s->rx_fifo;
+ s->has_rx_evt = 0;
if (svm_fifo_is_empty (rx_fifo))
{
if (is_nonblocking)
{
- return VPPCOM_OK;
+ svm_fifo_unset_event (rx_fifo);
+ return VPPCOM_EWOULDBLOCK;
}
- while (1)
+ while (svm_fifo_is_empty (rx_fifo))
{
+ svm_fifo_unset_event (rx_fifo);
svm_msg_q_lock (mq);
if (svm_msg_q_is_empty (mq))
svm_msg_q_wait (mq);
svm_msg_q_sub_w_lock (mq, &msg);
e = svm_msg_q_msg_data (mq, &msg);
svm_msg_q_unlock (mq);
- if (!vcl_is_rx_evt_for_session (e, s->session_index,
- s->our_evt_q != 0))
+ if (!vcl_is_rx_evt_for_session (e, s->session_index, is_ct))
{
- vcl_handle_mq_ctrl_event (wrk, e);
+ vcl_handle_mq_event (wrk, e);
svm_msg_q_free_msg (mq, &msg);
continue;
}
- svm_fifo_unset_event (rx_fifo);
svm_msg_q_free_msg (mq, &msg);
+
if (PREDICT_FALSE (s->session_state == STATE_CLOSE_ON_EMPTY))
return 0;
- if (svm_fifo_is_empty (rx_fifo))
- continue;
- break;
}
}
else
n_read = app_recv_stream_raw (rx_fifo, buf, n, 0, peek);
- if (vcl_session_is_ct (s) && is_full)
- {
- /* If the peer is not polling send notification */
- if (!svm_fifo_has_event (s->rx_fifo))
- app_send_io_evt_to_vpp (s->vpp_evt_q, s->rx_fifo,
- SESSION_IO_EVT_CT_RX, SVM_Q_WAIT);
- }
+ if (svm_fifo_is_empty (rx_fifo))
+ svm_fifo_unset_event (rx_fifo);
- if (VPPCOM_DEBUG > 2)
+ if (is_ct && svm_fifo_want_tx_evt (rx_fifo))
{
- if (n_read > 0)
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: read %d bytes "
- "from (%p)", getpid (), s->vpp_handle,
- session_handle, n_read, rx_fifo);
- else
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: nothing read! "
- "returning %d (%s)", getpid (), s->vpp_handle,
- session_handle, n_read, vppcom_retval_str (n_read));
+ svm_fifo_set_want_tx_evt (s->rx_fifo, 0);
+ app_send_io_evt_to_vpp (s->vpp_evt_q, s->rx_fifo, SESSION_IO_EVT_CT_RX,
+ SVM_Q_WAIT);
}
+
+ VDBG (2, "VCL<%d>: vpp handle 0x%llx, sid %u: read %d bytes from (%p)",
+ getpid (), s->vpp_handle, session_handle, n_read, rx_fifo);
+
return n_read;
}
return (vppcom_session_read_internal (session_handle, buf, n, 1));
}
+/* Zero-copy read: instead of copying into a caller buffer, fill ds with
+ * descriptors of the rx fifo's readable chunks (via svm_fifo_segments).
+ * Blocks on the session's message queue until data arrives unless the
+ * session is non-blocking (then returns VPPCOM_EWOULDBLOCK). Returns the
+ * number of readable bytes, 0 if the peer closed while draining, or a
+ * negative VPPCOM_* error. Caller releases the chunks with
+ * vppcom_session_free_segments(). */
+int
+vppcom_session_read_segments (uint32_t session_handle,
+			      vppcom_data_segments_t ds)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+  int n_read = 0, rv, is_nonblocking;
+  vcl_session_t *s = 0;
+  svm_fifo_t *rx_fifo;
+  svm_msg_q_msg_t msg;
+  session_event_t *e;
+  svm_msg_q_t *mq;
+  u8 is_ct;
+
+  s = vcl_session_get_w_handle (wrk, session_handle);
+  /* epoll sessions have no data fifos. */
+  if (PREDICT_FALSE (!s || s->is_vep))
+    return VPPCOM_EBADFD;
+
+  if (PREDICT_FALSE (!vcl_session_is_readable (s)))
+    {
+      session_state_t state = s->session_state;
+      rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
+      return rv;
+    }
+
+  is_nonblocking = VCL_SESS_ATTR_TEST (s->attr, VCL_SESS_ATTR_NONBLOCK);
+  is_ct = vcl_session_is_ct (s);
+  /* Cut-through sessions use their private event queue. */
+  mq = is_ct ? s->our_evt_q : wrk->app_event_queue;
+  rx_fifo = s->rx_fifo;
+  s->has_rx_evt = 0;
+
+  if (svm_fifo_is_empty (rx_fifo))
+    {
+      if (is_nonblocking)
+	{
+	  svm_fifo_unset_event (rx_fifo);
+	  return VPPCOM_EWOULDBLOCK;
+	}
+      /* Block on the mq; non-rx events are dispatched to the generic
+       * handler so they are not lost while we wait for data. */
+      while (svm_fifo_is_empty (rx_fifo))
+	{
+	  svm_fifo_unset_event (rx_fifo);
+	  svm_msg_q_lock (mq);
+	  if (svm_msg_q_is_empty (mq))
+	    svm_msg_q_wait (mq);
+
+	  svm_msg_q_sub_w_lock (mq, &msg);
+	  e = svm_msg_q_msg_data (mq, &msg);
+	  svm_msg_q_unlock (mq);
+	  if (!vcl_is_rx_evt_for_session (e, s->session_index, is_ct))
+	    {
+	      vcl_handle_mq_event (wrk, e);
+	      svm_msg_q_free_msg (mq, &msg);
+	      continue;
+	    }
+	  svm_msg_q_free_msg (mq, &msg);
+
+	  /* Peer initiated close and fifo drained — report EOF. */
+	  if (PREDICT_FALSE (s->session_state == STATE_CLOSE_ON_EMPTY))
+	    return 0;
+	}
+    }
+
+  n_read = svm_fifo_segments (rx_fifo, (svm_fifo_segment_t *) ds);
+  svm_fifo_unset_event (rx_fifo);
+
+  /* NOTE(review): condition presumably means "fifo was full before this
+   * read", i.e. the cut-through peer may be blocked — confirm. */
+  if (is_ct && n_read + svm_fifo_max_dequeue (rx_fifo) == rx_fifo->nitems)
+    {
+      /* If the peer is not polling send notification */
+      if (!svm_fifo_has_event (s->rx_fifo))
+	app_send_io_evt_to_vpp (s->vpp_evt_q, s->rx_fifo,
+				SESSION_IO_EVT_CT_RX, SVM_Q_WAIT);
+    }
+
+  return n_read;
+}
+
+/* Release fifo chunks previously handed out by
+ * vppcom_session_read_segments(), advancing the rx fifo's dequeue
+ * pointer. No-op on a bad handle or an epoll session. */
+void
+vppcom_session_free_segments (uint32_t session_handle,
+			      vppcom_data_segments_t ds)
+{
+  vcl_worker_t *wrk = vcl_worker_get_current ();
+  vcl_session_t *s;
+
+  s = vcl_session_get_w_handle (wrk, session_handle);
+  if (PREDICT_FALSE (!s || s->is_vep))
+    return;
+
+  svm_fifo_segments_free (s->rx_fifo, (svm_fifo_segment_t *) ds);
+}
+
static inline int
vppcom_session_read_ready (vcl_session_t * session)
{
return svm_fifo_max_dequeue (session->rx_fifo);
}
+/* Copy up to max_bytes from a two-segment descriptor pair (as produced by
+ * vppcom_session_read_segments) into the contiguous buffer buf, draining
+ * ds[0] before ds[1].
+ * NOTE(review): always returns 0 — returning the number of bytes copied
+ * would be more useful; confirm callers' expectations before changing. */
+int
+vppcom_data_segment_copy (void *buf, vppcom_data_segments_t ds, u32 max_bytes)
+{
+  u32 first_copy = clib_min (ds[0].len, max_bytes);
+  clib_memcpy_fast (buf, ds[0].data, first_copy);
+  if (first_copy < max_bytes)
+    {
+      clib_memcpy_fast (buf + first_copy, ds[1].data,
+			clib_min (ds[1].len, max_bytes - first_copy));
+    }
+  return 0;
+}
+
static u8
vcl_is_tx_evt_for_session (session_event_t * e, u32 sid, u8 is_ct)
{
svm_msg_q_msg_t msg;
session_event_t *e;
svm_msg_q_t *mq;
+ u8 is_ct;
if (PREDICT_FALSE (!buf))
return VPPCOM_EINVAL;
if (PREDICT_FALSE (!s))
return VPPCOM_EBADFD;
- tx_fifo = s->tx_fifo;
- is_nonblocking = VCL_SESS_ATTR_TEST (s->attr, VCL_SESS_ATTR_NONBLOCK);
-
if (PREDICT_FALSE (s->is_vep))
{
clib_warning ("VCL<%d>: ERROR: vpp handle 0x%llx, sid %u: "
return VPPCOM_EBADFD;
}
- if (!(s->session_state & STATE_OPEN))
+ if (PREDICT_FALSE (!(s->session_state & STATE_OPEN)))
{
session_state_t state = s->session_state;
rv = ((state & STATE_DISCONNECT) ? VPPCOM_ECONNRESET : VPPCOM_ENOTCONN);
VDBG (1, "VCL<%d>: vpp handle 0x%llx, sid %u: session is not open! "
- "state 0x%x (%s)",
- getpid (), s->vpp_handle, session_handle,
+ "state 0x%x (%s)", getpid (), s->vpp_handle, session_handle,
state, vppcom_session_state_str (state));
return rv;
}
- mq = vcl_session_is_ct (s) ? s->our_evt_q : wrk->app_event_queue;
+ tx_fifo = s->tx_fifo;
+ is_ct = vcl_session_is_ct (s);
+ is_nonblocking = VCL_SESS_ATTR_TEST (s->attr, VCL_SESS_ATTR_NONBLOCK);
+ mq = is_ct ? s->our_evt_q : wrk->app_event_queue;
if (svm_fifo_is_full (tx_fifo))
{
if (is_nonblocking)
}
while (svm_fifo_is_full (tx_fifo))
{
+ svm_fifo_set_want_tx_evt (tx_fifo, 1);
svm_msg_q_lock (mq);
- while (svm_msg_q_is_empty (mq) && svm_msg_q_timedwait (mq, 10e-6))
- ;
+ if (svm_msg_q_is_empty (mq))
+ svm_msg_q_wait (mq);
+
svm_msg_q_sub_w_lock (mq, &msg);
e = svm_msg_q_msg_data (mq, &msg);
svm_msg_q_unlock (mq);
- if (!vcl_is_tx_evt_for_session (e, s->session_index,
- s->our_evt_q != 0))
- vcl_handle_mq_ctrl_event (wrk, e);
+ if (!vcl_is_tx_evt_for_session (e, s->session_index, is_ct))
+ vcl_handle_mq_event (wrk, e);
svm_msg_q_free_msg (mq, &msg);
}
}
ASSERT (n_write > 0);
- if (VPPCOM_DEBUG > 2)
- {
- if (n_write <= 0)
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: "
- "FIFO-FULL (%p)", getpid (), s->vpp_handle,
- session_handle, tx_fifo);
- else
- clib_warning ("VCL<%d>: vpp handle 0x%llx, sid %u: "
- "wrote %d bytes tx-fifo: (%p)", getpid (),
- s->vpp_handle, session_handle, n_write, tx_fifo);
- }
+ VDBG (2, "VCL<%d>: vpp handle 0x%llx, sid %u: wrote %d bytes", getpid (),
+ s->vpp_handle, session_handle, n_write);
+
return n_write;
}
u32 n_msgs;
int i;
- n_msgs = svm_msg_q_size (mq);
- for (i = 0; i < n_msgs; i++)
+ n_msgs = svm_msg_q_size (mq);
+ for (i = 0; i < n_msgs; i++)
+ {
+ vec_add2 (wrk->mq_msg_vector, msg, 1);
+ svm_msg_q_sub_w_lock (mq, msg);
+ }
+ return n_msgs;
+}
+
+/* Filter a stale rx event: if _fifo looks empty, clear its event flag and
+ * re-check (a producer may have raced in between); `break` out of the
+ * enclosing switch/loop if it is genuinely empty. Intended for use inside
+ * a switch case in the event-dispatch loops below.
+ * NOTE(review): the final macro line ends with a line-continuation `\`,
+ * so the following source line is silently pulled into the macro —
+ * confirm that trailing backslash is intentional. */
+#define vcl_fifo_rx_evt_valid_or_break(_fifo) \
+if (PREDICT_FALSE (svm_fifo_is_empty (_fifo))) \
+  { \
+    svm_fifo_unset_event (_fifo); \
+    if (svm_fifo_is_empty (_fifo)) \
+      break; \
+  } \
+
+static void
+vcl_select_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
+ unsigned long n_bits, unsigned long *read_map,
+ unsigned long *write_map,
+ unsigned long *except_map, u32 * bits_set)
+{
+ session_disconnected_msg_t *disconnected_msg;
+ session_connected_msg_t *connected_msg;
+ session_accepted_msg_t *accepted_msg;
+ vcl_session_msg_t *vcl_msg;
+ vcl_session_t *session;
+ u64 handle;
+ u32 sid;
+
+ switch (e->event_type)
{
- vec_add2 (wrk->mq_msg_vector, msg, 1);
- svm_msg_q_sub_w_lock (mq, msg);
+ case FIFO_EVENT_APP_RX:
+ vcl_fifo_rx_evt_valid_or_break (e->fifo);
+ sid = e->fifo->client_session_index;
+ session = vcl_session_get (wrk, sid);
+ if (!session)
+ break;
+ if (sid < n_bits && read_map)
+ {
+ clib_bitmap_set_no_check (read_map, sid, 1);
+ *bits_set += 1;
+ }
+ break;
+ case FIFO_EVENT_APP_TX:
+ sid = e->fifo->client_session_index;
+ session = vcl_session_get (wrk, sid);
+ if (!session)
+ break;
+ if (sid < n_bits && write_map)
+ {
+ clib_bitmap_set_no_check (write_map, sid, 1);
+ *bits_set += 1;
+ }
+ break;
+ case SESSION_IO_EVT_CT_TX:
+ vcl_fifo_rx_evt_valid_or_break (e->fifo);
+ session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 0);
+ if (!session)
+ break;
+ sid = session->session_index;
+ if (sid < n_bits && read_map)
+ {
+ clib_bitmap_set_no_check (read_map, sid, 1);
+ *bits_set += 1;
+ }
+ break;
+ case SESSION_IO_EVT_CT_RX:
+ session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 1);
+ if (!session)
+ break;
+ sid = session->session_index;
+ if (sid < n_bits && write_map)
+ {
+ clib_bitmap_set_no_check (write_map, sid, 1);
+ *bits_set += 1;
+ }
+ break;
+ case SESSION_CTRL_EVT_ACCEPTED:
+ accepted_msg = (session_accepted_msg_t *) e->data;
+ handle = accepted_msg->listener_handle;
+ session = vcl_session_table_lookup_listener (wrk, handle);
+ if (!session)
+ {
+ clib_warning ("VCL<%d>: ERROR: couldn't find listen session:"
+ "listener handle %llx", getpid (), handle);
+ break;
+ }
+
+ clib_fifo_add2 (session->accept_evts_fifo, vcl_msg);
+ vcl_msg->accepted_msg = *accepted_msg;
+ sid = session->session_index;
+ if (sid < n_bits && read_map)
+ {
+ clib_bitmap_set_no_check (read_map, sid, 1);
+ *bits_set += 1;
+ }
+ break;
+ case SESSION_CTRL_EVT_CONNECTED:
+ connected_msg = (session_connected_msg_t *) e->data;
+ vcl_session_connected_handler (wrk, connected_msg);
+ break;
+ case SESSION_CTRL_EVT_DISCONNECTED:
+ disconnected_msg = (session_disconnected_msg_t *) e->data;
+ sid = vcl_session_index_from_vpp_handle (wrk, disconnected_msg->handle);
+ if (sid < n_bits && except_map)
+ {
+ clib_bitmap_set_no_check (except_map, sid, 1);
+ *bits_set += 1;
+ }
+ break;
+ case SESSION_CTRL_EVT_RESET:
+ sid = vcl_session_reset_handler (wrk, (session_reset_msg_t *) e->data);
+ if (sid < n_bits && except_map)
+ {
+ clib_bitmap_set_no_check (except_map, sid, 1);
+ *bits_set += 1;
+ }
+ break;
+ default:
+ clib_warning ("unhandled: %u", e->event_type);
+ break;
}
- return n_msgs;
}
static int
unsigned long *write_map, unsigned long *except_map,
double time_to_wait, u32 * bits_set)
{
- session_disconnected_msg_t *disconnected_msg;
- session_connected_msg_t *connected_msg;
- session_accepted_msg_t *accepted_msg;
- vcl_session_msg_t *vcl_msg;
- vcl_session_t *session;
svm_msg_q_msg_t *msg;
session_event_t *e;
- u32 i, sid;
- u64 handle;
+ u32 i;
svm_msg_q_lock (mq);
if (svm_msg_q_is_empty (mq))
{
msg = vec_elt_at_index (wrk->mq_msg_vector, i);
e = svm_msg_q_msg_data (mq, msg);
- switch (e->event_type)
- {
- case FIFO_EVENT_APP_RX:
- sid = e->fifo->client_session_index;
- session = vcl_session_get (wrk, sid);
- if (sid < n_bits && read_map)
- {
- clib_bitmap_set_no_check (read_map, sid, 1);
- *bits_set += 1;
- }
- break;
- case FIFO_EVENT_APP_TX:
- sid = e->fifo->client_session_index;
- session = vcl_session_get (wrk, sid);
- if (!session)
- break;
- if (sid < n_bits && write_map)
- {
- clib_bitmap_set_no_check (write_map, sid, 1);
- *bits_set += 1;
- }
- break;
- case SESSION_IO_EVT_CT_TX:
- if (svm_fifo_is_empty (e->fifo))
- {
- svm_fifo_unset_event (e->fifo);
- if (svm_fifo_is_empty (e->fifo))
- break;
- }
- session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 0);
- sid = session->session_index;
- if (sid < n_bits && read_map)
- {
- clib_bitmap_set_no_check (read_map, sid, 1);
- *bits_set += 1;
- }
- break;
- case SESSION_IO_EVT_CT_RX:
- session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 1);
- sid = session->session_index;
- if (!session)
- break;
- if (sid < n_bits && write_map)
- {
- clib_bitmap_set_no_check (write_map, sid, 1);
- *bits_set += 1;
- }
- break;
- case SESSION_CTRL_EVT_ACCEPTED:
- accepted_msg = (session_accepted_msg_t *) e->data;
- handle = accepted_msg->listener_handle;
- session = vcl_session_table_lookup_listener (wrk, handle);
- if (!session)
- {
- clib_warning ("VCL<%d>: ERROR: couldn't find listen session:"
- "listener handle %llx", getpid (), handle);
- break;
- }
-
- clib_fifo_add2 (session->accept_evts_fifo, vcl_msg);
- vcl_msg->accepted_msg = *accepted_msg;
- sid = session->session_index;
- if (sid < n_bits && read_map)
- {
- clib_bitmap_set_no_check (read_map, sid, 1);
- *bits_set += 1;
- }
- break;
- case SESSION_CTRL_EVT_CONNECTED:
- connected_msg = (session_connected_msg_t *) e->data;
- vcl_session_connected_handler (wrk, connected_msg);
- break;
- case SESSION_CTRL_EVT_DISCONNECTED:
- disconnected_msg = (session_disconnected_msg_t *) e->data;
- sid = vcl_session_index_from_vpp_handle (wrk,
- disconnected_msg->handle);
- if (sid < n_bits && except_map)
- {
- clib_bitmap_set_no_check (except_map, sid, 1);
- *bits_set += 1;
- }
- break;
- case SESSION_CTRL_EVT_RESET:
- sid = vcl_session_reset_handler (wrk,
- (session_reset_msg_t *) e->data);
- if (sid < n_bits && except_map)
- {
- clib_bitmap_set_no_check (except_map, sid, 1);
- *bits_set += 1;
- }
- break;
- default:
- clib_warning ("unhandled: %u", e->event_type);
- break;
- }
+ vcl_select_handle_mq_event (wrk, e, n_bits, read_map, write_map,
+ except_map, bits_set);
svm_msg_q_free_msg (mq, msg);
}
-
vec_reset_length (wrk->mq_msg_vector);
return *bits_set;
}
u32 sid, minbits = clib_max (n_bits, BITS (uword)), bits_set = 0;
vcl_worker_t *wrk = vcl_worker_get_current ();
vcl_session_t *session = 0;
- int rv;
+ int rv, i;
ASSERT (sizeof (clib_bitmap_t) == sizeof (long int));
if (n_bits && read_map)
{
clib_bitmap_validate (wrk->rd_bitmap, minbits);
- clib_memcpy (wrk->rd_bitmap, read_map,
- vec_len (wrk->rd_bitmap) * sizeof (clib_bitmap_t));
+ clib_memcpy_fast (wrk->rd_bitmap, read_map,
+ vec_len (wrk->rd_bitmap) * sizeof (clib_bitmap_t));
memset (read_map, 0, vec_len (wrk->rd_bitmap) * sizeof (clib_bitmap_t));
}
if (n_bits && write_map)
{
clib_bitmap_validate (wrk->wr_bitmap, minbits);
- clib_memcpy (wrk->wr_bitmap, write_map,
- vec_len (wrk->wr_bitmap) * sizeof (clib_bitmap_t));
+ clib_memcpy_fast (wrk->wr_bitmap, write_map,
+ vec_len (wrk->wr_bitmap) * sizeof (clib_bitmap_t));
memset (write_map, 0,
vec_len (wrk->wr_bitmap) * sizeof (clib_bitmap_t));
}
if (n_bits && except_map)
{
clib_bitmap_validate (wrk->ex_bitmap, minbits);
- clib_memcpy (wrk->ex_bitmap, except_map,
- vec_len (wrk->ex_bitmap) * sizeof (clib_bitmap_t));
+ clib_memcpy_fast (wrk->ex_bitmap, except_map,
+ vec_len (wrk->ex_bitmap) * sizeof (clib_bitmap_t));
memset (except_map, 0,
vec_len (wrk->ex_bitmap) * sizeof (clib_bitmap_t));
}
clib_bitmap_foreach (sid, wrk->wr_bitmap, ({
if (!(session = vcl_session_get (wrk, sid)))
{
- VDBG (0, "VCL<%d>: session %d specified in write_map is closed.",
- getpid (), sid);
- return VPPCOM_EBADFD;
+ if (except_map && sid < minbits)
+ clib_bitmap_set_no_check (except_map, sid, 1);
+ continue;
}
rv = svm_fifo_is_full (session->tx_fifo);
clib_bitmap_foreach (sid, wrk->rd_bitmap, ({
if (!(session = vcl_session_get (wrk, sid)))
{
- VDBG (0, "VCL<%d>: session %d specified in write_map is closed.",
- getpid (), sid);
- return VPPCOM_EBADFD;
+ if (except_map && sid < minbits)
+ clib_bitmap_set_no_check (except_map, sid, 1);
+ continue;
}
rv = vppcom_session_read_ready (session);
check_mq:
+ for (i = 0; i < vec_len (wrk->unhandled_evts_vector); i++)
+ {
+ vcl_select_handle_mq_event (wrk, &wrk->unhandled_evts_vector[i], n_bits,
+ read_map, write_map, except_map, &bits_set);
+ }
+ vec_reset_length (wrk->unhandled_evts_vector);
+
if (vcm->cfg.use_mq_eventfd)
vppcom_select_eventfd (wrk, n_bits, read_map, write_map, except_map,
time_to_wait, &bits_set);
return rv;
}
-static int
-vcl_epoll_wait_handle_mq (vcl_worker_t * wrk, svm_msg_q_t * mq,
- struct epoll_event *events, u32 maxevents,
- double wait_for_time, u32 * num_ev)
+/* Translate one message-queue event into at most one epoll event.
+ * Writes events[*num_ev] and bumps *num_ev only when the owning session
+ * registered matching interest (vep.ev.events); EPOLLONESHOT interest is
+ * disarmed after delivery.
+ * NOTE(review): accepted_msg, handle and session_evt_data are declared in
+ * context lines elided from this hunk. */
+static inline void
+vcl_epoll_wait_handle_mq_event (vcl_worker_t * wrk, session_event_t * e,
+				struct epoll_event *events, u32 * num_ev)
 {
 session_disconnected_msg_t *disconnected_msg;
 session_connected_msg_t *connected_msg;
 u32 sid = ~0, session_events;
 vcl_session_msg_t *vcl_msg;
 vcl_session_t *session;
+  u8 add_event = 0;
+
+  switch (e->event_type)
+    {
+    case FIFO_EVENT_APP_RX:
+      ASSERT (e->fifo->client_thread_index == vcl_get_worker_index ());
+      vcl_fifo_rx_evt_valid_or_break (e->fifo);
+      sid = e->fifo->client_session_index;
+      session = vcl_session_get (wrk, sid);
+      /* Session may have been closed while the event was in flight */
+      if (!session)
+	break;
+      session_events = session->vep.ev.events;
+      if (!(EPOLLIN & session->vep.ev.events) || session->has_rx_evt)
+	break;
+      add_event = 1;
+      events[*num_ev].events |= EPOLLIN;
+      session_evt_data = session->vep.ev.data.u64;
+      session->has_rx_evt = 1;
+      break;
+    case FIFO_EVENT_APP_TX:
+      sid = e->fifo->client_session_index;
+      session = vcl_session_get (wrk, sid);
+      /* Guard lookup failure, as the select handler does */
+      if (!session)
+	break;
+      session_events = session->vep.ev.events;
+      if (!(EPOLLOUT & session_events))
+	break;
+      add_event = 1;
+      events[*num_ev].events |= EPOLLOUT;
+      session_evt_data = session->vep.ev.data.u64;
+      break;
+    case SESSION_IO_EVT_CT_TX:
+      /* Peer's cut-through tx means data is readable locally */
+      vcl_fifo_rx_evt_valid_or_break (e->fifo);
+      session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 0);
+      if (!session)
+	break;
+      sid = session->session_index;
+      session_events = session->vep.ev.events;
+      if (!(EPOLLIN & session->vep.ev.events) || session->has_rx_evt)
+	break;
+      add_event = 1;
+      events[*num_ev].events |= EPOLLIN;
+      session_evt_data = session->vep.ev.data.u64;
+      session->has_rx_evt = 1;
+      break;
+    case SESSION_IO_EVT_CT_RX:
+      session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 1);
+      if (!session)
+	break;
+      sid = session->session_index;
+      session_events = session->vep.ev.events;
+      if (!(EPOLLOUT & session_events))
+	break;
+      add_event = 1;
+      events[*num_ev].events |= EPOLLOUT;
+      session_evt_data = session->vep.ev.data.u64;
+      break;
+    case SESSION_CTRL_EVT_ACCEPTED:
+      accepted_msg = (session_accepted_msg_t *) e->data;
+      handle = accepted_msg->listener_handle;
+      session = vcl_session_table_lookup_listener (wrk, handle);
+      if (!session)
+	{
+	  clib_warning ("VCL<%d>: ERROR: couldn't find listen session:"
+			" listener handle %llx", getpid (), handle);
+	  break;
+	}
+
+      /* Queue the accept for vppcom_session_accept to pick up */
+      clib_fifo_add2 (session->accept_evts_fifo, vcl_msg);
+      vcl_msg->accepted_msg = *accepted_msg;
+      session_events = session->vep.ev.events;
+      if (!(EPOLLIN & session_events))
+	break;
+
+      add_event = 1;
+      events[*num_ev].events |= EPOLLIN;
+      session_evt_data = session->vep.ev.data.u64;
+      break;
+    case SESSION_CTRL_EVT_CONNECTED:
+      connected_msg = (session_connected_msg_t *) e->data;
+      vcl_session_connected_handler (wrk, connected_msg);
+      /* Generate EPOLLOUT because there's no connected event */
+      sid = vcl_session_index_from_vpp_handle (wrk, connected_msg->handle);
+      session = vcl_session_get (wrk, sid);
+      /* Lookup can fail if the connect was rejected/cleaned up */
+      if (!session)
+	break;
+      session_events = session->vep.ev.events;
+      if (EPOLLOUT & session_events)
+	{
+	  add_event = 1;
+	  events[*num_ev].events |= EPOLLOUT;
+	  session_evt_data = session->vep.ev.data.u64;
+	}
+      break;
+    case SESSION_CTRL_EVT_DISCONNECTED:
+      disconnected_msg = (session_disconnected_msg_t *) e->data;
+      sid = vcl_session_index_from_vpp_handle (wrk, disconnected_msg->handle);
+      if (!(session = vcl_session_get (wrk, sid)))
+	break;
+      add_event = 1;
+      events[*num_ev].events |= EPOLLHUP | EPOLLRDHUP;
+      session_evt_data = session->vep.ev.data.u64;
+      session_events = session->vep.ev.events;
+      break;
+    case SESSION_CTRL_EVT_RESET:
+      sid = vcl_session_reset_handler (wrk, (session_reset_msg_t *) e->data);
+      if (!(session = vcl_session_get (wrk, sid)))
+	break;
+      add_event = 1;
+      events[*num_ev].events |= EPOLLHUP | EPOLLRDHUP;
+      session_evt_data = session->vep.ev.data.u64;
+      session_events = session->vep.ev.events;
+      break;
+    default:
+      VDBG (0, "unhandled: %u", e->event_type);
+      break;
+    }
+
+  if (add_event)
+    {
+      events[*num_ev].data.u64 = session_evt_data;
+      if (EPOLLONESHOT & session_events)
+	{
+	  /* One-shot: disarm until the app rearms via epoll_ctl MOD */
+	  session = vcl_session_get (wrk, sid);
+	  session->vep.ev.events = 0;
+	}
+      *num_ev += 1;
+    }
+}
+
+static int
+vcl_epoll_wait_handle_mq (vcl_worker_t * wrk, svm_msg_q_t * mq,
+ struct epoll_event *events, u32 maxevents,
+ double wait_for_time, u32 * num_ev)
+{
svm_msg_q_msg_t *msg;
session_event_t *e;
- u8 add_event;
int i;
+ if (vec_len (wrk->mq_msg_vector) && svm_msg_q_is_empty (mq))
+ goto handle_dequeued;
+
svm_msg_q_lock (mq);
if (svm_msg_q_is_empty (mq))
{
vcl_mq_dequeue_batch (wrk, mq);
svm_msg_q_unlock (mq);
+handle_dequeued:
for (i = 0; i < vec_len (wrk->mq_msg_vector); i++)
{
msg = vec_elt_at_index (wrk->mq_msg_vector, i);
e = svm_msg_q_msg_data (mq, msg);
- add_event = 0;
- switch (e->event_type)
- {
- case FIFO_EVENT_APP_RX:
- sid = e->fifo->client_session_index;
- session = vcl_session_get (wrk, sid);
- session_events = session->vep.ev.events;
- if (!(EPOLLIN & session->vep.ev.events))
- break;
- add_event = 1;
- events[*num_ev].events |= EPOLLIN;
- session_evt_data = session->vep.ev.data.u64;
- break;
- case FIFO_EVENT_APP_TX:
- sid = e->fifo->client_session_index;
- session = vcl_session_get (wrk, sid);
- session_events = session->vep.ev.events;
- if (!(EPOLLOUT & session_events))
- break;
- add_event = 1;
- events[*num_ev].events |= EPOLLOUT;
- session_evt_data = session->vep.ev.data.u64;
- break;
- case SESSION_IO_EVT_CT_TX:
- session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 0);
- sid = session->session_index;
- session_events = session->vep.ev.events;
- if (!(EPOLLIN & session->vep.ev.events))
- break;
- add_event = 1;
- events[*num_ev].events |= EPOLLIN;
- session_evt_data = session->vep.ev.data.u64;
- break;
- case SESSION_IO_EVT_CT_RX:
- session = vcl_ct_session_get_from_fifo (wrk, e->fifo, 1);
- sid = session->session_index;
- session_events = session->vep.ev.events;
- if (!(EPOLLOUT & session_events))
- break;
- add_event = 1;
- events[*num_ev].events |= EPOLLOUT;
- session_evt_data = session->vep.ev.data.u64;
- break;
- case SESSION_CTRL_EVT_ACCEPTED:
- accepted_msg = (session_accepted_msg_t *) e->data;
- handle = accepted_msg->listener_handle;
- session = vcl_session_table_lookup_listener (wrk, handle);
- if (!session)
- {
- clib_warning ("VCL<%d>: ERROR: couldn't find listen session:"
- "listener handle %llx", getpid (), handle);
- break;
- }
-
- clib_fifo_add2 (session->accept_evts_fifo, vcl_msg);
- vcl_msg->accepted_msg = *accepted_msg;
- session_events = session->vep.ev.events;
- if (!(EPOLLIN & session_events))
- break;
-
- add_event = 1;
- events[*num_ev].events |= EPOLLIN;
- session_evt_data = session->vep.ev.data.u64;
- break;
- case SESSION_CTRL_EVT_CONNECTED:
- connected_msg = (session_connected_msg_t *) e->data;
- vcl_session_connected_handler (wrk, connected_msg);
- /* Generate EPOLLOUT because there's no connected event */
- sid = vcl_session_index_from_vpp_handle (wrk,
- connected_msg->handle);
- session = vcl_session_get (wrk, sid);
- session_events = session->vep.ev.events;
- if (EPOLLOUT & session_events)
- {
- add_event = 1;
- events[*num_ev].events |= EPOLLOUT;
- session_evt_data = session->vep.ev.data.u64;
- }
- break;
- case SESSION_CTRL_EVT_DISCONNECTED:
- disconnected_msg = (session_disconnected_msg_t *) e->data;
- sid = vcl_session_index_from_vpp_handle (wrk,
- disconnected_msg->handle);
- if (!(session = vcl_session_get (wrk, sid)))
- break;
- add_event = 1;
- events[*num_ev].events |= EPOLLHUP | EPOLLRDHUP;
- session_evt_data = session->vep.ev.data.u64;
- session_events = session->vep.ev.events;
- break;
- case SESSION_CTRL_EVT_RESET:
- sid = vcl_session_reset_handler (wrk,
- (session_reset_msg_t *) e->data);
- if (!(session = vcl_session_get (wrk, sid)))
- break;
- add_event = 1;
- events[*num_ev].events |= EPOLLHUP | EPOLLRDHUP;
- session_evt_data = session->vep.ev.data.u64;
- session_events = session->vep.ev.events;
- break;
- default:
- VDBG (0, "unhandled: %u", e->event_type);
- svm_msg_q_free_msg (mq, msg);
- continue;
- }
+ if (*num_ev < maxevents)
+ vcl_epoll_wait_handle_mq_event (wrk, e, events, num_ev);
+ else
+ vec_add1 (wrk->unhandled_evts_vector, *e);
svm_msg_q_free_msg (mq, msg);
-
- if (add_event)
- {
- events[*num_ev].data.u64 = session_evt_data;
- if (EPOLLONESHOT & session_events)
- {
- session = vcl_session_get (wrk, sid);
- session->vep.ev.events = 0;
- }
- *num_ev += 1;
- if (*num_ev == maxevents)
- break;
- }
}
-
vec_reset_length (wrk->mq_msg_vector);
+
return *num_ev;
}
static int
vppcom_epoll_wait_condvar (vcl_worker_t * wrk, struct epoll_event *events,
- int maxevents, double wait_for_time)
+ int maxevents, u32 n_evts, double wait_for_time)
{
vcl_cut_through_registration_t *cr;
double total_wait = 0, wait_slice;
- u32 num_ev = 0;
int rv;
wait_for_time = (wait_for_time == -1) ? (double) 10e9 : wait_for_time;
vcl_ct_registration_lock (wrk);
/* *INDENT-OFF* */
pool_foreach (cr, wrk->cut_through_registrations, ({
- vcl_epoll_wait_handle_mq (wrk, cr->mq, events, maxevents, 0, &num_ev);
+ vcl_epoll_wait_handle_mq (wrk, cr->mq, events, maxevents, 0, &n_evts);
}));
/* *INDENT-ON* */
vcl_ct_registration_unlock (wrk);
rv = vcl_epoll_wait_handle_mq (wrk, wrk->app_event_queue, events,
- maxevents, num_ev ? 0 : wait_slice,
- &num_ev);
+ maxevents, n_evts ? 0 : wait_slice,
+ &n_evts);
if (rv)
total_wait += wait_slice;
- if (num_ev)
- return num_ev;
+ if (n_evts)
+ return n_evts;
}
while (total_wait < wait_for_time);
- return (int) num_ev;
+ return n_evts;
}
static int
vppcom_epoll_wait_eventfd (vcl_worker_t * wrk, struct epoll_event *events,
- int maxevents, double wait_for_time)
+ int maxevents, u32 n_evts, double wait_for_time)
{
vcl_mq_evt_conn_t *mqc;
int __clib_unused n_read;
int n_mq_evts, i;
- u32 n_evts = 0;
u64 buf;
vec_validate (wrk->mq_events, pool_elts (wrk->mq_evt_conns));
+again:
n_mq_evts = epoll_wait (wrk->mqs_epfd, wrk->mq_events,
vec_len (wrk->mq_events), wait_for_time);
for (i = 0; i < n_mq_evts; i++)
n_read = read (mqc->mq_fd, &buf, sizeof (buf));
vcl_epoll_wait_handle_mq (wrk, mqc->mq, events, maxevents, 0, &n_evts);
}
+ if (!n_evts && n_mq_evts > 0)
+ goto again;
return (int) n_evts;
}
{
vcl_worker_t *wrk = vcl_worker_get_current ();
vcl_session_t *vep_session;
+ u32 n_evts = 0;
+ int i;
if (PREDICT_FALSE (maxevents <= 0))
{
}
vep_session = vcl_session_get_w_handle (wrk, vep_handle);
+ if (!vep_session)
+ return VPPCOM_EBADFD;
+
if (PREDICT_FALSE (!vep_session->is_vep))
{
clib_warning ("VCL<%d>: ERROR: vep_idx (%u) is not a vep!",
memset (events, 0, sizeof (*events) * maxevents);
+ if (vec_len (wrk->unhandled_evts_vector))
+ {
+ for (i = 0; i < vec_len (wrk->unhandled_evts_vector); i++)
+ {
+ vcl_epoll_wait_handle_mq_event (wrk, &wrk->unhandled_evts_vector[i],
+ events, &n_evts);
+ if (n_evts == maxevents)
+ {
+ i += 1;
+ break;
+ }
+ }
+
+ vec_delete (wrk->unhandled_evts_vector, i, 0);
+ }
+
if (vcm->cfg.use_mq_eventfd)
- return vppcom_epoll_wait_eventfd (wrk, events, maxevents, wait_for_time);
+ return vppcom_epoll_wait_eventfd (wrk, events, maxevents, n_evts,
+ wait_for_time);
- return vppcom_epoll_wait_condvar (wrk, events, maxevents, wait_for_time);
+ return vppcom_epoll_wait_condvar (wrk, events, maxevents, n_evts,
+ wait_for_time);
}
int
ep->is_ip4 = session->transport.is_ip4;
ep->port = session->transport.rmt_port;
if (session->transport.is_ip4)
- clib_memcpy (ep->ip, &session->transport.rmt_ip.ip4,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (ep->ip, &session->transport.rmt_ip.ip4,
+ sizeof (ip4_address_t));
else
- clib_memcpy (ep->ip, &session->transport.rmt_ip.ip6,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (ep->ip, &session->transport.rmt_ip.ip6,
+ sizeof (ip6_address_t));
*buflen = sizeof (*ep);
VDBG (1, "VCL<%d>: VPPCOM_ATTR_GET_PEER_ADDR: sid %u, is_ip4 = %u, "
"addr = %U, port %u", getpid (),
ep->is_ip4 = session->transport.is_ip4;
ep->port = session->transport.lcl_port;
if (session->transport.is_ip4)
- clib_memcpy (ep->ip, &session->transport.lcl_ip.ip4,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (ep->ip, &session->transport.lcl_ip.ip4,
+ sizeof (ip4_address_t));
else
- clib_memcpy (ep->ip, &session->transport.lcl_ip.ip6,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (ep->ip, &session->transport.lcl_ip.ip6,
+ sizeof (ip6_address_t));
*buflen = sizeof (*ep);
VDBG (1, "VCL<%d>: VPPCOM_ATTR_GET_LCL_ADDR: sid %u, is_ip4 = %u,"
" addr = %U port %d", getpid (),
rv = VPPCOM_EINVAL;
break;
+ case VPPCOM_ATTR_GET_REFCNT:
+ rv = vcl_session_get_refcnt (session);
+ break;
+
default:
rv = VPPCOM_EINVAL;
break;
if (ep)
{
if (session->transport.is_ip4)
- clib_memcpy (ep->ip, &session->transport.rmt_ip.ip4,
- sizeof (ip4_address_t));
+ clib_memcpy_fast (ep->ip, &session->transport.rmt_ip.ip4,
+ sizeof (ip4_address_t));
else
- clib_memcpy (ep->ip, &session->transport.rmt_ip.ip6,
- sizeof (ip6_address_t));
+ clib_memcpy_fast (ep->ip, &session->transport.rmt_ip.ip6,
+ sizeof (ip6_address_t));
}
return rv;
vcl_worker_t *wrk = vcl_worker_get_current ();
f64 timeout = clib_time_now (&wrk->clib_time) + wait_for_time;
u32 i, keep_trying = 1;
+ svm_msg_q_msg_t msg;
+ session_event_t *e;
int rv, num_ev = 0;
VDBG (3, "VCL<%d>: vp %p, nsids %u, wait_for_time %f",
{
vcl_session_t *session;
- for (i = 0; i < n_sids; i++)
+ /* Dequeue all events and drop all unhandled io events */
+ while (svm_msg_q_sub (wrk->app_event_queue, &msg, SVM_Q_NOWAIT, 0) == 0)
{
- ASSERT (vp[i].revents);
+ e = svm_msg_q_msg_data (wrk->app_event_queue, &msg);
+ vcl_handle_mq_event (wrk, e);
+ svm_msg_q_free_msg (wrk->app_event_queue, &msg);
+ }
+ vec_reset_length (wrk->unhandled_evts_vector);
+ for (i = 0; i < n_sids; i++)
+ {
session = vcl_session_get (wrk, vp[i].sid);
if (!session)
- continue;
+ {
+ vp[i].revents = POLLHUP;
+ num_ev++;
+ continue;
+ }
- if (*vp[i].revents)
- *vp[i].revents = 0;
+ vp[i].revents = 0;
if (POLLIN & vp[i].events)
{
rv = vppcom_session_read_ready (session);
if (rv > 0)
{
- *vp[i].revents |= POLLIN;
+ vp[i].revents |= POLLIN;
num_ev++;
}
else if (rv < 0)
switch (rv)
{
case VPPCOM_ECONNRESET:
- *vp[i].revents = POLLHUP;
+ vp[i].revents = POLLHUP;
break;
default:
- *vp[i].revents = POLLERR;
+ vp[i].revents = POLLERR;
break;
}
num_ev++;
rv = vppcom_session_write_ready (session);
if (rv > 0)
{
- *vp[i].revents |= POLLOUT;
+ vp[i].revents |= POLLOUT;
num_ev++;
}
else if (rv < 0)
switch (rv)
{
case VPPCOM_ECONNRESET:
- *vp[i].revents = POLLHUP;
+ vp[i].revents = POLLHUP;
break;
default:
- *vp[i].revents = POLLERR;
+ vp[i].revents = POLLERR;
break;
}
num_ev++;
if (0) // Note "done:" label used by VCL_SESSION_LOCK_AND_GET()
{
- *vp[i].revents = POLLNVAL;
+ vp[i].revents = POLLNVAL;
num_ev++;
}
}
{
clib_warning ("VCL<%d>: vp[%d].sid %d (0x%x), .events 0x%x, "
".revents 0x%x", getpid (), i, vp[i].sid, vp[i].sid,
- vp[i].events, *vp[i].revents);
+ vp[i].events, vp[i].revents);
}
}
return num_ev;
return session_handle & 0xFFFFFF;
}
+/* Build a vppcom session handle: worker index in the top 8 bits, session
+ * index in the low 24 — the inverse of vppcom_session_index (which masks
+ * with 0xFFFFFF).  Assumes session_index fits in 24 bits; no mask is
+ * applied here — TODO confirm callers guarantee this. */
+int
+vppcom_session_handle (uint32_t session_index)
+{
+ return (vcl_get_worker_index () << 24) | session_index;
+}
+
+/* Allocate and initialize per-thread worker state, attach its binary-api
+ * client and register the worker with vpp.  Returns VPPCOM_OK on success,
+ * VPPCOM_EEXIST on any failure.
+ * NOTE(review): bapi/vpp-registration failures also report VPPCOM_EEXIST,
+ * which is misleading, and the already-allocated worker is not freed on
+ * those paths — confirm whether cleanup is handled elsewhere. */
 int
 vppcom_worker_register (void)
 {
 if (!vcl_worker_alloc_and_init ())
- return VPPCOM_OK;
- return VPPCOM_EEXIST;
+ return VPPCOM_EEXIST;
+
+ if (vcl_worker_set_bapi ())
+ return VPPCOM_EEXIST;
+
+ if (vcl_worker_register_with_vpp ())
+ return VPPCOM_EEXIST;
+
+ return VPPCOM_OK;
+}
+
+/* Return the calling thread's vcl worker index (thread-local
+ * __vcl_worker_index; ~0 when the thread has not registered). */
+int
+vppcom_worker_index (void)
+{
+ return vcl_get_worker_index ();
+}
/*