vcl_locked_session_t *vls_pool;
uword *session_handle_to_vlsh_table;
u32 wrk_index;
+ /** Vector of child worker indices to clean up */
+ u32 *pending_wrk_cleanup;
} vls_worker_t;
typedef struct vls_local_
u32 session_index, u32 vls_wrk_index,
u32 dst_wrk_index, u32 dst_vls_index,
u32 dst_session_index);
+static void vls_cleanup_forked_child (vcl_worker_t *wrk,
+ vcl_worker_t *child_wrk);
+static void vls_handle_pending_wrk_cleanup (void);
static inline u32
vls_get_worker_index (void)
if (vls_mt_wrk_supported ())
clib_rwlock_init (&wrk->sh_to_vlsh_table_lock);
wrk->wrk_index = vcl_get_worker_index ();
+ vec_validate (wrk->pending_wrk_cleanup, 16);
+ vec_reset_length (wrk->pending_wrk_cleanup);
}
static void
if (s->rx_fifo)
{
- svm_fifo_add_subscriber (s->rx_fifo, vcl_wrk->vpp_wrk_index);
- svm_fifo_add_subscriber (s->tx_fifo, vcl_wrk->vpp_wrk_index);
+ vcl_session_share_fifos (s, s->rx_fifo, s->tx_fifo);
}
else if (s->session_state == VCL_STATE_LISTEN)
{
/* *INDENT-ON* */
}
+/* Re-encode epoll (vep) session handles in a freshly forked worker.
+ *
+ * Session handles embed a worker index; the session pool copied from the
+ * parent on fork still carries the parent's handles. For every session,
+ * rewrite the epoll owner (vep_sh) and sibling (next_sh/prev_sh) handles
+ * from their session indices. ~0 marks an unset handle and is left alone.
+ * NOTE(review): assumes vcl_session_handle_from_index () encodes the
+ * current worker's index into the handle -- confirm against vcl; the
+ * parsed-out wrk_index is intentionally discarded.
+ */
+static void
+vls_validate_veps (vcl_worker_t *wrk)
+{
+ vcl_session_t *s;
+ u32 session_index, wrk_index;
+
+ pool_foreach (s, wrk->sessions)
+ {
+ if (s->vep.vep_sh != ~0)
+ {
+ vcl_session_handle_parse (s->vep.vep_sh, &wrk_index, &session_index);
+ s->vep.vep_sh = vcl_session_handle_from_index (session_index);
+ }
+ if (s->vep.next_sh != ~0)
+ {
+ vcl_session_handle_parse (s->vep.next_sh, &wrk_index,
+ &session_index);
+ s->vep.next_sh = vcl_session_handle_from_index (session_index);
+ }
+ if (s->vep.prev_sh != ~0)
+ {
+ vcl_session_handle_parse (s->vep.prev_sh, &wrk_index,
+ &session_index);
+ s->vep.prev_sh = vcl_session_handle_from_index (session_index);
+ }
+ }
+}
+
void
vls_worker_copy_on_fork (vcl_worker_t * parent_wrk)
{
/* *INDENT-ON* */
vls_wrk->vls_pool = pool_dup (vls_parent_wrk->vls_pool);
+ /* Validate vep's handle */
+ vls_validate_veps (wrk);
+
vls_share_sessions (vls_parent_wrk, vls_wrk);
}
wait_for_time);
vls_mt_unguard ();
vls_get_and_unlock (ep_vlsh);
+ vls_handle_pending_wrk_cleanup ();
return rv;
}
vls_select_mp_checks (read_map);
rv = vppcom_select (n_bits, read_map, write_map, except_map, wait_for_time);
vls_mt_unguard ();
+ vls_handle_pending_wrk_cleanup ();
return rv;
}
wrk->forked_child = ~0;
}
+/* Clean up forked child workers queued by the SIGCHLD handler.
+ *
+ * The handler only records child worker indices in pending_wrk_cleanup;
+ * the actual cleanup runs here, outside signal context, because
+ * vls_cleanup_forked_child may need locks the interrupted thread holds.
+ * Fast path: vector is empty, so return immediately. Indices whose
+ * worker is already invalid are skipped; the vector is emptied (length
+ * reset, storage kept) once all entries are processed.
+ */
+static void
+vls_handle_pending_wrk_cleanup (void)
+{
+ u32 *wip;
+ vcl_worker_t *child_wrk, *wrk;
+ vls_worker_t *vls_wrk = vls_worker_get_current ();
+
+ if (PREDICT_TRUE (vec_len (vls_wrk->pending_wrk_cleanup) == 0))
+ return;
+
+ wrk = vcl_worker_get_current ();
+ vec_foreach (wip, vls_wrk->pending_wrk_cleanup)
+ {
+ /* Child worker may already be gone; skip stale indices */
+ child_wrk = vcl_worker_get_if_valid (*wip);
+ if (!child_wrk)
+ continue;
+ vls_cleanup_forked_child (wrk, child_wrk);
+ }
+ vec_reset_length (vls_wrk->pending_wrk_cleanup);
+}
+
static struct sigaction old_sa;
static void
vls_intercept_sigchld_handler (int signum, siginfo_t * si, void *uc)
{
vcl_worker_t *wrk, *child_wrk;
+ vls_worker_t *vls_wrk;
if (vcl_get_worker_index () == ~0)
return;
VDBG (0, "unexpected child pid %u", si->si_pid);
goto done;
}
- vls_cleanup_forked_child (wrk, child_wrk);
+
+ /* The parent process may enter the signal handler while holding a lock,
+ * e.g. one taken in localtime or mspace_free. Child worker cleanup may
+ * try to acquire the same locks and deadlock, so defer the cleanup from
+ * the signal handler to vls_epoll_wait/vls_select.
+ */
+ vls_wrk = vls_worker_get_current ();
+ vec_add1 (vls_wrk->pending_wrk_cleanup, child_wrk->wrk_index);
done:
if (old_sa.sa_flags & SA_SIGINFO)
{
vls_worker_t *wrk = vls_worker_get_current ();
+ /* Handle pending wrk cleanup */
+ vls_handle_pending_wrk_cleanup ();
+
/* Unshare the sessions. VCL will clean up the worker */
vls_unshare_vcl_worker_sessions (vcl_worker_get_current ());
vls_worker_free (wrk);