From 68a023d9d9ef87bf98a3ddb8069359af1dd10ff0 Mon Sep 17 00:00:00 2001
From: Florin Coras
Date: Mon, 16 Jun 2025 12:47:49 -0400
Subject: [PATCH] vcl: improve vls pthread cleanup handling

Try to drop locks if interrupted with locks grabbed.

Type: fix

Change-Id: I8d4996b6f35a8a2610327fb11e80e9951808b535
Signed-off-by: Florin Coras
---
 src/vcl/vcl_locked.c | 162 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 95 insertions(+), 67 deletions(-)

diff --git a/src/vcl/vcl_locked.c b/src/vcl/vcl_locked.c
index 79c4e250152..4f2d89dade3 100644
--- a/src/vcl/vcl_locked.c
+++ b/src/vcl/vcl_locked.c
@@ -136,7 +136,7 @@ typedef struct vls_local_
 {
   int vls_wrk_index;		   /**< vls wrk index, 1 per process */
   volatile int vls_mt_n_threads;   /**< number of threads detected */
-  int vls_mt_needs_locks;	   /**< mt single vcl wrk needs locks */
+  volatile int vls_mt_needs_locks; /**< mt single vcl wrk needs locks */
   clib_rwlock_t vls_pool_lock;	   /**< per process/wrk vls pool locks */
   pthread_mutex_t vls_mt_mq_mlock; /**< vcl mq lock */
   pthread_mutex_t vls_mt_spool_mlock; /**< vcl select or pool lock */
@@ -159,6 +159,22 @@ vls_main_t *vlsm;
 
 static pthread_key_t vls_mt_pthread_stop_key;
 
+typedef enum
+{
+  VLS_MT_LOCK_MQ = 1 << 0,
+  VLS_MT_LOCK_SPOOL = 1 << 1,
+  VLS_MT_RLOCK_POOL = 1 << 2,
+  VLS_MT_WLOCK_POOL = 1 << 3
+} vls_mt_lock_type_t;
+
+typedef struct vls_mt_pthread_local_
+{
+  vls_mt_lock_type_t locks_acq;
+} vls_mt_pthread_local_t;
+
+static __thread vls_mt_pthread_local_t vls_mt_pthread_local;
+#define vlspt (&vls_mt_pthread_local)
+
 typedef enum
 {
   VLS_RPC_STATE_INIT,
@@ -269,28 +285,40 @@ static inline void
 vls_mt_pool_rlock (void)
 {
   if (vlsl->vls_mt_needs_locks)
-    clib_rwlock_reader_lock (&vlsl->vls_pool_lock);
+    {
+      clib_rwlock_reader_lock (&vlsl->vls_pool_lock);
+      vlspt->locks_acq |= VLS_MT_RLOCK_POOL;
+    }
 }
 
 static inline void
 vls_mt_pool_runlock (void)
 {
-  if (vlsl->vls_mt_needs_locks)
-    clib_rwlock_reader_unlock (&vlsl->vls_pool_lock);
+  if (vlspt->locks_acq & VLS_MT_RLOCK_POOL)
+    {
+      clib_rwlock_reader_unlock (&vlsl->vls_pool_lock);
+      vlspt->locks_acq &= ~VLS_MT_RLOCK_POOL;
+    }
 }
 
 static inline void
 vls_mt_pool_wlock (void)
 {
   if (vlsl->vls_mt_needs_locks)
-    clib_rwlock_writer_lock (&vlsl->vls_pool_lock);
+    {
+      clib_rwlock_writer_lock (&vlsl->vls_pool_lock);
+      vlspt->locks_acq |= VLS_MT_WLOCK_POOL;
+    }
 }
 
 static inline void
 vls_mt_pool_wunlock (void)
 {
-  if (vlsl->vls_mt_needs_locks)
-    clib_rwlock_writer_unlock (&vlsl->vls_pool_lock);
+  if (vlspt->locks_acq & VLS_MT_WLOCK_POOL)
+    {
+      clib_rwlock_writer_unlock (&vlsl->vls_pool_lock);
+      vlspt->locks_acq &= ~VLS_MT_WLOCK_POOL;
+    }
 }
 
 typedef enum
@@ -301,17 +329,12 @@ typedef enum
   VLS_MT_OP_XPOLL,
 } vls_mt_ops_t;
 
-typedef enum
-{
-  VLS_MT_LOCK_MQ = 1 << 0,
-  VLS_MT_LOCK_SPOOL = 1 << 1
-} vls_mt_lock_type_t;
-
 static void
 vls_mt_add (void)
 {
-  vlsl->vls_mt_n_threads += 1;
   vlsl->vls_mt_needs_locks = 1;
+  vlsl->vls_mt_n_threads += 1;
+  vlspt->locks_acq = 0;
 
   /* If multi-thread workers are supported, for each new thread register a new
    * vcl worker with vpp. Otherwise, all threads use the same vcl worker, so
@@ -332,62 +355,49 @@ vls_mt_add (void)
     VDBG (0, "failed to setup key value");
 }
 
-static void
-vls_mt_del (void *arg)
-{
-  vcl_worker_t *wrk = (vcl_worker_t *) arg;
-
-  VDBG (0, "vls worker %u vcl worker %u nthreads %u cleaning up pthread",
-	vlsl->vls_wrk_index, vcl_get_worker_index (), vlsl->vls_mt_n_threads);
-
-  if (wrk != vcl_worker_get_current ())
-    {
-      VDBG (0, "vls_mt_del called with wrong worker %u != %u", wrk->wrk_index,
-	    vcl_get_worker_index ());
-      return;
-    }
-
-  vlsl->vls_mt_n_threads -= 1;
-
-  if (vls_mt_wrk_supported ())
-    {
-      vppcom_worker_unregister ();
-    }
-  else
-    {
-      if (!vlsl->vls_mt_n_threads)
-	vppcom_worker_unregister ();
-    }
-}
-
 static inline void
 vls_mt_mq_lock (void)
 {
   pthread_mutex_lock (&vlsl->vls_mt_mq_mlock);
+  vlspt->locks_acq |= VLS_MT_LOCK_MQ;
 }
 
 static inline int
 vls_mt_mq_trylock (void)
 {
-  return pthread_mutex_trylock (&vlsl->vls_mt_mq_mlock);
+  int rv = pthread_mutex_trylock (&vlsl->vls_mt_mq_mlock);
+  vlspt->locks_acq |= rv ? 0 : VLS_MT_LOCK_MQ;
+  return rv;
 }
 
 static inline void
 vls_mt_mq_unlock (void)
 {
   pthread_mutex_unlock (&vlsl->vls_mt_mq_mlock);
+  vlspt->locks_acq &= ~VLS_MT_LOCK_MQ;
 }
 
 static inline void
 vls_mt_spool_lock (void)
 {
   pthread_mutex_lock (&vlsl->vls_mt_spool_mlock);
+  vlspt->locks_acq |= VLS_MT_LOCK_SPOOL;
 }
 
 static inline void
-vls_mt_create_unlock (void)
+vls_mt_spool_unlock (void)
 {
   pthread_mutex_unlock (&vlsl->vls_mt_spool_mlock);
+  vlspt->locks_acq &= ~VLS_MT_LOCK_SPOOL;
+}
+
+static void
+vls_mt_rel_locks ()
+{
+  if (vlspt->locks_acq & VLS_MT_LOCK_MQ)
+    vls_mt_mq_unlock ();
+  if (vlspt->locks_acq & VLS_MT_LOCK_SPOOL)
+    vls_mt_spool_unlock ();
 }
 
 static void
@@ -397,6 +407,39 @@ vls_mt_locks_init (void)
   pthread_mutex_init (&vlsl->vls_mt_spool_mlock, NULL);
 }
 
+static void
+vls_mt_del (void *arg)
+{
+  vcl_worker_t *wrk = (vcl_worker_t *) arg;
+
+  VDBG (0, "vls worker %u vcl worker %u nthreads %u cleaning up pthread",
+	vlsl->vls_wrk_index, vcl_get_worker_index (), vlsl->vls_mt_n_threads);
+
+  if (wrk != vcl_worker_get_current ())
+    {
+      VDBG (0, "vls_mt_del called with wrong worker %u != %u", wrk->wrk_index,
+	    vcl_get_worker_index ());
+      return;
+    }
+
+  vlsl->vls_mt_n_threads -= 1;
+
+  /* drop locks if any held */
+  vls_mt_rel_locks ();
+  vls_mt_pool_runlock ();
+  vls_mt_pool_wunlock ();
+
+  if (vls_mt_wrk_supported ())
+    {
+      vppcom_worker_unregister ();
+    }
+  else
+    {
+      if (!vlsl->vls_mt_n_threads)
+	vppcom_worker_unregister ();
+    }
+}
+
 u8
 vls_is_shared (vcl_locked_session_t * vls)
 {
@@ -1060,7 +1103,7 @@ vcl_session_is_write_nonblk (vcl_session_t *s)
 }
 
 static void
-vls_mt_acq_locks (vcl_locked_session_t * vls, vls_mt_ops_t op, int *locks_acq)
+vls_mt_acq_locks (vcl_locked_session_t *vls, vls_mt_ops_t op)
 {
   vcl_worker_t *wrk = vcl_worker_get_current ();
   vcl_session_t *s = 0;
@@ -1083,8 +1126,6 @@ vls_mt_acq_locks (vcl_locked_session_t * vls, vls_mt_ops_t op, int *locks_acq)
 	  /* might get data while waiting for lock */
 	  is_nonblk = vcl_session_read_ready (s) != 0;
 	}
-      if (!is_nonblk)
-	*locks_acq |= VLS_MT_LOCK_MQ;
       break;
     case VLS_MT_OP_WRITE:
       ASSERT (s);
@@ -1094,31 +1135,18 @@ vls_mt_acq_locks (vcl_locked_session_t * vls, vls_mt_ops_t op, int *locks_acq)
 	  /* might get space while waiting for lock */
 	  is_nonblk = vcl_session_is_write_nonblk (s);
 	}
-      if (!is_nonblk)
-	*locks_acq |= VLS_MT_LOCK_MQ;
       break;
     case VLS_MT_OP_XPOLL:
       vls_mt_mq_lock ();
-      *locks_acq |= VLS_MT_LOCK_MQ;
       break;
    case VLS_MT_OP_SPOOL:
      vls_mt_spool_lock ();
-      *locks_acq |= VLS_MT_LOCK_SPOOL;
      break;
    default:
      break;
    }
 }
 
-static void
-vls_mt_rel_locks (int locks_acq)
-{
-  if (locks_acq & VLS_MT_LOCK_MQ)
-    vls_mt_mq_unlock ();
-  if (locks_acq & VLS_MT_LOCK_SPOOL)
-    vls_mt_create_unlock ();
-}
-
 static inline u8
 vls_mt_session_should_migrate (vcl_locked_session_t * vls)
 {
@@ -1233,7 +1261,6 @@ vls_mt_detect (void)
 }
 
 #define vls_mt_guard(_vls, _op)                                               \
-  int _locks_acq = 0;                                                         \
   if (vls_mt_wrk_supported ())                                                \
     {                                                                         \
       if (PREDICT_FALSE (_vls &&                                              \
@@ -1248,12 +1275,12 @@ vls_mt_detect (void)
   else                                                                        \
     {                                                                         \
       if (PREDICT_FALSE (vlsl->vls_mt_needs_locks))                           \
-	vls_mt_acq_locks (_vls, _op, &_locks_acq);                            \
+	vls_mt_acq_locks (_vls, _op);                                         \
     }
 
-#define vls_mt_unguard()                                                      \
-  if (PREDICT_FALSE (_locks_acq))                                             \
-    vls_mt_rel_locks (_locks_acq)
+#define vls_mt_unguard()                                                      \
+  if (PREDICT_FALSE (vlspt->locks_acq))                                       \
+    vls_mt_rel_locks ()
 
 int
 vls_write (vls_handle_t vlsh, void *buf, size_t nbytes)
@@ -2131,7 +2158,7 @@ vls_mt_mq_wait_lock (vcl_session_handle_t vcl_sh)
   uword *vlshp;
 
   /* If mt wrk supported or single threaded just return */
-  if (vls_mt_wrk_supported () || (vlsl->vls_mt_n_threads <= 1))
+  if (vls_mt_wrk_supported () || !vlsl->vls_mt_needs_locks)
     return;
 
   wrk = vls_worker_get_current ();
@@ -2159,7 +2186,7 @@ vls_mt_mq_wait_lock (vcl_session_handle_t vcl_sh)
 static inline void
 vls_mt_mq_wait_unlock (vcl_session_handle_t vcl_sh)
 {
-  if (vls_mt_wrk_supported () || (vlsl->vls_mt_n_threads <= 1))
+  if (vls_mt_wrk_supported () || !vlsl->vls_mt_needs_locks)
     return;
 
   vls_mt_mq_lock ();
@@ -2194,6 +2221,7 @@ vls_app_create (char *app_name)
   vls_worker_alloc ();
   vlsl->vls_wrk_index = vcl_get_worker_index ();
   vlsl->vls_mt_n_threads = 1;
+  vlspt->locks_acq = 0;
   if (pthread_setspecific (vls_mt_pthread_stop_key, vcl_worker_get_current ()))
     VDBG (0, "failed to setup key value");
   clib_rwlock_init (&vlsl->vls_pool_lock);
-- 
2.16.6
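A note on the pattern, not part of the patch itself: the cleanup path combines two standard pthread building blocks. Each thread records the locks it currently holds in a thread-local bitmask (vlspt->locks_acq above), and a destructor registered with pthread_key_create() runs when a pthread exits with a non-NULL value set for the key, releasing whatever is still marked as held. The sketch below is a minimal, self-contained illustration of that pattern only; all demo_* names are invented for the example and are not part of VCL.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical lock bits, mirroring the idea of vls_mt_lock_type_t */
enum
{
  DEMO_LOCK_A = 1 << 0,
  DEMO_LOCK_B = 1 << 1,
};

static pthread_mutex_t demo_lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t demo_lock_b = PTHREAD_MUTEX_INITIALIZER;
static pthread_key_t demo_stop_key;

/* Per-thread record of the locks this thread currently holds */
static __thread int demo_locks_held;

static void
demo_lock (pthread_mutex_t *m, int bit)
{
  pthread_mutex_lock (m);
  demo_locks_held |= bit;
}

static void
demo_unlock (pthread_mutex_t *m, int bit)
{
  pthread_mutex_unlock (m);
  demo_locks_held &= ~bit;
}

/* Key destructor: runs in the exiting thread when it terminates with a
 * non-NULL value set for the key; drops anything still marked as held. */
static void
demo_thread_cleanup (void *arg)
{
  (void) arg;
  if (demo_locks_held & DEMO_LOCK_A)
    demo_unlock (&demo_lock_a, DEMO_LOCK_A);
  if (demo_locks_held & DEMO_LOCK_B)
    demo_unlock (&demo_lock_b, DEMO_LOCK_B);
}

static void *
demo_thread_fn (void *arg)
{
  (void) arg;
  pthread_setspecific (demo_stop_key, (void *) 1);
  demo_lock (&demo_lock_a, DEMO_LOCK_A);
  /* Exit while DEMO_LOCK_A is still held: the destructor releases it
   * instead of leaving other threads blocked forever. */
  pthread_exit (NULL);
}

int
main (void)
{
  pthread_t t;

  pthread_key_create (&demo_stop_key, demo_thread_cleanup);
  pthread_create (&t, NULL, demo_thread_fn, NULL);
  pthread_join (t, NULL);

  /* If the destructor ran, this does not deadlock. */
  pthread_mutex_lock (&demo_lock_a);
  pthread_mutex_unlock (&demo_lock_a);
  printf ("lock released by thread destructor\n");
  return 0;
}

Because ownership is tracked per thread, the destructor only ever unlocks mutexes the exiting thread itself acquired, which is what makes releasing them from the key destructor safe.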