{
int vls_wrk_index; /**< vls wrk index, 1 per process */
volatile int vls_mt_n_threads; /**< number of threads detected */
- int vls_mt_needs_locks; /**< mt single vcl wrk needs locks */
+ volatile int vls_mt_needs_locks; /**< mt single vcl wrk needs locks */
clib_rwlock_t vls_pool_lock; /**< per process/wrk vls pool locks */
pthread_mutex_t vls_mt_mq_mlock; /**< vcl mq lock */
pthread_mutex_t vls_mt_spool_mlock; /**< vcl select or pool lock */
static pthread_key_t vls_mt_pthread_stop_key;
+typedef enum
+{
+ VLS_MT_LOCK_MQ = 1 << 0,
+ VLS_MT_LOCK_SPOOL = 1 << 1,
+ VLS_MT_RLOCK_POOL = 1 << 2,
+ VLS_MT_WLOCK_POOL = 1 << 3
+} vls_mt_lock_type_t;
+
+typedef struct vls_mt_pthread_local_
+{
+ vls_mt_lock_type_t locks_acq;
+} vls_mt_pthread_local_t;
+
+static __thread vls_mt_pthread_local_t vls_mt_pthread_local;
+#define vlspt (&vls_mt_pthread_local)
+
typedef enum
{
VLS_RPC_STATE_INIT,
vls_mt_pool_rlock (void)
{
if (vlsl->vls_mt_needs_locks)
- clib_rwlock_reader_lock (&vlsl->vls_pool_lock);
+ {
+ clib_rwlock_reader_lock (&vlsl->vls_pool_lock);
+ vlspt->locks_acq |= VLS_MT_RLOCK_POOL;
+ }
}
static inline void
vls_mt_pool_runlock (void)
{
- if (vlsl->vls_mt_needs_locks)
- clib_rwlock_reader_unlock (&vlsl->vls_pool_lock);
+ if (vlspt->locks_acq & VLS_MT_RLOCK_POOL)
+ {
+ clib_rwlock_reader_unlock (&vlsl->vls_pool_lock);
+ vlspt->locks_acq &= ~VLS_MT_RLOCK_POOL;
+ }
}
static inline void
vls_mt_pool_wlock (void)
{
if (vlsl->vls_mt_needs_locks)
- clib_rwlock_writer_lock (&vlsl->vls_pool_lock);
+ {
+ clib_rwlock_writer_lock (&vlsl->vls_pool_lock);
+ vlspt->locks_acq |= VLS_MT_WLOCK_POOL;
+ }
}
static inline void
vls_mt_pool_wunlock (void)
{
- if (vlsl->vls_mt_needs_locks)
- clib_rwlock_writer_unlock (&vlsl->vls_pool_lock);
+ if (vlspt->locks_acq & VLS_MT_WLOCK_POOL)
+ {
+ clib_rwlock_writer_unlock (&vlsl->vls_pool_lock);
+ vlspt->locks_acq &= ~VLS_MT_WLOCK_POOL;
+ }
}
typedef enum
VLS_MT_OP_XPOLL,
} vls_mt_ops_t;
-typedef enum
-{
- VLS_MT_LOCK_MQ = 1 << 0,
- VLS_MT_LOCK_SPOOL = 1 << 1
-} vls_mt_lock_type_t;
-
static void
vls_mt_add (void)
{
- vlsl->vls_mt_n_threads += 1;
vlsl->vls_mt_needs_locks = 1;
+ vlsl->vls_mt_n_threads += 1;
+ vlspt->locks_acq = 0;
/* If multi-thread workers are supported, for each new thread register a new
* vcl worker with vpp. Otherwise, all threads use the same vcl worker, so
VDBG (0, "failed to setup key value");
}
-static void
-vls_mt_del (void *arg)
-{
- vcl_worker_t *wrk = (vcl_worker_t *) arg;
-
- VDBG (0, "vls worker %u vcl worker %u nthreads %u cleaning up pthread",
- vlsl->vls_wrk_index, vcl_get_worker_index (), vlsl->vls_mt_n_threads);
-
- if (wrk != vcl_worker_get_current ())
- {
- VDBG (0, "vls_mt_del called with wrong worker %u != %u", wrk->wrk_index,
- vcl_get_worker_index ());
- return;
- }
-
- vlsl->vls_mt_n_threads -= 1;
-
- if (vls_mt_wrk_supported ())
- {
- vppcom_worker_unregister ();
- }
- else
- {
- if (!vlsl->vls_mt_n_threads)
- vppcom_worker_unregister ();
- }
-}
-
static inline void
vls_mt_mq_lock (void)
{
pthread_mutex_lock (&vlsl->vls_mt_mq_mlock);
+ vlspt->locks_acq |= VLS_MT_LOCK_MQ;
}
static inline int
vls_mt_mq_trylock (void)
{
- return pthread_mutex_trylock (&vlsl->vls_mt_mq_mlock);
+ int rv = pthread_mutex_trylock (&vlsl->vls_mt_mq_mlock);
+ vlspt->locks_acq |= rv ? 0 : VLS_MT_LOCK_MQ;
+ return rv;
}
static inline void
vls_mt_mq_unlock (void)
{
pthread_mutex_unlock (&vlsl->vls_mt_mq_mlock);
+ vlspt->locks_acq &= ~VLS_MT_LOCK_MQ;
}
static inline void
vls_mt_spool_lock (void)
{
pthread_mutex_lock (&vlsl->vls_mt_spool_mlock);
+ vlspt->locks_acq |= VLS_MT_LOCK_SPOOL;
}
static inline void
-vls_mt_create_unlock (void)
+vls_mt_spool_unlock (void)
{
pthread_mutex_unlock (&vlsl->vls_mt_spool_mlock);
+ vlspt->locks_acq &= ~VLS_MT_LOCK_SPOOL;
+}
+
+static void
+vls_mt_rel_locks (void)
+{
+ if (vlspt->locks_acq & VLS_MT_LOCK_MQ)
+ vls_mt_mq_unlock ();
+ if (vlspt->locks_acq & VLS_MT_LOCK_SPOOL)
+ vls_mt_spool_unlock ();
}
static void
pthread_mutex_init (&vlsl->vls_mt_spool_mlock, NULL);
}
+static void
+vls_mt_del (void *arg)
+{
+ vcl_worker_t *wrk = (vcl_worker_t *) arg;
+
+ VDBG (0, "vls worker %u vcl worker %u nthreads %u cleaning up pthread",
+ vlsl->vls_wrk_index, vcl_get_worker_index (), vlsl->vls_mt_n_threads);
+
+ if (wrk != vcl_worker_get_current ())
+ {
+ VDBG (0, "vls_mt_del called with wrong worker %u != %u", wrk->wrk_index,
+ vcl_get_worker_index ());
+ return;
+ }
+
+ vlsl->vls_mt_n_threads -= 1;
+
+ /* drop locks if any held */
+ vls_mt_rel_locks ();
+ vls_mt_pool_runlock ();
+ vls_mt_pool_wunlock ();
+
+ if (vls_mt_wrk_supported ())
+ {
+ vppcom_worker_unregister ();
+ }
+ else
+ {
+ if (!vlsl->vls_mt_n_threads)
+ vppcom_worker_unregister ();
+ }
+}
+
u8
vls_is_shared (vcl_locked_session_t * vls)
{
}
static void
-vls_mt_acq_locks (vcl_locked_session_t * vls, vls_mt_ops_t op, int *locks_acq)
+vls_mt_acq_locks (vcl_locked_session_t *vls, vls_mt_ops_t op)
{
vcl_worker_t *wrk = vcl_worker_get_current ();
vcl_session_t *s = 0;
/* might get data while waiting for lock */
is_nonblk = vcl_session_read_ready (s) != 0;
}
- if (!is_nonblk)
- *locks_acq |= VLS_MT_LOCK_MQ;
break;
case VLS_MT_OP_WRITE:
ASSERT (s);
/* might get space while waiting for lock */
is_nonblk = vcl_session_is_write_nonblk (s);
}
- if (!is_nonblk)
- *locks_acq |= VLS_MT_LOCK_MQ;
break;
case VLS_MT_OP_XPOLL:
vls_mt_mq_lock ();
- *locks_acq |= VLS_MT_LOCK_MQ;
break;
case VLS_MT_OP_SPOOL:
vls_mt_spool_lock ();
- *locks_acq |= VLS_MT_LOCK_SPOOL;
break;
default:
break;
}
}
-static void
-vls_mt_rel_locks (int locks_acq)
-{
- if (locks_acq & VLS_MT_LOCK_MQ)
- vls_mt_mq_unlock ();
- if (locks_acq & VLS_MT_LOCK_SPOOL)
- vls_mt_create_unlock ();
-}
-
static inline u8
vls_mt_session_should_migrate (vcl_locked_session_t * vls)
{
}
#define vls_mt_guard(_vls, _op) \
- int _locks_acq = 0; \
if (vls_mt_wrk_supported ()) \
{ \
if (PREDICT_FALSE (_vls && \
else \
{ \
if (PREDICT_FALSE (vlsl->vls_mt_needs_locks)) \
- vls_mt_acq_locks (_vls, _op, &_locks_acq); \
+ vls_mt_acq_locks (_vls, _op); \
}
-#define vls_mt_unguard() \
- if (PREDICT_FALSE (_locks_acq)) \
- vls_mt_rel_locks (_locks_acq)
+#define vls_mt_unguard() \
+ if (PREDICT_FALSE (vlspt->locks_acq)) \
+ vls_mt_rel_locks ()
int
vls_write (vls_handle_t vlsh, void *buf, size_t nbytes)
uword *vlshp;
/* If mt wrk supported or single threaded just return */
- if (vls_mt_wrk_supported () || (vlsl->vls_mt_n_threads <= 1))
+ if (vls_mt_wrk_supported () || !vlsl->vls_mt_needs_locks)
return;
wrk = vls_worker_get_current ();
static inline void
vls_mt_mq_wait_unlock (vcl_session_handle_t vcl_sh)
{
- if (vls_mt_wrk_supported () || (vlsl->vls_mt_n_threads <= 1))
+ if (vls_mt_wrk_supported () || !vlsl->vls_mt_needs_locks)
return;
vls_mt_mq_lock ();
vls_worker_alloc ();
vlsl->vls_wrk_index = vcl_get_worker_index ();
vlsl->vls_mt_n_threads = 1;
+ vlspt->locks_acq = 0;
if (pthread_setspecific (vls_mt_pthread_stop_key, vcl_worker_get_current ()))
VDBG (0, "failed to setup key value");
clib_rwlock_init (&vlsl->vls_pool_lock);