X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlibmemory%2Funix_shared_memory_queue.c;h=4db4851c111283be15e707bc1ccf2f8cbd6ed908;hb=8f2a4eafeaa439432107563033728e09665c16d9;hp=25d28910706758ea2ed3b80077463c4ea265a669;hpb=7cd468a3d7dee7d6c92f69a0bb7061ae208ec727;p=vpp.git

diff --git a/src/vlibmemory/unix_shared_memory_queue.c b/src/vlibmemory/unix_shared_memory_queue.c
index 25d28910706..4db4851c111 100644
--- a/src/vlibmemory/unix_shared_memory_queue.c
+++ b/src/vlibmemory/unix_shared_memory_queue.c
@@ -33,18 +33,13 @@
  * nels = number of elements on the queue
  * elsize = element size, presumably 4 and cacheline-size will
  *          be popular choices.
- * coid = consumer coid, from ChannelCreate
  * pid = consumer pid
- * pulse_code = pulse code consumer expects
- * pulse_value = pulse value consumer expects
- * consumer_prio = consumer's priority, so pulses won't change
- *                 the consumer's priority.
  *
  * The idea is to call this function in the queue consumer,
  * and e-mail the queue pointer to the producer(s).
  *
- * The spp process / main thread allocates one of these
- * at startup; its main input queue. The spp main input queue
+ * The vpp process / main thread allocates one of these
+ * at startup; its main input queue. The vpp main input queue
  * has a pointer to it in the shared memory segment header.
  *
  * You probably want to be on an svm data heap before calling this
@@ -70,7 +65,7 @@ unix_shared_memory_queue_init (int nels,
   q->signal_when_queue_non_empty = signal_when_queue_non_empty;
 
   memset (&attr, 0, sizeof (attr));
-  memset (&cattr, 0, sizeof (attr));
+  memset (&cattr, 0, sizeof (cattr));
 
   if (pthread_mutexattr_init (&attr))
     clib_unix_warning ("mutexattr_init");
@@ -239,6 +234,71 @@ unix_shared_memory_queue_add (unix_shared_memory_queue_t * q,
   return 0;
 }
 
+/*
+ * unix_shared_memory_queue_add2
+ */
+int
+unix_shared_memory_queue_add2 (unix_shared_memory_queue_t * q, u8 * elem,
+			       u8 * elem2, int nowait)
+{
+  i8 *tailp;
+  int need_broadcast = 0;
+
+  if (nowait)
+    {
+      /* zero on success */
+      if (pthread_mutex_trylock (&q->mutex))
+	{
+	  return (-1);
+	}
+    }
+  else
+    pthread_mutex_lock (&q->mutex);
+
+  if (PREDICT_FALSE (q->cursize + 1 == q->maxsize))
+    {
+      if (nowait)
+	{
+	  pthread_mutex_unlock (&q->mutex);
+	  return (-2);
+	}
+      while (q->cursize + 1 == q->maxsize)
+	{
+	  (void) pthread_cond_wait (&q->condvar, &q->mutex);
+	}
+    }
+
+  tailp = (i8 *) (&q->data[0] + q->elsize * q->tail);
+  clib_memcpy (tailp, elem, q->elsize);
+
+  q->tail++;
+  q->cursize++;
+
+  if (q->tail == q->maxsize)
+    q->tail = 0;
+
+  need_broadcast = (q->cursize == 1);
+
+  tailp = (i8 *) (&q->data[0] + q->elsize * q->tail);
+  clib_memcpy (tailp, elem2, q->elsize);
+
+  q->tail++;
+  q->cursize++;
+
+  if (q->tail == q->maxsize)
+    q->tail = 0;
+
+  if (need_broadcast)
+    {
+      (void) pthread_cond_broadcast (&q->condvar);
+      if (q->signal_when_queue_non_empty)
+	kill (q->consumer_pid, q->signal_when_queue_non_empty);
+    }
+  pthread_mutex_unlock (&q->mutex);
+
+  return 0;
+}
+
 /*
  * unix_shared_memory_queue_sub
  */
@@ -277,6 +337,7 @@ unix_shared_memory_queue_sub (unix_shared_memory_queue_t * q,
   clib_memcpy (elem, headp, q->elsize);
 
   q->head++;
+  /* $$$$ JFC shouldn't this be == 0? */
   if (q->cursize == q->maxsize)
     need_broadcast = 1;
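
The hunks above introduce unix_shared_memory_queue_add2 (), which copies two elements into the ring under a single mutex acquisition and wakes the consumer only when the queue transitions from empty to non-empty (need_broadcast is latched after the first copy, when cursize == 1). The fragment below is a minimal producer-side sketch of how the new call might be used; it is not part of the diff. The helper name send_msg_pair, the assumption that the queue was created with elsize == sizeof (uword), and the exact include paths are illustrative assumptions only.

/*
 * Hypothetical producer-side sketch (not part of this change).
 * Assumes q was created by unix_shared_memory_queue_init () with
 * elsize == sizeof (uword), i.e. each element holds a message pointer.
 */
#include <vppinfra/types.h>
#include <vlibmemory/unix_shared_memory_queue.h>

static int
send_msg_pair (unix_shared_memory_queue_t * q, uword msg1, uword msg2)
{
  int rv;

  /* Try the non-blocking path first: -1 means the mutex was busy,
   * -2 means there was no room left for two more elements. */
  rv = unix_shared_memory_queue_add2 (q, (u8 *) & msg1, (u8 *) & msg2,
				      1 /* nowait */ );
  if (rv == 0)
    return 0;

  /* Blocking path: waits on the condvar until both elements fit,
   * then enqueues them back-to-back under one lock. */
  return unix_shared_memory_queue_add2 (q, (u8 *) & msg1, (u8 *) & msg2,
					0 /* wait */ );
}

Because both copies happen before the broadcast, a consumer woken by the condvar (or by the optional signal_when_queue_non_empty signal sent to consumer_pid) observes both elements with a single wakeup, which appears to be the point of the two-element variant.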