return vl_msg_api_alloc_internal (nbytes, 0, 1 /* may_return_null */ );
}
+/*
+ * Allocate a binary API message from the shared-memory region owned by
+ * the client registration 'reg', as if the client itself had allocated it.
+ *
+ * Temporarily repoints the process-global api_main region/header at the
+ * client's region for the duration of the allocation, then restores them.
+ * NOTE(review): swaps globals without visible locking — presumably callers
+ * serialize access (e.g. hold the API barrier); confirm.
+ */
+void *
+vl_mem_api_alloc_as_if_client_w_reg (vl_api_registration_t * reg, int nbytes)
+{
+ api_main_t *am = &api_main;
+ /* Save the current global region / shmem header for restoration below */
+ vl_shmem_hdr_t *save_shmem_hdr = am->shmem_hdr;
+ svm_region_t *vlib_rp, *save_vlib_rp = am->vlib_rp;
+ void *msg;
+
+ /* Point the allocator at the client's region */
+ vlib_rp = am->vlib_rp = reg->vlib_rp;
+ am->shmem_hdr = (void *) vlib_rp->user_ctx;
+
+ /* may_return_null = 0: allocation must not fail softly */
+ msg = vl_msg_api_alloc_internal (nbytes, 0, 0 /* may_return_null */ );
+
+ /* Restore the original region pointers */
+ am->shmem_hdr = save_shmem_hdr;
+ am->vlib_rp = save_vlib_rp;
+
+ return msg;
+}
+
void
vl_msg_api_free (void *a)
{
vlib_input_queue_length = am->vlib_input_queue_length;
shmem_hdr->vl_input_queue =
- svm_queue_init (vlib_input_queue_length, sizeof (uword),
- getpid (), am->vlib_signal);
+ svm_queue_alloc_and_init (vlib_input_queue_length, sizeof (uword),
+ getpid ());
#define _(sz,n) \
do { \
ring_alloc_t _rp; \
- _rp.rp = svm_queue_init ((n), (sz), 0, 0); \
+ _rp.rp = svm_queue_alloc_and_init ((n), (sz), 0); \
_rp.size = (sz); \
_rp.nitems = n; \
_rp.hits = 0; \
#define _(sz,n) \
do { \
ring_alloc_t _rp; \
- _rp.rp = svm_queue_init ((n), (sz), 0, 0); \
+ _rp.rp = svm_queue_alloc_and_init ((n), (sz), 0); \
_rp.size = (sz); \
_rp.nitems = n; \
_rp.hits = 0; \
void
vl_api_mem_config (vl_shmem_hdr_t * hdr, vl_api_shm_elem_config_t * config)
{
- api_main_t *am = &api_main;
vl_api_shm_elem_config_t *c;
ring_alloc_t *rp;
u32 size;
switch (c->type)
{
case VL_API_QUEUE:
- hdr->vl_input_queue = svm_queue_init (c->count,
- c->size,
- getpid (), am->vlib_signal);
+ hdr->vl_input_queue = svm_queue_alloc_and_init (c->count, c->size,
+ getpid ());
continue;
case VL_API_VLIB_RING:
vec_add2 (hdr->vl_rings, rp, 1);
}
size = sizeof (ring_alloc_t) + c->size;
- rp->rp = svm_queue_init (c->count, size, 0, 0);
+ rp->rp = svm_queue_alloc_and_init (c->count, size, 0);
rp->size = size;
rp->nitems = c->count;
rp->hits = 0;
vec_validate (shmem_hdr, 0);
shmem_hdr->version = VL_SHM_VERSION;
+ shmem_hdr->clib_file_index = VL_API_INVALID_FI;
/* Set up the queue and msg ring allocator */
vl_api_mem_config (shmem_hdr, config);
struct timespec ts, tsrem;
char *vpe_api_region_suffix = "-vpe-api";
- memset (a, 0, sizeof (*a));
+ clib_memset (a, 0, sizeof (*a));
if (strstr (region_name, vpe_api_region_suffix))
{
if (is_vlib == 0)
{
+ int tfd;
+ u8 *api_name;
+ /*
+ * Clients wait for vpp to set up the root / API regioins
+ */
+ if (am->root_path)
+ api_name = format (0, "/dev/shm/%s-%s%c", am->root_path,
+ region_name + 1, 0);
+ else
+ api_name = format (0, "/dev/shm%s%c", region_name, 0);
+
+ /* Wait up to 100 seconds... */
+ for (i = 0; i < 10000; i++)
+ {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 10000 * 1000; /* 10 ms */
+ while (nanosleep (&ts, &tsrem) < 0)
+ ts = tsrem;
+ tfd = open ((char *) api_name, O_RDWR);
+ if (tfd > 0)
+ break;
+ }
+ vec_free (api_name);
+ if (tfd < 0)
+ {
+ clib_warning ("region init fail");
+ return -2;
+ }
+ close (tfd);
rv = svm_region_init_chroot (am->root_path);
if (rv)
return rv;
ts = tsrem;
}
/* Mutex buggered, "fix" it */
- memset (&q->mutex, 0, sizeof (q->mutex));
+ clib_memset (&q->mutex, 0, sizeof (q->mutex));
clib_warning ("forcibly release main input queue mutex");
mutex_ok:
am->vlib_rp = vlib_rp;
- while (svm_queue_sub (q, (u8 *) & old_msg, 1 /* nowait */ )
+ while (svm_queue_sub (q, (u8 *) & old_msg, SVM_Q_NOWAIT, 0)
!= -2 /* queue underflow */ )
{
vl_msg_api_free_nolock ((void *) old_msg);
vec_add1 (am->mapped_shmem_regions, rp);
}
+/*
+ * Unmap every mapped API shared-memory region and free the message-handler
+ * vectors.  'is_client' selects the client-side svm teardown entry points
+ * (svm_region_unmap_client / svm_region_exit_client) instead of the
+ * server-side ones.
+ */
-void
-vl_unmap_shmem (void)
+static void
+vl_unmap_shmem_internal (u8 is_client)
{
svm_region_t *rp;
int i;
for (i = 0; i < vec_len (am->mapped_shmem_regions); i++)
{
rp = am->mapped_shmem_regions[i];
- svm_region_unmap (rp);
+ is_client ? svm_region_unmap_client (rp) : svm_region_unmap (rp);
}
vec_free (am->mapped_shmem_regions);
am->shmem_hdr = 0;
- svm_region_exit ();
+ is_client ? svm_region_exit_client () : svm_region_exit ();
+
/* $$$ more careful cleanup, valgrind run... */
vec_free (am->msg_handlers);
vec_free (am->msg_endian_handlers);
vec_free (am->msg_print_handlers);
}
+/* Server-side teardown: unmap API shared-memory regions (non-client path). */
+void
+vl_unmap_shmem (void)
+{
+ vl_unmap_shmem_internal (0);
+}
+
+/* Client-side teardown: unmap regions via the *_client svm entry points. */
+void
+vl_unmap_shmem_client (void)
+{
+ vl_unmap_shmem_internal (1);
+}
+
void
vl_msg_api_send_shmem (svm_queue_t * q, u8 * elem)
{
if (am->tx_trace && am->tx_trace->enabled)
vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]);
+ /*
+ * Announce a probable binary API client bug:
+ * some client's input queue is stuffed.
+ * The situation may be recoverable, or not.
+ */
+ if (PREDICT_FALSE
+ (am->vl_clients /* vpp side */ && (q->cursize == q->maxsize)))
+ clib_warning ("WARNING: client input queue at %llx is stuffed...", q);
(void) svm_queue_add (q, elem, 0 /* nowait */ );
}
+/*
+ * Predicate: nonzero when queue 'q' has room for at least one more
+ * message (current size strictly below capacity).
+ */
+int
+vl_mem_api_can_send (svm_queue_t * q)
+{
+ return (q->cursize < q->maxsize);
+}
+
void
vl_msg_api_send_shmem_nolock (svm_queue_t * q, u8 * elem)
{