diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c
index c3aef65e854..805438152ce 100644
--- a/src/vlibmemory/memory_vlib.c
+++ b/src/vlibmemory/memory_vlib.c
@@ -188,7 +188,6 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
   int rv = 0;
   void *oldheap;
   api_main_t *am = &api_main;
-  u8 *serialized_message_table_in_shmem;
 
   /*
    * This is tortured. Maintain a vlib-address-space private
@@ -237,7 +236,9 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
   regp->name = format (0, "%s", mp->name);
   vec_add1 (regp->name, 0);
 
-  serialized_message_table_in_shmem = vl_api_serialize_message_table (am, 0);
+  if (am->serialized_message_table_in_shmem == 0)
+    am->serialized_message_table_in_shmem =
+      vl_api_serialize_message_table (am, 0);
 
   pthread_mutex_unlock (&svm->mutex);
   svm_pop_heap (oldheap);
@@ -250,7 +251,8 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
      am->shmem_hdr->application_restarts);
   rp->context = mp->context;
   rp->response = ntohl (rv);
-  rp->message_table = pointer_to_uword (serialized_message_table_in_shmem);
+  rp->message_table =
+    pointer_to_uword (am->serialized_message_table_in_shmem);
 
   vl_msg_api_send_shmem (q, (u8 *) & rp);
 }
@@ -498,7 +500,7 @@ vl_api_api_versions_t_handler (vl_api_api_versions_t * mp)
       rmp->api_versions[i].major = htonl (vl->major);
       rmp->api_versions[i].minor = htonl (vl->minor);
       rmp->api_versions[i].patch = htonl (vl->patch);
-      strncpy ((char *) rmp->api_versions[i].name, vl->name, 64);
+      strncpy ((char *) rmp->api_versions[i].name, vl->name, 64 - 1);
     }
 
   vl_msg_api_send_shmem (q, (u8 *) & rmp);
@@ -1022,11 +1024,10 @@ skip_save:
           if (PREDICT_FALSE (q->head == q->maxsize))
             q->head = 0;
           pthread_mutex_unlock (&q->mutex);
+
           if (need_broadcast)
             (void) pthread_cond_broadcast (&q->condvar);
 
-          pthread_mutex_unlock (&q->mutex);
-
           vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);
         }
       else
@@ -1425,7 +1426,7 @@ vl_api_client_command (vlib_main_t * vm,
   if (!pool_elts (am->vl_clients))
     goto socket_clients;
   vlib_cli_output (vm, "Shared memory clients");
-  vlib_cli_output (vm, "%16s %8s %14s %18s %s",
+  vlib_cli_output (vm, "%20s %8s %14s %18s %s",
                    "Name", "PID", "Queue Length", "Queue VA", "Health");
 
   /* *INDENT-OFF* */
@@ -1442,7 +1443,7 @@ vl_api_client_command (vlib_main_t * vm,
 
       q = regp->vl_input_queue;
 
-      vlib_cli_output (vm, "%16s %8d %14d 0x%016llx %s\n",
+      vlib_cli_output (vm, "%20s %8d %14d 0x%016llx %s\n",
                        regp->name, q->consumer_pid, q->cursize, q, health);
     }
 
@@ -1838,19 +1839,51 @@ vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp)
   clib_warning ("unimplemented");
 }
 
+void
+vl_api_send_pending_rpc_requests (vlib_main_t * vm)
+{
+  api_main_t *am = &api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  unix_shared_memory_queue_t *q;
+  int i;
+
+  /*
+   * Use the "normal" control-plane mechanism for the main thread.
+   * Well, almost. if the main input queue is full, we cannot
+   * block. Otherwise, we can expect a barrier sync timeout.
+   */
+  q = shmem_hdr->vl_input_queue;
+
+  for (i = 0; i < vec_len (vm->pending_rpc_requests); i++)
+    {
+      while (pthread_mutex_trylock (&q->mutex))
+        vlib_worker_thread_barrier_check ();
+
+      while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
+        {
+          pthread_mutex_unlock (&q->mutex);
+          vlib_worker_thread_barrier_check ();
+          while (pthread_mutex_trylock (&q->mutex))
+            vlib_worker_thread_barrier_check ();
+        }
+
+      vl_msg_api_send_shmem_nolock (q, (u8 *) (vm->pending_rpc_requests + i));
+
+      pthread_mutex_unlock (&q->mutex);
+    }
+  _vec_len (vm->pending_rpc_requests) = 0;
+}
+
 always_inline void
 vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
                                     u8 force_rpc)
 {
   vl_api_rpc_call_t *mp;
-  api_main_t *am = &api_main;
-  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q;
+  vlib_main_t *vm = vlib_get_main ();
 
-  /* Main thread: call the function directly */
+  /* Main thread and not a forced RPC: call the function directly */
   if ((force_rpc == 0) && (vlib_get_thread_index () == 0))
     {
-      vlib_main_t *vm = vlib_get_main ();
       void (*call_fp) (void *);
 
       vlib_worker_thread_barrier_sync (vm);
@@ -1862,7 +1895,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
       return;
     }
 
-  /* Any other thread, actually do an RPC call... */
+  /* Otherwise, actually do an RPC */
   mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + data_length);
 
   memset (mp, 0, sizeof (*mp));
@@ -1871,27 +1904,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
   mp->function = pointer_to_uword (fp);
   mp->need_barrier_sync = 1;
 
-  /*
-   * Use the "normal" control-plane mechanism for the main thread.
-   * Well, almost. if the main input queue is full, we cannot
-   * block. Otherwise, we can expect a barrier sync timeout.
-   */
-  q = shmem_hdr->vl_input_queue;
-
-  while (pthread_mutex_trylock (&q->mutex))
-    vlib_worker_thread_barrier_check ();
-
-  while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
-    {
-      pthread_mutex_unlock (&q->mutex);
-      vlib_worker_thread_barrier_check ();
-      while (pthread_mutex_trylock (&q->mutex))
-        vlib_worker_thread_barrier_check ();
-    }
-
-  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-
-  pthread_mutex_unlock (&q->mutex);
+  vec_add1 (vm->pending_rpc_requests, (uword) mp);
 }
 
 /*
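
Why "64 - 1" in the @@ -498 hunk: strncpy does not NUL-terminate the
destination when the source is at least as long as the given bound, so
copying with the full 64-byte size of the name field can leave it
unterminated. Bounding the copy at 63 bytes reserves the final byte, which
is already zero in the freshly allocated reply. A minimal standalone
illustration (the 8-byte field size here is hypothetical, chosen for
brevity):

#include <stdio.h>
#include <string.h>

int
main (void)
{
  char name[8] = { 0 };         /* zero-filled, like the API reply buffer */

  /* strncpy (name, src, sizeof (name)) would fill all 8 bytes and leave
   * no room for a terminator; sizeof (name) - 1 always leaves one. */
  strncpy (name, "this-name-is-too-long", sizeof (name) - 1);
  printf ("%s\n", name);        /* prints "this-na", safely terminated */
  return 0;
}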
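
The skip_save: hunk at @@ -1022 fixes a double unlock: the old code released
q->mutex, broadcast the condvar, then released the mutex a second time,
which is undefined behavior for a default pthread mutex. The corrected order
is a single unlock followed by an out-of-lock broadcast. Below is a
self-contained sketch of that dequeue-side pattern; the queue type and
field names are hypothetical stand-ins, not VPP's:

#include <pthread.h>

typedef struct
{
  pthread_mutex_t mutex;
  pthread_cond_t condvar;
  unsigned long *elts;          /* ring of maxsize message pointers */
  int cursize, maxsize, head;
} queue_stub_t;

/* Pop one message (assumes a non-empty queue, as in the handler's
 * fast path). */
static unsigned long
dequeue_stub (queue_stub_t * q)
{
  unsigned long msg;
  int need_broadcast;

  pthread_mutex_lock (&q->mutex);
  msg = q->elts[q->head];
  /* Producers block only on a full queue, so wake them only when this
   * dequeue frees the first slot. */
  need_broadcast = (q->cursize == q->maxsize);
  q->cursize--;
  if (++q->head == q->maxsize)
    q->head = 0;
  pthread_mutex_unlock (&q->mutex);     /* exactly one unlock... */

  if (need_broadcast)                   /* ...then signal, lock not held */
    (void) pthread_cond_broadcast (&q->condvar);
  return msg;
}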
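
The largest change reworks cross-thread RPCs: instead of posting to the main
input queue inline, vl_api_rpc_call_main_thread_inline now appends the
message to vm->pending_rpc_requests, and vl_api_send_pending_rpc_requests
drains that vector with the trylock/barrier-check idiom the old inline code
used, so no thread ever sleeps on the queue mutex, or on a full queue, while
a barrier sync is pending. Here is a self-contained sketch of the drain
loop; every _stub name is an assumed stand-in for the corresponding VPP
primitive, not the real API:

#include <pthread.h>

/* Hypothetical bounded queue standing in for unix_shared_memory_queue_t. */
typedef struct
{
  pthread_mutex_t mutex;
  int cursize, maxsize;
} queue_stub_t;

/* Stand-in for vlib_worker_thread_barrier_check: answer any pending
 * barrier-sync request instead of sleeping. */
static void
barrier_check_stub (void)
{
}

static int
queue_is_full_stub (queue_stub_t * q)
{
  return q->cursize == q->maxsize;
}

/* Stand-in for vl_msg_api_send_shmem_nolock: caller holds q->mutex. */
static void
send_nolock_stub (queue_stub_t * q, unsigned long msg)
{
  (void) msg;
  q->cursize++;
}

/* Drain deferred messages, mirroring vl_api_send_pending_rpc_requests:
 * never block on the mutex or on a full queue; keep servicing barrier
 * requests instead. */
static void
flush_pending_stub (queue_stub_t * q, unsigned long *pending, int n_pending)
{
  int i;

  for (i = 0; i < n_pending; i++)
    {
      /* Trylock-spin: a thread asleep in pthread_mutex_lock cannot
       * answer a barrier request, which causes exactly the barrier
       * sync timeout the patch comment warns about. */
      while (pthread_mutex_trylock (&q->mutex))
        barrier_check_stub ();

      /* Queue full: drop the lock so the consumer can drain, service
       * barriers, then retake the lock the same careful way. The full
       * check is re-evaluated with the mutex held. */
      while (queue_is_full_stub (q))
        {
          pthread_mutex_unlock (&q->mutex);
          barrier_check_stub ();
          while (pthread_mutex_trylock (&q->mutex))
            barrier_check_stub ();
        }

      send_nolock_stub (q, pending[i]);
      pthread_mutex_unlock (&q->mutex);
    }
}

With the drain loop centralized, the caller's side of the pattern shrinks to
the single vec_add1 (vm->pending_rpc_requests, (uword) mp) visible in the
last hunk, and the flush runs at a point the main loop chooses.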