diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c
index 184a0f8c0a9..805438152ce 100644
--- a/src/vlibmemory/memory_vlib.c
+++ b/src/vlibmemory/memory_vlib.c
@@ -188,7 +188,6 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
   int rv = 0;
   void *oldheap;
   api_main_t *am = &api_main;
-  u8 *serialized_message_table_in_shmem;
 
   /*
    * This is tortured. Maintain a vlib-address-space private
@@ -237,7 +236,9 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
   regp->name = format (0, "%s", mp->name);
   vec_add1 (regp->name, 0);
 
-  serialized_message_table_in_shmem = vl_api_serialize_message_table (am, 0);
+  if (am->serialized_message_table_in_shmem == 0)
+    am->serialized_message_table_in_shmem =
+      vl_api_serialize_message_table (am, 0);
 
   pthread_mutex_unlock (&svm->mutex);
   svm_pop_heap (oldheap);
@@ -250,7 +251,8 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
 	     am->shmem_hdr->application_restarts);
   rp->context = mp->context;
   rp->response = ntohl (rv);
-  rp->message_table = pointer_to_uword (serialized_message_table_in_shmem);
+  rp->message_table =
+    pointer_to_uword (am->serialized_message_table_in_shmem);
 
   vl_msg_api_send_shmem (q, (u8 *) & rp);
 }
@@ -470,12 +472,48 @@ vl_api_memclnt_keepalive_t_handler (vl_api_memclnt_keepalive_t * mp)
   vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & rmp);
 }
 
+void
+vl_api_api_versions_t_handler (vl_api_api_versions_t * mp)
+{
+  api_main_t *am = &api_main;
+  vl_api_api_versions_reply_t *rmp;
+  unix_shared_memory_queue_t *q;
+  u32 nmsg = vec_len (am->api_version_list);
+  int msg_size = sizeof (*rmp) + sizeof (rmp->api_versions[0]) * nmsg;
+  int i;
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    return;
+
+  rmp = vl_msg_api_alloc (msg_size);
+  memset (rmp, 0, msg_size);
+  rmp->_vl_msg_id = ntohs (VL_API_API_VERSIONS_REPLY);
+
+  /* fill in the message */
+  rmp->context = mp->context;
+  rmp->count = htonl (nmsg);
+
+  for (i = 0; i < nmsg; ++i)
+    {
+      api_version_t *vl = &am->api_version_list[i];
+      rmp->api_versions[i].major = htonl (vl->major);
+      rmp->api_versions[i].minor = htonl (vl->minor);
+      rmp->api_versions[i].patch = htonl (vl->patch);
+      strncpy ((char *) rmp->api_versions[i].name, vl->name, 64 - 1);
+    }
+
+  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+
+}
+
 #define foreach_vlib_api_msg                                    \
 _(MEMCLNT_CREATE, memclnt_create)                               \
 _(MEMCLNT_DELETE, memclnt_delete)                               \
 _(GET_FIRST_MSG_ID, get_first_msg_id)                           \
 _(MEMCLNT_KEEPALIVE, memclnt_keepalive)                         \
-_(MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply)
+_(MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply)             \
+_(API_VERSIONS, api_versions)
 
 /*
  * vl_api_init
@@ -801,6 +839,8 @@ memclnt_process (vlib_main_t * vm,
   ASSERT (shm);
   q = shm->vl_input_queue;
   ASSERT (q);
+  /* Make a note so we can always find the primary region easily */
+  am->vlib_primary_rp = am->vlib_rp;
 
   e = vlib_call_init_exit_functions
     (vm, vm->api_init_function_registrations, 1 /* call_once */ );
@@ -984,11 +1024,10 @@ skip_save:
 	      if (PREDICT_FALSE (q->head == q->maxsize))
 		q->head = 0;
 	      pthread_mutex_unlock (&q->mutex);
+
 	      if (need_broadcast)
 		(void) pthread_cond_broadcast (&q->condvar);
 
-	      pthread_mutex_unlock (&q->mutex);
-
 	      vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);
 	    }
 	  else
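The first three hunks replace the per-client message-table serialization with a cached copy: am->serialized_message_table_in_shmem is built once, inside the shared-memory heap, and every subsequent memclnt_create reply reuses the same pointer. The hunk just above additionally removes a genuine double unlock of q->mutex, which is undefined behavior on a non-recursive pthread mutex. A minimal standalone sketch of the corrected unlock-then-broadcast pattern (toy_queue_t and its fields are hypothetical stand-ins, not VPP types):

#include <pthread.h>

typedef struct
{
  pthread_mutex_t mutex;
  pthread_cond_t condvar;
  int head, maxsize;
} toy_queue_t;

static void
toy_dequeue_tail (toy_queue_t * q, int need_broadcast)
{
  /* finish ring maintenance while still holding the mutex */
  if (q->head == q->maxsize)
    q->head = 0;

  /* release the mutex exactly once... */
  pthread_mutex_unlock (&q->mutex);

  /* ...then wake any producer blocked on the condvar */
  if (need_broadcast)
    (void) pthread_cond_broadcast (&q->condvar);
}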
@@ -1317,14 +1356,16 @@ vl_api_ring_command (vlib_main_t * vm,
   vl_shmem_hdr_t *shmem_hdr;
   api_main_t *am = &api_main;
 
-  shmem_hdr = am->shmem_hdr;
+  /* First, dump the primary region rings.. */
 
-  if (shmem_hdr == 0)
+  if (am->vlib_primary_rp == 0 || am->vlib_primary_rp->user_ctx == 0)
     {
       vlib_cli_output (vm, "Shared memory segment not initialized...\n");
       return 0;
     }
 
+  shmem_hdr = (void *) am->vlib_primary_rp->user_ctx;
+
+  vlib_cli_output (vm, "Main API segment rings:");
+
   vlib_cli_output (vm, "%U", format_api_message_rings, am,
@@ -1338,7 +1379,7 @@ vl_api_ring_command (vlib_main_t * vm,
       svm_region_t *vlib_rp = am->vlib_private_rps[i];
       shmem_hdr = (void *) vlib_rp->user_ctx;
       vl_api_registration_t **regpp;
-      vl_api_registration_t *regp;
+      vl_api_registration_t *regp = 0;
 
       /* For horizontal scaling, add a hash table... */
       /* *INDENT-OFF* */
@@ -1351,8 +1392,12 @@ vl_api_ring_command (vlib_main_t * vm,
 	      goto found;
 	    }
 	}));
+      vlib_cli_output (vm, "regp %llx not found?", regp);
+      continue;
       /* *INDENT-ON* */
     found:
+      vlib_cli_output (vm, "%U", format_api_message_rings, am,
+		       0 /* print header */ , 0 /* notused */ );
       vlib_cli_output (vm, "%U", format_api_message_rings, am,
 		       shmem_hdr, 0 /* main segment */ );
     }
@@ -1381,7 +1426,7 @@ vl_api_client_command (vlib_main_t * vm,
   if (!pool_elts (am->vl_clients))
     goto socket_clients;
   vlib_cli_output (vm, "Shared memory clients");
-  vlib_cli_output (vm, "%16s %8s %14s %18s %s",
+  vlib_cli_output (vm, "%20s %8s %14s %18s %s",
 		   "Name", "PID", "Queue Length", "Queue VA", "Health");
 
   /* *INDENT-OFF* */
@@ -1398,7 +1443,7 @@ vl_api_client_command (vlib_main_t * vm,
 
 	q = regp->vl_input_queue;
 
-	vlib_cli_output (vm, "%16s %8d %14d 0x%016llx %s\n",
+	vlib_cli_output (vm, "%20s %8d %14d 0x%016llx %s\n",
 			 regp->name, q->consumer_pid, q->cursize,
 			 q, health);
       }
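In the ring-dump hunks above, regp is now initialized to zero, and a lookup miss reports the stale pointer and continues to the next private segment instead of falling through into the found: label. A standalone sketch of that control-flow shape (reg_t, dump_all, and the field names are hypothetical; the real code walks a pool with the pool_foreach macro):

#include <stdio.h>

typedef struct
{
  int client_index;
} reg_t;

static void
dump_all (reg_t * regs, int nregs, const int *wanted, int nwanted)
{
  int i, j;

  for (i = 0; i < nwanted; i++)
    {
      reg_t *regp = 0;		/* initialized, as in the hunk above */

      for (j = 0; j < nregs; j++)
	{
	  if (regs[j].client_index == wanted[i])
	    {
	      regp = &regs[j];
	      goto found;
	    }
	}
      /* lookup miss: report it and move on, rather than falling
         through into found: with regp still null */
      fprintf (stderr, "client %d not found?\n", wanted[i]);
      continue;

    found:
      printf ("client %d -> regp %p\n", wanted[i], (void *) regp);
    }
}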
@@ -1794,19 +1839,51 @@ vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp)
   clib_warning ("unimplemented");
 }
 
+void
+vl_api_send_pending_rpc_requests (vlib_main_t * vm)
+{
+  api_main_t *am = &api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  unix_shared_memory_queue_t *q;
+  int i;
+
+  /*
+   * Use the "normal" control-plane mechanism for the main thread.
+   * Well, almost. If the main input queue is full, we cannot
+   * block. Otherwise, we can expect a barrier sync timeout.
+   */
+  q = shmem_hdr->vl_input_queue;
+
+  for (i = 0; i < vec_len (vm->pending_rpc_requests); i++)
+    {
+      while (pthread_mutex_trylock (&q->mutex))
+	vlib_worker_thread_barrier_check ();
+
+      while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
+	{
+	  pthread_mutex_unlock (&q->mutex);
+	  vlib_worker_thread_barrier_check ();
+	  while (pthread_mutex_trylock (&q->mutex))
+	    vlib_worker_thread_barrier_check ();
+	}
+
+      vl_msg_api_send_shmem_nolock (q, (u8 *) (vm->pending_rpc_requests + i));
+
+      pthread_mutex_unlock (&q->mutex);
+    }
+  _vec_len (vm->pending_rpc_requests) = 0;
+}
+
 always_inline void
 vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
 				    u8 force_rpc)
 {
   vl_api_rpc_call_t *mp;
-  api_main_t *am = &api_main;
-  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
-  unix_shared_memory_queue_t *q;
+  vlib_main_t *vm = vlib_get_main ();
 
-  /* Main thread: call the function directly */
+  /* Main thread and not a forced RPC: call the function directly */
   if ((force_rpc == 0) && (vlib_get_thread_index () == 0))
     {
-      vlib_main_t *vm = vlib_get_main ();
       void (*call_fp) (void *);
 
       vlib_worker_thread_barrier_sync (vm);
@@ -1818,7 +1895,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
       return;
     }
 
-  /* Any other thread, actually do an RPC call... */
+  /* Otherwise, actually do an RPC */
   mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + data_length);
 
   memset (mp, 0, sizeof (*mp));
@@ -1827,27 +1904,7 @@ vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
   mp->function = pointer_to_uword (fp);
   mp->need_barrier_sync = 1;
 
-  /*
-   * Use the "normal" control-plane mechanism for the main thread.
-   * Well, almost. if the main input queue is full, we cannot
-   * block. Otherwise, we can expect a barrier sync timeout.
-   */
-  q = shmem_hdr->vl_input_queue;
-
-  while (pthread_mutex_trylock (&q->mutex))
-    vlib_worker_thread_barrier_check ();
-
-  while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q)))
-    {
-      pthread_mutex_unlock (&q->mutex);
-      vlib_worker_thread_barrier_check ();
-      while (pthread_mutex_trylock (&q->mutex))
-	vlib_worker_thread_barrier_check ();
-    }
-
-  vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
-
-  pthread_mutex_unlock (&q->mutex);
+  vec_add1 (vm->pending_rpc_requests, (uword) mp);
 }
 
 /*
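The net effect of the last two hunks: a worker thread no longer pushes the RPC message onto the shared-memory input queue itself; it appends the message pointer to vm->pending_rpc_requests, and the main thread later drains that vector with vl_api_send_pending_rpc_requests, using pthread_mutex_trylock plus vlib_worker_thread_barrier_check so it never blocks against a barrier sync while the queue is full. A minimal caller-side sketch, assuming the public vl_api_rpc_call_main_thread wrapper (defined elsewhere in this file) and hypothetical my_arg_t / my_rpc_handler names:

#include <vlib/vlib.h>
#include <vlibmemory/api.h>

typedef struct
{
  u32 sw_if_index;
} my_arg_t;

/* Runs on the main thread, under barrier sync, once the RPC is drained */
static void
my_rpc_handler (void *arg)
{
  my_arg_t *a = (my_arg_t *) arg;
  clib_warning ("main-thread work for sw_if_index %u", a->sw_if_index);
}

/* Called from a worker thread; the argument block is copied into the
   message, so a stack variable is fine here */
static void
request_main_thread_work (u32 sw_if_index)
{
  my_arg_t a = {.sw_if_index = sw_if_index };

  vl_api_rpc_call_main_thread (my_rpc_handler, (u8 *) & a, sizeof (a));
}

Note that with this scheme the RPC is serviced only after the main thread next runs vl_api_send_pending_rpc_requests, so delivery is deferred rather than immediate.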