2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
19 #include <vlib/vlib.h>
20 #include <vlibapi/api.h>
21 #include <vlibmemory/api.h>
22 #include <vlibmemory/memory_api.h>
24 #include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */
26 #define vl_typedefs /* define message structures */
27 #include <vlibmemory/vl_memory_api_h.h>
30 /* instantiate all the print functions we know about */
31 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
33 #include <vlibmemory/vl_memory_api_h.h>
36 /* instantiate all the endian swap functions we know about */
38 #include <vlibmemory/vl_memory_api_h.h>
/* Cache of pointers to the cursize field of every known API input queue:
 * slot 0 is the primary shared-memory queue, the remaining slots are the
 * private (pairwise) segment queues.  Rebuilt whenever the number of
 * private regions changes. */
volatile int **vl_api_queue_cursizes;

/* Queue-signal callback invoked from the vlib main loop: signal the
 * vl_api_clnt process node when any client input queue is non-empty or
 * when RPC requests are pending.
 * NOTE(review): several structural lines (braces, declarations of q and i,
 * the event-data argument) are not visible in this view of the file. */
memclnt_queue_callback (vlib_main_t * vm)
api_main_t *am = vlibapi_get_main ();
int have_pending_rpcs;

/* Cache stale?  Rebuild: one entry for the primary queue plus one per
 * private region. */
if (PREDICT_FALSE (vec_len (vl_api_queue_cursizes) !=
		   1 + vec_len (am->vlib_private_rps)))
vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
q = shmem_hdr->vl_input_queue;
vec_add1 (vl_api_queue_cursizes, &q->cursize);
/* Append the input queue of each private (pairwise) region. */
for (i = 0; i < vec_len (am->vlib_private_rps); i++)
svm_region_t *vlib_rp = am->vlib_private_rps[i];
/* Each region stores its shmem header in user_ctx. */
shmem_hdr = (void *) vlib_rp->user_ctx;
q = shmem_hdr->vl_input_queue;
vec_add1 (vl_api_queue_cursizes, &q->cursize);

/* If any cached queue reports a non-zero cursize, wake the API node. */
for (i = 0; i < vec_len (vl_api_queue_cursizes); i++)
if (*vl_api_queue_cursizes[i])
vm->queue_signal_pending = 1;
vm->api_queue_nonempty = 1;
vlib_process_signal_event (vm, vl_api_clnt_node.index,
			   /* event_type */ QUEUE_SIGNAL_EVENT,

/* Sample the pending-RPC vector length under its spinlock. */
clib_spinlock_lock_if_init (&vm->pending_rpc_lock);
have_pending_rpcs = vec_len (vm->pending_rpc_requests) > 0;
clib_spinlock_unlock_if_init (&vm->pending_rpc_lock);

/* Pending RPCs also wake the API node, via the same event. */
if (have_pending_rpcs)
vm->queue_signal_pending = 1;
vm->api_queue_nonempty = 1;
vlib_process_signal_event (vm, vl_api_clnt_node.index,
			   /* event_type */ QUEUE_SIGNAL_EVENT,
* vl_api_memclnt_create_internal
/* Register an in-process ("internal") shared-memory API client.
 * Allocates a registration object on the API message heap, wires it to
 * the supplied input queue, and returns a client handle composed of the
 * pool index and the current application-restart epoch.
 * Must run on the main thread (thread index 0).
 * NOTE(review): braces and some declarations are not visible in this
 * view of the file. */
vl_api_memclnt_create_internal (char *name, svm_queue_t * q)
vl_api_registration_t **regpp;
vl_api_registration_t *regp;
api_main_t *am = vlibapi_get_main ();
ASSERT (vlib_get_thread_index () == 0);
pool_get (am->vl_clients, regpp);
/* The registration itself lives on the API message heap. */
oldheap = vl_msg_push_heap ();
*regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
clib_memset (regp, 0, sizeof (*regp));
regp->registration_type = REGISTRATION_TYPE_SHMEM;
regp->vl_api_registration_pool_index = regpp - am->vl_clients;
regp->vlib_rp = am->vlib_rp;
regp->shmem_hdr = am->shmem_hdr;
regp->vl_input_queue = q;
/* NUL-terminate the name so it can be used as a C string. */
regp->name = format (0, "%s%c", name, 0);
vl_msg_pop_heap (oldheap);
/* Handle = (pool index, restart epoch) so stale handles are detectable. */
return vl_msg_api_handle_from_index_and_epoch
  (regp->vl_api_registration_pool_index,
   am->shmem_hdr->application_restarts);
* vl_api_memclnt_create_t_handler
/* Handle an external client's connect request: create a registration,
 * serialize the message table (once for the primary segment, per-call for
 * private segments), and send a memclnt_create_reply carrying the client
 * handle and a pointer to the serialized message table.
 * NOTE(review): braces and some declarations (q, msg_table, rv, oldheap)
 * are not visible in this view of the file. */
vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
vl_api_registration_t **regpp;
vl_api_registration_t *regp;
vl_api_memclnt_create_reply_t *rp;
api_main_t *am = vlibapi_get_main ();
/*
 * This is tortured. Maintain a vlib-address-space private
 * pool of client registrations. We use the shared-memory virtual
 * address of client structure as a handle, to allow direct
 * manipulation of context quota vbls from the client library.
 *
 * This scheme causes trouble w/ API message trace replay, since
 * some random VA from clib_mem_alloc() certainly won't
 * occur in the Linux sim. The (very) few places
 * that care need to use the pool index.
 *
 * Putting the registration object(s) into a pool in shared memory and
 * using the pool index as a handle seems like a great idea.
 * Unfortunately, each and every reference to that pool would need
 * to be protected by a mutex:
 */
* convert pool index to
pool_get (am->vl_clients, regpp);
oldheap = vl_msg_push_heap ();
*regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
clib_memset (regp, 0, sizeof (*regp));
regp->registration_type = REGISTRATION_TYPE_SHMEM;
regp->vl_api_registration_pool_index = regpp - am->vl_clients;
regp->vlib_rp = am->vlib_rp;
regp->shmem_hdr = am->shmem_hdr;
regp->clib_file_index = am->shmem_hdr->clib_file_index;
/* The client passed its input queue address in shared memory. */
q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
VL_MSG_API_SVM_QUEUE_UNPOISON (q);
regp->name = format (0, "%s", mp->name);
vec_add1 (regp->name, 0);
/* Serialize the message table once for the primary segment... */
if (am->serialized_message_table_in_shmem == 0)
am->serialized_message_table_in_shmem =
  vl_api_serialize_message_table (am, 0);
/* ...but private segments need their own copy in their own region. */
if (am->vlib_rp != am->vlib_primary_rp)
msg_table = vl_api_serialize_message_table (am, 0);
msg_table = am->serialized_message_table_in_shmem;
vl_msg_pop_heap (oldheap);
/* Build and send the reply on the client's queue. */
rp = vl_msg_api_alloc (sizeof (*rp));
rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_REPLY);
rp->handle = (uword) regp;
rp->index = vl_msg_api_handle_from_index_and_epoch
  (regp->vl_api_registration_pool_index,
   am->shmem_hdr->application_restarts);
rp->context = mp->context;
rp->response = ntohl (rv);
rp->message_table = pointer_to_uword (msg_table);
vl_msg_api_send_shmem (q, (u8 *) & rp);
/* Invoke every registered client-reaper callback for a departing client,
 * reporting (but not aborting on) any error a callback returns.
 * NOTE(review): the loop construct surrounding the body is not visible in
 * this view of the file. */
vl_api_call_reaper_functions (u32 client_index)
clib_error_t *error = 0;
_vl_msg_api_function_list_elt_t *i;
i = vlibapi_get_main ()->reaper_function_registrations;
error = i->f (client_index);
clib_error_report (error);
/* Walk the singly-linked registration list. */
i = i->next_init_function;
* vl_api_memclnt_delete_t_handler
/* Handle a client's disconnect request: run reaper callbacks, validate the
 * handle's epoch, reply (unless the client asked to do its own cleanup),
 * and tear down the registration.  Private (pairwise) API segments are
 * munmap'd wholesale; primary-segment registrations are freed piecemeal
 * on the API message heap.
 * NOTE(review): braces, some declarations (handle, i, oldheap) and a few
 * statements are not visible in this view of the file. */
vl_api_memclnt_delete_t_handler (vl_api_memclnt_delete_t * mp)
vl_api_registration_t **regpp;
vl_api_registration_t *regp;
vl_api_memclnt_delete_reply_t *rp;
api_main_t *am = vlibapi_get_main ();
u32 handle, client_index, epoch;
/* Give the application a chance to clean up after this client. */
vl_api_call_reaper_functions (handle);
epoch = vl_msg_api_handle_get_epoch (handle);
client_index = vl_msg_api_handle_get_index (handle);
/* Reject handles minted before the last application restart. */
if (epoch != (am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK))
("Stale clnt delete index %d old epoch %d cur epoch %d",
 (am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK));
regpp = pool_elt_at_index (am->vl_clients, client_index);
if (!pool_is_free (am->vl_clients, regpp))
int private_registration = 0;
/* Send reply unless client asked us to do the cleanup */
/*
 * Note: the API message handling path will set am->vlib_rp
 * as appropriate for pairwise / private memory segments
 */
rp = vl_msg_api_alloc (sizeof (*rp));
rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_DELETE_REPLY);
rp->handle = mp->handle;
vl_msg_api_send_shmem (regp->vl_input_queue, (u8 *) & rp);
/* Sanity-check: pool index in the handle should match the regp. */
if (client_index != regp->vl_api_registration_pool_index)
clib_warning ("mismatch client_index %d pool_index %d",
	      regp->vl_api_registration_pool_index);
vl_msg_api_free (rp);
/* No dangling references, please */
/* For horizontal scaling, add a hash table... */
for (i = 0; i < vec_len (am->vlib_private_rps); i++)
/* Is this a pairwise / private API segment? */
if (am->vlib_private_rps[i] == am->vlib_rp)
/* Note: account for the memfd header page */
uword virtual_base = am->vlib_rp->virtual_base - MMAP_PAGESIZE;
uword virtual_size = am->vlib_rp->virtual_size + MMAP_PAGESIZE;
/*
 * Kill the registration pool element before we make
 * the index vanish forever
 */
pool_put_index (am->vl_clients,
		regp->vl_api_registration_pool_index);
vec_delete (am->vlib_private_rps, 1, i);
/* Kill it, accounting for the memfd header page */
if (munmap ((void *) virtual_base, virtual_size) < 0)
clib_unix_warning ("munmap");
/* Reset the queue-length-address cache */
vec_reset_length (vl_api_queue_cursizes);
private_registration = 1;
/* Primary-segment client: free queue, name and registration object. */
if (private_registration == 0)
pool_put_index (am->vl_clients,
		regp->vl_api_registration_pool_index);
oldheap = vl_msg_push_heap ();
svm_queue_free (regp->vl_input_queue);
vec_free (regp->name);
/* Poison the old registration */
clib_memset (regp, 0xF1, sizeof (*regp));
clib_mem_free (regp);
vl_msg_pop_heap (oldheap);
/*
 * These messages must be freed manually, since they're set up
 * as "bounce" messages. In the private_registration == 1 case,
 * we kill the shared-memory segment which contains the message
 */
vl_msg_api_free (mp);
clib_warning ("unknown client ID %d", mp->index);
* client answered a ping, stave off the grim reaper...
/* Keepalive reply: refresh the client's liveness timestamp and clear its
 * unanswered-ping counter; warn if the context maps to no registration.
 * NOTE(review): braces and the if/else structure are not visible in this
 * view of the file. */
vl_api_memclnt_keepalive_reply_t_handler
  (vl_api_memclnt_keepalive_reply_t * mp)
vl_api_registration_t *regp;
vlib_main_t *vm = vlib_get_main ();
regp = vl_api_client_index_to_registration (mp->context);
regp->last_heard = vlib_time_now (vm);
regp->unanswered_pings = 0;
clib_warning ("BUG: anonymous memclnt_keepalive_reply");
* We can send ourselves these messages if someone uses the
* builtin binary api test tool...
/* Keepalive request: answer immediately with a keepalive reply echoing
 * the sender's context, posted to our own input queue. */
vl_api_memclnt_keepalive_t_handler (vl_api_memclnt_keepalive_t * mp)
vl_api_memclnt_keepalive_reply_t *rmp;
vl_shmem_hdr_t *shmem_hdr;
am = vlibapi_get_main ();
shmem_hdr = am->shmem_hdr;
rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp));
clib_memset (rmp, 0, sizeof (*rmp));
rmp->_vl_msg_id = ntohs (VL_API_MEMCLNT_KEEPALIVE_REPLY);
rmp->context = mp->context;
vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & rmp);
* To avoid filling the API trace buffer with boring messages,
* don't trace memclnt_keepalive[_reply] msgs
/* X-macro list of the core memclnt messages: (msg enum suffix, handler
 * name stem, traced flag).  Expanded in vl_mem_api_init below. */
#define foreach_vlib_api_msg \
_ (MEMCLNT_CREATE, memclnt_create, 0) \
_ (MEMCLNT_DELETE, memclnt_delete, 0) \
_ (MEMCLNT_KEEPALIVE, memclnt_keepalive, 0) \
_ (MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply, 0)
/* Map the shared-memory API region as the vlib side, register the core
 * memclnt message handlers and their name/CRC pairs, and install the
 * queue-signal callback.  Returns a negative value if mapping fails.
 * NOTE(review): braces, the rv/shm declarations and the return statement
 * are not visible in this view of the file. */
vl_mem_api_init (const char *region_name)
api_main_t *am = vlibapi_get_main ();
vl_msg_api_msg_config_t cfg;
vl_msg_api_msg_config_t *c = &cfg;
vlib_main_t *vm = vlib_get_main ();
clib_memset (c, 0, sizeof (*c));
/* Map as the vlib (server) side of the region. */
if ((rv = vl_map_shmem (region_name, 1 /* is_vlib */ )) < 0)
/* Configure each memclnt message from the X-macro list. */
#define _(N,n,t) do { \
  c->id = VL_API_##N; \
  c->handler = vl_api_##n##_t_handler; \
  c->cleanup = vl_noop_handler; \
  c->endian = vl_api_##n##_t_endian; \
  c->print = vl_api_##n##_t_print; \
  c->size = sizeof(vl_api_##n##_t); \
  c->traced = t; /* trace, so these msgs print */ \
  c->replay = 0; /* don't replay client create/delete msgs */ \
  c->message_bounce = 0; /* don't bounce this message */ \
  vl_msg_api_config(c);} while (0);
foreach_vlib_api_msg;
/* Register message name/CRC pairs generated from memclnt.api. */
#define vl_msg_name_crc_list
#include <vlibmemory/memclnt.api.h>
#undef vl_msg_name_crc_list
#define _(id, n, crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
foreach_vl_msg_name_crc_memclnt;
/*
 * special-case freeing of memclnt_delete messages, so we can
 * simply munmap pairwise / private API segments...
 */
am->message_bounce[VL_API_MEMCLNT_DELETE] = 1;
am->is_mp_safe[VL_API_MEMCLNT_KEEPALIVE_REPLY] = 1;
am->is_mp_safe[VL_API_MEMCLNT_KEEPALIVE] = 1;
vlib_set_queue_signal_callback (vm, memclnt_queue_callback);
ASSERT (shm && shm->vl_input_queue);
/* Make a note so we can always find the primary region easily */
am->vlib_primary_rp = am->vlib_rp;
/* vlib init-time wrapper around vl_mem_api_init: map the configured API
 * region and convert failure into a clib error.
 * NOTE(review): braces, the rv declaration and the success return are not
 * visible in this view of the file. */
map_api_segment_init (vlib_main_t * vm)
api_main_t *am = vlibapi_get_main ();
if ((rv = vl_mem_api_init (am->region_name)) < 0)
return clib_error_return (0, "vl_mem_api_init (%s) failed",
/* Send a keepalive ping to one client, unless its queue head has moved
 * since the last scan (which implies the client is alive and consuming).
 * Increments the client's unanswered-ping counter; frees the message if
 * the client's queue is full.
 * NOTE(review): braces and some declarations (q, mp) are not visible in
 * this view of the file. */
send_memclnt_keepalive (vl_api_registration_t * regp, f64 now)
vl_api_memclnt_keepalive_t *mp;
api_main_t *am = vlibapi_get_main ();
q = regp->vl_input_queue;
/*
 * If the queue head is moving, assume that the client is processing
 * messages and skip the ping. This heuristic may fail if the queue
 * is in the same position as last time, net of wrapping; in which
 * case, the client will receive a keepalive.
 */
if (regp->last_queue_head != q->head)
regp->last_heard = now;
regp->unanswered_pings = 0;
regp->last_queue_head = q->head;
/*
 * push/pop shared memory segment, so this routine
 * will work with "normal" as well as "private segment"
 */
mp = vl_mem_api_alloc_as_if_client_w_reg (regp, sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_MEMCLNT_KEEPALIVE);
/* Encode our handle so the reply's context identifies the client. */
mp->context = mp->client_index =
  vl_msg_api_handle_from_index_and_epoch
  (regp->vl_api_registration_pool_index,
   am->shmem_hdr->application_restarts);
regp->unanswered_pings++;
/* Failure-to-send due to a stuffed queue is absolutely expected */
if (svm_queue_add (q, (u8 *) & mp, 1 /* nowait */ ))
vl_msg_api_free_w_region (regp->vlib_rp, mp);
/* Per-client liveness policy, called from the dead-client scan:
 *  - silent > 10s and 2 unanswered pings: check the client process with
 *    kill(pid, 0); if alive it is merely lazy, otherwise mark it dead;
 *  - otherwise ping it (or reset the counter when recently heard from);
 *  - a NULL registration slot is recorded in confused_indices.
 * NOTE(review): braces and the dead_indices parameter line are not
 * visible in this view of the file. */
vl_mem_send_client_keepalive_w_reg (api_main_t * am, f64 now,
				    vl_api_registration_t ** regpp,
				    u32 ** confused_indices)
vl_api_registration_t *regp = *regpp;
/* If we haven't heard from this client recently... */
if (regp->last_heard < (now - 10.0))
if (regp->unanswered_pings == 2)
q = regp->vl_input_queue;
/* Signal 0 probes process existence without sending a signal. */
if (kill (q->consumer_pid, 0) >= 0)
clib_warning ("REAPER: lazy binary API client '%s'",
regp->unanswered_pings = 0;
regp->last_heard = now;
clib_warning ("REAPER: binary API client '%s' died",
vec_add1 (*dead_indices, regpp - am->vl_clients);
send_memclnt_keepalive (regp, now);
regp->unanswered_pings = 0;
/* Registration pointer was NULL: pool corruption, record for cleanup. */
clib_warning ("NULL client registration index %d",
	      regpp - am->vl_clients);
vec_add1 (*confused_indices, regpp - am->vl_clients);
/* Periodic reaper: ping every registered client, then clean up the ones
 * that died (run reaper callbacks, munmap private segments, free queues
 * and registration objects) and repair any corrupt (NULL) pool slots.
 * The dead/confused index vectors are static so their storage is reused
 * across scans.
 * NOTE(review): braces, several declarations (i, handle, oldheap) and
 * some statements are not visible in this view of the file; note also
 * that the inner loop at "for (i = 0; ..." reuses the outer loop's
 * index variable i in the visible text — confirm against the full
 * source. */
vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
vl_api_registration_t **regpp;
static u32 *dead_indices;
static u32 *confused_indices;
vec_reset_length (dead_indices);
vec_reset_length (confused_indices);
/* Phase 1: ping/triage every client. */
pool_foreach (regpp, am->vl_clients) {
vl_mem_send_client_keepalive_w_reg (am, now, regpp, &dead_indices,
/* This should "never happen," but if it does, fix it... */
if (PREDICT_FALSE (vec_len (confused_indices) > 0))
for (i = 0; i < vec_len (confused_indices); i++)
pool_put_index (am->vl_clients, confused_indices[i]);
if (PREDICT_FALSE (vec_len (dead_indices) > 0))
/* Allow the application to clean up its registrations */
for (i = 0; i < vec_len (dead_indices); i++)
regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]);
handle = vl_msg_api_handle_from_index_and_epoch
  (dead_indices[i], shm->application_restarts);
vl_api_call_reaper_functions (handle);
/* Phase 2: free resources on the API message heap. */
oldheap = vl_msg_push_heap ();
for (i = 0; i < vec_len (dead_indices); i++)
regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]);
/* Is this a pairwise SVM segment? */
if ((*regpp)->vlib_rp != am->vlib_rp)
svm_region_t *dead_rp = (*regpp)->vlib_rp;
/* Note: account for the memfd header page */
uword virtual_base = dead_rp->virtual_base - MMAP_PAGESIZE;
uword virtual_size = dead_rp->virtual_size + MMAP_PAGESIZE;
/* For horizontal scaling, add a hash table... */
for (i = 0; i < vec_len (am->vlib_private_rps); i++)
if (am->vlib_private_rps[i] == dead_rp)
vec_delete (am->vlib_private_rps, 1, i);
/* Region not found in the private list: warn outside the heap. */
svm_pop_heap (oldheap);
clib_warning ("private rp %llx AWOL", dead_rp);
oldheap = svm_push_data_heap (am->vlib_rp);
/* Kill it, accounting for the memfd header page */
svm_pop_heap (oldheap);
if (munmap ((void *) virtual_base, virtual_size) < 0)
clib_unix_warning ("munmap");
/* Reset the queue-length-address cache */
vec_reset_length (vl_api_queue_cursizes);
oldheap = svm_push_data_heap (am->vlib_rp);
/* Poison the old registration */
clib_memset (*regpp, 0xF3, sizeof (**regpp));
clib_mem_free (*regpp);
/* no dangling references, please */
svm_pop_heap (oldheap);
clib_warning ("Duplicate free, client index %d",
	      regpp - am->vl_clients);
oldheap = svm_push_data_heap (am->vlib_rp);
/* Reclaim any messages still owned by dead clients in this region. */
svm_client_scan_this_region_nolock (am->vlib_rp);
vl_msg_pop_heap (oldheap);
/* Phase 3: release the pool slots. */
for (i = 0; i < vec_len (dead_indices); i++)
pool_put_index (am->vl_clients, dead_indices[i]);
/* Optional test hook: when set, called with (msg id, msg) before each
 * message is dispatched, allowing a fuzzer to mutate the message. */
void (*vl_mem_api_fuzz_hook) (u16, void *);

/* This is only to be called from a vlib/vnet app */
/* Dispatch one binary-API message: optional elog tracing, rx trace,
 * debug printing, barrier sync for non-mp-safe handlers, temporary
 * swap of am->vlib_rp/shmem_hdr for private segments, optional
 * auto-endian conversion and perf-counter callbacks around the handler.
 * Messages without a handler are freed unless marked "bounce".
 * NOTE(review): braces, a few declarations (ed, is_mp_safe) and some
 * statements are not visible in this view of the file. */
vl_mem_api_handler_with_vm_node (api_main_t *am, svm_region_t *vlib_rp,
				 void *the_msg, vlib_main_t *vm,
				 vlib_node_runtime_t *node, u8 is_private)
/* The message id is the first 16 bits, in network byte order. */
u16 id = clib_net_to_host_u16 (*((u16 *) the_msg));
u8 *(*handler) (void *, void *, void *);
u8 *(*print_fp) (void *, void *);
svm_region_t *old_vlib_rp;
void *save_shmem_hdr;
/* Event-log the message name, if api-message tracing is enabled. */
if (PREDICT_FALSE (am->elog_trace_api_messages))
ELOG_TYPE_DECLARE (e) = {
.format = "api-msg: %s",
ed = ELOG_DATA (am->elog_main, e);
if (id < vec_len (am->msg_names) && am->msg_names[id])
ed->c = elog_string (am->elog_main, (char *) am->msg_names[id]);
ed->c = elog_string (am->elog_main, "BOGUS");
/* Known message with a registered handler? */
if (id < vec_len (am->msg_handlers) && am->msg_handlers[id])
handler = (void *) am->msg_handlers[id];
if (PREDICT_FALSE (am->rx_trace && am->rx_trace->enabled))
vl_msg_api_trace (am, am->rx_trace, the_msg);
/* Debug print mode: dump the message via its print handler. */
if (PREDICT_FALSE (am->msg_print_flag))
fformat (stdout, "[%d]: %s\n", id, am->msg_names[id]);
print_fp = (void *) am->msg_print_handlers[id];
fformat (stdout, "  [no registered print fn for msg %d]\n", id);
(*print_fp) (the_msg, vm);
is_mp_safe = am->is_mp_safe[id];
/* Non-mp-safe handlers run under the thread barrier. */
vl_msg_api_barrier_trace_context (am->msg_names[id]);
vl_msg_api_barrier_sync ();
/* Point the api main at this message's (possibly private) region. */
old_vlib_rp = am->vlib_rp;
save_shmem_hdr = am->shmem_hdr;
am->vlib_rp = vlib_rp;
am->shmem_hdr = (void *) vlib_rp->user_ctx;
if (PREDICT_FALSE (vl_mem_api_fuzz_hook != 0))
(*vl_mem_api_fuzz_hook) (id, the_msg);
/* Auto-endian messages are byte-swapped before dispatch. */
if (am->is_autoendian[id])
void (*endian_fp) (void *);
endian_fp = am->msg_endian_handlers[id];
(*endian_fp) (the_msg);
if (PREDICT_FALSE (vec_len (am->perf_counter_cbs) != 0))
clib_call_callbacks (am->perf_counter_cbs, am, id, 0 /* before */);
(*handler) (the_msg, vm, node);
if (PREDICT_FALSE (vec_len (am->perf_counter_cbs) != 0))
clib_call_callbacks (am->perf_counter_cbs, am, id, 1 /* after */);
/* Restore the primary region pointers. */
am->vlib_rp = old_vlib_rp;
am->shmem_hdr = save_shmem_hdr;
vl_msg_api_barrier_release ();
clib_warning ("no handler for msg id %d", id);
/*
 * Special-case, so we can e.g. bounce messages off the vnet
 * main thread without copying them...
 */
if (id >= vec_len (am->message_bounce) || !(am->message_bounce[id]))
/* Free in the message's own region, swapping region pointers around
 * the free call. */
old_vlib_rp = am->vlib_rp;
save_shmem_hdr = am->shmem_hdr;
am->vlib_rp = vlib_rp;
am->shmem_hdr = (void *) vlib_rp->user_ctx;
vl_msg_api_free (the_msg);
am->vlib_rp = old_vlib_rp;
am->shmem_hdr = save_shmem_hdr;
/* Event-log completion, recording whether the barrier was taken. */
if (PREDICT_FALSE (am->elog_trace_api_messages))
ELOG_TYPE_DECLARE (e) = { .format = "api-msg-done(%s): %s",
.format_args = "t4T4",
ed = ELOG_DATA (am->elog_main, e);
if (id < vec_len (am->msg_names) && am->msg_names[id])
ed->c = elog_string (am->elog_main, (char *) am->msg_names[id]);
ed->c = elog_string (am->elog_main, "BOGUS");
ed->barrier = is_mp_safe;
/* Drain/dispatch helper: pull one message (if any) from the region's
 * input queue and hand it to vl_mem_api_handler_with_vm_node.
 * NOTE(review): braces, the q/mp declarations, the is_private argument
 * line and the return value are not visible in this view of the file. */
void_mem_api_handle_msg_i (api_main_t * am, svm_region_t * vlib_rp,
			   vlib_main_t * vm, vlib_node_runtime_t * node,
q = ((vl_shmem_hdr_t *) (void *) vlib_rp->user_ctx)->vl_input_queue;
/* Non-blocking dequeue; zero return means a message was read. */
if (!svm_queue_sub2 (q, (u8 *) & mp))
VL_MSG_API_UNPOISON ((void *) mp);
vl_mem_api_handler_with_vm_node (am, vlib_rp, (void *) mp, vm, node,
/* Process one message from the primary (main) API segment. */
vl_mem_api_handle_msg_main (vlib_main_t * vm, vlib_node_runtime_t * node)
api_main_t *am = vlibapi_get_main ();
return void_mem_api_handle_msg_i (am, am->vlib_rp, vm, node,
				  0 /* is_private */ );
/* Process queued RPC requests on the main thread: swap the pending and
 * processing vectors under the spinlock, then dispatch the batch under a
 * single barrier sync for efficiency.
 * NOTE(review): braces, declarations (tmp, mp, i) and the return value
 * are not visible in this view of the file. */
vl_mem_api_handle_rpc (vlib_main_t * vm, vlib_node_runtime_t * node)
api_main_t *am = vlibapi_get_main ();
/*
 * Swap pending and processing vectors, then process the RPCs
 * Avoid deadlock conditions by construction.
 */
clib_spinlock_lock_if_init (&vm->pending_rpc_lock);
tmp = vm->processing_rpc_requests;
vec_reset_length (tmp);
vm->processing_rpc_requests = vm->pending_rpc_requests;
vm->pending_rpc_requests = tmp;
clib_spinlock_unlock_if_init (&vm->pending_rpc_lock);
/*
 * RPCs are used to reflect function calls to thread 0
 * when the underlying code is not thread-safe.
 *
 * Grabbing the thread barrier across a set of RPCs
 * greatly increases efficiency, and avoids
 * running afoul of the barrier sync holddown timer.
 * The barrier sync code supports recursive locking.
 *
 * We really need to rewrite RPC-based code...
 */
if (PREDICT_TRUE (vec_len (vm->processing_rpc_requests)))
vl_msg_api_barrier_sync ();
for (i = 0; i < vec_len (vm->processing_rpc_requests); i++)
mp = vm->processing_rpc_requests[i];
vl_mem_api_handler_with_vm_node (am, am->vlib_rp, (void *) mp, vm,
				 node, 0 /* is_private */);
vl_msg_api_barrier_release ();
/* Process one message from the private (pairwise) API segment selected
 * by reg_index. */
vl_mem_api_handle_msg_private (vlib_main_t * vm, vlib_node_runtime_t * node,
api_main_t *am = vlibapi_get_main ();
return void_mem_api_handle_msg_i (am, am->vlib_private_rps[reg_index], vm,
				  node, 1 /* is_private */ );
/* Translate a client handle into its registration, returning 0 (after
 * bumping the missing-client counter) when the pool slot is free or the
 * handle's epoch does not match the current application restart count.
 * NOTE(review): braces, some statements (regp assignment, returns) are
 * not visible in this view of the file. */
vl_api_registration_t *
vl_mem_api_client_index_to_registration (u32 handle)
vl_api_registration_t **regpp;
vl_api_registration_t *regp;
api_main_t *am = vlibapi_get_main ();
vl_shmem_hdr_t *shmem_hdr;
index = vl_msg_api_handle_get_index (handle);
regpp = am->vl_clients + index;
if (pool_is_free (am->vl_clients, regpp))
vl_msg_api_increment_missing_client_counter ();
/* Validate the epoch encoded in the handle against this client's
 * segment header. */
shmem_hdr = (vl_shmem_hdr_t *) regp->shmem_hdr;
if (!vl_msg_api_handle_is_valid (handle, shmem_hdr->application_restarts))
vl_msg_api_increment_missing_client_counter ();
/* Map a client handle to its input queue; ~0 is the special case of vlib
 * sending a message to itself (the primary input queue).
 * NOTE(review): braces and the NULL-registration check are not visible
 * in this view of the file. */
vl_api_client_index_to_input_queue (u32 index)
vl_api_registration_t *regp;
api_main_t *am = vlibapi_get_main ();
/* Special case: vlib trying to send itself a message */
if (index == (u32) ~ 0)
return (am->shmem_hdr->vl_input_queue);
regp = vl_mem_api_client_index_to_registration (index);
return (regp->vl_input_queue);
/* Init function: arrange for the shared-memory region to be unmapped at
 * process exit. */
static clib_error_t *
setup_memclnt_exit (vlib_main_t * vm)
atexit (vl_unmap_shmem);
VLIB_INIT_FUNCTION (setup_memclnt_exit);
/* format() callback producing the ring-statistics table: header row when
 * called with a NULL header, otherwise the vlib and client ring rows, and
 * (for the main segment) the global miss/restart counters.
 * NOTE(review): braces, the declarations of ap/i and the ap++ advance
 * lines are not visible in this view of the file. */
format_api_message_rings (u8 * s, va_list * args)
api_main_t *am = va_arg (*args, api_main_t *);
vl_shmem_hdr_t *shmem_hdr = va_arg (*args, vl_shmem_hdr_t *);
int main_segment = va_arg (*args, int);
/* Header-only invocation. */
return format (s, "%8s %8s %8s %8s %8s\n",
	       "Owner", "Size", "Nitems", "Hits", "Misses");
ap = shmem_hdr->vl_rings;
for (i = 0; i < vec_len (shmem_hdr->vl_rings); i++)
s = format (s, "%8s %8d %8d %8d %8d\n",
	    "vlib", ap->size, ap->nitems, ap->hits, ap->misses);
ap = shmem_hdr->client_rings;
for (i = 0; i < vec_len (shmem_hdr->client_rings); i++)
s = format (s, "%8s %8d %8d %8d %8d\n",
	    "clnt", ap->size, ap->nitems, ap->hits, ap->misses);
/* Global counters are only meaningful for the main segment. */
s = format (s, "%d ring miss fallback allocations\n", am->ring_misses);
"%d application restarts, %d reclaimed msgs, %d garbage collects\n",
  shmem_hdr->application_restarts, shmem_hdr->restart_reclaims,
  shmem_hdr->garbage_collects);
/* CLI handler for "show api ring-stats": print ring statistics for the
 * primary API segment, then for each private (pairwise) segment, looking
 * up the owning client registration by matching vlib_rp.
 * NOTE(review): braces, the i declaration and the return statement are
 * not visible in this view of the file. */
static clib_error_t *
vl_api_ring_command (vlib_main_t * vm,
		     unformat_input_t * input, vlib_cli_command_t * cli_cmd)
vl_shmem_hdr_t *shmem_hdr;
api_main_t *am = vlibapi_get_main ();
/* First, dump the primary region rings.. */
if (am->vlib_primary_rp == 0 || am->vlib_primary_rp->user_ctx == 0)
vlib_cli_output (vm, "Shared memory segment not initialized...\n");
shmem_hdr = (void *) am->vlib_primary_rp->user_ctx;
vlib_cli_output (vm, "Main API segment rings:");
/* NULL shmem_hdr arg prints only the column header row. */
vlib_cli_output (vm, "%U", format_api_message_rings, am,
		 0 /* print header */ , 0 /* notused */ );
vlib_cli_output (vm, "%U", format_api_message_rings, am,
		 shmem_hdr, 1 /* main segment */ );
for (i = 0; i < vec_len (am->vlib_private_rps); i++)
svm_region_t *vlib_rp = am->vlib_private_rps[i];
shmem_hdr = (void *) vlib_rp->user_ctx;
vl_api_registration_t **regpp;
vl_api_registration_t *regp = 0;
/* For horizontal scaling, add a hash table... */
pool_foreach (regpp, am->vl_clients)
if (regp && regp->vlib_rp == vlib_rp)
vlib_cli_output (vm, "%s segment rings:", regp->name);
vlib_cli_output (vm, "regp %llx not found?", regp);
vlib_cli_output (vm, "%U", format_api_message_rings, am,
		 0 /* print header */ , 0 /* notused */ );
vlib_cli_output (vm, "%U", format_api_message_rings, am,
		 shmem_hdr, 0 /* main segment */ );
* Display binary api message allocation ring statistics
/* CLI registration for the handler above. */
VLIB_CLI_COMMAND (cli_show_api_ring_command, static) =
.path = "show api ring-stats",
.short_help = "Message ring statistics",
.function = vl_api_ring_command,
/* Module init: remove stale /dev/shm segments from previous runs, then
 * map/create the SVM global region with sizes, base VA and ownership
 * taken from api_main (falling back to the compiled-in defaults).
 * NOTE(review): braces, the if/else around the root_path formats, and
 * the return are not visible in this view of the file. */
vlibmemory_init (vlib_main_t * vm)
api_main_t *am = vlibapi_get_main ();
svm_map_region_args_t _a, *a = &_a;
u8 *remove_path1, *remove_path2;
void vlibsocket_reference (void);
/* Force the socket-transport object file to be linked in. */
vlibsocket_reference ();
/*
 * By popular request / to avoid support fires, remove any old api segment
 */
if (am->root_path == 0)
remove_path1 = format (0, "/dev/shm/global_vm%c", 0);
remove_path2 = format (0, "/dev/shm/vpe-api%c", 0);
/* Namespaced paths when a root path is configured. */
remove_path1 = format (0, "/dev/shm/%s-global_vm%c", am->root_path, 0);
remove_path2 = format (0, "/dev/shm/%s-vpe-api%c", am->root_path, 0);
/* Best-effort removal; errors are deliberately ignored. */
(void) unlink ((char *) remove_path1);
(void) unlink ((char *) remove_path2);
vec_free (remove_path1);
vec_free (remove_path2);
clib_memset (a, 0, sizeof (*a));
a->root_path = am->root_path;
a->name = SVM_GLOBAL_REGION_NAME;
/* NOTE(review): the unary '+' before the default base-VA call looks
 * like an extraction artifact — confirm against the full source. */
a->baseva = (am->global_baseva != 0) ?
  am->global_baseva : +svm_get_global_region_base_va ();
a->size = (am->global_size != 0) ? am->global_size : SVM_GLOBAL_REGION_SIZE;
a->flags = SVM_FLAGS_NODATA;
a->uid = am->api_uid;
a->gid = am->api_gid;
(am->global_pvt_heap_size !=
 0) ? am->global_pvt_heap_size : SVM_PVT_MHEAP_SIZE;
svm_region_init_args (a);
/* Set the shared-memory API region name used by vl_mem_api_init.
 * The caller retains ownership of the string; only the pointer is
 * stored. */
vl_set_memory_region_name (const char *name)
api_main_t *am = vlibapi_get_main ();
am->region_name = name;
1122 * fd.io coding-style-patch-verification: ON
1125 * eval: (c-set-style "gnu")