2 *------------------------------------------------------------------
3 * Copyright (c) 2018 Cisco and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
19 #include <vlib/vlib.h>
20 #include <vlibapi/api.h>
21 #include <vlibmemory/api.h>
22 #include <vlibmemory/memory_api.h>
24 #include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */
26 #define vl_typedefs /* define message structures */
27 #include <vlibmemory/vl_memory_api_h.h>
30 /* instantiate all the print functions we know about */
31 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
33 #include <vlibmemory/vl_memory_api_h.h>
36 /* instantiate all the endian swap functions we know about */
38 #include <vlibmemory/vl_memory_api_h.h>
/* Pretty-print a memclnt_create request: client name, shared-memory
 * input-queue address, caller context and context quota.
 * NOTE(review): this listing elides some lines (braces, return); only
 * the visible statements are documented. */
42 vl_api_memclnt_create_t_print (vl_api_memclnt_create_t * a, void *handle)
44 vl_print (handle, "vl_api_memclnt_create_t:\n");
45 vl_print (handle, "name: %s\n", a->name);
46 vl_print (handle, "input_queue: 0x%wx\n", a->input_queue);
47 vl_print (handle, "context: %u\n", (unsigned) a->context);
48 vl_print (handle, "ctx_quota: %ld\n", (long) a->ctx_quota);
/* Pretty-print a memclnt_delete request: client pool index and the
 * opaque registration handle echoed back by the client. */
53 vl_api_memclnt_delete_t_print (vl_api_memclnt_delete_t * a, void *handle)
55 vl_print (handle, "vl_api_memclnt_delete_t:\n");
56 vl_print (handle, "index: %u\n", (unsigned) a->index);
57 vl_print (handle, "handle: 0x%wx\n", a->handle);
/* Cache of &q->cursize pointers, one per API input queue: slot 0 is the
 * main shared-memory segment's queue, followed by one slot per private
 * (pairwise) region. Rebuilt whenever the private-region count changes;
 * reset to zero length when a segment is unmapped. */
61 volatile int **vl_api_queue_cursizes;

/* Queue-signal callback installed via vlib_set_queue_signal_callback().
 * Rebuilds the cursize cache if the number of private regions changed,
 * then signals QUEUE_SIGNAL_EVENT to the API client node if any input
 * queue is non-empty or if RPC requests are pending.
 * NOTE(review): several lines are elided from this listing (braces,
 * declarations of q/i, event data args); comments cover only what is
 * visible. */
64 memclnt_queue_callback (vlib_main_t * vm)
67 api_main_t *am = &api_main;
/* Cache is stale when its length no longer matches 1 (main segment)
 * plus the number of private regions. */
69 if (PREDICT_FALSE (vec_len (vl_api_queue_cursizes) !=
70 1 + vec_len (am->vlib_private_rps)))
72 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
78 q = shmem_hdr->vl_input_queue;
82 vec_add1 (vl_api_queue_cursizes, &q->cursize);
/* One cursize pointer per private/pairwise region. */
84 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
86 svm_region_t *vlib_rp = am->vlib_private_rps[i];
88 shmem_hdr = (void *) vlib_rp->user_ctx;
89 q = shmem_hdr->vl_input_queue;
90 vec_add1 (vl_api_queue_cursizes, &q->cursize);
/* Any queue with a non-zero cursize means there is work to do. */
94 for (i = 0; i < vec_len (vl_api_queue_cursizes); i++)
96 if (*vl_api_queue_cursizes[i])
98 vm->queue_signal_pending = 1;
99 vm->api_queue_nonempty = 1;
100 vlib_process_signal_event (vm, vl_api_clnt_node.index,
101 /* event_type */ QUEUE_SIGNAL_EVENT,
/* Pending RPCs also wake the API node, even with empty queues. */
106 if (vec_len (vm->pending_rpc_requests))
108 vm->queue_signal_pending = 1;
109 vm->api_queue_nonempty = 1;
110 vlib_process_signal_event (vm, vl_api_clnt_node.index,
111 /* event_type */ QUEUE_SIGNAL_EVENT,
/*
 * vl_api_memclnt_create_internal
 *
 * Register an in-process ("internal") shared-memory API client with the
 * given name and input queue. Allocates the registration object on the
 * API segment heap under the segment mutex, and returns a client handle
 * combining the pool index with the segment's application_restarts epoch.
 * Must run on thread 0 (asserted below).
 * NOTE(review): lines are elided in this listing (braces, declarations
 * of svm/oldheap, regp assignment); comments cover visible code only.
 */
120 vl_api_memclnt_create_internal (char *name, svm_queue_t * q)
122 vl_api_registration_t **regpp;
123 vl_api_registration_t *regp;
126 api_main_t *am = &api_main;
/* Registrations may only be created from the main thread. */
128 ASSERT (vlib_get_thread_index () == 0);
129 pool_get (am->vl_clients, regpp);
/* Allocate the registration on the shared-memory heap, serialized by
 * the segment mutex. */
133 pthread_mutex_lock (&svm->mutex);
134 oldheap = svm_push_data_heap (svm);
135 *regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
138 clib_memset (regp, 0, sizeof (*regp));
139 regp->registration_type = REGISTRATION_TYPE_SHMEM;
140 regp->vl_api_registration_pool_index = regpp - am->vl_clients;
142 regp->shmem_hdr = am->shmem_hdr;
144 regp->vl_input_queue = q;
/* NUL-terminate the name so it can be used as a C string later. */
145 regp->name = format (0, "%s%c", name, 0);
147 pthread_mutex_unlock (&svm->mutex);
148 svm_pop_heap (oldheap);
/* Handle = (pool index, restart epoch) so stale handles from a prior
 * application incarnation can be detected. */
149 return vl_msg_api_handle_from_index_and_epoch
150 (regp->vl_api_registration_pool_index,
151 am->shmem_hdr->application_restarts);
/*
 * vl_api_memclnt_create_t_handler
 *
 * Handle a client connection request: allocate and initialize a
 * registration on the shared-memory heap, (lazily) serialize the
 * message table, and send a memclnt_create_reply carrying the client
 * handle and the message-table pointer back on the client's queue.
 * NOTE(review): this listing elides lines (braces, declarations of
 * svm/q/oldheap/msg_table/rv, the else keyword before line 225);
 * comments describe visible code only.
 */
158 vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
160 vl_api_registration_t **regpp;
161 vl_api_registration_t *regp;
162 vl_api_memclnt_create_reply_t *rp;
167 api_main_t *am = &api_main;
171 * This is tortured. Maintain a vlib-address-space private
172 * pool of client registrations. We use the shared-memory virtual
173 * address of client structure as a handle, to allow direct
174 * manipulation of context quota vbls from the client library.
176 * This scheme causes trouble w/ API message trace replay, since
177 * some random VA from clib_mem_alloc() certainly won't
178 * occur in the Linux sim. The (very) few places
179 * that care need to use the pool index.
181 * Putting the registration object(s) into a pool in shared memory and
182 * using the pool index as a handle seems like a great idea.
183 * Unfortunately, each and every reference to that pool would need
184 * to be protected by a mutex:
188 * convert pool index to
196 pool_get (am->vl_clients, regpp);
/* Registration object lives on the shared-memory heap; serialize with
 * the segment mutex. */
200 pthread_mutex_lock (&svm->mutex);
201 oldheap = svm_push_data_heap (svm);
202 *regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
205 clib_memset (regp, 0, sizeof (*regp));
206 regp->registration_type = REGISTRATION_TYPE_SHMEM;
207 regp->vl_api_registration_pool_index = regpp - am->vl_clients;
209 regp->shmem_hdr = am->shmem_hdr;
210 regp->clib_file_index = am->shmem_hdr->clib_file_index;
/* The client passed its input-queue VA in the message; adopt it. */
212 q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
213 VL_MSG_API_SVM_QUEUE_UNPOISON (q);
/* Copy and NUL-terminate the client-supplied name. */
215 regp->name = format (0, "%s", mp->name);
216 vec_add1 (regp->name, 0);
/* Serialize the message table once; reuse it for subsequent clients
 * on the primary region. Pairwise/private regions get their own copy. */
218 if (am->serialized_message_table_in_shmem == 0)
219 am->serialized_message_table_in_shmem =
220 vl_api_serialize_message_table (am, 0);
222 if (am->vlib_rp != am->vlib_primary_rp)
223 msg_table = vl_api_serialize_message_table (am, 0);
225 msg_table = am->serialized_message_table_in_shmem;
227 pthread_mutex_unlock (&svm->mutex);
228 svm_pop_heap (oldheap);
/* Build and send the reply on the client's own input queue. */
230 rp = vl_msg_api_alloc (sizeof (*rp));
231 rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_REPLY);
232 rp->handle = (uword) regp;
233 rp->index = vl_msg_api_handle_from_index_and_epoch
234 (regp->vl_api_registration_pool_index,
235 am->shmem_hdr->application_restarts);
236 rp->context = mp->context;
237 rp->response = ntohl (rv);
238 rp->message_table = pointer_to_uword (msg_table);
240 vl_msg_api_send_shmem (q, (u8 *) & rp);
/* Walk the registered client-reaper callbacks for the given client
 * index, reporting (but not aborting on) any error a reaper returns.
 * Used on both explicit client delete and dead-client scan paths.
 * NOTE(review): loop header/braces and return are elided in this
 * listing; presumably iteration stops at a NULL next pointer. */
244 vl_api_call_reaper_functions (u32 client_index)
246 clib_error_t *error = 0;
247 _vl_msg_api_function_list_elt_t *i;
249 i = api_main.reaper_function_registrations;
252 error = i->f (client_index);
254 clib_error_report (error);
255 i = i->next_init_function;
/*
 * vl_api_memclnt_delete_t_handler
 *
 * Handle a client disconnect: run reaper callbacks, validate the
 * (index, epoch) handle against the current application_restarts epoch,
 * optionally send a delete reply, then tear down the registration.
 * If the client lived on a pairwise/private API segment, the whole
 * segment is munmap'ed; otherwise the registration object is freed from
 * the shared heap. The request message itself is a "bounce" message and
 * is freed manually at the end.
 * NOTE(review): many lines are elided from this listing (braces,
 * handle/svm/oldheap declarations, the reply-suppression condition near
 * line 300, regp assignment); comments describe visible code only.
 */
264 vl_api_memclnt_delete_t_handler (vl_api_memclnt_delete_t * mp)
266 vl_api_registration_t **regpp;
267 vl_api_registration_t *regp;
268 vl_api_memclnt_delete_reply_t *rp;
271 api_main_t *am = &api_main;
272 u32 handle, client_index, epoch;
/* Give registered reapers a chance to clean up application state. */
276 if (vl_api_call_reaper_functions (handle))
279 epoch = vl_msg_api_handle_get_epoch (handle);
280 client_index = vl_msg_api_handle_get_index (handle);
/* Reject handles minted in a previous incarnation of the application. */
282 if (epoch != (am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK))
285 ("Stale clnt delete index %d old epoch %d cur epoch %d",
287 (am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK));
291 regpp = pool_elt_at_index (am->vl_clients, client_index);
293 if (!pool_is_free (am->vl_clients, regpp))
298 int private_registration = 0;
300 /* Send reply unless client asked us to do the cleanup */
304 * Note: the API message handling path will set am->vlib_rp
305 * as appropriate for pairwise / private memory segments
307 rp = vl_msg_api_alloc (sizeof (*rp));
308 rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_DELETE_REPLY);
309 rp->handle = mp->handle;
312 vl_msg_api_send_shmem (regp->vl_input_queue, (u8 *) & rp);
/* Sanity check: handle index must match the registration's own index. */
313 if (client_index != regp->vl_api_registration_pool_index)
315 clib_warning ("mismatch client_index %d pool_index %d",
317 regp->vl_api_registration_pool_index);
318 vl_msg_api_free (rp);
323 /* No dangling references, please */
326 /* For horizontal scaling, add a hash table... */
327 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
329 /* Is this a pairwise / private API segment? */
330 if (am->vlib_private_rps[i] == svm)
332 /* Note: account for the memfd header page */
333 uword virtual_base = svm->virtual_base - MMAP_PAGESIZE;
334 uword virtual_size = svm->virtual_size + MMAP_PAGESIZE;
337 * Kill the registration pool element before we make
338 * the index vanish forever
340 pool_put_index (am->vl_clients,
341 regp->vl_api_registration_pool_index);
/* Remove the region from the private list, then unmap it. */
343 vec_delete (am->vlib_private_rps, 1, i);
344 /* Kill it, accounting for the memfd header page */
345 if (munmap ((void *) virtual_base, virtual_size) < 0)
346 clib_unix_warning ("munmap");
347 /* Reset the queue-length-address cache */
348 vec_reset_length (vl_api_queue_cursizes);
349 private_registration = 1;
/* Main-segment client: free registration state from the shared heap. */
354 if (private_registration == 0)
356 pool_put_index (am->vl_clients,
357 regp->vl_api_registration_pool_index);
358 pthread_mutex_lock (&svm->mutex);
359 oldheap = svm_push_data_heap (svm);
361 svm_queue_free (regp->vl_input_queue);
362 vec_free (regp->name);
363 /* Poison the old registration */
364 clib_memset (regp, 0xF1, sizeof (*regp));
365 clib_mem_free (regp);
366 pthread_mutex_unlock (&svm->mutex);
367 svm_pop_heap (oldheap);
369 * These messages must be freed manually, since they're set up
370 * as "bounce" messages. In the private_registration == 1 case,
371 * we kill the shared-memory segment which contains the message
374 vl_msg_api_free (mp);
379 clib_warning ("unknown client ID %d", mp->index);
/*
 * client answered a ping, stave off the grim reaper...
 *
 * Refreshes the client's last_heard timestamp and clears its
 * unanswered-ping counter; warns if the context does not map to a
 * known registration. (The if/else around lines 396-400 is elided in
 * this listing.)
 */
387 vl_api_memclnt_keepalive_reply_t_handler
388 (vl_api_memclnt_keepalive_reply_t * mp)
390 vl_api_registration_t *regp;
391 vlib_main_t *vm = vlib_get_main ();
/* context carries the client handle assigned when the ping was sent. */
393 regp = vl_api_client_index_to_registration (mp->context);
396 regp->last_heard = vlib_time_now (vm);
397 regp->unanswered_pings = 0;
400 clib_warning ("BUG: anonymous memclnt_keepalive_reply");
/*
 * We can send ourselves these messages if someone uses the
 * builtin binary api test tool...
 *
 * Handle an inbound keepalive ping by immediately sending a
 * keepalive_reply (with the original context echoed back) on the main
 * segment's input queue.
 */
408 vl_api_memclnt_keepalive_t_handler (vl_api_memclnt_keepalive_t * mp)
410 vl_api_memclnt_keepalive_reply_t *rmp;
412 vl_shmem_hdr_t *shmem_hdr;
415 shmem_hdr = am->shmem_hdr;
/* alloc_as_if_client: the reply travels the client->vlib direction. */
417 rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp));
418 clib_memset (rmp, 0, sizeof (*rmp));
419 rmp->_vl_msg_id = ntohs (VL_API_MEMCLNT_KEEPALIVE_REPLY);
420 rmp->context = mp->context;
421 vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & rmp);
/*
 * To avoid filling the API trace buffer with boring messages,
 * don't trace memclnt_keepalive[_reply] msgs
 */

/* X-macro: (MESSAGE_ID, handler_suffix, traced) tuples expanded by
 * vl_mem_api_init() below to configure each built-in vlib API message. */
429 #define foreach_vlib_api_msg \
430 _(MEMCLNT_CREATE, memclnt_create, 1) \
431 _(MEMCLNT_DELETE, memclnt_delete, 1) \
432 _(MEMCLNT_KEEPALIVE, memclnt_keepalive, 0) \
433 _(MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply, 0)
/* Map the primary shared-memory API region as the vlib side, configure
 * the built-in memclnt messages via the foreach_vlib_api_msg X-macro,
 * and install the queue-signal callback. Records the primary region
 * pointer for later comparisons against pairwise/private regions.
 * NOTE(review): lines are elided in this listing (braces, rv/shm
 * declarations, return statements); comments cover visible code only. */
439 vl_mem_api_init (const char *region_name)
442 api_main_t *am = &api_main;
443 vl_msg_api_msg_config_t cfg;
444 vl_msg_api_msg_config_t *c = &cfg;
446 vlib_main_t *vm = vlib_get_main ();
448 clib_memset (c, 0, sizeof (*c));
/* is_vlib=1: we own the region and create its queues/rings. */
450 if ((rv = vl_map_shmem (region_name, 1 /* is_vlib */ )) < 0)
/* Per-message configuration applied to each tuple in
 * foreach_vlib_api_msg; 't' controls API tracing. */
453 #define _(N,n,t) do { \
454 c->id = VL_API_##N; \
456 c->handler = vl_api_##n##_t_handler; \
457 c->cleanup = vl_noop_handler; \
458 c->endian = vl_api_##n##_t_endian; \
459 c->print = vl_api_##n##_t_print; \
460 c->size = sizeof(vl_api_##n##_t); \
461 c->traced = t; /* trace, so these msgs print */ \
462 c->replay = 0; /* don't replay client create/delete msgs */ \
463 c->message_bounce = 0; /* don't bounce this message */ \
464 vl_msg_api_config(c);} while (0);
466 foreach_vlib_api_msg;
470 * special-case freeing of memclnt_delete messages, so we can
471 * simply munmap pairwise / private API segments...
473 am->message_bounce[VL_API_MEMCLNT_DELETE] = 1;
/* Keepalives touch no shared state that needs the barrier. */
474 am->is_mp_safe[VL_API_MEMCLNT_KEEPALIVE_REPLY] = 1;
475 am->is_mp_safe[VL_API_MEMCLNT_KEEPALIVE] = 1;
477 vlib_set_queue_signal_callback (vm, memclnt_queue_callback);
480 ASSERT (shm && shm->vl_input_queue);
482 /* Make a note so we can always find the primary region easily */
483 am->vlib_primary_rp = am->vlib_rp;
/* vlib init-time wrapper: map the configured API segment, converting a
 * vl_mem_api_init() failure into a clib_error_t for the init chain. */
489 map_api_segment_init (vlib_main_t * vm)
491 api_main_t *am = &api_main;
494 if ((rv = vl_mem_api_init (am->region_name)) < 0)
496 return clib_error_return (0, "vl_mem_api_init (%s) failed",
/* Send a keepalive ping to one client, unless its queue head has moved
 * since the last scan (in which case the client is clearly alive and we
 * just refresh its timestamps). Temporarily repoints am->vlib_rp /
 * am->shmem_hdr at the client's own segment so the message is allocated
 * in the right region, restoring both afterward.
 * NOTE(review): braces and the q declaration are elided in this
 * listing; comments describe visible code only. */
505 send_memclnt_keepalive (vl_api_registration_t * regp, f64 now)
507 vl_api_memclnt_keepalive_t *mp;
508 api_main_t *am = &api_main;
/* Save the current region/header so they can be restored below. */
509 svm_region_t *save_vlib_rp = am->vlib_rp;
510 vl_shmem_hdr_t *save_shmem_hdr = am->shmem_hdr;
511 q = regp->vl_input_queue;
514 * If the queue head is moving, assume that the client is processing
515 * messages and skip the ping. This heuristic may fail if the queue
516 * is in the same position as last time, net of wrapping; in which
517 * case, the client will receive a keepalive.
519 if (regp->last_queue_head != q->head)
521 regp->last_heard = now;
522 regp->unanswered_pings = 0;
523 regp->last_queue_head = q->head;
528 * push/pop shared memory segment, so this routine
529 * will work with "normal" as well as "private segment"
533 am->vlib_rp = regp->vlib_rp;
534 am->shmem_hdr = regp->shmem_hdr;
536 mp = vl_msg_api_alloc (sizeof (*mp));
537 clib_memset (mp, 0, sizeof (*mp));
538 mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_MEMCLNT_KEEPALIVE);
/* Context doubles as the client handle so the reply can be matched. */
539 mp->context = mp->client_index =
540 vl_msg_api_handle_from_index_and_epoch
541 (regp->vl_api_registration_pool_index,
542 am->shmem_hdr->application_restarts);
544 regp->unanswered_pings++;
546 /* Failure-to-send due to a stuffed queue is absolutely expected */
547 if (svm_queue_add (q, (u8 *) & mp, 1 /* nowait */ ))
548 vl_msg_api_free (mp);
/* Restore the saved region context. */
550 am->vlib_rp = save_vlib_rp;
551 am->shmem_hdr = save_shmem_hdr;
/* Per-registration keepalive policy, called from the dead-client scan:
 * - silent > 10s and 2 pings unanswered: probe the consumer PID with
 *   kill(pid, 0); a live-but-lazy client is forgiven, a dead one is
 *   queued on *dead_indices for reaping.
 * - otherwise silent > 10s: send (or re-send) a keepalive ping.
 * - NULL registration pointer: record the index on *confused_indices.
 * NOTE(review): braces, else branches and the dead_indices parameter
 * line are elided in this listing; comments cover visible code only. */
555 vl_mem_send_client_keepalive_w_reg (api_main_t * am, f64 now,
556 vl_api_registration_t ** regpp,
558 u32 ** confused_indices)
560 vl_api_registration_t *regp = *regpp;
563 /* If we haven't heard from this client recently... */
564 if (regp->last_heard < (now - 10.0))
566 if (regp->unanswered_pings == 2)
569 q = regp->vl_input_queue;
/* Signal 0 only checks that the consumer process still exists. */
570 if (kill (q->consumer_pid, 0) >= 0)
572 clib_warning ("REAPER: lazy binary API client '%s'",
574 regp->unanswered_pings = 0;
575 regp->last_heard = now;
579 clib_warning ("REAPER: binary API client '%s' died",
581 vec_add1 (*dead_indices, regpp - am->vl_clients);
585 send_memclnt_keepalive (regp, now);
588 regp->unanswered_pings = 0;
592 clib_warning ("NULL client registration index %d",
593 regpp - am->vl_clients);
594 vec_add1 (*confused_indices, regpp - am->vl_clients);
/* Periodic reaper: ping every registered client, then clean up any that
 * failed the keepalive policy. Dead clients on pairwise/private
 * segments have their whole segment munmap'ed; main-segment clients
 * have their registration poisoned and freed from the shared heap.
 * Corrupt (NULL) registrations are simply returned to the pool.
 * The static dead/confused index vectors are reused across calls.
 * NOTE(review): many lines are elided in this listing (braces, i/svm/
 * oldheap/handle declarations, pool_foreach closing, else branches).
 * In particular, the inner loop at original line 664 reuses index 'i'
 * while the outer loop at line 649 is still iterating over
 * dead_indices; a shadowing inner declaration of 'i' is likely among
 * the elided lines -- confirm against the full source. */
599 vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
601 vl_api_registration_t **regpp;
602 static u32 *dead_indices;
603 static u32 *confused_indices;
605 vec_reset_length (dead_indices);
606 vec_reset_length (confused_indices);
/* Phase 1: ping/classify every client. */
609 pool_foreach (regpp, am->vl_clients, ({
610 vl_mem_send_client_keepalive_w_reg (am, now, regpp, &dead_indices,
615 /* This should "never happen," but if it does, fix it... */
616 if (PREDICT_FALSE (vec_len (confused_indices) > 0))
619 for (i = 0; i < vec_len (confused_indices); i++)
621 pool_put_index (am->vl_clients, confused_indices[i]);
/* Phase 2: tear down confirmed-dead clients. */
625 if (PREDICT_FALSE (vec_len (dead_indices) > 0))
631 /* Allow the application to clean up its registrations */
632 for (i = 0; i < vec_len (dead_indices); i++)
634 regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]);
639 handle = vl_msg_api_handle_from_index_and_epoch
640 (dead_indices[i], shm->application_restarts);
641 (void) vl_api_call_reaper_functions (handle);
/* Free per-client state under the main segment mutex/heap. */
646 pthread_mutex_lock (&svm->mutex);
647 oldheap = svm_push_data_heap (svm);
649 for (i = 0; i < vec_len (dead_indices); i++)
651 regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]);
654 /* Is this a pairwise SVM segment? */
655 if ((*regpp)->vlib_rp != svm)
658 svm_region_t *dead_rp = (*regpp)->vlib_rp;
659 /* Note: account for the memfd header page */
660 uword virtual_base = dead_rp->virtual_base - MMAP_PAGESIZE;
661 uword virtual_size = dead_rp->virtual_size + MMAP_PAGESIZE;
663 /* For horizontal scaling, add a hash table... */
664 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
665 if (am->vlib_private_rps[i] == dead_rp)
667 vec_delete (am->vlib_private_rps, 1, i);
/* Warn outside the pushed heap if the region wasn't on the list. */
670 svm_pop_heap (oldheap);
671 clib_warning ("private rp %llx AWOL", dead_rp);
672 oldheap = svm_push_data_heap (svm);
675 /* Kill it, accounting for the memfd header page */
676 svm_pop_heap (oldheap);
677 if (munmap ((void *) virtual_base, virtual_size) < 0)
678 clib_unix_warning ("munmap");
679 /* Reset the queue-length-address cache */
680 vec_reset_length (vl_api_queue_cursizes);
681 oldheap = svm_push_data_heap (svm);
685 /* Poison the old registration */
686 clib_memset (*regpp, 0xF3, sizeof (**regpp));
687 clib_mem_free (*regpp);
689 /* no dangling references, please */
694 svm_pop_heap (oldheap);
695 clib_warning ("Duplicate free, client index %d",
696 regpp - am->vl_clients);
697 oldheap = svm_push_data_heap (svm);
/* Reclaim messages abandoned by dead clients on the main region. */
701 svm_client_scan_this_region_nolock (am->vlib_rp);
703 pthread_mutex_unlock (&svm->mutex);
704 svm_pop_heap (oldheap);
/* Finally, release the pool slots. */
705 for (i = 0; i < vec_len (dead_indices); i++)
706 pool_put_index (am->vl_clients, dead_indices[i]);
/* Dequeue one message from q (if any) and dispatch it through the API
 * handler machinery. svm_queue_sub2 is non-blocking; the message is
 * unpoisoned before dispatch. (Declaration of mp and the return value
 * are elided in this listing.) */
711 void_mem_api_handle_msg_i (api_main_t * am, vlib_main_t * vm,
712 vlib_node_runtime_t * node, svm_queue_t * q)
715 if (!svm_queue_sub2 (q, (u8 *) & mp))
717 VL_MSG_API_UNPOISON ((void *) mp);
718 vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);
/* Process one pending message from the main API segment's input queue;
 * thin wrapper around void_mem_api_handle_msg_i(). */
727 vl_mem_api_handle_msg_main (vlib_main_t * vm, vlib_node_runtime_t * node)
727 api_main_t *am = &api_main;
728 return void_mem_api_handle_msg_i (am, vm, node,
729 am->shmem_hdr->vl_input_queue);
/* Drain pending RPC requests on thread 0: swap the pending and
 * processing vectors under the spinlock (so producers never block on
 * message handling), then run all collected RPCs inside a single
 * barrier sync. (Declarations of i/mp/tmp, braces and the return are
 * elided in this listing.) */
735 vl_mem_api_handle_rpc (vlib_main_t * vm, vlib_node_runtime_t * node)
735 api_main_t *am = &api_main;
740 * Swap pending and processing vectors, then process the RPCs
741 * Avoid deadlock conditions by construction.
743 clib_spinlock_lock_if_init (&vm->pending_rpc_lock);
/* Reuse last round's (now-empty) processing vector as the new pending
 * vector to avoid reallocating. */
744 tmp = vm->processing_rpc_requests;
745 vec_reset_length (tmp);
746 vm->processing_rpc_requests = vm->pending_rpc_requests;
747 vm->pending_rpc_requests = tmp;
748 clib_spinlock_unlock_if_init (&vm->pending_rpc_lock);
751 * RPCs are used to reflect function calls to thread 0
752 * when the underlying code is not thread-safe.
754 * Grabbing the thread barrier across a set of RPCs
755 * greatly increases efficiency, and avoids
756 * running afoul of the barrier sync holddown timer.
757 * The barrier sync code supports recursive locking.
759 * We really need to rewrite RPC-based code...
761 if (PREDICT_TRUE (vec_len (vm->processing_rpc_requests)))
763 vl_msg_api_barrier_sync ();
764 for (i = 0; i < vec_len (vm->processing_rpc_requests); i++)
766 mp = vm->processing_rpc_requests[i];
767 vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);
769 vl_msg_api_barrier_release ();
/* Process one pending message from the private/pairwise API region at
 * reg_index: temporarily swap am->vlib_rp / am->shmem_hdr to that
 * region, dispatch via void_mem_api_handle_msg_i(), then restore the
 * saved context. (The reg_index parameter line, q/rv declarations and
 * the return are elided in this listing.) */
776 vl_mem_api_handle_msg_private (vlib_main_t * vm, vlib_node_runtime_t * node,
779 api_main_t *am = &api_main;
780 vl_shmem_hdr_t *save_shmem_hdr = am->shmem_hdr;
781 svm_region_t *vlib_rp, *save_vlib_rp = am->vlib_rp;
785 vlib_rp = am->vlib_rp = am->vlib_private_rps[reg_index];
787 am->shmem_hdr = (void *) vlib_rp->user_ctx;
788 q = am->shmem_hdr->vl_input_queue;
790 rv = void_mem_api_handle_msg_i (am, vm, node, q);
/* Restore the main-segment context before returning. */
792 am->shmem_hdr = save_shmem_hdr;
793 am->vlib_rp = save_vlib_rp;
/* Resolve a client handle to its registration: extract the pool index,
 * reject freed slots and handles whose epoch does not match the current
 * application_restarts, bumping the missing-client counter on failure.
 * (regp assignment, returns and braces are elided in this listing.) */
798 vl_api_registration_t *
799 vl_mem_api_client_index_to_registration (u32 handle)
801 vl_api_registration_t **regpp;
802 vl_api_registration_t *regp;
803 api_main_t *am = &api_main;
804 vl_shmem_hdr_t *shmem_hdr;
807 index = vl_msg_api_handle_get_index (handle);
808 regpp = am->vl_clients + index;
810 if (pool_is_free (am->vl_clients, regpp))
812 vl_msg_api_increment_missing_client_counter ();
/* Validate the handle's epoch against the segment's restart count. */
817 shmem_hdr = (vl_shmem_hdr_t *) regp->shmem_hdr;
818 if (!vl_msg_api_handle_is_valid (handle, shmem_hdr->application_restarts))
820 vl_msg_api_increment_missing_client_counter ();
/* Map a client handle to its input queue. Index ~0 is the special
 * "vlib sending to itself" case and returns the main segment's queue;
 * otherwise resolve the registration (NULL-check elided in listing)
 * and return its queue. */
830 vl_api_client_index_to_input_queue (u32 index)
830 vl_api_registration_t *regp;
831 api_main_t *am = &api_main;
833 /* Special case: vlib trying to send itself a message */
834 if (index == (u32) ~ 0)
835 return (am->shmem_hdr->vl_input_queue);
837 regp = vl_mem_api_client_index_to_registration (index);
840 return (regp->vl_input_queue);
/* Init function: arrange for the shared-memory API segment to be
 * unmapped automatically at process exit. */
843 static clib_error_t *
844 setup_memclnt_exit (vlib_main_t * vm)
846 atexit (vl_unmap_shmem);
850 VLIB_INIT_FUNCTION (setup_memclnt_exit);
/* format() helper: render allocation-ring statistics. With a NULL
 * shmem_hdr it emits only the column header; otherwise it prints one
 * row per vlib ring and per client ring (size, nitems, hits, misses),
 * and for the main segment appends miss/restart/reclaim/GC totals.
 * (Braces, ap increments and early return are elided in this listing.) */
853 format_api_message_rings (u8 * s, va_list * args)
855 api_main_t *am = va_arg (*args, api_main_t *);
856 vl_shmem_hdr_t *shmem_hdr = va_arg (*args, vl_shmem_hdr_t *);
857 int main_segment = va_arg (*args, int);
862 return format (s, "%8s %8s %8s %8s %8s\n",
863 "Owner", "Size", "Nitems", "Hits", "Misses");
865 ap = shmem_hdr->vl_rings;
867 for (i = 0; i < vec_len (shmem_hdr->vl_rings); i++)
869 s = format (s, "%8s %8d %8d %8d %8d\n",
870 "vlib", ap->size, ap->nitems, ap->hits, ap->misses);
874 ap = shmem_hdr->client_rings;
876 for (i = 0; i < vec_len (shmem_hdr->client_rings); i++)
878 s = format (s, "%8s %8d %8d %8d %8d\n",
879 "clnt", ap->size, ap->nitems, ap->hits, ap->misses);
/* Main-segment-only summary counters. */
885 s = format (s, "%d ring miss fallback allocations\n", am->ring_misses);
888 "%d application restarts, %d reclaimed msgs, %d garbage collects\n",
889 shmem_hdr->application_restarts, shmem_hdr->restart_reclaims,
890 shmem_hdr->garbage_collects);
/* CLI handler for "show api ring-stats": dump ring statistics for the
 * primary API segment, then for each pairwise/private segment, naming
 * each private segment after the client registered against it.
 * (Braces, i declaration, pool_foreach body/closing and returns are
 * elided in this listing; comments cover visible code only.) */
895 static clib_error_t *
896 vl_api_ring_command (vlib_main_t * vm,
897 unformat_input_t * input, vlib_cli_command_t * cli_cmd)
900 vl_shmem_hdr_t *shmem_hdr;
901 api_main_t *am = &api_main;
903 /* First, dump the primary region rings.. */
905 if (am->vlib_primary_rp == 0 || am->vlib_primary_rp->user_ctx == 0)
907 vlib_cli_output (vm, "Shared memory segment not initialized...\n");
911 shmem_hdr = (void *) am->vlib_primary_rp->user_ctx;
913 vlib_cli_output (vm, "Main API segment rings:");
/* First call prints only the header (NULL shmem_hdr). */
915 vlib_cli_output (vm, "%U", format_api_message_rings, am,
916 0 /* print header */ , 0 /* notused */ );
918 vlib_cli_output (vm, "%U", format_api_message_rings, am,
919 shmem_hdr, 1 /* main segment */ );
921 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
923 svm_region_t *vlib_rp = am->vlib_private_rps[i];
924 shmem_hdr = (void *) vlib_rp->user_ctx;
925 vl_api_registration_t **regpp;
926 vl_api_registration_t *regp = 0;
928 /* For horizontal scaling, add a hash table... */
/* Linear search for the client registered on this private region. */
930 pool_foreach (regpp, am->vl_clients,
933 if (regp && regp->vlib_rp == vlib_rp)
935 vlib_cli_output (vm, "%s segment rings:", regp->name);
939 vlib_cli_output (vm, "regp %llx not found?", regp);
943 vlib_cli_output (vm, "%U", format_api_message_rings, am,
944 0 /* print header */ , 0 /* notused */ );
945 vlib_cli_output (vm, "%U", format_api_message_rings, am,
946 shmem_hdr, 0 /* main segment */ );
953 * Display binary api message allocation ring statistics
956 VLIB_CLI_COMMAND (cli_show_api_ring_command, static) =
958 .path = "show api ring-stats",
959 .short_help = "Message ring statistics",
960 .function = vl_api_ring_command,
/* Module init: remove stale /dev/shm API segment files left by prior
 * runs (honoring am->root_path when set), then initialize the global
 * SVM region with configured or default base VA, size, uid/gid and
 * private heap size. vlibsocket_reference() pulls in the socket API
 * transport. (Braces and the trailing return are elided in this
 * listing.) */
965 vlibmemory_init (vlib_main_t * vm)
967 api_main_t *am = &api_main;
968 svm_map_region_args_t _a, *a = &_a;
969 u8 *remove_path1, *remove_path2;
970 void vlibsocket_reference (void);
972 vlibsocket_reference ();
975 * By popular request / to avoid support fires, remove any old api segment
978 if (am->root_path == 0)
980 remove_path1 = format (0, "/dev/shm/global_vm%c", 0);
981 remove_path2 = format (0, "/dev/shm/vpe-api%c", 0);
985 remove_path1 = format (0, "/dev/shm/%s-global_vm%c", am->root_path, 0);
986 remove_path2 = format (0, "/dev/shm/%s-vpe-api%c", am->root_path, 0);
/* Best-effort: ignore unlink failures (file may not exist). */
989 (void) unlink ((char *) remove_path1);
990 (void) unlink ((char *) remove_path2);
992 vec_free (remove_path1);
993 vec_free (remove_path2);
995 clib_memset (a, 0, sizeof (*a));
996 a->root_path = am->root_path;
997 a->name = SVM_GLOBAL_REGION_NAME;
/* Fall back to compile-time defaults when not configured. */
998 a->baseva = (am->global_baseva != 0) ?
999 am->global_baseva : +svm_get_global_region_base_va ();
1000 a->size = (am->global_size != 0) ? am->global_size : SVM_GLOBAL_REGION_SIZE;
1001 a->flags = SVM_FLAGS_NODATA;
1002 a->uid = am->api_uid;
1003 a->gid = am->api_gid;
1005 (am->global_pvt_heap_size !=
1006 0) ? am->global_pvt_heap_size : SVM_PVT_MHEAP_SIZE;
1008 svm_region_init_args (a);
/* Override the API region name used by map_api_segment_init(). The
 * caller retains ownership of 'name'; only the pointer is stored. */
1014 vl_set_memory_region_name (const char *name)
1016 api_main_t *am = &api_main;
1017 am->region_name = name;
1021 * fd.io coding-style-patch-verification: ON
1024 * eval: (c-set-style "gnu")