/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
19 #include <vlib/vlib.h>
20 #include <vlibapi/api.h>
21 #include <vlibmemory/api.h>
22 #include <vlibmemory/memory_api.h>
24 #include <vlibmemory/vl_memory_msg_enum.h> /* enumerate all vlib messages */
26 #define vl_typedefs /* define message structures */
27 #include <vlibmemory/vl_memory_api_h.h>
30 /* instantiate all the print functions we know about */
31 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
33 #include <vlibmemory/vl_memory_api_h.h>
36 /* instantiate all the endian swap functions we know about */
38 #include <vlibmemory/vl_memory_api_h.h>
41 volatile int **vl_api_queue_cursizes;
44 memclnt_queue_callback (vlib_main_t * vm)
47 api_main_t *am = vlibapi_get_main ();
48 int have_pending_rpcs;
50 if (PREDICT_FALSE (vec_len (vl_api_queue_cursizes) !=
51 1 + vec_len (am->vlib_private_rps)))
53 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
59 q = shmem_hdr->vl_input_queue;
63 vec_add1 (vl_api_queue_cursizes, &q->cursize);
65 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
67 svm_region_t *vlib_rp = am->vlib_private_rps[i];
69 shmem_hdr = (void *) vlib_rp->user_ctx;
70 q = shmem_hdr->vl_input_queue;
71 vec_add1 (vl_api_queue_cursizes, &q->cursize);
75 for (i = 0; i < vec_len (vl_api_queue_cursizes); i++)
77 if (*vl_api_queue_cursizes[i])
79 vm->queue_signal_pending = 1;
80 vm->api_queue_nonempty = 1;
81 vlib_process_signal_event (vm, vl_api_clnt_node.index,
82 /* event_type */ QUEUE_SIGNAL_EVENT,
88 clib_spinlock_lock_if_init (&vm->pending_rpc_lock);
89 have_pending_rpcs = vec_len (vm->pending_rpc_requests) > 0;
90 clib_spinlock_unlock_if_init (&vm->pending_rpc_lock);
92 if (have_pending_rpcs)
94 vm->queue_signal_pending = 1;
95 vm->api_queue_nonempty = 1;
96 vlib_process_signal_event (vm, vl_api_clnt_node.index,
97 /* event_type */ QUEUE_SIGNAL_EVENT,
103 * vl_api_memclnt_create_internal
106 vl_api_memclnt_create_internal (char *name, svm_queue_t * q)
108 vl_api_registration_t **regpp;
109 vl_api_registration_t *regp;
111 api_main_t *am = vlibapi_get_main ();
113 ASSERT (vlib_get_thread_index () == 0);
114 pool_get (am->vl_clients, regpp);
117 oldheap = vl_msg_push_heap ();
118 *regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
121 clib_memset (regp, 0, sizeof (*regp));
122 regp->registration_type = REGISTRATION_TYPE_SHMEM;
123 regp->vl_api_registration_pool_index = regpp - am->vl_clients;
124 regp->vlib_rp = am->vlib_rp;
125 regp->shmem_hdr = am->shmem_hdr;
127 regp->vl_input_queue = q;
128 regp->name = format (0, "%s%c", name, 0);
130 vl_msg_pop_heap (oldheap);
131 return vl_msg_api_handle_from_index_and_epoch
132 (regp->vl_api_registration_pool_index,
133 am->shmem_hdr->application_restarts);
137 * vl_api_memclnt_create_t_handler
140 vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp)
142 vl_api_registration_t **regpp;
143 vl_api_registration_t *regp;
144 vl_api_memclnt_create_reply_t *rp;
148 api_main_t *am = vlibapi_get_main ();
152 * This is tortured. Maintain a vlib-address-space private
153 * pool of client registrations. We use the shared-memory virtual
154 * address of client structure as a handle, to allow direct
155 * manipulation of context quota vbls from the client library.
157 * This scheme causes trouble w/ API message trace replay, since
158 * some random VA from clib_mem_alloc() certainly won't
159 * occur in the Linux sim. The (very) few places
160 * that care need to use the pool index.
162 * Putting the registration object(s) into a pool in shared memory and
163 * using the pool index as a handle seems like a great idea.
164 * Unfortunately, each and every reference to that pool would need
165 * to be protected by a mutex:
169 * convert pool index to
177 pool_get (am->vl_clients, regpp);
179 oldheap = vl_msg_push_heap ();
180 *regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
183 clib_memset (regp, 0, sizeof (*regp));
184 regp->registration_type = REGISTRATION_TYPE_SHMEM;
185 regp->vl_api_registration_pool_index = regpp - am->vl_clients;
186 regp->vlib_rp = am->vlib_rp;
187 regp->shmem_hdr = am->shmem_hdr;
188 regp->clib_file_index = am->shmem_hdr->clib_file_index;
190 q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
191 VL_MSG_API_SVM_QUEUE_UNPOISON (q);
193 regp->name = format (0, "%s", mp->name);
194 vec_add1 (regp->name, 0);
195 regp->keepalive = true;
197 if (am->serialized_message_table_in_shmem == 0)
198 am->serialized_message_table_in_shmem =
199 vl_api_serialize_message_table (am, 0);
201 if (am->vlib_rp != am->vlib_primary_rp)
202 msg_table = vl_api_serialize_message_table (am, 0);
204 msg_table = am->serialized_message_table_in_shmem;
206 vl_msg_pop_heap (oldheap);
208 rp = vl_msg_api_alloc (sizeof (*rp));
209 rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_REPLY);
210 rp->handle = (uword) regp;
211 rp->index = vl_msg_api_handle_from_index_and_epoch
212 (regp->vl_api_registration_pool_index,
213 am->shmem_hdr->application_restarts);
214 rp->context = mp->context;
215 rp->response = ntohl (rv);
216 rp->message_table = pointer_to_uword (msg_table);
218 vl_msg_api_send_shmem (q, (u8 *) & rp);
222 vl_api_memclnt_create_v2_t_handler (vl_api_memclnt_create_v2_t *mp)
224 vl_api_registration_t **regpp;
225 vl_api_registration_t *regp;
226 vl_api_memclnt_create_v2_reply_t *rp;
230 api_main_t *am = vlibapi_get_main ();
234 * This is tortured. Maintain a vlib-address-space private
235 * pool of client registrations. We use the shared-memory virtual
236 * address of client structure as a handle, to allow direct
237 * manipulation of context quota vbls from the client library.
239 * This scheme causes trouble w/ API message trace replay, since
240 * some random VA from clib_mem_alloc() certainly won't
241 * occur in the Linux sim. The (very) few places
242 * that care need to use the pool index.
244 * Putting the registration object(s) into a pool in shared memory and
245 * using the pool index as a handle seems like a great idea.
246 * Unfortunately, each and every reference to that pool would need
247 * to be protected by a mutex:
251 * convert pool index to
259 pool_get (am->vl_clients, regpp);
261 oldheap = vl_msg_push_heap ();
262 *regpp = clib_mem_alloc (sizeof (vl_api_registration_t));
265 clib_memset (regp, 0, sizeof (*regp));
266 regp->registration_type = REGISTRATION_TYPE_SHMEM;
267 regp->vl_api_registration_pool_index = regpp - am->vl_clients;
268 regp->vlib_rp = am->vlib_rp;
269 regp->shmem_hdr = am->shmem_hdr;
270 regp->clib_file_index = am->shmem_hdr->clib_file_index;
272 q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
273 VL_MSG_API_SVM_QUEUE_UNPOISON (q);
275 regp->name = format (0, "%s", mp->name);
276 vec_add1 (regp->name, 0);
277 regp->keepalive = mp->keepalive;
279 if (am->serialized_message_table_in_shmem == 0)
280 am->serialized_message_table_in_shmem =
281 vl_api_serialize_message_table (am, 0);
283 if (am->vlib_rp != am->vlib_primary_rp)
284 msg_table = vl_api_serialize_message_table (am, 0);
286 msg_table = am->serialized_message_table_in_shmem;
288 vl_msg_pop_heap (oldheap);
290 rp = vl_msg_api_alloc (sizeof (*rp));
291 rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_CREATE_V2_REPLY);
292 rp->handle = (uword) regp;
293 rp->index = vl_msg_api_handle_from_index_and_epoch (
294 regp->vl_api_registration_pool_index, am->shmem_hdr->application_restarts);
295 rp->context = mp->context;
296 rp->response = ntohl (rv);
297 rp->message_table = pointer_to_uword (msg_table);
299 vl_msg_api_send_shmem (q, (u8 *) &rp);
303 vl_api_call_reaper_functions (u32 client_index)
305 clib_error_t *error = 0;
306 _vl_msg_api_function_list_elt_t *i;
308 i = vlibapi_get_main ()->reaper_function_registrations;
311 error = i->f (client_index);
313 clib_error_report (error);
314 i = i->next_init_function;
319 * vl_api_memclnt_delete_t_handler
322 vl_api_memclnt_delete_t_handler (vl_api_memclnt_delete_t * mp)
324 vl_api_registration_t **regpp;
325 vl_api_registration_t *regp;
326 vl_api_memclnt_delete_reply_t *rp;
328 api_main_t *am = vlibapi_get_main ();
329 u32 handle, client_index, epoch;
333 vl_api_call_reaper_functions (handle);
335 epoch = vl_msg_api_handle_get_epoch (handle);
336 client_index = vl_msg_api_handle_get_index (handle);
338 if (epoch != (am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK))
341 ("Stale clnt delete index %d old epoch %d cur epoch %d",
343 (am->shmem_hdr->application_restarts & VL_API_EPOCH_MASK));
347 regpp = pool_elt_at_index (am->vl_clients, client_index);
349 if (!pool_is_free (am->vl_clients, regpp))
353 int private_registration = 0;
355 /* Send reply unless client asked us to do the cleanup */
359 * Note: the API message handling path will set am->vlib_rp
360 * as appropriate for pairwise / private memory segments
362 rp = vl_msg_api_alloc (sizeof (*rp));
363 rp->_vl_msg_id = ntohs (VL_API_MEMCLNT_DELETE_REPLY);
364 rp->handle = mp->handle;
367 vl_msg_api_send_shmem (regp->vl_input_queue, (u8 *) & rp);
368 if (client_index != regp->vl_api_registration_pool_index)
370 clib_warning ("mismatch client_index %d pool_index %d",
372 regp->vl_api_registration_pool_index);
373 vl_msg_api_free (rp);
378 /* No dangling references, please */
381 /* For horizontal scaling, add a hash table... */
382 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
384 /* Is this a pairwise / private API segment? */
385 if (am->vlib_private_rps[i] == am->vlib_rp)
387 /* Note: account for the memfd header page */
388 uword virtual_base = am->vlib_rp->virtual_base - MMAP_PAGESIZE;
389 uword virtual_size = am->vlib_rp->virtual_size + MMAP_PAGESIZE;
392 * Kill the registration pool element before we make
393 * the index vanish forever
395 pool_put_index (am->vl_clients,
396 regp->vl_api_registration_pool_index);
398 vec_delete (am->vlib_private_rps, 1, i);
399 /* Kill it, accounting for the memfd header page */
400 if (munmap ((void *) virtual_base, virtual_size) < 0)
401 clib_unix_warning ("munmap");
402 /* Reset the queue-length-address cache */
403 vec_reset_length (vl_api_queue_cursizes);
404 private_registration = 1;
409 if (private_registration == 0)
411 pool_put_index (am->vl_clients,
412 regp->vl_api_registration_pool_index);
413 oldheap = vl_msg_push_heap ();
415 svm_queue_free (regp->vl_input_queue);
416 vec_free (regp->name);
417 /* Poison the old registration */
418 clib_memset (regp, 0xF1, sizeof (*regp));
419 clib_mem_free (regp);
420 vl_msg_pop_heap (oldheap);
422 * These messages must be freed manually, since they're set up
423 * as "bounce" messages. In the private_registration == 1 case,
424 * we kill the shared-memory segment which contains the message
427 vl_msg_api_free (mp);
432 clib_warning ("unknown client ID %d", mp->index);
437 * client answered a ping, stave off the grim reaper...
440 vl_api_memclnt_keepalive_reply_t_handler
441 (vl_api_memclnt_keepalive_reply_t * mp)
443 vl_api_registration_t *regp;
444 vlib_main_t *vm = vlib_get_main ();
446 regp = vl_api_client_index_to_registration (mp->context);
449 regp->last_heard = vlib_time_now (vm);
450 regp->unanswered_pings = 0;
453 clib_warning ("BUG: anonymous memclnt_keepalive_reply");
457 * We can send ourselves these messages if someone uses the
458 * builtin binary api test tool...
461 vl_api_memclnt_keepalive_t_handler (vl_api_memclnt_keepalive_t * mp)
463 vl_api_memclnt_keepalive_reply_t *rmp;
465 vl_shmem_hdr_t *shmem_hdr;
467 am = vlibapi_get_main ();
468 shmem_hdr = am->shmem_hdr;
470 rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp));
471 clib_memset (rmp, 0, sizeof (*rmp));
472 rmp->_vl_msg_id = ntohs (VL_API_MEMCLNT_KEEPALIVE_REPLY);
473 rmp->context = mp->context;
474 vl_msg_api_send_shmem (shmem_hdr->vl_input_queue, (u8 *) & rmp);
/*
 * To avoid filling the API trace buffer with boring messages,
 * don't trace memclnt_keepalive[_reply] msgs
 */

#define foreach_vlib_api_msg                                    \
  _ (MEMCLNT_CREATE, memclnt_create, 0)                         \
  _ (MEMCLNT_CREATE_V2, memclnt_create_v2, 0)                   \
  _ (MEMCLNT_DELETE, memclnt_delete, 0)                         \
  _ (MEMCLNT_KEEPALIVE, memclnt_keepalive, 0)                   \
  _ (MEMCLNT_KEEPALIVE_REPLY, memclnt_keepalive_reply, 0)
493 vl_mem_api_init (const char *region_name)
496 api_main_t *am = vlibapi_get_main ();
497 vl_msg_api_msg_config_t cfg;
498 vl_msg_api_msg_config_t *c = &cfg;
500 vlib_main_t *vm = vlib_get_main ();
502 clib_memset (c, 0, sizeof (*c));
504 if ((rv = vl_map_shmem (region_name, 1 /* is_vlib */ )) < 0)
507 #define _(N,n,t) do { \
508 c->id = VL_API_##N; \
510 c->handler = vl_api_##n##_t_handler; \
511 c->cleanup = vl_noop_handler; \
512 c->endian = vl_api_##n##_t_endian; \
513 c->print = vl_api_##n##_t_print; \
514 c->size = sizeof(vl_api_##n##_t); \
515 c->traced = t; /* trace, so these msgs print */ \
516 c->replay = 0; /* don't replay client create/delete msgs */ \
517 c->message_bounce = 0; /* don't bounce this message */ \
518 vl_msg_api_config(c);} while (0);
520 foreach_vlib_api_msg;
523 #define vl_msg_name_crc_list
524 #include <vlibmemory/memclnt.api.h>
525 #undef vl_msg_name_crc_list
527 #define _(id, n, crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
528 foreach_vl_msg_name_crc_memclnt;
532 * special-case freeing of memclnt_delete messages, so we can
533 * simply munmap pairwise / private API segments...
535 am->msg_data[VL_API_MEMCLNT_DELETE].bounce = 1;
536 vl_api_set_msg_thread_safe (am, VL_API_MEMCLNT_KEEPALIVE_REPLY, 1);
537 vl_api_set_msg_thread_safe (am, VL_API_MEMCLNT_KEEPALIVE, 1);
539 vlib_set_queue_signal_callback (vm, memclnt_queue_callback);
542 ASSERT (shm && shm->vl_input_queue);
544 /* Make a note so we can always find the primary region easily */
545 am->vlib_primary_rp = am->vlib_rp;
551 map_api_segment_init (vlib_main_t * vm)
553 api_main_t *am = vlibapi_get_main ();
556 if ((rv = vl_mem_api_init (am->region_name)) < 0)
558 return clib_error_return (0, "vl_mem_api_init (%s) failed",
565 send_memclnt_keepalive (vl_api_registration_t * regp, f64 now)
567 vl_api_memclnt_keepalive_t *mp;
569 api_main_t *am = vlibapi_get_main ();
571 q = regp->vl_input_queue;
574 * If the queue head is moving, assume that the client is processing
575 * messages and skip the ping. This heuristic may fail if the queue
576 * is in the same position as last time, net of wrapping; in which
577 * case, the client will receive a keepalive.
579 if (regp->last_queue_head != q->head)
581 regp->last_heard = now;
582 regp->unanswered_pings = 0;
583 regp->last_queue_head = q->head;
588 * push/pop shared memory segment, so this routine
589 * will work with "normal" as well as "private segment"
593 mp = vl_mem_api_alloc_as_if_client_w_reg (regp, sizeof (*mp));
594 clib_memset (mp, 0, sizeof (*mp));
595 mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_MEMCLNT_KEEPALIVE);
596 mp->context = mp->client_index =
597 vl_msg_api_handle_from_index_and_epoch
598 (regp->vl_api_registration_pool_index,
599 am->shmem_hdr->application_restarts);
601 regp->unanswered_pings++;
603 /* Failure-to-send due to a stuffed queue is absolutely expected */
604 if (svm_queue_add (q, (u8 *) & mp, 1 /* nowait */ ))
605 vl_msg_api_free_w_region (regp->vlib_rp, mp);
609 vl_mem_send_client_keepalive_w_reg (api_main_t * am, f64 now,
610 vl_api_registration_t ** regpp,
612 u32 ** confused_indices)
614 vl_api_registration_t *regp = *regpp;
617 /* If we haven't heard from this client recently... */
618 if (regp->last_heard < (now - 10.0))
620 if (regp->unanswered_pings == 2)
623 q = regp->vl_input_queue;
624 if (kill (q->consumer_pid, 0) >= 0)
626 clib_warning ("REAPER: lazy binary API client '%s'",
628 regp->unanswered_pings = 0;
629 regp->last_heard = now;
633 clib_warning ("REAPER: binary API client '%s' died",
635 vec_add1 (*dead_indices, regpp - am->vl_clients);
639 send_memclnt_keepalive (regp, now);
642 regp->unanswered_pings = 0;
646 clib_warning ("NULL client registration index %d",
647 regpp - am->vl_clients);
648 vec_add1 (*confused_indices, regpp - am->vl_clients);
653 vl_mem_api_dead_client_scan (api_main_t * am, vl_shmem_hdr_t * shm, f64 now)
655 vl_api_registration_t **regpp;
656 static u32 *dead_indices;
657 static u32 *confused_indices;
659 vec_reset_length (dead_indices);
660 vec_reset_length (confused_indices);
663 pool_foreach (regpp, am->vl_clients) {
664 if (!(*regpp)->keepalive)
666 vl_mem_send_client_keepalive_w_reg (am, now, regpp, &dead_indices,
671 /* This should "never happen," but if it does, fix it... */
672 if (PREDICT_FALSE (vec_len (confused_indices) > 0))
675 for (i = 0; i < vec_len (confused_indices); i++)
677 pool_put_index (am->vl_clients, confused_indices[i]);
681 if (PREDICT_FALSE (vec_len (dead_indices) > 0))
686 /* Allow the application to clean up its registrations */
687 for (i = 0; i < vec_len (dead_indices); i++)
689 regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]);
694 handle = vl_msg_api_handle_from_index_and_epoch
695 (dead_indices[i], shm->application_restarts);
696 vl_api_call_reaper_functions (handle);
700 oldheap = vl_msg_push_heap ();
702 for (i = 0; i < vec_len (dead_indices); i++)
704 regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]);
707 /* Is this a pairwise SVM segment? */
708 if ((*regpp)->vlib_rp != am->vlib_rp)
711 svm_region_t *dead_rp = (*regpp)->vlib_rp;
712 /* Note: account for the memfd header page */
713 uword virtual_base = dead_rp->virtual_base - MMAP_PAGESIZE;
714 uword virtual_size = dead_rp->virtual_size + MMAP_PAGESIZE;
716 /* For horizontal scaling, add a hash table... */
717 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
718 if (am->vlib_private_rps[i] == dead_rp)
720 vec_delete (am->vlib_private_rps, 1, i);
723 svm_pop_heap (oldheap);
724 clib_warning ("private rp %llx AWOL", dead_rp);
725 oldheap = svm_push_data_heap (am->vlib_rp);
728 /* Kill it, accounting for the memfd header page */
729 svm_pop_heap (oldheap);
730 if (munmap ((void *) virtual_base, virtual_size) < 0)
731 clib_unix_warning ("munmap");
732 /* Reset the queue-length-address cache */
733 vec_reset_length (vl_api_queue_cursizes);
734 oldheap = svm_push_data_heap (am->vlib_rp);
738 /* Poison the old registration */
739 clib_memset (*regpp, 0xF3, sizeof (**regpp));
740 clib_mem_free (*regpp);
742 /* no dangling references, please */
747 svm_pop_heap (oldheap);
748 clib_warning ("Duplicate free, client index %d",
749 regpp - am->vl_clients);
750 oldheap = svm_push_data_heap (am->vlib_rp);
754 svm_client_scan_this_region_nolock (am->vlib_rp);
756 vl_msg_pop_heap (oldheap);
757 for (i = 0; i < vec_len (dead_indices); i++)
758 pool_put_index (am->vl_clients, dead_indices[i]);
762 void (*vl_mem_api_fuzz_hook) (u16, void *);
764 /* This is only to be called from a vlib/vnet app */
766 vl_mem_api_handler_with_vm_node (api_main_t *am, svm_region_t *vlib_rp,
767 void *the_msg, vlib_main_t *vm,
768 vlib_node_runtime_t *node, u8 is_private)
770 u16 id = clib_net_to_host_u16 (*((u16 *) the_msg));
771 vl_api_msg_data_t *m = vl_api_get_msg_data (am, id);
772 u8 *(*handler) (void *, void *, void *);
773 u8 *(*print_fp) (void *, void *);
774 svm_region_t *old_vlib_rp;
775 void *save_shmem_hdr;
778 if (PREDICT_FALSE (am->elog_trace_api_messages))
780 ELOG_TYPE_DECLARE (e) = {
781 .format = "api-msg: %s",
788 ed = ELOG_DATA (am->elog_main, e);
790 ed->c = elog_string (am->elog_main, (char *) m->name);
792 ed->c = elog_string (am->elog_main, "BOGUS");
797 handler = (void *) m->handler;
799 if (PREDICT_FALSE (am->rx_trace && am->rx_trace->enabled))
800 vl_msg_api_trace (am, am->rx_trace, the_msg);
802 if (PREDICT_FALSE (am->msg_print_flag))
804 fformat (stdout, "[%d]: %s\n", id, m->name);
805 print_fp = (void *) am->msg_data[id].print_handler;
808 fformat (stdout, " [no registered print fn for msg %d]\n", id);
812 (*print_fp) (the_msg, vm);
815 is_mp_safe = am->msg_data[id].is_mp_safe;
819 vl_msg_api_barrier_trace_context (am->msg_data[id].name);
820 vl_msg_api_barrier_sync ();
824 old_vlib_rp = am->vlib_rp;
825 save_shmem_hdr = am->shmem_hdr;
826 am->vlib_rp = vlib_rp;
827 am->shmem_hdr = (void *) vlib_rp->user_ctx;
830 if (PREDICT_FALSE (vl_mem_api_fuzz_hook != 0))
831 (*vl_mem_api_fuzz_hook) (id, the_msg);
833 if (m->is_autoendian)
835 void (*endian_fp) (void *);
836 endian_fp = am->msg_data[id].endian_handler;
837 (*endian_fp) (the_msg);
839 if (PREDICT_FALSE (vec_len (am->perf_counter_cbs) != 0))
840 clib_call_callbacks (am->perf_counter_cbs, am, id, 0 /* before */);
842 (*handler) (the_msg, vm, node);
844 if (PREDICT_FALSE (vec_len (am->perf_counter_cbs) != 0))
845 clib_call_callbacks (am->perf_counter_cbs, am, id, 1 /* after */);
848 am->vlib_rp = old_vlib_rp;
849 am->shmem_hdr = save_shmem_hdr;
852 vl_msg_api_barrier_release ();
856 clib_warning ("no handler for msg id %d", id);
860 * Special-case, so we can e.g. bounce messages off the vnet
861 * main thread without copying them...
863 if (!m || !m->bounce)
867 old_vlib_rp = am->vlib_rp;
868 save_shmem_hdr = am->shmem_hdr;
869 am->vlib_rp = vlib_rp;
870 am->shmem_hdr = (void *) vlib_rp->user_ctx;
872 vl_msg_api_free (the_msg);
875 am->vlib_rp = old_vlib_rp;
876 am->shmem_hdr = save_shmem_hdr;
880 if (PREDICT_FALSE (am->elog_trace_api_messages))
882 ELOG_TYPE_DECLARE (e) = { .format = "api-msg-done(%s): %s",
883 .format_args = "t4T4",
895 ed = ELOG_DATA (am->elog_main, e);
897 ed->c = elog_string (am->elog_main, (char *) m->name);
899 ed->c = elog_string (am->elog_main, "BOGUS");
900 ed->barrier = is_mp_safe;
905 void_mem_api_handle_msg_i (api_main_t * am, svm_region_t * vlib_rp,
906 vlib_main_t * vm, vlib_node_runtime_t * node,
912 q = ((vl_shmem_hdr_t *) (void *) vlib_rp->user_ctx)->vl_input_queue;
914 if (!svm_queue_sub2 (q, (u8 *) & mp))
916 VL_MSG_API_UNPOISON ((void *) mp);
917 vl_mem_api_handler_with_vm_node (am, vlib_rp, (void *) mp, vm, node,
925 vl_mem_api_handle_msg_main (vlib_main_t * vm, vlib_node_runtime_t * node)
927 api_main_t *am = vlibapi_get_main ();
928 return void_mem_api_handle_msg_i (am, am->vlib_rp, vm, node,
929 0 /* is_private */ );
933 vl_mem_api_handle_rpc (vlib_main_t * vm, vlib_node_runtime_t * node)
935 api_main_t *am = vlibapi_get_main ();
940 * Swap pending and processing vectors, then process the RPCs
941 * Avoid deadlock conditions by construction.
943 clib_spinlock_lock_if_init (&vm->pending_rpc_lock);
944 tmp = vm->processing_rpc_requests;
945 vec_reset_length (tmp);
946 vm->processing_rpc_requests = vm->pending_rpc_requests;
947 vm->pending_rpc_requests = tmp;
948 clib_spinlock_unlock_if_init (&vm->pending_rpc_lock);
951 * RPCs are used to reflect function calls to thread 0
952 * when the underlying code is not thread-safe.
954 * Grabbing the thread barrier across a set of RPCs
955 * greatly increases efficiency, and avoids
956 * running afoul of the barrier sync holddown timer.
957 * The barrier sync code supports recursive locking.
959 * We really need to rewrite RPC-based code...
961 if (PREDICT_TRUE (vec_len (vm->processing_rpc_requests)))
963 vl_msg_api_barrier_sync ();
964 for (i = 0; i < vec_len (vm->processing_rpc_requests); i++)
966 mp = vm->processing_rpc_requests[i];
967 vl_mem_api_handler_with_vm_node (am, am->vlib_rp, (void *) mp, vm,
968 node, 0 /* is_private */);
970 vl_msg_api_barrier_release ();
977 vl_mem_api_handle_msg_private (vlib_main_t * vm, vlib_node_runtime_t * node,
980 api_main_t *am = vlibapi_get_main ();
981 return void_mem_api_handle_msg_i (am, am->vlib_private_rps[reg_index], vm,
982 node, 1 /* is_private */ );
985 vl_api_registration_t *
986 vl_mem_api_client_index_to_registration (u32 handle)
988 vl_api_registration_t **regpp;
989 vl_api_registration_t *regp;
990 api_main_t *am = vlibapi_get_main ();
991 vl_shmem_hdr_t *shmem_hdr;
994 index = vl_msg_api_handle_get_index (handle);
995 regpp = am->vl_clients + index;
997 if (pool_is_free (am->vl_clients, regpp))
999 vl_msg_api_increment_missing_client_counter ();
1004 shmem_hdr = (vl_shmem_hdr_t *) regp->shmem_hdr;
1005 if (!vl_msg_api_handle_is_valid (handle, shmem_hdr->application_restarts))
1007 vl_msg_api_increment_missing_client_counter ();
1015 vl_api_client_index_to_input_queue (u32 index)
1017 vl_api_registration_t *regp;
1018 api_main_t *am = vlibapi_get_main ();
1020 /* Special case: vlib trying to send itself a message */
1021 if (index == (u32) ~ 0)
1022 return (am->shmem_hdr->vl_input_queue);
1024 regp = vl_mem_api_client_index_to_registration (index);
1027 return (regp->vl_input_queue);
1030 static clib_error_t *
1031 setup_memclnt_exit (vlib_main_t * vm)
1033 atexit (vl_unmap_shmem);
1037 VLIB_INIT_FUNCTION (setup_memclnt_exit);
1040 format_api_message_rings (u8 * s, va_list * args)
1042 api_main_t *am = va_arg (*args, api_main_t *);
1043 vl_shmem_hdr_t *shmem_hdr = va_arg (*args, vl_shmem_hdr_t *);
1044 int main_segment = va_arg (*args, int);
1049 return format (s, "%8s %8s %8s %8s %8s\n",
1050 "Owner", "Size", "Nitems", "Hits", "Misses");
1052 ap = shmem_hdr->vl_rings;
1054 for (i = 0; i < vec_len (shmem_hdr->vl_rings); i++)
1056 s = format (s, "%8s %8d %8d %8d %8d\n",
1057 "vlib", ap->size, ap->nitems, ap->hits, ap->misses);
1061 ap = shmem_hdr->client_rings;
1063 for (i = 0; i < vec_len (shmem_hdr->client_rings); i++)
1065 s = format (s, "%8s %8d %8d %8d %8d\n",
1066 "clnt", ap->size, ap->nitems, ap->hits, ap->misses);
1072 s = format (s, "%d ring miss fallback allocations\n", am->ring_misses);
1075 "%d application restarts, %d reclaimed msgs, %d garbage collects\n",
1076 shmem_hdr->application_restarts, shmem_hdr->restart_reclaims,
1077 shmem_hdr->garbage_collects);
1082 static clib_error_t *
1083 vl_api_ring_command (vlib_main_t * vm,
1084 unformat_input_t * input, vlib_cli_command_t * cli_cmd)
1087 vl_shmem_hdr_t *shmem_hdr;
1088 api_main_t *am = vlibapi_get_main ();
1090 /* First, dump the primary region rings.. */
1092 if (am->vlib_primary_rp == 0 || am->vlib_primary_rp->user_ctx == 0)
1094 vlib_cli_output (vm, "Shared memory segment not initialized...\n");
1098 shmem_hdr = (void *) am->vlib_primary_rp->user_ctx;
1100 vlib_cli_output (vm, "Main API segment rings:");
1102 vlib_cli_output (vm, "%U", format_api_message_rings, am,
1103 0 /* print header */ , 0 /* notused */ );
1105 vlib_cli_output (vm, "%U", format_api_message_rings, am,
1106 shmem_hdr, 1 /* main segment */ );
1108 for (i = 0; i < vec_len (am->vlib_private_rps); i++)
1110 svm_region_t *vlib_rp = am->vlib_private_rps[i];
1111 shmem_hdr = (void *) vlib_rp->user_ctx;
1112 vl_api_registration_t **regpp;
1113 vl_api_registration_t *regp = 0;
1115 /* For horizontal scaling, add a hash table... */
1117 pool_foreach (regpp, am->vl_clients)
1120 if (regp && regp->vlib_rp == vlib_rp)
1122 vlib_cli_output (vm, "%s segment rings:", regp->name);
1126 vlib_cli_output (vm, "regp %llx not found?", regp);
1130 vlib_cli_output (vm, "%U", format_api_message_rings, am,
1131 0 /* print header */ , 0 /* notused */ );
1132 vlib_cli_output (vm, "%U", format_api_message_rings, am,
1133 shmem_hdr, 0 /* main segment */ );
1140 * Display binary api message allocation ring statistics
1143 VLIB_CLI_COMMAND (cli_show_api_ring_command, static) =
1145 .path = "show api ring-stats",
1146 .short_help = "Message ring statistics",
1147 .function = vl_api_ring_command,
1152 vlibmemory_init (vlib_main_t * vm)
1154 api_main_t *am = vlibapi_get_main ();
1155 svm_map_region_args_t _a, *a = &_a;
1156 u8 *remove_path1, *remove_path2;
1157 void vlibsocket_reference (void);
1159 vlibsocket_reference ();
1162 * By popular request / to avoid support fires, remove any old api segment
1165 if (am->root_path == 0)
1167 remove_path1 = format (0, "/dev/shm/global_vm%c", 0);
1168 remove_path2 = format (0, "/dev/shm/vpe-api%c", 0);
1172 remove_path1 = format (0, "/dev/shm/%s-global_vm%c", am->root_path, 0);
1173 remove_path2 = format (0, "/dev/shm/%s-vpe-api%c", am->root_path, 0);
1176 (void) unlink ((char *) remove_path1);
1177 (void) unlink ((char *) remove_path2);
1179 vec_free (remove_path1);
1180 vec_free (remove_path2);
1182 clib_memset (a, 0, sizeof (*a));
1183 a->root_path = am->root_path;
1184 a->name = SVM_GLOBAL_REGION_NAME;
1185 a->baseva = (am->global_baseva != 0) ?
1186 am->global_baseva : +svm_get_global_region_base_va ();
1187 a->size = (am->global_size != 0) ? am->global_size : SVM_GLOBAL_REGION_SIZE;
1188 a->flags = SVM_FLAGS_NODATA;
1189 a->uid = am->api_uid;
1190 a->gid = am->api_gid;
1192 (am->global_pvt_heap_size !=
1193 0) ? am->global_pvt_heap_size : SVM_PVT_MHEAP_SIZE;
1195 svm_region_init_args (a);
1201 vl_set_memory_region_name (const char *name)
1203 api_main_t *am = vlibapi_get_main ();
1204 am->region_name = name;
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */