/*
 *------------------------------------------------------------------
 * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
 *
 * Copyright (c) 2009 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <string.h>
#include <errno.h>
#include <vppinfra/clib.h>
#include <vppinfra/vec.h>
#include <vppinfra/hash.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/fifo.h>
#include <vppinfra/time.h>
#include <vppinfra/mheap.h>
#include <vppinfra/heap.h>
#include <vppinfra/pool.h>
#include <vppinfra/format.h>
#include "svm.h"
static svm_region_t *root_rp;
static int root_rp_refcount;

static pthread_mutex_t *mutexes_held[MAXLOCK];
static int nheld;

svm_region_t *
svm_get_root_rp (void)
{
  return root_rp;
}

static void
region_lock (svm_region_t * rp, int tag)
{
  pthread_mutex_lock (&rp->mutex);
  rp->mutex_owner_pid = getpid ();
  rp->mutex_owner_tag = tag;
  ASSERT (nheld < MAXLOCK);
  /*
   * Keep score of held mutexes so we can try to exit
   * cleanly if the world comes to an end at the worst possible moment
   */
  mutexes_held[nheld++] = &rp->mutex;
}

static void
region_unlock (svm_region_t * rp)
{
  int i, j;

  rp->mutex_owner_pid = 0;
  rp->mutex_owner_tag = 0;

  for (i = nheld - 1; i >= 0; i--)
    if (mutexes_held[i] == &rp->mutex)
      {
        /* Compact the held-mutex table over the released slot */
        for (j = i; j < MAXLOCK - 1; j++)
          mutexes_held[j] = mutexes_held[j + 1];
        nheld--;
        break;
      }

  CLIB_MEMORY_BARRIER ();
  pthread_mutex_unlock (&rp->mutex);
}
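
/*
 * Illustrative usage sketch (not part of the original file): shared
 * region metadata is only touched while holding the region mutex, and
 * the numeric tag passed to region_lock() is recorded in
 * rp->mutex_owner_tag so a stuck lock can be traced to its call site:
 *
 *     region_lock (rp, 10);     // "10" is an arbitrary example tag
 *     ... update rp->user_ctx or other shared metadata ...
 *     region_unlock (rp);
 *
 * svm_mutex_cleanup() walks the mutexes_held[] scoreboard kept by these
 * helpers to release anything still held when the process exits.
 */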
u8 *
format_svm_flags (u8 * s, va_list * args)
{
  uword f = va_arg (*args, uword);

  if (f & SVM_FLAGS_MHEAP)
    s = format (s, "MHEAP ");
  if (f & SVM_FLAGS_FILE)
    s = format (s, "FILE ");
  if (f & SVM_FLAGS_NODATA)
    s = format (s, "NODATA ");
  if (f & SVM_FLAGS_NEED_DATA_INIT)
    s = format (s, "INIT ");

  return s;
}

u8 *
format_svm_size (u8 * s, va_list * args)
{
  uword size = va_arg (*args, uword);

  if (size >= (1 << 20))
    s = format (s, "(%d mb)", size >> 20);
  else if (size >= (1 << 10))
    s = format (s, "(%d kb)", size >> 10);
  else
    s = format (s, "(%d bytes)", size);

  return s;
}
format_svm_region (u8 * s, va_list * args)
  svm_region_t *rp = va_arg (*args, svm_region_t *);
  int verbose = va_arg (*args, int);

  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
              rp->region_name, rp->virtual_base,
              rp->virtual_size, format_svm_size, rp->virtual_size);
  s = format (s, " user_ctx 0x%x, bitmap_size %d\n",
              rp->user_ctx, rp->bitmap_size);
  s = format (s, " flags: 0x%x %U\n", rp->flags,
              format_svm_flags, rp->flags);
  s = format (s,
              " region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
              rp->region_heap, rp->data_base, rp->data_heap);

  s = format (s, " %d clients, pids: ", vec_len (rp->client_pids));
  for (i = 0; i < vec_len (rp->client_pids); i++)
    s = format (s, "%d ", rp->client_pids[i]);
  s = format (s, "\n");

  s = format (s, " VM in use: ");
  for (i = 0; i < rp->bitmap_size; i++)
    if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
      hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;

  hi = rp->virtual_base + i * MMAP_PAGESIZE;
  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
  s = format (s, " 0x%x - 0x%x (%dk)\n", lo, hi,

  s = format (s, " rgn heap stats: %U", format_mheap,
  if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
    s = format (s, "\n data heap stats: %U", format_mheap,
  s = format (s, "\n");
/* Round to a pagesize multiple, presumably 4k works */
static u64
rnd_pagesize (u64 size)
{
  u64 rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
  return rv;
}
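
/*
 * Worked example (assuming a 4kb MMAP_PAGESIZE): rnd_pagesize (1) and
 * rnd_pagesize (4096) both return 4096, while rnd_pagesize (4097)
 * rounds up to 8192.
 */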
/*
 * svm_data_region_setup
 */
svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)

  map_size = rp->virtual_size - (MMAP_PAGESIZE +
                                 (a->pvt_heap_size ? a->pvt_heap_size :
                                  SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);
      clib_unix_warning ("open");

    if (fstat (fd, &statb) < 0)
      clib_unix_warning ("fstat");

    if (statb.st_mode & S_IFREG)
      if (statb.st_size == 0)
        if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
          clib_unix_warning ("seek region size");
        if (write (fd, &junk, 1) != 1)
          clib_unix_warning ("set region size");
      map_size = rnd_pagesize (statb.st_size);

    map_size = a->backing_mmap_size;

    ASSERT (map_size <= rp->virtual_size -
            (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));

    if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
              MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
      clib_unix_warning ("mmap");

    rp->backing_file = (char *) format (0, "%s%c", a->backing_file, 0);
    rp->flags |= SVM_FLAGS_FILE;

  if (a->flags & SVM_FLAGS_MHEAP)
    rp->data_heap =
      mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
                              MHEAP_FLAG_DISABLE_VM);
    rp->flags |= SVM_FLAGS_MHEAP;
svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)

  map_size = rp->virtual_size -
    (MMAP_PAGESIZE
     + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    fd = open (a->backing_file, O_RDWR, 0777);
      clib_unix_warning ("open");

    if (fstat (fd, &statb) < 0)
      clib_unix_warning ("fstat");

    if (statb.st_mode & S_IFREG)
      if (statb.st_size == 0)
        if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
          clib_unix_warning ("seek region size");
        if (write (fd, &junk, 1) != 1)
          clib_unix_warning ("set region size");
      map_size = rnd_pagesize (statb.st_size);

    map_size = a->backing_mmap_size;

    ASSERT (map_size <= rp->virtual_size
            - (MMAP_PAGESIZE +
               (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));

    if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
              MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
      clib_unix_warning ("mmap");
shm_name_from_svm_map_region_args (svm_map_region_args_t * a)

  int root_path_offset = 0;
  int name_offset = 0;

  /* Tolerate present or absent slashes */
  if (a->root_path[0] == '/')
    root_path_offset++;

  /*
   * Create the root_path under /dev/shm,
   * iterating through the path and creating directories as needed.
   */
  path = format (0, "/dev/shm/%s%c", &a->root_path[root_path_offset], 0);
  split_point = path + 1;
  vec_add1 (mkdir_arg, '-');

  while (*split_point && *split_point != '/')
    vec_add1 (mkdir_arg, *split_point);
  vec_add1 (mkdir_arg, 0);

  /* ready to descend another level */
  mkdir_arg[vec_len (mkdir_arg) - 1] = '-';

  vec_free (mkdir_arg);

  if (a->name[0] == '/')
    name_offset = 1;

  shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
                     &a->name[name_offset], 0);

  shm_name = format (0, "%s%c", a->name, 0);
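
/*
 * Example (hypothetical names): with root_path "/vpe-1" and name "/foo",
 * the code above creates /dev/shm/vpe-1 as needed and returns the shm
 * segment name "/vpe-1-foo"; with no root_path, the name is used as-is.
 */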
svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)

  pthread_mutexattr_t attr;
  pthread_condattr_t cattr;
  int nbits, words, bit;

  memset (rp, 0, sizeof (*rp));

  if (pthread_mutexattr_init (&attr))
    clib_unix_warning ("mutexattr_init");
  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("mutexattr_setpshared");
  if (pthread_mutex_init (&rp->mutex, &attr))
    clib_unix_warning ("mutex_init");
  if (pthread_mutexattr_destroy (&attr))
    clib_unix_warning ("mutexattr_destroy");
  if (pthread_condattr_init (&cattr))
    clib_unix_warning ("condattr_init");
  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("condattr_setpshared");
  if (pthread_cond_init (&rp->condvar, &cattr))
    clib_unix_warning ("cond_init");
  if (pthread_condattr_destroy (&cattr))
    clib_unix_warning ("condattr_destroy");

  rp->virtual_base = a->baseva;
  rp->virtual_size = a->size;

  rp->region_heap =
    mheap_alloc_with_flags (uword_to_pointer
                            (a->baseva + MMAP_PAGESIZE, void *),
                            (a->pvt_heap_size !=
                             0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
                            MHEAP_FLAG_DISABLE_VM);
  oldheap = svm_push_pvt_heap (rp);

  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
  vec_add1 (rp->client_pids, getpid ());

  nbits = rp->virtual_size / MMAP_PAGESIZE;
  rp->bitmap_size = nbits;
  words = (nbits + BITS (uword) - 1) / BITS (uword);
  vec_validate (rp->bitmap, words - 1);

  overhead_space = MMAP_PAGESIZE /* header */ +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);

  bit = 0;
  data_base = (uword) rp->virtual_base;

  if (a->flags & SVM_FLAGS_NODATA)
    rp->flags |= SVM_FLAGS_NEED_DATA_INIT;

  do
    {
      clib_bitmap_set_no_check (rp->bitmap, bit, 1);
      bit++;
      overhead_space -= MMAP_PAGESIZE;
      data_base += MMAP_PAGESIZE;
    }
  while (overhead_space > 0);

  rp->data_base = (void *) data_base;

  /*
   * Note: although the POSIX spec guarantees that only one
   * process enters this block, we have to play games
   * to hold off clients until e.g. the mutex is ready
   */
  rp->version = SVM_VERSION;

  /* setup the data portion of the region */
  rv = svm_data_region_create (a, rp);
  if (rv)
    clib_warning ("data_region_create: %d", rv);

  svm_pop_heap (oldheap);
svm_map_region (svm_map_region_args_t * a)

  int pid_holding_region_lock;
  int dead_region_recovery = 0;
  struct timespec ts, tsrem;

  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);

  shm_name = shm_name_from_svm_map_region_args (a);

  clib_warning ("[%d] map region %s: shm_open (%s)",
                getpid (), a->name, shm_name);

  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);

  if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
    clib_unix_warning ("segment chmod");
  /* This turns out to fail harmlessly if the client starts first */
  if (fchown (svm_fd, a->uid, a->gid) < 0)
    clib_unix_warning ("segment chown [ok if client starts first]");

  if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
    clib_warning ("seek region size");
  if (write (svm_fd, &junk, 1) != 1)
    clib_warning ("set region size");

  rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
             PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);

  if (rp == (svm_region_t *) MAP_FAILED)
    clib_unix_warning ("mmap create");

  svm_region_init_mapped_region (a, rp);

  return ((void *) rp);

  svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);
    perror ("svm_region_map(mmap open)");

  if (0 != fstat (svm_fd, &stat))
    clib_warning ("fstat failed: %d", errno);

  if (stat.st_size > 0)

  clib_warning ("waiting for resize of shm file timed out");

  ts.tv_nsec = 100000000;
  while (nanosleep (&ts, &tsrem) < 0)

  rp = mmap (0, MMAP_PAGESIZE,
             PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);

  if (rp == (svm_region_t *) MAP_FAILED)
    clib_warning ("mmap");

  /*
   * We lost the footrace to create this region; make sure
   * the winner has crossed the finish line.
   */
  while (rp->version == 0 && deadman++ < 5)

  if (rp->version == 0)
    clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
    munmap (rp, a->size);

  /* Remap now that the region has been placed */
  a->baseva = rp->virtual_base;
  a->size = rp->virtual_size;
  munmap (rp, MMAP_PAGESIZE);

  rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
                      PROT_READ | PROT_WRITE,
                      MAP_SHARED | MAP_FIXED, svm_fd, 0);
  if ((uword) rp == (uword) MAP_FAILED)
    clib_unix_warning ("mmap");

  if ((uword) rp != rp->virtual_base)
    clib_warning ("mmap botch");
  /*
   * Try to fix the region mutex if it is held by
   * a dead pid.
   */
  pid_holding_region_lock = rp->mutex_owner_pid;
  if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
    {
      clib_warning
        ("region %s mutex held by dead pid %d, tag %d, force unlock",
         rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
      /* owner pid is nonexistent */
      rp->mutex.__data.__owner = 0;
      rp->mutex.__data.__lock = 0;
      dead_region_recovery = 1;
    }

  if (dead_region_recovery)
    clib_warning ("recovery: attempt to re-lock region");

  oldheap = svm_push_pvt_heap (rp);
  vec_add1 (rp->client_pids, getpid ());

  if (dead_region_recovery)
    clib_warning ("recovery: attempt svm_data_region_map");

  rv = svm_data_region_map (a, rp);
  if (rv)
    clib_warning ("data_region_map: %d", rv);

  if (dead_region_recovery)
    clib_warning ("unlock and continue");

  svm_pop_heap (oldheap);

  return ((void *) rp);

  return 0;			/* NOTREACHED */
void
svm_mutex_cleanup (void)
{
  int i;
  for (i = 0; i < nheld; i++)
    pthread_mutex_unlock (mutexes_held[i]);
}
svm_region_init_internal (svm_map_region_args_t * a)

  u64 ticks = clib_cpu_time_now ();
  uword randomize_baseva;

  /* guard against klutz calls */

  atexit (svm_mutex_cleanup);

  /* Randomize the shared-VM base at init time */
  if (MMAP_PAGESIZE <= (4 << 10))
    randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
  else
    randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;

  a->baseva += randomize_baseva;

  rp = svm_map_region (a);

  /* Set up the main region data structures */
  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
    {
      svm_main_region_t *mp = 0;

      rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);

      oldheap = svm_push_pvt_heap (rp);
      vec_validate (mp, 0);
      mp->name_hash = hash_create_string (0, sizeof (uword));
      mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;

      svm_pop_heap (oldheap);
    }
svm_region_init (void)

  svm_map_region_args_t _a, *a = &_a;

  memset (a, 0, sizeof (*a));
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = SVM_GLOBAL_REGION_BASEVA;
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;

  svm_region_init_internal (a);

svm_region_init_chroot (const char *root_path)

  svm_map_region_args_t _a, *a = &_a;

  memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = SVM_GLOBAL_REGION_BASEVA;
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;

  return svm_region_init_internal (a);
svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)

  svm_map_region_args_t _a, *a = &_a;

  memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = SVM_GLOBAL_REGION_BASEVA;
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = uid;
  a->gid = gid;

  svm_region_init_internal (a);

svm_region_init_args (svm_map_region_args_t * a)

  svm_region_init_internal (a);
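
/*
 * Illustrative client-side sketch (names and sizes are examples only,
 * not part of this file): map the root region, then create or attach a
 * named subregion carved out of the root region's virtual space:
 *
 *     svm_map_region_args_t _a, *a = &_a;
 *
 *     memset (a, 0, sizeof (*a));
 *     svm_region_init ();                  // map the global/root region
 *     a->name = "example-region";          // hypothetical region name
 *     a->size = 1 << 20;                   // 1mb of data space
 *     a->flags = SVM_FLAGS_MHEAP;          // give the region a data heap
 *     rp = svm_region_find_or_create (a);
 *     ...
 *     svm_region_unmap (rp);               // detach when done
 */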
svm_region_find_or_create (svm_map_region_args_t * a)

  svm_main_region_t *mp;
  svm_subregion_t *subp;

  a->size += MMAP_PAGESIZE +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
  a->size = rnd_pagesize (a->size);

  region_lock (root_rp, 4);
  oldheap = svm_push_pvt_heap (root_rp);
  mp = root_rp->data_base;

  /* Map the named region from the correct chroot environment */
  if (a->root_path == NULL)
    a->root_path = (char *) mp->root_path;

  /*
   * See if this region is already known. If it is, we're done.
   */
  p = hash_get_mem (mp->name_hash, a->name);
  if (p)
    {
      rp = svm_map_region (a);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return ((void *) rp);
    }

  /* Create the region. */
  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);

  need_nbits = a->size / MMAP_PAGESIZE;

  index = 1;			/* $$$ fixme, figure out how many bits to really skip */

  /*
   * Scan the virtual space allocation bitmap, looking for a large
   * enough chunk.
   */
  if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
    for (i = 0; i < (need_nbits - 1); i++)
      if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)

  while (index < root_rp->bitmap_size);

  /* Completely out of VM? */
  if (index >= root_rp->bitmap_size)
    {
      clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
                    root_rp->region_name, a->size, a->size);
      svm_pop_heap (oldheap);
      region_unlock (root_rp);
      return (0);
    }

  /*
   * Mark virtual space allocated
   */
  clib_warning ("set %d bits at index %d", need_nbits, index);

  for (i = 0; i < need_nbits; i++)
    clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);

  /* Place this region where it goes... */
  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;

  rp = svm_map_region (a);

  pool_get (mp->subregions, subp);
  name = format (0, "%s%c", a->name, 0);
  subp->subregion_name = name;

  hash_set_mem (mp->name_hash, name, subp - mp->subregions);

  svm_pop_heap (oldheap);

  region_unlock (root_rp);
void
svm_region_unlink (svm_region_t * rp)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_main_region_t *mp;
  u8 *shm_name;

  ASSERT (vec_c_string_is_terminated (rp->region_name));

  mp = root_rp->data_base;

  memset (a, 0, sizeof (*a));
  a->root_path = (char *) mp->root_path;
  a->name = rp->region_name;
  shm_name = shm_name_from_svm_map_region_args (a);
  clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
  shm_unlink ((const char *) shm_name);
  vec_free (shm_name);
}
/*
 * Let go of the indicated region. If the calling process
 * is the last customer, throw it away completely.
 * The root region mutex guarantees atomicity with respect to
 * a new region client showing up at the wrong moment.
 */
svm_region_unmap (void *rp_arg)

  int i, mypid = getpid ();
  uword virtual_base, virtual_size;
  svm_region_t *rp = rp_arg;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive exit crash.
   */

  clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);

  region_lock (root_rp, 5);
  region_lock (rp, 6);

  oldheap = svm_push_pvt_heap (rp);	/* nb vec_delete() in the loop */

  /* Remove the caller from the list of mappers */
  for (i = 0; i < vec_len (rp->client_pids); i++)
    if (rp->client_pids[i] == mypid)
      {
        vec_delete (rp->client_pids, 1, i);
        goto found;
      }
  clib_warning ("pid %d AWOL", mypid);

found:
  svm_pop_heap (oldheap);

  nclients_left = vec_len (rp->client_pids);
  virtual_base = rp->virtual_base;
  virtual_size = rp->virtual_size;

  if (nclients_left == 0)
    {
      int index, nbits, i;
      svm_main_region_t *mp;
      svm_subregion_t *subp;

      /* Kill the region, last guy on his way out */

      oldheap = svm_push_pvt_heap (root_rp);
      name = vec_dup (rp->region_name);

      virtual_base = rp->virtual_base;
      virtual_size = rp->virtual_size;

      /* Figure out which bits to clear in the root region bitmap */
      index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;
      nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;

      clib_warning ("clear %d bits at index %d", nbits, index);

      /* Give back the allocated VM */
      for (i = 0; i < nbits; i++)
        clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);

      mp = root_rp->data_base;

      p = hash_get_mem (mp->name_hash, name);

      /* Better never happen ... */
      if (p == NULL)
        {
          region_unlock (root_rp);
          svm_pop_heap (oldheap);
          clib_warning ("Region name '%s' not found?", name);
          return;
        }

      /* Remove from the root region subregion pool */
      subp = mp->subregions + p[0];
      pool_put (mp->subregions, subp);

      hash_unset_mem (mp->name_hash, name);

      svm_region_unlink (rp);
      munmap ((void *) virtual_base, virtual_size);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return;
    }

  region_unlock (root_rp);

  munmap ((void *) virtual_base, virtual_size);
  int i, mypid = getpid ();
  uword virtual_base, virtual_size;

  /* It felt so nice we did it twice... */

  if (--root_rp_refcount > 0)
    return;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive exit crash.
   */

  region_lock (root_rp, 7);
  oldheap = svm_push_pvt_heap (root_rp);

  virtual_base = root_rp->virtual_base;
  virtual_size = root_rp->virtual_size;

  for (i = 0; i < vec_len (root_rp->client_pids); i++)
    if (root_rp->client_pids[i] == mypid)
      {
        vec_delete (root_rp->client_pids, 1, i);
        goto found;
      }
  clib_warning ("pid %d AWOL", mypid);

found:
  if (vec_len (root_rp->client_pids) == 0)
    svm_region_unlink (root_rp);

  region_unlock (root_rp);
  svm_pop_heap (oldheap);

  munmap ((void *) virtual_base, virtual_size);
svm_client_scan_this_region_nolock (svm_region_t * rp)

  int mypid = getpid ();

  for (j = 0; j < vec_len (rp->client_pids); j++)
    {
      if (mypid == rp->client_pids[j])
        continue;
      if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        rp->region_name, rp->client_pids[j]);
          /* nb: client vec in rp->region_heap */
          oldheap = svm_push_pvt_heap (rp);
          vec_delete (rp->client_pids, 1, j);
          svm_pop_heap (oldheap);
        }
    }
/*
 * Scan svm regions for dead clients
 */
svm_client_scan (const char *root_path)

  svm_main_region_t *mp;
  svm_map_region_args_t *a = 0;
  svm_region_t *root_rp;
  svm_subregion_t *subp;
  int mypid = getpid ();

  vec_validate (a, 0);

  svm_region_init_chroot (root_path);

  root_rp = svm_get_root_rp ();

  pthread_mutex_lock (&root_rp->mutex);

  mp = root_rp->data_base;

  for (j = 0; j < vec_len (root_rp->client_pids); j++)
    {
      if (mypid == root_rp->client_pids[j])
        continue;
      if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
        {
          clib_warning ("%s: cleanup ghost pid %d",
                        root_rp->region_name, root_rp->client_pids[j]);
          /* nb: client vec in root_rp->region_heap */
          oldheap = svm_push_pvt_heap (root_rp);
          vec_delete (root_rp->client_pids, 1, j);
          svm_pop_heap (oldheap);
        }
    }

  /*
   * Snapshot names; we can't hold the root rp mutex across
   * svm_region_find_or_create.
   */
  pool_foreach (subp, mp->subregions, ({
    name = vec_dup (subp->subregion_name);
    vec_add1 (svm_names, name);
  }));

  pthread_mutex_unlock (&root_rp->mutex);

  for (i = 0; i < vec_len (svm_names); i++)
    {
      vec_validate (a, 0);
      a->root_path = root_path;
      a->name = (char *) svm_names[i];
      rp = svm_region_find_or_create (a);
      if (rp)
        {
          pthread_mutex_lock (&rp->mutex);
          svm_client_scan_this_region_nolock (rp);
          pthread_mutex_unlock (&rp->mutex);
          svm_region_unmap (rp);
          vec_free (svm_names[i]);
        }
    }

  vec_free (svm_names);
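
/*
 * Example (hypothetical root path): a standalone cleanup tool can call
 *
 *     svm_client_scan ("/vpe-1");
 *
 * to walk the root region and every subregion under that path, deleting
 * client pids which no longer exist.
 */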
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */