/*
 *------------------------------------------------------------------
 * svm.c - shared VM allocation, mmap(...MAP_FIXED...)
 *
 * Copyright (c) 2009 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <signal.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <fcntl.h>
#include <string.h>
#include <vppinfra/clib.h>
#include <vppinfra/vec.h>
#include <vppinfra/hash.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/fifo.h>
#include <vppinfra/time.h>
#include <vppinfra/mheap.h>
#include <vppinfra/heap.h>
#include <vppinfra/pool.h>
#include <vppinfra/format.h>

#include "svm.h"
static svm_region_t *root_rp;
static int root_rp_refcount;

#define MAXLOCK 2
static pthread_mutex_t *mutexes_held[MAXLOCK];
static int nheld;

svm_region_t *
svm_get_root_rp (void)
{
  return root_rp;
}
u64
svm_get_global_region_base_va ()
{
#if __aarch64__
  /* On AArch64 VA space can have different size, from 36 to 48 bits.
     Here we are trying to detect VA bits by parsing /proc/self/maps
     address ranges */
  int fd;
  unformat_input_t input;
  u64 start, end = 0;
  u8 bits = 0;

  if ((fd = open ("/proc/self/maps", 0)) < 0)
    clib_unix_error ("open '/proc/self/maps'");

  unformat_init_clib_file (&input, fd);
  while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (&input, "%llx-%llx", &start, &end))
	end--;			/* ranges are half-open: [start, end) */
      unformat_skip_line (&input);
    }
  unformat_free (&input);
  close (fd);

  bits = count_leading_zeros (end);
  bits = 64 - bits;		/* convert leading-zero count to VA bits */
  if (bits >= 36 && bits <= 48)
    return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
  else
    clib_unix_error ("unexpected va bits '%u'", bits);
#endif

  /* default value */
  return 0x130000000ULL;
}
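/*
 * Worked example of the computation above (illustrative, assuming 48 VA
 * bits and a 64mb SVM_GLOBAL_REGION_SIZE): (1ul << 48) / 4 is
 * 0x400000000000; subtracting 2 * 0x4000000 yields a base va of
 * 0x3ffff8000000, i.e. the global region sits just below the top of the
 * first quarter of the usable address space.
 */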
static void
region_lock (svm_region_t * rp, int tag)
{
  pthread_mutex_lock (&rp->mutex);
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = getpid ();
  rp->mutex_owner_tag = tag;
#endif
  ASSERT (nheld < MAXLOCK);
  /*
   * Keep score of held mutexes so we can try to exit
   * cleanly if the world comes to an end at the worst possible
   * moment
   */
  mutexes_held[nheld++] = &rp->mutex;
}
static void
region_unlock (svm_region_t * rp)
{
  int i, j;
#ifdef MUTEX_DEBUG
  rp->mutex_owner_pid = 0;
  rp->mutex_owner_tag = 0;
#endif

  for (i = nheld - 1; i >= 0; i--)
    {
      if (mutexes_held[i] == &rp->mutex)
	{
	  for (j = i; j < MAXLOCK - 1; j++)
	    mutexes_held[j] = mutexes_held[j + 1];
	  nheld--;
	  goto found;
	}
    }
  ASSERT (0);

found:
  CLIB_MEMORY_BARRIER ();
  pthread_mutex_unlock (&rp->mutex);
}
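/*
 * Usage sketch (not part of the original file): every mutation of shared
 * region state is bracketed by this pair. The tag is an arbitrary integer
 * identifying the call site, so a post-mortem look at mutex_owner_tag
 * shows who held the lock; the tag value below is hypothetical.
 */
#if 0
static void
example_locked_update (svm_region_t * rp)
{
  region_lock (rp, 42);		/* hypothetical tag */
  /* ... modify shared region state here ... */
  region_unlock (rp);
}
#endif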
static u8 *
format_svm_flags (u8 * s, va_list * args)
{
  uword f = va_arg (*args, uword);

  if (f & SVM_FLAGS_MHEAP)
    s = format (s, "MHEAP ");
  if (f & SVM_FLAGS_FILE)
    s = format (s, "FILE ");
  if (f & SVM_FLAGS_NODATA)
    s = format (s, "NODATA ");
  if (f & SVM_FLAGS_NEED_DATA_INIT)
    s = format (s, "INIT ");

  return (s);
}
static u8 *
format_svm_size (u8 * s, va_list * args)
{
  uword size = va_arg (*args, uword);

  if (size >= (1 << 20))
    {
      s = format (s, "(%d mb)", size >> 20);
    }
  else if (size >= (1 << 10))
    {
      s = format (s, "(%d kb)", size >> 10);
    }
  else
    {
      s = format (s, "(%d bytes)", size);
    }
  return (s);
}
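/*
 * Example (illustrative, not part of the original file): both formatters
 * plug into vppinfra's "%U" conversion, which calls the supplied format
 * function with the following va_args. A 64mb size renders as "(64 mb)".
 */
#if 0
static u8 *
example_format_size (void)
{
  return format (0, "size 0x%x %U", 64 << 20, format_svm_size, 64 << 20);
}
#endif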
u8 *
format_svm_region (u8 * s, va_list * args)
{
  svm_region_t *rp = va_arg (*args, svm_region_t *);
  int verbose = va_arg (*args, int);
  int i;
  uword lo, hi;

  s = format (s, "%s: base va 0x%x size 0x%x %U\n",
	      rp->region_name, rp->virtual_base,
	      rp->virtual_size, format_svm_size, rp->virtual_size);
  s = format (s, "  user_ctx 0x%x, bitmap_size %d\n",
	      rp->user_ctx, rp->bitmap_size);

  if (verbose)
    {
      s = format (s, "  flags: 0x%x %U\n", rp->flags,
		  format_svm_flags, rp->flags);
      s = format (s,
		  "  region_heap 0x%x data_base 0x%x data_heap 0x%x\n",
		  rp->region_heap, rp->data_base, rp->data_heap);
    }

  s = format (s, "  %d clients, pids: ", vec_len (rp->client_pids));

  for (i = 0; i < vec_len (rp->client_pids); i++)
    s = format (s, "%d ", rp->client_pids[i]);

  s = format (s, "\n");
  if (verbose)
    {
      lo = hi = ~0;

      s = format (s, "  VM in use: ");

      for (i = 0; i < rp->bitmap_size; i++)
	{
	  if (clib_bitmap_get_no_check (rp->bitmap, i) != 0)
	    {
	      if (lo == ~0)
		hi = lo = rp->virtual_base + i * MMAP_PAGESIZE;
	      else
		hi = rp->virtual_base + i * MMAP_PAGESIZE;
	    }
	  else
	    {
	      if (lo != ~0)
		{
		  hi = rp->virtual_base + i * MMAP_PAGESIZE - 1;
		  s = format (s, "   0x%x - 0x%x (%dk)\n", lo, hi,
			      (hi - lo) / 1024);
		  lo = hi = ~0;
		}
	    }
	}
#if USE_DLMALLOC == 0
      s = format (s, "  rgn heap stats: %U", format_mheap,
		  rp->region_heap, 0);
      if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
	{
	  s = format (s, "\n  data heap stats: %U", format_mheap,
		      rp->data_heap, 1);
	}
      s = format (s, "\n");
#endif
    }

  return (s);
}
/*
 * rnd_pagesize
 * Round to a pagesize multiple, presumably 4k works
 */
static u64
rnd_pagesize (u64 size)
{
  u64 rv;

  rv = (size + (MMAP_PAGESIZE - 1)) & ~(MMAP_PAGESIZE - 1);
  return (rv);
}
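/*
 * Example: with 4k pages, rnd_pagesize (1) == 0x1000,
 * rnd_pagesize (0x1000) == 0x1000, and rnd_pagesize (0x1001) == 0x2000;
 * already-aligned sizes pass through unchanged.
 */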
/*
 * svm_data_region_setup
 */
static int
svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;

  map_size = rp->virtual_size - (MMAP_PAGESIZE +
				 (a->pvt_heap_size ? a->pvt_heap_size :
				  SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {
      struct stat statb;

      fd = open (a->backing_file, O_RDWR | O_CREAT, 0777);

      if (fd < 0)
	{
	  clib_unix_warning ("open");
	  return -1;
	}

      if (fstat (fd, &statb) < 0)
	{
	  clib_unix_warning ("fstat");
	  close (fd);
	  return -2;
	}

      if (statb.st_mode & S_IFREG)
	{
	  if (statb.st_size == 0)
	    {
	      if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
		{
		  clib_unix_warning ("seek region size");
		  close (fd);
		  return -3;
		}
	      if (write (fd, &junk, 1) != 1)
		{
		  clib_unix_warning ("set region size");
		  close (fd);
		  return -3;
		}
	    }
	  else
	    map_size = rnd_pagesize (statb.st_size);
	}
      else
	map_size = a->backing_mmap_size;

      ASSERT (map_size <= rp->virtual_size -
	      (MMAP_PAGESIZE + SVM_PVT_MHEAP_SIZE));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
	{
	  clib_unix_warning ("mmap");
	  close (fd);
	  return -3;
	}
      close (fd);
      rp->backing_file = (char *) format (0, "%s%c", a->backing_file, 0);
      rp->flags |= SVM_FLAGS_FILE;
    }
  if (a->flags & SVM_FLAGS_MHEAP)
    {
#if USE_DLMALLOC == 0
      mheap_t *heap_header;
      rp->data_heap =
	mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
				MHEAP_FLAG_DISABLE_VM);
      heap_header = mheap_header (rp->data_heap);
      heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
#else
      rp->data_heap = create_mspace_with_base (rp->data_base,
					       map_size, 1 /* locked */ );
      mspace_disable_expand (rp->data_heap);
#endif
      rp->flags |= SVM_FLAGS_MHEAP;
    }
  return 0;
}
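/*
 * Region layout sketch (illustrative summary of the arithmetic above):
 * each region occupies [virtual_base, virtual_base + virtual_size):
 *
 *   virtual_base -> +----------------------+
 *                   | svm_region_t header  | one MMAP_PAGESIZE page
 *                   +----------------------+
 *                   | private region heap  | pvt_heap_size, or
 *                   |                      | SVM_PVT_MHEAP_SIZE by default
 *   data_base ----> +----------------------+
 *                   | data heap / mapping  | map_size, as computed above
 *                   +----------------------+
 */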
static int
svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
{
  int fd;
  u8 junk = 0;
  uword map_size;
  struct stat statb;

  map_size = rp->virtual_size -
    (MMAP_PAGESIZE
     + (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));

  if (a->flags & SVM_FLAGS_FILE)
    {
      fd = open (a->backing_file, O_RDWR, 0777);

      if (fd < 0)
	{
	  clib_unix_warning ("open");
	  return -1;
	}

      if (fstat (fd, &statb) < 0)
	{
	  clib_unix_warning ("fstat");
	  close (fd);
	  return -2;
	}

      if (statb.st_mode & S_IFREG)
	{
	  if (statb.st_size == 0)
	    {
	      if (lseek (fd, map_size, SEEK_SET) == (off_t) - 1)
		{
		  clib_unix_warning ("seek region size");
		  close (fd);
		  return -3;
		}
	      if (write (fd, &junk, 1) != 1)
		{
		  clib_unix_warning ("set region size");
		  close (fd);
		  return -3;
		}
	    }
	  else
	    map_size = rnd_pagesize (statb.st_size);
	}
      else
	map_size = a->backing_mmap_size;

      ASSERT (map_size <= rp->virtual_size
	      - (MMAP_PAGESIZE
		 + (a->pvt_heap_size ? a->pvt_heap_size :
		    SVM_PVT_MHEAP_SIZE)));

      if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
	{
	  clib_unix_warning ("mmap");
	  close (fd);
	  return -3;
	}
      close (fd);
    }
  return 0;
}
u8 *
shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
{
  u8 *path;
  u8 *shm_name;
  u8 *split_point;
  u8 *mkdir_arg = 0;
  int root_path_offset = 0;
  int name_offset = 0;

  if (a->root_path)
    {
      /* Tolerate present or absent slashes */
      if (a->root_path[0] == '/')
	root_path_offset++;

      /* create the root_path under /dev/shm
         iterate through path creating directories */

      path = format (0, "/dev/shm/%s%c", &a->root_path[root_path_offset], 0);
      split_point = path + 1;
      vec_add1 (mkdir_arg, '-');

      while (*split_point)
	{
	  while (*split_point && *split_point != '/')
	    {
	      vec_add1 (mkdir_arg, *split_point);
	      split_point++;
	    }
	  vec_add1 (mkdir_arg, 0);

	  /* ready to descend another level */
	  mkdir_arg[vec_len (mkdir_arg) - 1] = '-';
	  if (*split_point)
	    split_point++;
	}
      vec_free (mkdir_arg);
      vec_free (path);

      if (a->name[0] == '/')
	name_offset = 1;

      shm_name = format (0, "/%s-%s%c", &a->root_path[root_path_offset],
			 &a->name[name_offset], 0);
    }
  else
    shm_name = format (0, "%s%c", a->name, 0);
  return (shm_name);
}
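/*
 * Example (hypothetical values): with a->root_path = "/vpe" and
 * a->name = "/global_vm", the result is "/vpe-global_vm", which
 * shm_open() resolves to /dev/shm/vpe-global_vm. With no root path,
 * "/global_vm" is used verbatim.
 */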
void
svm_region_init_mapped_region (svm_map_region_args_t * a, svm_region_t * rp)
{
  pthread_mutexattr_t attr;
  pthread_condattr_t cattr;
  int nbits, words, bit;
  int overhead_space;
  void *oldheap;
  uword data_base;
  int rv;

  ASSERT (rp);

  clib_memset (rp, 0, sizeof (*rp));

  if (pthread_mutexattr_init (&attr))
    clib_unix_warning ("mutexattr_init");

  if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("mutexattr_setpshared");

  if (pthread_mutex_init (&rp->mutex, &attr))
    clib_unix_warning ("mutex_init");

  if (pthread_mutexattr_destroy (&attr))
    clib_unix_warning ("mutexattr_destroy");

  if (pthread_condattr_init (&cattr))
    clib_unix_warning ("condattr_init");

  if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED))
    clib_unix_warning ("condattr_setpshared");

  if (pthread_cond_init (&rp->condvar, &cattr))
    clib_unix_warning ("cond_init");

  if (pthread_condattr_destroy (&cattr))
    clib_unix_warning ("condattr_destroy");
  region_lock (rp, 1);

  rp->virtual_base = a->baseva;
  rp->virtual_size = a->size;

#if USE_DLMALLOC == 0
  rp->region_heap = mheap_alloc_with_flags
    (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
     (a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
     MHEAP_FLAG_DISABLE_VM);
#else
  rp->region_heap = create_mspace_with_base
    (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
     (a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
     1 /* locked */ );
  mspace_disable_expand (rp->region_heap);
#endif

  oldheap = svm_push_pvt_heap (rp);

  rp->region_name = (char *) format (0, "%s%c", a->name, 0);
  vec_add1 (rp->client_pids, getpid ());

  nbits = rp->virtual_size / MMAP_PAGESIZE;

  ASSERT (nbits > 0);
  rp->bitmap_size = nbits;
  words = (nbits + BITS (uword) - 1) / BITS (uword);
  vec_validate (rp->bitmap, words - 1);

  overhead_space = MMAP_PAGESIZE /* header */  +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);

  bit = 0;
  data_base = (uword) rp->virtual_base;

  if (a->flags & SVM_FLAGS_NODATA)
    rp->flags |= SVM_FLAGS_NEED_DATA_INIT;

  do
    {
      clib_bitmap_set_no_check (rp->bitmap, bit, 1);
      bit++;
      overhead_space -= MMAP_PAGESIZE;
      data_base += MMAP_PAGESIZE;
    }
  while (overhead_space > 0);

  rp->data_base = (void *) data_base;
  /*
   * Note: although the POSIX spec guarantees that only one
   * process enters this block, we have to play games
   * to hold off clients until e.g. the mutex is ready
   */
  rp->version = SVM_VERSION;

  /* setup the data portion of the region */

  rv = svm_data_region_create (a, rp);
  if (rv)
    {
      clib_warning ("data_region_create: %d", rv);
    }

  region_unlock (rp);

  svm_pop_heap (oldheap);
}
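/*
 * Worked example of the overhead accounting above (hypothetical sizes):
 * with 4k pages and a 64kb private heap, overhead_space starts at
 * 4k + 64k = 68k, so the do/while marks 17 bitmap bits busy and
 * data_base ends up at virtual_base + 0x11000.
 */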
/*
 * svm_map_region
 */
void *
svm_map_region (svm_map_region_args_t * a)
{
  int svm_fd;
  svm_region_t *rp;
  int deadman = 0;
  u8 junk = 0;
  void *oldheap;
  int rv;
  int pid_holding_region_lock;
  u8 *shm_name;
  int dead_region_recovery = 0;
  int time_left;
  struct stat stat;
  struct timespec ts, tsrem;

  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);
  ASSERT (a->name);

  shm_name = shm_name_from_svm_map_region_args (a);

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] map region %s: shm_open (%s)",
		  getpid (), a->name, shm_name);

  svm_fd = shm_open ((char *) shm_name, O_RDWR | O_CREAT | O_EXCL, 0777);

  if (svm_fd >= 0)
    {
      if (fchmod (svm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
	clib_unix_warning ("segment chmod");
      /* This turns out to fail harmlessly if the client starts first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
	clib_unix_warning ("segment chown [ok if client starts first]");

      vec_free (shm_name);

      if (lseek (svm_fd, a->size, SEEK_SET) == (off_t) - 1)
	{
	  clib_warning ("seek region size");
	  close (svm_fd);
	  return (0);
	}
      if (write (svm_fd, &junk, 1) != 1)
	{
	  clib_warning ("set region size");
	  close (svm_fd);
	  return (0);
	}

      rp = mmap (uword_to_pointer (a->baseva, void *), a->size,
		 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
	{
	  clib_unix_warning ("mmap create");
	  close (svm_fd);
	  return (0);
	}
      close (svm_fd);

      svm_region_init_mapped_region (a, rp);

      return ((void *) rp);
    }
  else
    {
      svm_fd = shm_open ((char *) shm_name, O_RDWR, 0777);

      vec_free (shm_name);

      if (svm_fd < 0)
	{
	  perror ("svm_region_map(mmap open)");
	  return (0);
	}

      /* Reset ownership in case the client started first */
      if (fchown (svm_fd, a->uid, a->gid) < 0)
	clib_unix_warning ("segment chown [ok if client starts first]");

      time_left = 20;
      while (1)
	{
	  if (0 != fstat (svm_fd, &stat))
	    {
	      clib_warning ("fstat failed: %d", errno);
	      close (svm_fd);
	      return (0);
	    }
	  if (stat.st_size > 0)
	    break;

	  if (0 == time_left)
	    {
	      clib_warning ("waiting for resize of shm file timed out");
	      close (svm_fd);
	      return (0);
	    }
	  ts.tv_sec = 0;
	  ts.tv_nsec = 100000000;
	  while (nanosleep (&ts, &tsrem) < 0)
	    ts = tsrem;
	  time_left--;
	}

      rp = mmap (0, MMAP_PAGESIZE,
		 PROT_READ | PROT_WRITE, MAP_SHARED, svm_fd, 0);

      if (rp == (svm_region_t *) MAP_FAILED)
	{
	  close (svm_fd);
	  clib_warning ("mmap");
	  return (0);
	}
      /*
       * We lost the footrace to create this region; make sure
       * the winner has crossed the finish line.
       */
      while (rp->version == 0 && deadman++ < 5)
	{
	  sleep (1);
	}

      if (rp->version == 0)
	{
	  clib_warning ("rp->version %d not %d", rp->version, SVM_VERSION);
	  close (svm_fd);
	  /* only the first page was mapped above */
	  munmap (rp, MMAP_PAGESIZE);
	  return (0);
	}
      /* Remap now that the region has been placed */
      a->baseva = rp->virtual_base;
      a->size = rp->virtual_size;
      munmap (rp, MMAP_PAGESIZE);

      rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size,
			  PROT_READ | PROT_WRITE,
			  MAP_SHARED | MAP_FIXED, svm_fd, 0);
      if ((uword) rp == (uword) MAP_FAILED)
	{
	  clib_unix_warning ("mmap");
	  close (svm_fd);
	  return (0);
	}

      close (svm_fd);

      if ((uword) rp != rp->virtual_base)
	{
	  clib_warning ("mmap botch");
	}
      /*
       * Try to fix the region mutex if it is held by
       * a dead pid
       */
      pid_holding_region_lock = rp->mutex_owner_pid;
      if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
	{
	  clib_warning
	    ("region %s mutex held by dead pid %d, tag %d, force unlock",
	     rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
	  /* owner pid is nonexistent */
	  rp->mutex.__data.__owner = 0;
	  rp->mutex.__data.__lock = 0;
	  dead_region_recovery = 1;
	}

      if (dead_region_recovery)
	clib_warning ("recovery: attempt to re-lock region");

      region_lock (rp, 2);
      oldheap = svm_push_pvt_heap (rp);
      vec_add1 (rp->client_pids, getpid ());

      if (dead_region_recovery)
	clib_warning ("recovery: attempt svm_data_region_map");

      rv = svm_data_region_map (a, rp);
      if (rv)
	{
	  clib_warning ("data_region_map: %d", rv);
	}

      if (dead_region_recovery)
	clib_warning ("unlock and continue");

      region_unlock (rp);

      svm_pop_heap (oldheap);

      return ((void *) rp);
    }
  return 0;			/* NOTREACHED */
}
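/*
 * Usage sketch (not part of the original file; field values are
 * hypothetical): creator and clients call the same entry point. Whoever
 * wins the shm_open(O_CREAT | O_EXCL) footrace initializes the region;
 * everyone else waits for version != 0 and remaps at the creator's base.
 */
#if 0
static svm_region_t *
example_map_region (void)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->name = "/my-region";	/* hypothetical name */
  a->baseva = 0x130000000ULL;
  a->size = 64 << 20;
  a->flags = SVM_FLAGS_MHEAP;
  return svm_map_region (a);
}
#endif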
void
svm_mutex_cleanup (void)
{
  int i;
  for (i = 0; i < nheld; i++)
    {
      pthread_mutex_unlock (mutexes_held[i]);
    }
}
static int
svm_region_init_internal (svm_map_region_args_t * a)
{
  svm_region_t *rp;
  u64 ticks = clib_cpu_time_now ();
  uword randomize_baseva;

  /* guard against klutz calls */
  if (root_rp)
    return -1;

  root_rp_refcount++;

  atexit (svm_mutex_cleanup);

  /* Randomize the shared-VM base at init time */
  if (MMAP_PAGESIZE <= (4 << 10))
    randomize_baseva = (ticks & 15) * MMAP_PAGESIZE;
  else
    randomize_baseva = (ticks & 3) * MMAP_PAGESIZE;

  a->baseva += randomize_baseva;

  rp = svm_map_region (a);
  if (!rp)
    return -1;

  region_lock (rp, 3);

  /* Set up the main region data structures */
  if (rp->flags & SVM_FLAGS_NEED_DATA_INIT)
    {
      svm_main_region_t *mp = 0;
      void *oldheap;

      rp->flags &= ~(SVM_FLAGS_NEED_DATA_INIT);

      oldheap = svm_push_pvt_heap (rp);
      vec_validate (mp, 0);
      mp->name_hash = hash_create_string (0, sizeof (uword));
      mp->root_path = a->root_path ? format (0, "%s%c", a->root_path, 0) : 0;
      mp->uid = a->uid;
      mp->gid = a->gid;
      rp->data_base = mp;
      svm_pop_heap (oldheap);
    }
  region_unlock (rp);
  root_rp = rp;

  return 0;
}
void
svm_region_init (void)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;

  svm_region_init_internal (a);
}
int
svm_region_init_chroot (const char *root_path)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;

  return svm_region_init_internal (a);
}
void
svm_region_init_chroot_uid_gid (const char *root_path, int uid, int gid)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->root_path = root_path;
  a->name = SVM_GLOBAL_REGION_NAME;
  a->baseva = svm_get_global_region_base_va ();
  a->size = SVM_GLOBAL_REGION_SIZE;
  a->flags = SVM_FLAGS_NODATA;
  a->uid = uid;
  a->gid = gid;

  svm_region_init_internal (a);
}
void
svm_region_init_args (svm_map_region_args_t * a)
{
  svm_region_init_internal (a);
}
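/*
 * Usage sketch (illustrative): a process initializes the global region
 * exactly once, picking whichever wrapper matches its deployment; the
 * root path below is hypothetical.
 */
#if 0
static void
example_init (void)
{
  svm_region_init_chroot ("/vpe-test");	/* hypothetical root path */
}
#endif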
void *
svm_region_find_or_create (svm_map_region_args_t * a)
{
  svm_main_region_t *mp;
  svm_region_t *rp;
  uword need_nbits;
  int index, i;
  void *oldheap;
  uword *p;
  u8 *name;
  svm_subregion_t *subp;

  ASSERT (root_rp);

  a->size += MMAP_PAGESIZE +
    ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
  a->size = rnd_pagesize (a->size);

  region_lock (root_rp, 4);
  oldheap = svm_push_pvt_heap (root_rp);
  mp = root_rp->data_base;

  ASSERT (mp);

  /* Map the named region from the correct chroot environment */
  if (a->root_path == NULL)
    a->root_path = (char *) mp->root_path;

  /*
   * See if this region is already known. If it is, we're
   * almost done...
   */
  p = hash_get_mem (mp->name_hash, a->name);

  if (p)
    {
      rp = svm_map_region (a);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return rp;
    }
  /* Create the region. */
  ASSERT ((a->size & ~(MMAP_PAGESIZE - 1)) == a->size);

  need_nbits = a->size / MMAP_PAGESIZE;

  index = 1;			/* $$$ fixme, figure out how many bits to really skip */

  /*
   * Scan the virtual space allocation bitmap, looking for a large
   * enough chunk
   */
  do
    {
      if (clib_bitmap_get_no_check (root_rp->bitmap, index) == 0)
	{
	  for (i = 0; i < (need_nbits - 1); i++)
	    {
	      if (clib_bitmap_get_no_check (root_rp->bitmap, index + i) == 1)
		{
		  index = index + i;
		  goto next;
		}
	    }
	  break;
	}
      index++;
    next:;
    }
  while (index < root_rp->bitmap_size);

  /* Completely out of VM? */
  if (index >= root_rp->bitmap_size)
    {
      clib_warning ("region %s: not enough VM to allocate 0x%llx (%lld)",
		    root_rp->region_name, a->size, a->size);
      svm_pop_heap (oldheap);
      region_unlock (root_rp);
      return (0);
    }
  /*
   * Mark virtual space allocated
   */
#if CLIB_DEBUG > 1
  clib_warning ("set %d bits at index %d", need_nbits, index);
#endif

  for (i = 0; i < need_nbits; i++)
    {
      clib_bitmap_set_no_check (root_rp->bitmap, index + i, 1);
    }

  /* Place this region where it goes... */
  a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;

  rp = svm_map_region (a);

  pool_get (mp->subregions, subp);
  name = format (0, "%s%c", a->name, 0);
  subp->subregion_name = name;

  hash_set_mem (mp->name_hash, name, subp - mp->subregions);

  svm_pop_heap (oldheap);

  region_unlock (root_rp);

  return ((void *) rp);
}
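/*
 * Usage sketch (names and sizes hypothetical): attach to, or first-fit
 * allocate, a named subregion. Note the function grows a->size by one
 * header page plus the private heap before rounding to a page multiple.
 */
#if 0
static svm_region_t *
example_subregion (void)
{
  svm_map_region_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  a->name = "/my-subregion";	/* hypothetical name */
  a->size = 1 << 20;
  a->flags = SVM_FLAGS_MHEAP;
  return svm_region_find_or_create (a);
}
#endif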
void
svm_region_unlink (svm_region_t * rp)
{
  svm_map_region_args_t _a, *a = &_a;
  svm_main_region_t *mp;
  u8 *shm_name;

  ASSERT (root_rp);
  ASSERT (rp);
  ASSERT (vec_c_string_is_terminated (rp->region_name));

  mp = root_rp->data_base;
  ASSERT (mp);

  a->root_path = (char *) mp->root_path;
  a->name = rp->region_name;
  shm_name = shm_name_from_svm_map_region_args (a);
  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] shm_unlink (%s)", getpid (), shm_name);
  shm_unlink ((const char *) shm_name);
  vec_free (shm_name);
}
/*
 * svm_region_unmap
 *
 * Let go of the indicated region. If the calling process
 * is the last customer, throw it away completely.
 * The root region mutex guarantees atomicity with respect to
 * a new region client showing up at the wrong moment.
 */
static void
svm_region_unmap_internal (void *rp_arg, u8 is_client)
{
  int i, mypid = getpid ();
  int nclients_left;
  void *oldheap;
  uword virtual_base, virtual_size;
  svm_region_t *rp = rp_arg;
  char *name;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  ASSERT (rp);
  ASSERT (root_rp);

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] unmap region %s", getpid (), rp->region_name);

  region_lock (root_rp, 5);
  region_lock (rp, 6);

  oldheap = svm_push_pvt_heap (rp);	/* nb vec_delete() in the loop */

  /* Remove the caller from the list of mappers */
  for (i = 0; i < vec_len (rp->client_pids); i++)
    {
      if (rp->client_pids[i] == mypid)
	{
	  vec_delete (rp->client_pids, 1, i);
	  goto found;
	}
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  svm_pop_heap (oldheap);

  nclients_left = vec_len (rp->client_pids);
  virtual_base = rp->virtual_base;
  virtual_size = rp->virtual_size;
  if (nclients_left == 0)
    {
      int index, nbits, i;
      svm_main_region_t *mp;
      uword *p;
      svm_subregion_t *subp;

      /* Kill the region, last guy on his way out */

      oldheap = svm_push_pvt_heap (root_rp);
      name = vec_dup (rp->region_name);

      virtual_base = rp->virtual_base;
      virtual_size = rp->virtual_size;

      /* Figure out which bits to clear in the root region bitmap */
      index = (virtual_base - root_rp->virtual_base) / MMAP_PAGESIZE;

      nbits = (virtual_size + MMAP_PAGESIZE - 1) / MMAP_PAGESIZE;

#if CLIB_DEBUG > 1
      clib_warning ("clear %d bits at index %d", nbits, index);
#endif
      /* Give back the allocated VM */
      for (i = 0; i < nbits; i++)
	{
	  clib_bitmap_set_no_check (root_rp->bitmap, index + i, 0);
	}

      mp = root_rp->data_base;

      p = hash_get_mem (mp->name_hash, name);

      /* Better never happen ... */
      if (p == NULL)
	{
	  region_unlock (rp);
	  region_unlock (root_rp);
	  svm_pop_heap (oldheap);
	  clib_warning ("Region name '%s' not found?", name);
	  return;
	}

      /* Remove from the root region subregion pool */
      subp = mp->subregions + p[0];
      pool_put (mp->subregions, subp);

      hash_unset_mem (mp->name_hash, name);
      vec_free (name);

      region_unlock (rp);
      /* If a client asks for the cleanup, don't unlink the backing
       * file since we can't tell if it has been recreated. */
      if (!is_client)
	svm_region_unlink (rp);

      munmap ((void *) virtual_base, virtual_size);
      region_unlock (root_rp);
      svm_pop_heap (oldheap);
      return;
    }

  region_unlock (rp);
  region_unlock (root_rp);

  munmap ((void *) virtual_base, virtual_size);
}

void
svm_region_unmap (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
}

void
svm_region_unmap_client (void *rp_arg)
{
  svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
}
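/*
 * Note: svm_region_unmap() is the server-side variant, which unlinks the
 * backing shm file once the last mapper leaves; svm_region_unmap_client()
 * leaves the file alone, since a restarted server may already have
 * recreated it.
 */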
/*
 * svm_region_exit
 */
static void
svm_region_exit_internal (u8 is_client)
{
  void *oldheap;
  int i, mypid = getpid ();
  uword virtual_base, virtual_size;

  /* It felt so nice we did it twice... */
  if (root_rp == 0)
    return;

  if (--root_rp_refcount > 0)
    return;

  /*
   * If we take a signal while holding one or more shared-memory
   * mutexes, we may end up back here from an otherwise
   * benign exit handler. Bail out to avoid a recursive
   * mutex screw-up.
   */
  if (nheld)
    return;

  region_lock (root_rp, 7);
  oldheap = svm_push_pvt_heap (root_rp);

  virtual_base = root_rp->virtual_base;
  virtual_size = root_rp->virtual_size;

  for (i = 0; i < vec_len (root_rp->client_pids); i++)
    {
      if (root_rp->client_pids[i] == mypid)
	{
	  vec_delete (root_rp->client_pids, 1, i);
	  goto found;
	}
    }
  clib_warning ("pid %d AWOL", mypid);

found:

  if (!is_client && vec_len (root_rp->client_pids) == 0)
    svm_region_unlink (root_rp);

  region_unlock (root_rp);
  svm_pop_heap (oldheap);

  root_rp = 0;
  munmap ((void *) virtual_base, virtual_size);
}

void
svm_region_exit (void)
{
  svm_region_exit_internal (0 /* is_client */ );
}

void
svm_region_exit_client (void)
{
  svm_region_exit_internal (1 /* is_client */ );
}
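/*
 * Usage sketch (illustrative; root path hypothetical): clients pair the
 * init and exit calls around their lifetime so the root region refcount
 * and client pid vector stay balanced.
 */
#if 0
static void
example_client_lifetime (void)
{
  svm_region_init_chroot ("/vpe-test");	/* hypothetical root path */
  /* ... map and use subregions ... */
  svm_region_exit_client ();
}
#endif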
void
svm_client_scan_this_region_nolock (svm_region_t * rp)
{
  int j;
  int mypid = getpid ();
  void *oldheap;

  for (j = 0; j < vec_len (rp->client_pids); j++)
    {
      if (mypid == rp->client_pids[j])
	continue;
      if (rp->client_pids[j] && (kill (rp->client_pids[j], 0) < 0))
	{
	  clib_warning ("%s: cleanup ghost pid %d",
			rp->region_name, rp->client_pids[j]);
	  /* nb: client vec in rp->region_heap */
	  oldheap = svm_push_pvt_heap (rp);
	  vec_delete (rp->client_pids, 1, j);
	  j--;			/* item at j replaced, redo this one */
	  svm_pop_heap (oldheap);
	}
    }
}
/*
 * Scan svm regions for dead clients
 */
void
svm_client_scan (const char *root_path)
{
  int i, j;
  svm_main_region_t *mp;
  svm_map_region_args_t *a = 0;
  svm_region_t *root_rp;
  svm_region_t *rp;
  svm_subregion_t *subp;
  u8 *name = 0;
  u8 **svm_names = 0;
  void *oldheap;
  int mypid = getpid ();

  vec_validate (a, 0);

  svm_region_init_chroot (root_path);

  root_rp = svm_get_root_rp ();

  pthread_mutex_lock (&root_rp->mutex);

  mp = root_rp->data_base;

  for (j = 0; j < vec_len (root_rp->client_pids); j++)
    {
      if (mypid == root_rp->client_pids[j])
	continue;
      if (root_rp->client_pids[j] && (kill (root_rp->client_pids[j], 0) < 0))
	{
	  clib_warning ("%s: cleanup ghost pid %d",
			root_rp->region_name, root_rp->client_pids[j]);
	  /* nb: client vec in root_rp->region_heap */
	  oldheap = svm_push_pvt_heap (root_rp);
	  vec_delete (root_rp->client_pids, 1, j);
	  j--;			/* item at j replaced, redo this one */
	  svm_pop_heap (oldheap);
	}
    }

  /*
   * Snapshot names, can't hold root rp mutex across
   * find_or_create.
   */
  /* *INDENT-OFF* */
  pool_foreach (subp, mp->subregions, ({
	name = vec_dup (subp->subregion_name);
	vec_add1 (svm_names, name);
      }));
  /* *INDENT-ON* */

  pthread_mutex_unlock (&root_rp->mutex);
  for (i = 0; i < vec_len (svm_names); i++)
    {
      vec_validate (a, 0);
      a->root_path = root_path;
      a->name = (char *) svm_names[i];
      rp = svm_region_find_or_create (a);
      if (rp)
	{
	  pthread_mutex_lock (&rp->mutex);

	  svm_client_scan_this_region_nolock (rp);

	  pthread_mutex_unlock (&rp->mutex);
	  svm_region_unmap (rp);
	  vec_free (svm_names[i]);
	}
      vec_free (a);
    }
  vec_free (svm_names);

  svm_region_exit ();

  vec_free (a);
}
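/*
 * Usage sketch (illustrative): an operator tool can reap dead client
 * pids from every region under a given root path; passing 0 scans the
 * default namespace.
 */
#if 0
static void
example_reap (void)
{
  svm_client_scan (0 /* root_path */ );
}
#endif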
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */