#include <vppinfra/bitmap.h>
#include <vppinfra/fifo.h>
#include <vppinfra/time.h>
-#include <vppinfra/mheap.h>
#include <vppinfra/heap.h>
#include <vppinfra/pool.h>
#include <vppinfra/format.h>
#define MUTEX_DEBUG
+/* Compute the base virtual address for the global SVM region.
+ *
+ * On AArch64 the usable VA width varies between kernels (36 to 48
+ * bits), so the width is detected by scanning /proc/self/maps for the
+ * highest mapped end address; the base is then placed at one quarter
+ * of the VA space, backed off by twice SVM_GLOBAL_REGION_SIZE.  Under
+ * AddressSanitizer a fixed high base is used instead; on all other
+ * platforms a fixed default is returned. */
+u64
+svm_get_global_region_base_va ()
+{
+#if __aarch64__
+ /* On AArch64 VA space can have different size, from 36 to 48 bits.
+ Here we are trying to detect VA bits by parsing /proc/self/maps
+ address ranges */
+ int fd;
+ unformat_input_t input;
+ u64 start, end = 0;
+ u8 bits = 0;
+
+ if ((fd = open ("/proc/self/maps", 0)) < 0)
+ clib_unix_error ("open '/proc/self/maps'");
+
+ unformat_init_clib_file (&input, fd);
+ while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
+ {
+ /* 'end' keeps the last (highest) mapping end seen; decrement so
+ it refers to the last mapped byte, not one past it. */
+ if (unformat (&input, "%llx-%llx", &start, &end))
+ end--;
+ unformat_skip_line (&input);
+ }
+ unformat_free (&input);
+ close (fd);
+
+ /* VA width = index of the highest set bit in the last mapped byte */
+ bits = count_leading_zeros (end);
+ bits = 64 - bits;
+ if (bits >= 36 && bits <= 48)
+ return ((1ul << bits) / 4) - (2 * SVM_GLOBAL_REGION_SIZE);
+ else
+ clib_unix_error ("unexpected va bits '%u'", bits);
+ /* NOTE(review): on AArch64 the fallbacks below are reached only if
+ clib_unix_error returns -- presumably it is noreturn; confirm. */
+#endif
+
+#ifdef CLIB_SANITIZE_ADDR
+ /* ASan reserves low shadow addresses; use a high fixed base. */
+ return 0x200000000000;
+#endif
+ /* default value */
+ return 0x130000000ULL;
+}
+
static void
region_lock (svm_region_t * rp, int tag)
{
rp->mutex_owner_pid = getpid ();
rp->mutex_owner_tag = tag;
#endif
- ASSERT (nheld < MAXLOCK);
+ ASSERT (nheld < MAXLOCK); //NOSONAR
/*
* Keep score of held mutexes so we can try to exit
* cleanly if the world comes to an end at the worst possible
}
}
}
- s = format (s, " rgn heap stats: %U", format_mheap,
- rp->region_heap, 0);
- if ((rp->flags & SVM_FLAGS_MHEAP) && rp->data_heap)
- {
- s = format (s, "\n data heap stats: %U", format_mheap,
- rp->data_heap, 1);
- }
- s = format (s, "\n");
}
return (s);
return -3;
}
close (fd);
- rp->backing_file = (char *) format (0, "%s\0", a->backing_file);
+ CLIB_MEM_UNPOISON (rp->data_base, map_size);
+ rp->backing_file = (char *) format (0, "%s%c", a->backing_file, 0);
rp->flags |= SVM_FLAGS_FILE;
}
if (a->flags & SVM_FLAGS_MHEAP)
{
- rp->data_heap =
- mheap_alloc_with_flags ((void *) (rp->data_base), map_size,
- MHEAP_FLAG_DISABLE_VM);
+ rp->data_heap = clib_mem_create_heap (rp->data_base, map_size,
+ 1 /* locked */ , "svm data");
+
rp->flags |= SVM_FLAGS_MHEAP;
}
return 0;
return -3;
}
close (fd);
+ CLIB_MEM_UNPOISON (rp->data_base, map_size);
}
return 0;
}
u8 *
shm_name_from_svm_map_region_args (svm_map_region_args_t * a)
{
- u8 *path;
u8 *shm_name;
- u8 *split_point;
- u8 *mkdir_arg = 0;
int root_path_offset = 0;
int name_offset = 0;
if (a->root_path[0] == '/')
root_path_offset++;
- /* create the root_path under /dev/shm
- iterate through path creating directories */
-
- path = format (0, "/dev/shm/%s%c", &a->root_path[root_path_offset], 0);
- split_point = path + 1;
- vec_add1 (mkdir_arg, '-');
-
- while (*split_point)
- {
- while (*split_point && *split_point != '/')
- {
- vec_add1 (mkdir_arg, *split_point);
- split_point++;
- }
- vec_add1 (mkdir_arg, 0);
-
- /* ready to descend another level */
- mkdir_arg[vec_len (mkdir_arg) - 1] = '-';
- split_point++;
- }
- vec_free (mkdir_arg);
- vec_free (path);
-
if (a->name[0] == '/')
name_offset = 1;
ASSERT (rp);
int rv;
- memset (rp, 0, sizeof (*rp));
+ clib_memset (rp, 0, sizeof (*rp));
if (pthread_mutexattr_init (&attr))
clib_unix_warning ("mutexattr_init");
rp->virtual_base = a->baseva;
rp->virtual_size = a->size;
- rp->region_heap =
- mheap_alloc_with_flags (uword_to_pointer
- (a->baseva + MMAP_PAGESIZE, void *),
- (a->pvt_heap_size !=
- 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
- MHEAP_FLAG_DISABLE_VM);
+ rp->region_heap = clib_mem_create_heap
+ (uword_to_pointer (a->baseva + MMAP_PAGESIZE, void *),
+ (a->pvt_heap_size !=
+ 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, 1 /* locked */ ,
+ "svm region");
+
oldheap = svm_push_pvt_heap (rp);
rp->region_name = (char *) format (0, "%s%c", a->name, 0);
return (0);
}
close (svm_fd);
+ CLIB_MEM_UNPOISON (rp, a->size);
svm_region_init_mapped_region (a, rp);
return (0);
}
+ /* Reset ownership in case the client started first */
+ if (fchown (svm_fd, a->uid, a->gid) < 0)
+ clib_unix_warning ("segment chown [ok if client starts first]");
+
time_left = 20;
while (1)
{
clib_warning ("mmap");
return (0);
}
+
+ CLIB_MEM_UNPOISON (rp, MMAP_PAGESIZE);
+
/*
* We lost the footrace to create this region; make sure
* the winner has crossed the finish line.
return (0);
}
+ close (svm_fd);
+
+ CLIB_MEM_UNPOISON (rp, a->size);
+
if ((uword) rp != rp->virtual_base)
{
clib_warning ("mmap botch");
pid_holding_region_lock = rp->mutex_owner_pid;
if (pid_holding_region_lock && kill (pid_holding_region_lock, 0) < 0)
{
+ pthread_mutexattr_t attr;
clib_warning
("region %s mutex held by dead pid %d, tag %d, force unlock",
rp->region_name, pid_holding_region_lock, rp->mutex_owner_tag);
/* owner pid is nonexistent */
- rp->mutex.__data.__owner = 0;
- rp->mutex.__data.__lock = 0;
+ if (pthread_mutexattr_init (&attr))
+ clib_unix_warning ("mutexattr_init");
+ if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED))
+ clib_unix_warning ("mutexattr_setpshared");
+ if (pthread_mutex_init (&rp->mutex, &attr))
+ clib_unix_warning ("mutex_init");
dead_region_recovery = 1;
}
return ((void *) rp);
}
- return 0; /* NOTREACHED */
+ return 0; /* NOTREACHED *///NOSONAR
}
static void
int i;
for (i = 0; i < nheld; i++)
{
- pthread_mutex_unlock (mutexes_held[i]);
+ pthread_mutex_unlock (mutexes_held[i]); //NOSONAR
}
}
{
svm_map_region_args_t _a, *a = &_a;
- memset (a, 0, sizeof (*a));
+ clib_memset (a, 0, sizeof (*a));
a->root_path = 0;
a->name = SVM_GLOBAL_REGION_NAME;
- a->baseva = SVM_GLOBAL_REGION_BASEVA;
+ a->baseva = svm_get_global_region_base_va ();
a->size = SVM_GLOBAL_REGION_SIZE;
a->flags = SVM_FLAGS_NODATA;
a->uid = 0;
{
svm_map_region_args_t _a, *a = &_a;
- memset (a, 0, sizeof (*a));
+ clib_memset (a, 0, sizeof (*a));
a->root_path = root_path;
a->name = SVM_GLOBAL_REGION_NAME;
- a->baseva = SVM_GLOBAL_REGION_BASEVA;
+ a->baseva = svm_get_global_region_base_va ();
a->size = SVM_GLOBAL_REGION_SIZE;
a->flags = SVM_FLAGS_NODATA;
a->uid = 0;
{
svm_map_region_args_t _a, *a = &_a;
- memset (a, 0, sizeof (*a));
+ clib_memset (a, 0, sizeof (*a));
a->root_path = root_path;
a->name = SVM_GLOBAL_REGION_NAME;
- a->baseva = SVM_GLOBAL_REGION_BASEVA;
+ a->baseva = svm_get_global_region_base_va ();
a->size = SVM_GLOBAL_REGION_SIZE;
a->flags = SVM_FLAGS_NODATA;
a->uid = uid;
* a new region client showing up at the wrong moment.
*/
void
-svm_region_unmap (void *rp_arg)
+svm_region_unmap_internal (void *rp_arg, u8 is_client)
{
int i, mypid = getpid ();
int nclients_left;
oldheap = svm_push_pvt_heap (rp); /* nb vec_delete() in the loop */
/* Remove the caller from the list of mappers */
+ CLIB_MEM_UNPOISON (rp->client_pids, vec_bytes (rp->client_pids));
for (i = 0; i < vec_len (rp->client_pids); i++)
{
if (rp->client_pids[i] == mypid)
vec_free (name);
region_unlock (rp);
- svm_region_unlink (rp);
+
+ /* If a client asks for the cleanup, don't unlink the backing
+ * file since we can't tell if it has been recreated. */
+ if (!is_client)
+ svm_region_unlink (rp);
+
munmap ((void *) virtual_base, virtual_size);
region_unlock (root_rp);
svm_pop_heap (oldheap);
munmap ((void *) virtual_base, virtual_size);
}
+/* Unmap an SVM region as the owner/server (is_client = 0): the
+ * region's backing file is also unlinked. */
+void
+svm_region_unmap (void *rp_arg)
+{
+  svm_region_unmap_internal (rp_arg, 0 /* is_client */ );
+}
+
+/* Unmap an SVM region as a client (is_client = 1): the backing file
+ * is left in place, since the server may have recreated it. */
+void
+svm_region_unmap_client (void *rp_arg)
+{
+  svm_region_unmap_internal (rp_arg, 1 /* is_client */ );
+}
+
/*
* svm_region_exit
*/
-void
-svm_region_exit ()
+static void
+svm_region_exit_internal (u8 is_client)
{
void *oldheap;
int i, mypid = getpid ();
virtual_base = root_rp->virtual_base;
virtual_size = root_rp->virtual_size;
+ CLIB_MEM_UNPOISON (root_rp->client_pids, vec_bytes (root_rp->client_pids));
for (i = 0; i < vec_len (root_rp->client_pids); i++)
{
if (root_rp->client_pids[i] == mypid)
found:
- if (vec_len (root_rp->client_pids) == 0)
+ if (!is_client && vec_len (root_rp->client_pids) == 0)
svm_region_unlink (root_rp);
region_unlock (root_rp);
munmap ((void *) virtual_base, virtual_size);
}
+/* Detach this process from the root region as the owner/server
+ * (is_client = 0): the root region's backing file may be unlinked
+ * once no client pids remain. */
+void
+svm_region_exit (void)
+{
+  svm_region_exit_internal (0 /* is_client */ );
+}
+
+/* Detach this process from the root region as a client
+ * (is_client = 1): the root region's backing file is never unlinked. */
+void
+svm_region_exit_client (void)
+{
+  svm_region_exit_internal (1 /* is_client */ );
+}
+
void
svm_client_scan_this_region_nolock (svm_region_t * rp)
{
* find_or_create.
*/
/* *INDENT-OFF* */
- pool_foreach (subp, mp->subregions, ({
+ pool_foreach (subp, mp->subregions) {
name = vec_dup (subp->subregion_name);
vec_add1(svm_names, name);
- }));
+ }
/* *INDENT-ON* */
pthread_mutex_unlock (&root_rp->mutex);