set_property(CACHE CMAKE_BUILD_TYPE PROPERTY
HELPSTRING "Build type - valid options are: ${BUILD_TYPES}")
+##############################################################################
+# sanitizers
+##############################################################################
+
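+# instrument the build with AddressSanitizer, e.g. cmake -DENABLE_SANITIZE_ADDR=ON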
+option(ENABLE_SANITIZE_ADDR "Enable Address Sanitizer" OFF)
+if (ENABLE_SANITIZE_ADDR)
+ set(CMAKE_C_FLAGS "-fsanitize=address --param asan-stack=0 -DCLIB_SANITIZE_ADDR ${CMAKE_C_FLAGS}")
+ set(CMAKE_EXE_LINKER_FLAGS "-fsanitize=address ${CMAKE_EXE_LINKER_FLAGS}")
+ set(CMAKE_SHARED_LINKER_FLAGS "-fsanitize=address ${CMAKE_SHARED_LINKER_FLAGS}")
+endif (ENABLE_SANITIZE_ADDR)
+
##############################################################################
# install config
##############################################################################
static_always_inline __m128i
aesni_gcm_load_partial (__m128i * p, int n_bytes)
{
+ ASSERT (n_bytes <= 16);
#ifdef __AVX512F__
return _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
#else
- return aesni_gcm_byte_mask (_mm_loadu_si128 (p), n_bytes);
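+  /* the 16-byte load may read past the end of the partial buffer;
+   * CLIB_MEM_OVERFLOW_LOAD unpoisons the span around the load so ASan
+   * does not report the deliberate overread */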
+ return aesni_gcm_byte_mask (CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, p),
+ n_bytes);
#endif
}
T = aesni_gcm_ghash (T, kd, (__m128i *) addt, aad_bytes);
/* initialize counter */
- Y0 = _mm_loadu_si128 ((__m128i *) iv);
+ Y0 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) iv);
Y0 = _mm_insert_epi32 (Y0, clib_host_to_net_u32 (1), 3);
/* ghash and encrypt/decrypt */
#ifndef __aesni_h__
#define __aesni_h__
-
typedef enum
{
AESNI_KEY_128 = 0,
__m128i r1, r2, r3;
k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
- r3 = _mm_loadu_si128 ((__m128i *) (key + 16));
+  /* load the 24-byte key as 2 * 16-byte loads (and ignore the last 8 bytes) */
+ r3 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) (key + 16));
k[1] = r3;
r2 = _mm_aeskeygenassist_si128 (r3, 0x1);
u64
svm_get_global_region_base_va ()
{
+#ifdef CLIB_SANITIZE_ADDR
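+  /* under ASan, skip the VA-space probing below and use a fixed base;
+   * presumably chosen to stay clear of the ASan shadow mappings (assumption) */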
+ return 0x200000000000;
+#endif
+
#if __aarch64__
  /* On AArch64 the VA space can have different sizes, from 36 to 48 bits.
     Here we try to detect the number of VA bits by parsing /proc/self/maps
* Memory mapped to high addresses for session/vppcom/vcl/etc...
*/
#if __WORDSIZE == 64
+#ifdef CLIB_SANITIZE_ADDR
+#define HIGH_SEGMENT_BASEVA 0x300000000000
+#else /* CLIB_SANITIZE_ADDR */
#define HIGH_SEGMENT_BASEVA (8ULL << 30) /* 8GB */
+#endif /* CLIB_SANITIZE_ADDR */
#elif __WORDSIZE == 32
#define HIGH_SEGMENT_BASEVA (3584UL << 20) /* 3.5GB */
#else
void *handle = NULL;
int i;
-#ifdef RTLD_DEEPBIND
+#if defined(RTLD_DEEPBIND) && !defined(CLIB_SANITIZE_ADDR)
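+  /* RTLD_DEEPBIND is incompatible with the ASan runtime interceptors */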
flags |= RTLD_DEEPBIND;
#endif
}
#endif
-DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE int
+int
libc_vfcntl (int fd, int cmd, va_list ap)
{
long int args[4];
}
#ifdef HAVE_FCNTL64
-DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE int
+int
libc_vfcntl64 (int fd, int cmd, va_list ap)
{
long int args[4];
}
#endif
-DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE int
+int
libc_vioctl (int fd, int cmd, va_list ap)
{
long int args[4];
#define DESTRUCTOR_ATTRIBUTE
#endif
-#define HAVE_ADDRESS_SANITIZER_ATTRIBUTE
-#ifdef HAVE_ADDRESS_SANITIZER_ATTRIBUTE
-#define DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE __attribute__((no_sanitize_address))
-#else
-#define DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE
-#endif
-
/*
* IMPORTANT
*
int libc_eventfd (int count, int flags);
#endif
-DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE int
-libc_vfcntl (int fd, int cmd, va_list ap);
+int libc_vfcntl (int fd, int cmd, va_list ap);
-DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE int
-libc_vfcntl64 (int fd, int cmd, va_list ap);
+int libc_vfcntl64 (int fd, int cmd, va_list ap);
-DO_NOT_SANITIZE_ADDRESS_ATTRIBUTE int
-libc_vioctl (int fd, int cmd, va_list ap);
+int libc_vioctl (int fd, int cmd, va_list ap);
int libc_getpeername (int sockfd, struct sockaddr *addr, socklen_t * addrlen);
max = clib_min (n_left_to_next, count);
}
#if defined(CLIB_HAVE_VEC512)
- u16x32 next32 = u16x32_load_unaligned (nexts);
+ u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
next32 = (next32 == u16x32_splat (next32[0]));
u64 bitmap = u16x32_msb_mask (next32);
n_enqueued = count_trailing_zeros (~bitmap);
#elif defined(CLIB_HAVE_VEC256)
- u16x16 next16 = u16x16_load_unaligned (nexts);
+ u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
next16 = (next16 == u16x16_splat (next16[0]));
u64 bitmap = u8x32_msb_mask ((u8x32) next16);
n_enqueued = count_trailing_zeros (~bitmap) / 2;
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
- u16x8 next8 = u16x8_load_unaligned (nexts);
+ u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
next8 = (next8 == u16x8_splat (next8[0]));
u64 bitmap = u8x16_msb_mask ((u8x16) next8);
n_enqueued = count_trailing_zeros (~bitmap) / 2;
#include <vlib/pci/pci.h>
#include <vlib/linux/vfio.h>
-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(CLIB_SANITIZE_ADDR)
/* we keep physmem in the low 38 bits of the VA address space, as some
   IOMMU implementations cannot map above that range */
#define VLIB_PHYSMEM_DEFAULT_BASE_ADDDR (1ULL << 36)
u8 data[0]; /**< actual message begins here */
} msgbuf_t;
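+/* shared-memory API messages are poisoned while queued; unpoisoning covers
+ * the msgbuf_t header plus data_len bytes of payload. CLIB_NOSANITIZE_ADDR
+ * is needed because data_len itself is read from poisoned memory. */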
+CLIB_NOSANITIZE_ADDR static inline void
+VL_MSG_API_UNPOISON (const void *a)
+{
+ const msgbuf_t *m = &((const msgbuf_t *) a)[-1];
+ CLIB_MEM_UNPOISON (m, sizeof (*m) + ntohl (m->data_len));
+}
+
+CLIB_NOSANITIZE_ADDR static inline void
+VL_MSG_API_SVM_QUEUE_UNPOISON (const svm_queue_t * q)
+{
+ CLIB_MEM_UNPOISON (q, sizeof (*q) + q->elsize * q->maxsize);
+}
+
+static inline void
+VL_MSG_API_POISON (const void *a)
+{
+ const msgbuf_t *m = &((const msgbuf_t *) a)[-1];
+ CLIB_MEM_POISON (m, sizeof (*m) + ntohl (m->data_len));
+}
+
/* api_shared.c prototypes */
void vl_msg_api_handler (void *the_msg);
void vl_msg_api_handler_no_free (void *the_msg);
regp->clib_file_index = am->shmem_hdr->clib_file_index;
q = regp->vl_input_queue = (svm_queue_t *) (uword) mp->input_queue;
+ VL_MSG_API_SVM_QUEUE_UNPOISON (q);
regp->name = format (0, "%s", mp->name);
vec_add1 (regp->name, 0);
uword mp;
if (!svm_queue_sub2 (q, (u8 *) & mp))
{
+ VL_MSG_API_UNPOISON ((void *) mp);
vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node);
return 0;
}
hash_free (am->msg_index_by_name_and_crc);
}
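+/* unpoison a vector received through shared memory, header included;
+ * CLIB_NOSANITIZE_ADDR because vec_len() reads the poisoned header */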
+CLIB_NOSANITIZE_ADDR static void
+VL_API_VEC_UNPOISON (const void *v)
+{
+ const vec_header_t *vh = &((vec_header_t *) v)[-1];
+ CLIB_MEM_UNPOISON (vh, sizeof (*vh) + vec_len (v));
+}
+
static void
vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp)
{
unserialize_open_data (sm, tblv, vec_len (tblv));
unserialize_integer (sm, &nmsgs, sizeof (u32));
+ VL_API_VEC_UNPOISON (tblv);
+
for (i = 0; i < nmsgs; i++)
{
msg_index = unserialize_likely_small_unsigned_integer (sm);
return -1;
}
+ CLIB_MEM_UNPOISON (shmem_hdr, sizeof (*shmem_hdr));
+ VL_MSG_API_SVM_QUEUE_UNPOISON (shmem_hdr->vl_input_queue);
+
pthread_mutex_lock (&svm->mutex);
oldheap = svm_push_data_heap (svm);
vl_input_queue = svm_queue_alloc_and_init (input_queue_size, sizeof (uword),
return -1;
read_one_msg:
+ VL_MSG_API_UNPOISON (rp);
if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_CREATE_REPLY)
{
clib_warning ("unexpected reply: id %d", ntohs (rp->_vl_msg_id));
if (svm_queue_sub (vl_input_queue, (u8 *) & rp, SVM_Q_NOWAIT, 0) < 0)
continue;
+ VL_MSG_API_UNPOISON (rp);
+
/* drain the queue */
if (ntohs (rp->_vl_msg_id) != VL_API_MEMCLNT_DELETE_REPLY)
{
#define DEBUG_MESSAGE_BUFFER_OVERRUN 0
-static inline void *
+CLIB_NOSANITIZE_ADDR static inline void *
vl_msg_api_alloc_internal (int nbytes, int pool, int may_return_null)
{
int i;
#endif
rv->data_len = htonl (nbytes - sizeof (msgbuf_t));
+ VL_MSG_API_UNPOISON (rv->data);
return (rv->data);
}
ASSERT (*overrun == 0x1badbabe);
}
#endif
+ VL_MSG_API_POISON (rv->data);
return;
}
if (rv->q)
{
rv->q = 0;
+ VL_MSG_API_POISON (rv->data);
return;
}
vl_msg_api_send_shmem (svm_queue_t * q, u8 * elem)
{
api_main_t *am = &api_main;
- uword *trace = (uword *) elem;
+ void *msg = (void *) *(uword *) elem;
if (am->tx_trace && am->tx_trace->enabled)
- vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]);
+ vl_msg_api_trace (am, am->tx_trace, msg);
/*
* Announce a probable binary API client bug:
q);
}
}
+ VL_MSG_API_POISON (msg);
(void) svm_queue_add (q, elem, 0 /* nowait */ );
}
vl_msg_api_send_shmem_nolock (svm_queue_t * q, u8 * elem)
{
api_main_t *am = &api_main;
- uword *trace = (uword *) elem;
+ void *msg = (void *) *(uword *) elem;
if (am->tx_trace && am->tx_trace->enabled)
- vl_msg_api_trace (am, am->tx_trace, (void *) trace[0]);
+ vl_msg_api_trace (am, am->tx_trace, msg);
(void) svm_queue_add_nolock (q, elem);
+ VL_MSG_API_POISON (msg);
}
/*
clib_memcpy (ei->address, address, sizeof (ei->address));
vec_add (hi->hw_address, address, sizeof (ei->address));
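+  /* the 6-byte MAC can be loaded as a u64 elsewhere, so keep 8 bytes
+   * addressable (assumption) */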
+ CLIB_MEM_UNPOISON (hi->hw_address, 8);
if (error)
{
return result % L2FIB_NUM_BUCKETS;
}
-/**
- * make address sanitizer skip this:
- * The 6-Bytes mac-address is cast into an 8-Bytes u64, with 2 additional Bytes.
- * l2fib_make_key() does read those two Bytes but does not use them.
- */
-always_inline u64 __attribute__ ((no_sanitize_address))
+always_inline u64
l2fib_make_key (const u8 * mac_address, u16 bd_index)
{
u64 temp;
* Create the in-register key as F:E:D:C:B:A:H:L
* In memory the key is L:H:A:B:C:D:E:F
*/
- temp = *((u64 *) (mac_address)) << 16;
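+  /* the 6-byte MAC is loaded as a u64; the 2 trailing bytes are read but
+   * immediately shifted/masked away, so use CLIB_MEM_OVERFLOW_LOAD ('*'
+   * acting as the load operation) to keep ASan from flagging the overread */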
+ temp = CLIB_MEM_OVERFLOW_LOAD (*, (u64 *) mac_address) << 16;
temp = (temp & ~0xffff) | (u64) (bd_index);
#else
/*
* Create the in-register key as H:L:A:B:C:D:E:F
* In memory the key is H:L:A:B:C:D:E:F
*/
- temp = *((u64 *) (mac_address)) >> 16;
+ temp = CLIB_MEM_OVERFLOW_LOAD (*, (u64 *) mac_address) >> 16;
temp = temp | (((u64) bd_index) << 48);
#endif
while (1)
while (!svm_queue_sub(q, (u8 *)&msg, SVM_Q_WAIT, 0))
{
+ VL_MSG_API_UNPOISON((void *)msg);
u16 id = ntohs(*((u16 *)msg));
switch (id) {
case VL_API_RX_THREAD_EXIT:
rv = svm_queue_sub(q, (u8 *)&msg, SVM_Q_WAIT, 0);
if (rv == 0) {
+ VL_MSG_API_UNPOISON((void *)msg);
u16 msg_id = ntohs(*((u16 *)msg));
switch (msg_id) {
case VL_API_RX_THREAD_EXIT:
{
rv = VAPI_EAGAIN;
}
+ else
+ VL_MSG_API_POISON (msg);
out:
VAPI_DBG ("vapi_send() rv = %d", rv);
return rv;
{
rv = VAPI_EAGAIN;
}
+ else
+ VL_MSG_API_POISON (msg1);
out:
VAPI_DBG ("vapi_send() rv = %d", rv);
return rv;
if (tmp == 0)
{
+ VL_MSG_API_UNPOISON ((void *) data);
#if VAPI_DEBUG_ALLOC
vapi_add_to_be_freed ((void *) data);
#endif
};
/* *INDENT-ON* */
+#ifdef CLIB_SANITIZE_ADDR
+/* default options for Address Sanitizer */
+const char *
+__asan_default_options (void)
+{
+ return "unmap_shadow_on_exit=1:disable_coredump=0:abort_on_error=1";
+}
+#endif /* CLIB_SANITIZE_ADDR */
+
/*
* fd.io coding-style-patch-verification: ON
*
)
set(VPPINFRA_HEADERS
+ sanitizer.h
bihash_16_8.h
bihash_24_8.h
bihash_40_8.h
*/
#include <vppinfra/dlmalloc.h>
+#include <vppinfra/sanitizer.h>
/*------------------------------ internal #includes ---------------------- */
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
/* Plain spin locks use single word (embedded in malloc_states) */
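+/* dlmalloc internals must be free to touch heap metadata that VPP keeps
+ * poisoned (the whole mspace is poisoned at creation), hence the
+ * CLIB_NOSANITIZE_ADDR annotations below */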
+CLIB_NOSANITIZE_ADDR
static int spin_acquire_lock(int *sl) {
int spins = 0;
while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
/* Return segment holding given address */
+CLIB_NOSANITIZE_ADDR
static msegmentptr segment_holding(mstate m, char* addr) {
msegmentptr sp = &m->seg;
for (;;) {
}
/* Return true if segment contains a segment link */
+CLIB_NOSANITIZE_ADDR
static int has_segment_link(mstate m, msegmentptr ss) {
msegmentptr sp = &m->seg;
for (;;) {
#if (FOOTERS && !INSECURE)
/* Check if (alleged) mstate m has expected magic field */
+CLIB_NOSANITIZE_ADDR
static inline int
ok_magic (const mstate m)
{
/* ----------------------------- statistics ------------------------------ */
#if !NO_MALLINFO
+CLIB_NOSANITIZE_ADDR
static struct dlmallinfo internal_mallinfo(mstate m) {
struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
ensure_initialization();
/* ----------------------- system deallocation -------------------------- */
/* Unmap and unlink any mmapped segments that don't contain used chunks */
+CLIB_NOSANITIZE_ADDR
static size_t release_unused_segments(mstate m) {
size_t released = 0;
int nsegs = 0;
return released;
}
+CLIB_NOSANITIZE_ADDR
static int sys_trim(mstate m, size_t pad) {
size_t released = 0;
ensure_initialization();
/* Consolidate and bin a chunk. Differs from exported versions
of free mainly in that the chunk need not be marked as inuse.
*/
+CLIB_NOSANITIZE_ADDR
static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
mchunkptr next = chunk_plus_offset(p, psize);
if (!pinuse(p)) {
/* ---------------------------- malloc --------------------------- */
/* allocate a large request from the best fitting chunk in a treebin */
+CLIB_NOSANITIZE_ADDR
static void* tmalloc_large(mstate m, size_t nb) {
tchunkptr v = 0;
size_t rsize = -nb; /* Unsigned negation */
}
/* allocate a small request from the best fitting chunk in a treebin */
+CLIB_NOSANITIZE_ADDR
static void* tmalloc_small(mstate m, size_t nb) {
tchunkptr t, v;
size_t rsize;
return newp;
}
+CLIB_NOSANITIZE_ADDR
static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
void* mem = 0;
if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
*sizep = this_seg->size;
}
+CLIB_NOSANITIZE_ADDR
int mspace_is_heap_object (mspace msp, void *p)
{
msegment *this_seg;
return (was_enabled);
}
+CLIB_NOSANITIZE_ADDR
int mspace_is_traced (mspace msp)
{
mstate ms = (mstate)msp;
return 0;
}
+CLIB_NOSANITIZE_ADDR
void* mspace_get_aligned (mspace msp,
unsigned long n_user_data_bytes,
unsigned long align,
return (void *) searchp;
}
+CLIB_NOSANITIZE_ADDR
void mspace_put (mspace msp, void *p_arg)
{
char *object_header;
mheap_put_trace ((unsigned long)p_arg, psize);
}
-#if CLIB_DEBUG > 0
+#if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
/* Poison the object */
{
size_t psize = mspace_usable_size (object_header);
mspace_free (msp, p_arg);
}
+CLIB_NOSANITIZE_ADDR
size_t mspace_usable_size_with_delta (const void *p)
{
size_t usable_size;
versions. This is not so nice but better than the alternatives.
*/
+CLIB_NOSANITIZE_ADDR
void* mspace_malloc(mspace msp, size_t bytes) {
mstate ms = (mstate)msp;
if (!ok_magic(ms)) {
return 0;
}
+CLIB_NOSANITIZE_ADDR
void mspace_free(mspace msp, void* mem) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
}
#if !NO_MALLINFO
+CLIB_NOSANITIZE_ADDR
struct dlmallinfo mspace_mallinfo(mspace msp) {
mstate ms = (mstate)msp;
if (!ok_magic(ms)) {
}
#endif /* NO_MALLINFO */
+CLIB_NOSANITIZE_ADDR
size_t mspace_usable_size(const void* mem) {
if (mem != 0) {
mchunkptr p = mem2chunk(mem);
* The above is true *unless* the extra bytes cross a page boundary
* into unmapped or no-access space, hence the boundary crossing check.
*/
-static inline u64 __attribute__ ((no_sanitize_address))
+static inline u64
hash_memory64 (void *p, word n_bytes, u64 state)
{
u64 *q = p;
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
c +=
- zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
+ zap64 (CLIB_MEM_OVERFLOW
+ (clib_mem_unaligned (q + 2, u64), q + 2, sizeof (u64)),
+ n % sizeof (u64)) << 8;
else
{
clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
if (n % sizeof (u64))
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
- b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
+ b +=
+ zap64 (CLIB_MEM_OVERFLOW
+ (clib_mem_unaligned (q + 1, u64), q + 1, sizeof (u64)),
+ n % sizeof (u64));
else
{
clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
if (n % sizeof (u64))
{
if (PREDICT_TRUE (page_boundary_crossing == 0))
- a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
+ a +=
+ zap64 (CLIB_MEM_OVERFLOW
+ (clib_mem_unaligned (q + 0, u64), q + 0, sizeof (u64)),
+ n % sizeof (u64));
else
{
clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));
#include <vppinfra/os.h>
#include <vppinfra/string.h> /* memcpy, clib_memset */
+#include <vppinfra/sanitizer.h>
#define CLIB_MAX_MHEAPS 256
return old;
}
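+/* allocation size without the is-heap-object assert; also usable on
+ * objects that are about to be poisoned and freed */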
+always_inline uword
+clib_mem_size_nocheck (void *p)
+{
+#if USE_DLMALLOC == 0
+ mheap_elt_t *e = mheap_user_pointer_to_elt (p);
+ return mheap_elt_data_bytes (e);
+#else
+ return mspace_usable_size_with_delta (p);
+#endif
+}
+
/* Memory allocator which may call os_out_of_memory() if it fails */
always_inline void *
clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset,
uword offset;
heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
clib_per_cpu_mheaps[cpu] = heap;
-
- if (offset != ~0)
- {
- p = heap + offset;
- return p;
- }
- else
- {
- if (os_out_of_memory_on_failure)
- os_out_of_memory ();
- return 0;
- }
+ if (PREDICT_TRUE (offset != ~0))
+ p = heap + offset;
#else
p = mspace_get_aligned (heap, size, align, align_offset);
- if (PREDICT_FALSE (p == 0))
+#endif /* USE_DLMALLOC */
+
+ if (PREDICT_FALSE (0 == p))
{
if (os_out_of_memory_on_failure)
os_out_of_memory ();
return 0;
}
+ CLIB_MEM_UNPOISON (p, size);
return p;
-#endif /* USE_DLMALLOC */
}
/* Memory allocator which calls os_out_of_memory() when it fails */
/* Make sure object is in the correct heap. */
ASSERT (clib_mem_is_heap_object (p));
+ CLIB_MEM_POISON (p, clib_mem_size_nocheck (p));
+
#if USE_DLMALLOC == 0
mheap_put (heap, (u8 *) p - heap);
#else
always_inline uword
clib_mem_size (void *p)
{
-#if USE_DLMALLOC == 0
- mheap_elt_t *e = mheap_user_pointer_to_elt (p);
ASSERT (clib_mem_is_heap_object (p));
- return mheap_elt_data_bytes (e);
-#else
- ASSERT (clib_mem_is_heap_object (p));
- return mspace_usable_size_with_delta (p);
-#endif
+ return clib_mem_size_nocheck (p);
}
always_inline void
clib_mem_free_s (void *p)
{
uword size = clib_mem_size (p);
+ CLIB_MEM_UNPOISON (p, size);
memset_s_inline (p, size, 0, size);
clib_mem_free (p);
}
#include <vppinfra/lock.h>
#include <vppinfra/hash.h>
#include <vppinfra/elf_clib.h>
+#include <vppinfra/sanitizer.h>
void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
if (mheap_trace_main.lock == 0)
clib_spinlock_init (&mheap_trace_main.lock);
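+  /* poison the freshly created heap; allocations are unpoisoned on demand */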
+ CLIB_MEM_POISON (mspace_least_addr (heap), mspace_footprint (heap));
return heap;
}
clib_bitmap_andnoti_notrim (_pool_var (p)->free_bitmap, \
_pool_var (i)); \
_vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \
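+      /* element recycled from the free list: unpoison it */ \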
+ CLIB_MEM_UNPOISON((E), sizeof((E)[0])); \
} \
else \
{ \
/** Free an object E in pool P. */
#define pool_put(P,E) \
do { \
- pool_header_t * _pool_var (p) = pool_header (P); \
- uword _pool_var (l) = (E) - (P); \
- ASSERT (vec_is_member (P, E)); \
- ASSERT (! pool_is_free (P, E)); \
+ typeof (P) _pool_var(p__) = (P); \
+ typeof (E) _pool_var(e__) = (E); \
+ pool_header_t * _pool_var (p) = pool_header (_pool_var(p__)); \
+ uword _pool_var (l) = _pool_var(e__) - _pool_var(p__); \
+ ASSERT (vec_is_member (_pool_var(p__), _pool_var(e__))); \
+ ASSERT (! pool_is_free (_pool_var(p__), _pool_var(e__))); \
\
/* Add element to free bitmap and to free list. */ \
_pool_var (p)->free_bitmap = \
} \
else \
vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \
+ \
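+  /* poison the freed element so stale references are reported */ \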
+ CLIB_MEM_POISON(_pool_var(e__), sizeof(_pool_var(e__)[0])); \
} while (0)
/** Free pool element with given index. */
--- /dev/null
+#ifndef _included_clib_sanitizer_h
+#define _included_clib_sanitizer_h
+
+#ifdef CLIB_SANITIZE_ADDR
+
+#include <sanitizer/asan_interface.h>
+#include <vppinfra/clib.h>
+
+#define CLIB_NOSANITIZE_ADDR __attribute__((no_sanitize_address))
+#define CLIB_MEM_POISON(a, s) ASAN_POISON_MEMORY_REGION((a), (s))
+#define CLIB_MEM_UNPOISON(a, s) ASAN_UNPOISON_MEMORY_REGION((a), (s))
+
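+/* evaluate expression f with the n bytes at src temporarily unpoisoned,
+ * so a deliberate overread (e.g. a wide vector load) is not reported;
+ * only the poisoned tail of the range, if any, is toggled */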
+#define CLIB_MEM_OVERFLOW(f, src, n) \
+ ({ \
+ typeof (f) clib_mem_overflow_ret__; \
+ const void *clib_mem_overflow_src__ = (src); \
+ size_t clib_mem_overflow_n__ = (n); \
+    const void *clib_mem_overflow_start__ = \
+      __asan_region_is_poisoned ((void *) clib_mem_overflow_src__, \
+                                 clib_mem_overflow_n__); \
+    clib_mem_overflow_n__ -= \
+      (size_t) (clib_mem_overflow_start__ - clib_mem_overflow_src__); \
+ if (clib_mem_overflow_start__) \
+ CLIB_MEM_UNPOISON(clib_mem_overflow_start__, clib_mem_overflow_n__); \
+ clib_mem_overflow_ret__ = f; \
+ if (clib_mem_overflow_start__) \
+ CLIB_MEM_POISON(clib_mem_overflow_start__, clib_mem_overflow_n__); \
+ clib_mem_overflow_ret__; \
+ })
+
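+/* wrapper for a whole-object load: f is the load operation and the
+ * unpoisoned span is sizeof(f(src)) bytes, e.g.
+ * CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, p) */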
+#define CLIB_MEM_OVERFLOW_LOAD(f, src) \
+ ({ \
+ typeof(src) clib_mem_overflow_load_src__ = (src); \
+    CLIB_MEM_OVERFLOW (f (clib_mem_overflow_load_src__), \
+                       clib_mem_overflow_load_src__, \
+                       sizeof (typeof (f (clib_mem_overflow_load_src__)))); \
+ })
+
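+/* keep poisoning consistent when an object's length changes: poison the
+ * tail when shrinking, unpoison it when growing */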
+static_always_inline void
+CLIB_MEM_POISON_LEN (void *src, size_t oldlen, size_t newlen)
+{
+ if (oldlen > newlen)
+ CLIB_MEM_POISON (src + newlen, oldlen - newlen);
+ else if (newlen > oldlen)
+ CLIB_MEM_UNPOISON (src + oldlen, newlen - oldlen);
+}
+
+#else /* CLIB_SANITIZE_ADDR */
+
+#define CLIB_NOSANITIZE_ADDR
+#define CLIB_MEM_POISON(a, s) (void)(a)
+#define CLIB_MEM_UNPOISON(a, s) (void)(a)
+#define CLIB_MEM_OVERFLOW(a, b, c) a
+#define CLIB_MEM_OVERFLOW_LOAD(f, src) f(src)
+#define CLIB_MEM_POISON_LEN(a, b, c)
+
+#endif /* CLIB_SANITIZE_ADDR */
+
+#endif /* _included_clib_sanitizer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
{
new = clib_mem_alloc_aligned_at_offset (data_bytes, data_align, header_bytes, 1 /* yes, call os_out_of_memory */
);
- data_bytes = clib_mem_size (new);
- clib_memset (new, 0, data_bytes);
+ new_alloc_bytes = clib_mem_size (new);
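+  /* the slack between the requested size and the actual allocation stays
+     poisoned; unpoison it only around the zeroing memset */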
+ CLIB_MEM_UNPOISON (new + data_bytes, new_alloc_bytes - data_bytes);
+ clib_memset (new, 0, new_alloc_bytes);
+ CLIB_MEM_POISON (new + data_bytes, new_alloc_bytes - data_bytes);
v = new + header_bytes;
_vec_len (v) = length_increment;
return v;
/* Need to resize? */
if (data_bytes <= old_alloc_bytes)
- return v;
+ {
+ CLIB_MEM_UNPOISON (v, data_bytes);
+ return v;
+ }
new_alloc_bytes = (old_alloc_bytes * 3) / 2;
if (new_alloc_bytes < data_bytes)
("vec_resize fails, length increment %d, data bytes %d, alignment %d",
length_increment, data_bytes, data_align);
+ CLIB_MEM_UNPOISON (old, old_alloc_bytes);
clib_memcpy_fast (new, old, old_alloc_bytes);
clib_mem_free (old);
v = new;
/* Zero new memory. */
+ CLIB_MEM_UNPOISON (new + data_bytes, new_alloc_bytes - data_bytes);
memset (v + old_alloc_bytes, 0, new_alloc_bytes - old_alloc_bytes);
+ CLIB_MEM_POISON (new + data_bytes, new_alloc_bytes - data_bytes);
return v + header_bytes;
}
/* Typically we'll not need to resize. */
if (new_data_bytes <= clib_mem_size (p))
{
+ CLIB_MEM_UNPOISON (v, data_bytes);
vh->len += length_increment;
return v;
}
if (_v(n) > 0) \
clib_memset ((V) + _v(l) - _v(n), 0, _v(n) * sizeof ((V)[0])); \
_vec_len (V) -= _v(n); \
+ CLIB_MEM_POISON(vec_end(V), _v(n) * sizeof ((V)[0])); \
} while (0)
/** \brief Delete the element at index I
if (_vec_del_i < _vec_del_l) \
(v)[_vec_del_i] = (v)[_vec_del_l]; \
_vec_len (v) = _vec_del_l; \
+ CLIB_MEM_POISON(vec_end(v), sizeof ((v)[0])); \
} while (0)
/** \brief Append v2 after v1. Result in v1.
#define vec_set_len(v, l) do { \
ASSERT(v); \
ASSERT((l) <= vec_max_len(v)); \
+ CLIB_MEM_POISON_LEN((void *)(v), _vec_len(v) * sizeof((v)[0]), (l) * sizeof((v)[0])); \
_vec_len(v) = (l); \
} while (0)
#else /* __COVERITY__ */
+ASAN_OPTIONS?=verify_asan_link_order=0:detect_leaks=0:abort_on_error=1:unmap_shadow_on_exit=1:disable_coredump=0
+export ASAN_OPTIONS
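+# verify_asan_link_order=0 skips the runtime link-order check (the tests
+# preload/dlopen() shared libraries); detect_leaks=0 disables LeakSanitizer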
+
.PHONY: verify-test-dir
FAILED_DIR=/tmp/vpp-failed-unittests/