index_t dpoi_index;
} __attribute__ ((aligned(sizeof(u64)))) dpo_id_t;
-_Static_assert(sizeof(dpo_id_t) <= sizeof(u64),
- "DPO ID is greater than sizeof u64 "
- "atomic updates need to be revisited");
+STATIC_ASSERT(sizeof(dpo_id_t) <= sizeof(u64),
+ "DPO ID is greater than sizeof u64 "
+ "atomic updates need to be revisited");
/**
* @brief An initialiser for DPos declared on the stack.
dpo_id_t lb_buckets_inline[LB_NUM_INLINE_BUCKETS];
} load_balance_t;
-_Static_assert(sizeof(load_balance_t) <= CLIB_CACHE_LINE_BYTES,
- "A load_balance object size exceeds one cachline");
+STATIC_ASSERT(sizeof(load_balance_t) <= CLIB_CACHE_LINE_BYTES,
+ "A load_balance object size exceeds one cacheline");
/**
* Flags controlling load-balance formatting/display
FIB_SOURCE_LAST = FIB_SOURCE_DEFAULT_ROUTE,
} __attribute__ ((packed)) fib_source_t;
-_Static_assert (sizeof(fib_source_t) == 1,
- "FIB too many sources");
+STATIC_ASSERT (sizeof(fib_source_t) == 1,
+ "FIB too many sources");
/**
* The maximum number of sources
* Keep the size of the flags field to 2 bytes, so it
* can be placed next to the 2 bytes reference count
*/
-_Static_assert (sizeof(fib_entry_src_flag_t) <= 2,
- "FIB entry flags field size too big");
+STATIC_ASSERT (sizeof(fib_entry_src_flag_t) <= 2,
+ "FIB entry flags field size too big");
/**
* Information related to the source of a FIB entry
FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE = (1 << FIB_NODE_BW_REASON_ADJ_UPDATE),
} __attribute__ ((packed)) fib_node_bw_reason_flag_t;
-_Static_assert(sizeof(fib_node_bw_reason_flag_t) < 2,
- "BW Reason enum < 2 byte. Consequences for cover_upd_res_t");
+STATIC_ASSERT(sizeof(fib_node_bw_reason_flag_t) < 2,
+ "BW Reason enum < 2 byte. Consequences for cover_upd_res_t");
/**
* Forward eclarations
};
} fib_prefix_t;
-_Static_assert(STRUCT_OFFSET_OF(fib_prefix_t, fp_addr) == 4,
- "FIB Prefix's address is 4 byte aligned.");
+STATIC_ASSERT(STRUCT_OFFSET_OF(fib_prefix_t, fp_addr) == 4,
+ "FIB Prefix's address is 4 byte aligned.");
/**
* \brief Compare two prefixes for equality
- 1 * sizeof (i32)];
} ip4_fib_mtrie_ply_t;
-_Static_assert(0 == sizeof(ip4_fib_mtrie_ply_t) % CLIB_CACHE_LINE_BYTES,
- "IP4 Mtrie ply cache line");
+STATIC_ASSERT(0 == sizeof(ip4_fib_mtrie_ply_t) % CLIB_CACHE_LINE_BYTES,
+ "IP4 Mtrie ply cache line");
typedef struct {
/* Pool of plies. Index zero is root ply. */
fib_node_t ia_node;
} ip_adjacency_t;
-_Static_assert((STRUCT_OFFSET_OF(ip_adjacency_t, cacheline0) == 0),
- "IP adjacency cachline 0 is not offset");
-_Static_assert((STRUCT_OFFSET_OF(ip_adjacency_t, cacheline1) ==
- CLIB_CACHE_LINE_BYTES),
- "IP adjacency cachline 1 is more than one cachline size offset");
+STATIC_ASSERT((STRUCT_OFFSET_OF(ip_adjacency_t, cacheline0) == 0),
+ "IP adjacency cacheline 0 is not offset");
+STATIC_ASSERT((STRUCT_OFFSET_OF(ip_adjacency_t, cacheline1) ==
+ CLIB_CACHE_LINE_BYTES),
+ "IP adjacency cacheline 1 is more than one cacheline size offset");
/* An all zeros address */
extern const ip46_address_t zero_addr;
u8 ip4_prefix_len;
} map_domain_t;
-_Static_assert ((sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES),
- "MAP domain fits in one cacheline");
+STATIC_ASSERT ((sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES),
+ "MAP domain fits in one cacheline");
#define MAP_REASS_INDEX_NONE ((u16)0xffff)
} \
} while (0)
+#if defined(__clang__)
+/*
+ * clang advertises C11 _Static_assert support via __has_feature; only
+ * degrade to a no-op on ancient clang that genuinely lacks it, so the
+ * size/layout checks above are not silently disabled on modern builds.
+ */
+#if __has_feature(c_static_assert)
+#define STATIC_ASSERT(truth,...) _Static_assert(truth, __VA_ARGS__)
+#else
+#define STATIC_ASSERT(truth,...)
+#endif
+#else
#define STATIC_ASSERT(truth,...) _Static_assert(truth, __VA_ARGS__)
+#endif
/* Assert without allocating memory. */
#define ASSERT_AND_PANIC(truth) \