*/
typedef struct
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
u8 tag[64];
u32 count;
acl_rule_t *rules;
typedef struct
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
u8 tag[64];
u32 count;
macip_acl_rule_t *rules;
*/
typedef struct
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
fa_5tuple_t mask;
u32 refcount;
} ace_mask_type_entry_t;
typedef struct
{
+ /* Required for vec_validate_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
struct rte_ring *swq;
u64 hqos_field0_slabmask;
typedef struct
{
+ /* Required for vec_validate_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
struct rte_ring **swq;
struct rte_mbuf **pkts_enq;
struct rte_mbuf **pkts_deq;
typedef struct
{
+ /* Required for vec_validate_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
struct rte_mempool *crypto_op;
struct rte_mempool *session_h;
struct rte_mempool **session_drv;
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/* Rewrite string */
u8 *rewrite;
*/
typedef struct ioam_analyser_data_t_
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
u8 is_free;
u8 pad[3];
typedef struct ioam_export_buffer
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Allocated buffer */
u32 buffer_index;
u64 touched_at;
*/
typedef struct
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
ip6_address_t src_address;
ip6_address_t dst_address;
u16 src_port;
*/
typedef struct
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 pool_id;
u32 pool_index;
ip6_address_t src_address;
CLIB_CACHE_LINE_BYTES);
vec_validate_aligned (cm->ts_stats, no_of_threads - 1,
CLIB_CACHE_LINE_BYTES);
- vec_validate_aligned (cm->timer_wheels, no_of_threads - 1,
- CLIB_CACHE_LINE_BYTES);
+ vec_validate (cm->timer_wheels, no_of_threads - 1);
cm->lookup_table_nbuckets = IOAM_CACHE_TABLE_DEFAULT_HASH_NUM_BUCKETS;
cm->lookup_table_nbuckets = 1 << max_log2 (cm->lookup_table_nbuckets);
cm->lookup_table_size = IOAM_CACHE_TABLE_DEFAULT_HASH_MEMORY_SIZE;
*/
typedef struct
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/** Local source IPv4/6 address to be used. */
ip46_address_t src;
typedef struct
{
+ /* Required for vec_validate_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
void *shm;
memif_region_size_t region_size;
int fd;
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/* pppoe session_id in HOST byte order */
u16 session_id;
typedef struct
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
int epoll_fd;
struct epoll_event *epoll_events;
int n_epoll_fds;
* The BIER dispositon object
*/
typedef struct bier_disp_entry_t_ {
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The DPO contirubted from the per-payload protocol parents
* on cachline 1.
*/
typedef struct bier_disp_table_t_
{
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* number of locks on the table
*/
* for the next lookup
*/
typedef struct bier_fmask_t_ {
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The BIER fmask is a child of a FIB entry in the FIB graph.
*/
* The BIER imposition object
*/
typedef struct bier_imp_t_ {
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The DPO contirubted from the resolving BIER table.
* One per-IP protocol. This allows us to share a BIER imposition
* bit-position. Since this is smal <4096, the table is a flat arry
*/
typedef struct bier_table_t_ {
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* Save the MPLS local label associated with the table
*/
*/
index_t *bt_entries;
- /**
- * Everything before this declaration is unused in the switch path
- */
- CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
-
/**
* The identity/key or the table. we need the hdr_len in the data-path
*/
*/
typedef struct classify_dpo_t
{
+ /**
+ * required for pool_get_aligned.
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
dpo_proto_t cd_proto;
u32 cd_table_index;
typedef struct l3_proxy_dpo_t_
{
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The Software interface index on which traffic is l3_proxyd
*/
* - per-route counters
*/
typedef struct load_balance_t_ {
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* number of buckets in the load-balance. always a power of 2.
*/
/**
*/
typedef struct load_balance_map_t_ {
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The buckets of the map that provide the index to index translation.
* In the first cacheline.
*/
typedef struct lookup_dpo_t
{
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The FIB, or interface from which to get a FIB, in which to perform
* the next lookup;
*/
typedef struct mpls_disp_dpo_t
{
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* Next DPO in the graph
*/
*/
typedef struct mpls_label_dpo_t
{
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The MPLS label header to impose. Outer most label first.
* Each DPO will occupy one cache line, stuff that many labels in.
typedef struct receive_dpo_t_
{
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* The Software interface index on which traffic is received
*/
* - per-route counters
*/
typedef struct replicate_t_ {
+ /**
+ * required for pool_get_aligned.
+ * members used in the switch path come first!
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* number of buckets in the replicate.
*/
ip4_fib_t *v4_fib;
void *old_heap;
- pool_get_aligned(ip4_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES);
+ pool_get(ip4_main.fibs, fib_table);
memset(fib_table, 0, sizeof(*fib_table));
old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
u32 fwd_classify_table_index;
u32 rev_classify_table_index;
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
} ip4_fib_t;
extern fib_node_index_t ip4_fib_table_lookup(const ip4_fib_t *fib,
fib_table_t *fib_table;
ip6_fib_t *v6_fib;
- pool_get_aligned(ip6_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES);
+ pool_get(ip6_main.fibs, fib_table);
pool_get_aligned(ip6_main.v6_fibs, v6_fib, CLIB_CACHE_LINE_BYTES);
memset(fib_table, 0, sizeof(*fib_table));
mpls_fib_t *mf;
int i;
- pool_get_aligned(mpls_main.fibs, fib_table, CLIB_CACHE_LINE_BYTES);
+ pool_get(mpls_main.fibs, fib_table);
pool_get_aligned(mpls_main.mpls_fibs, mf, CLIB_CACHE_LINE_BYTES);
ASSERT((fib_table - mpls_main.fibs) ==
typedef struct mpls_fib_t_
{
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* A hash table of entries. 21 bit key
* Hash table for reduced memory footprint
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/* Rewrite string. $$$$ embed vnet_rewrite header */
u8 *rewrite;
*/
typedef struct
{
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/**
* Linkage into the FIB object graph
*/
typedef struct
{
+ /* required for pool_get_aligned. */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/* Table ID (hash key) for this FIB. */
u32 table_id;
*/
typedef struct protocol_port_range_dpo_t_
{
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/**
* The number of blocks from the 'block' array below
* that have rnages configured. We keep this count so that in the data-path
*/
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
ipip_mode_t mode;
ipip_transport_t transport;
ipip_tunnel_key_t *key;
*/
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
ip4_address_t tunnel_src; /**< tunnel IPv4 src address */
ip4_address_t tunnel_dst; /**< tunnel IPv4 dst address */
u32 local_sa; /**< local IPSec SA index */
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u32 input_sa_index;
u32 output_sa_index;
u32 hw_if_index;
*/
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
ip6_address_t ip6_src;
ip6_address_t ip6_prefix;
ip6_address_t *rules;
*/
typedef struct mfib_itf_t_
{
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* @brief Forwarding Flags on the entry - checked in the data-path
*/
*/
typedef struct mfib_table_t_
{
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/**
* A union of the protocol specific FIBs that provide the
* underlying LPM mechanism.
*/
typedef struct qos_egress_map_t_
{
- /**
- * The array of output mapped values;
- * output = eq_qos[input-source][input-value]
- */
+ /**
+ * Required for pool_get_aligned
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /**
+ * The array of output mapped values;
+ * output = eq_qos[input-source][input-value]
+ */
qos_bits_t qem_output[QOS_N_SOURCES][256];
} qos_egress_map_t;
typedef struct _sctp_connection
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
sctp_sub_connection_t sub_conn[MAX_SCTP_CONNECTIONS]; /**< Common transport data. First! */
sctp_user_configuration_t conn_config; /**< Allows tuning of some SCTP behaviors */
* byproduct of fib table ids not necessarily being the same for
* identical fib idices of v4 and v6 fib protos */
u8 active_fib_proto;
+ /* Required for pool_get_aligned(...) */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
} session_table_t;
#define SESSION_TABLE_INVALID_INDEX ((u32)~0)
typedef struct
{
+ /** Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
transport_connection_t connection; /**< must be first */
clib_spinlock_t rx_lock; /**< rx fifo lock */
u8 is_connected; /**< connected mode */
*/
typedef struct
{
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
/** Rewrite string. $$$$ embed vnet_rewrite header */
u8 *rewrite;
}) vxlan6_tunnel_key_t;
typedef struct {
+ /* Required for pool_get_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+
/* FIB DPO for IP forwarding of VXLAN encap packet */
dpo_id_t next_dpo;
ip_csum_t sum;
u16 csum;
- vec_validate_aligned (t->template, 0, CLIB_CACHE_LINE_BYTES);
+ vec_validate (t->template, 0);
h = t->template;
memset (h, 0, sizeof (*h));
pool_header_t * _pool_var (p) = pool_header (P); \
uword _pool_var (l); \
\
+ STATIC_ASSERT(A==0 || ((A % sizeof(P[0]))==0) || ((sizeof(P[0]) % A) == 0), \
+ "Pool aligned alloc of incorrectly sized object"); \
_pool_var (l) = 0; \
if (P) \
_pool_var (l) = vec_len (_pool_var (p)->free_indices); \
#define vec_validate_ha(V,I,H,A) \
do { \
+ STATIC_ASSERT(A==0 || ((A % sizeof(V[0]))==0) || ((sizeof(V[0]) % A) == 0),\
+ "vector validate aligned on incorrectly sized object"); \
word _v(i) = (I); \
word _v(l) = vec_len (V); \
if (_v(i) >= _v(l)) \