*/ \
p->n_non_empty_leafs = (prefix_len > ply_base_len ? \
ARRAY_LEN (p->leaves) : 0); \
- memset (p->dst_address_bits_of_leaves, prefix_len, \
+ clib_memset (p->dst_address_bits_of_leaves, prefix_len, \
sizeof (p->dst_address_bits_of_leaves)); \
p->dst_address_bits_base = ply_base_len; \
\
ply_16_init (ip4_fib_mtrie_16_ply_t * p,
ip4_fib_mtrie_leaf_t init, uword prefix_len)
{
- memset (p->dst_address_bits_of_leaves, prefix_len,
- sizeof (p->dst_address_bits_of_leaves));
+ clib_memset (p->dst_address_bits_of_leaves, prefix_len,
+ sizeof (p->dst_address_bits_of_leaves));
PLY_INIT_LEAVES (p);
}
void
ip4_mtrie_free (ip4_fib_mtrie_t * m)
{
- /* the root ply is embedded so the is nothing to do,
+ /* the root ply is embedded so there is nothing to do,
* the assumption being that the IP4 FIB table has emptied the trie
* before deletion.
*/
else if (new_leaf_dst_address_bits >=
ply->dst_address_bits_of_leaves[i])
{
- __sync_val_compare_and_swap (&ply->leaves[i], old_leaf, new_leaf);
- ASSERT (ply->leaves[i] == new_leaf);
+ clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
}
old_ply->dst_address_bits_of_leaves[i] =
a->dst_address_length;
- __sync_val_compare_and_swap (&old_ply->leaves[i], old_leaf,
- new_leaf);
- ASSERT (old_ply->leaves[i] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);
old_ply->n_non_empty_leafs +=
ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
/* Refetch since ply_create may move pool. */
old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
- __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
- new_leaf);
- ASSERT (old_ply->leaves[dst_byte] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
old_ply->n_non_empty_leafs +=
* the new one */
old_ply->dst_address_bits_of_leaves[slot] =
a->dst_address_length;
- __sync_val_compare_and_swap (&old_ply->leaves[slot],
- old_leaf, new_leaf);
- ASSERT (old_ply->leaves[slot] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[slot], new_leaf);
}
else
{
ply_base_len);
new_ply = get_next_ply_for_leaf (m, new_leaf);
- __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
- new_leaf);
- ASSERT (old_ply->leaves[dst_byte] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
}
else
old_ply->n_non_empty_leafs -=
ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
- old_ply->leaves[i] =
- ip4_fib_mtrie_leaf_set_adj_index (a->cover_adj_index);
- old_ply->dst_address_bits_of_leaves[i] =
- clib_max (old_ply->dst_address_bits_base,
- a->cover_address_length);
+ clib_atomic_store_rel_n (&old_ply->leaves[i],
+ ip4_fib_mtrie_leaf_set_adj_index
+ (a->cover_adj_index));
+ old_ply->dst_address_bits_of_leaves[i] = a->cover_address_length;
old_ply->n_non_empty_leafs +=
ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
|| (!old_leaf_is_terminal
&& unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf), 2)))
{
- old_ply->leaves[slot] =
- ip4_fib_mtrie_leaf_set_adj_index (a->cover_adj_index);
+ clib_atomic_store_rel_n (&old_ply->leaves[slot],
+ ip4_fib_mtrie_leaf_set_adj_index
+ (a->cover_adj_index));
old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
}
}
if (0 == im->mtrie_heap_size)
im->mtrie_heap_size = IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE;
-#if USE_DLMALLOC == 0
- im->mtrie_mheap = mheap_alloc (0, im->mtrie_heap_size);
-#else
im->mtrie_mheap = create_mspace (im->mtrie_heap_size, 1 /* locked */ );
-#endif
/* Burn one ply so index 0 is taken */
old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
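
Note on the pattern above (editorial context, not part of the patch): the
__sync_val_compare_and_swap + ASSERT pairs are replaced with plain release
stores, presumably because the mtrie leaves are written from the update path
while the forwarding path reads them without locks, so what matters is that a
fully formed leaf becomes visible, not that the previous value is re-checked.
A minimal sketch of that publish/read pairing, assuming clib_atomic_store_rel_n
expands to __atomic_store_n with __ATOMIC_RELEASE as in vppinfra/atomics.h
(illustration only, not VPP code):

    #include <stdint.h>

    typedef uint32_t leaf_t;

    /* Writer side: publish a fully formed leaf. The release ordering makes
     * the writes that built the leaf (e.g. filling a freshly created ply)
     * visible before the store that makes the leaf reachable. */
    static inline void
    publish_leaf (leaf_t * slot, leaf_t new_leaf)
    {
      __atomic_store_n (slot, new_leaf, __ATOMIC_RELEASE);
    }

    /* Reader side: an acquire load pairs with the release store, so a
     * lock-free reader never observes a leaf that points at memory whose
     * initialisation it cannot yet see. */
    static inline leaf_t
    load_leaf (leaf_t * slot)
    {
      return __atomic_load_n (slot, __ATOMIC_ACQUIRE);
    }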