X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fip%2Fip4_mtrie.c;h=b5d0a890a7c49043747fb558837c7578f785cfa0;hb=6e334e3e77bb156a9317a37500077a218a04f7a3;hp=ef6a033be7d25f4ce162c3726c29c73065e5c6dd;hpb=33af8c1ed89f15cf0601ee891e9603bef16f2c93;p=vpp.git

diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c
old mode 100755
new mode 100644
index ef6a033be7d..b5d0a890a7c
--- a/src/vnet/ip/ip4_mtrie.c
+++ b/src/vnet/ip/ip4_mtrie.c
@@ -254,8 +254,7 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
       else if (new_leaf_dst_address_bits >=
 	       ply->dst_address_bits_of_leaves[i])
 	{
-	  clib_atomic_cmp_and_swap (&ply->leaves[i], old_leaf, new_leaf);
-	  ASSERT (ply->leaves[i] == new_leaf);
+	  clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
 	  ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
 	  ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
 	}
@@ -319,9 +318,7 @@ set_leaf (ip4_fib_mtrie_t * m,
 	      old_ply->dst_address_bits_of_leaves[i] =
 		a->dst_address_length;
-	      clib_atomic_cmp_and_swap (&old_ply->leaves[i], old_leaf,
-					new_leaf);
-	      ASSERT (old_ply->leaves[i] == new_leaf);
+	      clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);
 
 	      old_ply->n_non_empty_leafs +=
 		ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
 
@@ -378,9 +375,7 @@ set_leaf (ip4_fib_mtrie_t * m,
 	      /* Refetch since ply_create may move pool. */
 	      old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
 
-	      clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
-					new_leaf);
-	      ASSERT (old_ply->leaves[dst_byte] == new_leaf);
+	      clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
 	      old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
 
 	      old_ply->n_non_empty_leafs +=
@@ -451,9 +446,7 @@ set_root_leaf (ip4_fib_mtrie_t * m,
 	       * the new one */
 	      old_ply->dst_address_bits_of_leaves[slot] =
 		a->dst_address_length;
-	      clib_atomic_cmp_and_swap (&old_ply->leaves[slot],
-					old_leaf, new_leaf);
-	      ASSERT (old_ply->leaves[slot] == new_leaf);
+	      clib_atomic_store_rel_n (&old_ply->leaves[slot], new_leaf);
 	    }
 	  else
 	    {
@@ -498,9 +491,7 @@ set_root_leaf (ip4_fib_mtrie_t * m,
 						  ply_base_len);
 	      new_ply = get_next_ply_for_leaf (m, new_leaf);
 
-	      clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
-					new_leaf);
-	      ASSERT (old_ply->leaves[dst_byte] == new_leaf);
+	      clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
 	      old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
 	    }
 	  else
@@ -549,8 +540,9 @@ unset_leaf (ip4_fib_mtrie_t * m,
 	  old_ply->n_non_empty_leafs -=
 	    ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
 
-	  old_ply->leaves[i] =
-	    ip4_fib_mtrie_leaf_set_adj_index (a->cover_adj_index);
+	  clib_atomic_store_rel_n (&old_ply->leaves[i],
+				   ip4_fib_mtrie_leaf_set_adj_index
+				   (a->cover_adj_index));
 	  old_ply->dst_address_bits_of_leaves[i] = a->cover_address_length;
 
 	  old_ply->n_non_empty_leafs +=
@@ -620,8 +612,9 @@ unset_root_leaf (ip4_fib_mtrie_t * m,
       || (!old_leaf_is_terminal
 	  && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf), 2)))
 	{
-	  old_ply->leaves[slot] =
-	    ip4_fib_mtrie_leaf_set_adj_index (a->cover_adj_index);
+	  clib_atomic_store_rel_n (&old_ply->leaves[slot],
+				   ip4_fib_mtrie_leaf_set_adj_index
+				   (a->cover_adj_index));
 	  old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
 	}
 
@@ -797,6 +790,9 @@ format_ip4_fib_mtrie (u8 * s, va_list * va)
 
 /** Default heap size for the IPv4 mtries */
 #define IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE (32<<20)
+#ifndef MAP_HUGE_SHIFT
+#define MAP_HUGE_SHIFT 26
+#endif
 
 static clib_error_t *
 ip4_mtrie_module_init (vlib_main_t * vm)
@@ -806,13 +802,46 @@ ip4_mtrie_module_init (vlib_main_t * vm)
   clib_error_t *error = NULL;
   uword *old_heap;
 
-  if (0 == im->mtrie_heap_size)
+  if (im->mtrie_heap_size == 0)
     im->mtrie_heap_size = IP4_FIB_DEFAULT_MTRIE_HEAP_SIZE;
-#if USE_DLMALLOC == 0
-  im->mtrie_mheap = mheap_alloc (0, im->mtrie_heap_size);
-#else
-  im->mtrie_mheap = create_mspace (im->mtrie_heap_size, 1 /* locked */ );
-#endif
+
+again:
+  if (im->mtrie_hugetlb)
+    {
+      void *rv;
+      int mmap_flags, mmap_flags_huge;
+      uword htlb_pagesize = clib_mem_get_default_hugepage_size ();
+      if (htlb_pagesize == 0)
+	{
+	  clib_warning ("WARNING: htlb pagesize == 0");
+	  im->mtrie_hugetlb = 0;
+	  goto again;
+	}
+      /* Round the allocation request to an even number of huge pages */
+      im->mtrie_heap_size = (im->mtrie_heap_size + (htlb_pagesize - 1)) &
+	~(htlb_pagesize - 1);
+      mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
+      mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
+			 min_log2 (htlb_pagesize) << MAP_HUGE_SHIFT);
+      rv = mmap (0, im->mtrie_heap_size,
+		 PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);
+      if (rv == MAP_FAILED)
+	{
+	  /* Failure when running as root should be logged... */
+	  if (geteuid () == 0)
+	    clib_warning ("ip4 mtrie htlb map failed: not enough pages?");
+	  im->mtrie_hugetlb = 0;
+	  goto again;
+	}
+      if (mlock (rv, im->mtrie_heap_size))
+	clib_warning ("WARNING: couldn't lock mtrie heap at %llx", rv);
+      im->mtrie_mheap = create_mspace_with_base (rv, im->mtrie_heap_size,
+						 1 /* locked */ );
+    }
+  else
+    {
+      im->mtrie_mheap = create_mspace (im->mtrie_heap_size, 1 /* locked */ );
+    }
 
   /* Burn one ply so index 0 is taken */
   old_heap = clib_mem_set_heap (ip4_main.mtrie_mheap);
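
Note on the atomic-store hunks: the switch from clib_atomic_cmp_and_swap() to
clib_atomic_store_rel_n() relies on the mtrie having exactly one writer
(updates run under the FIB table lock) while data-plane readers walk the trie
locklessly. A minimal sketch of the publication pattern follows, assuming
clib_atomic_store_rel_n() wraps the __atomic_store_n() builtin with
__ATOMIC_RELEASE ordering (as vppinfra defines it); the publish_leaf() helper
and the simplified leaf type are illustrative only, not part of the patch.

#include <stdint.h>

typedef uint32_t ip4_fib_mtrie_leaf_t;

static void
publish_leaf (ip4_fib_mtrie_leaf_t * slot, ip4_fib_mtrie_leaf_t new_leaf)
{
  /* Release ordering: no prior store (e.g. filling in a freshly created
     ply) may be reordered after this store, so a reader that loads the
     leaf sees a fully initialized ply behind it. */
  __atomic_store_n (slot, new_leaf, __ATOMIC_RELEASE);
}

With a single writer the compare-and-swap, and the ASSERT that it succeeded,
bought nothing over a plain store; the release ordering is what the lockless
readers actually depend on.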
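Note on the ip4_mtrie_module_init() hunk: the new hugetlb path follows a
common Linux pattern: try an anonymous MAP_HUGETLB mapping with the page size
encoded via MAP_HUGE_SHIFT (hence the fallback #define for older libc
headers), and retry with ordinary pages when the kernel has no huge pages
reserved, which is what the goto again with im->mtrie_hugetlb cleared does.
A standalone sketch of that pattern, using plain libc only; alloc_heap() and
its parameters are hypothetical, and the real code hands the region to
create_mspace_with_base() rather than using it raw.

#define _GNU_SOURCE
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

static void *
alloc_heap (size_t size, size_t huge_pagesize, int huge_log2)
{
  void *rv;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;

  /* Round the request up to a whole number of huge pages, as the patch
     does (huge_pagesize must be a power of two). */
  size = (size + huge_pagesize - 1) & ~(huge_pagesize - 1);

  /* First attempt: anonymous huge-page mapping, locked into memory,
     page size encoded in the flags via MAP_HUGE_SHIFT. */
  rv = mmap (0, size, PROT_READ | PROT_WRITE,
	     flags | MAP_HUGETLB | MAP_LOCKED |
	     (huge_log2 << MAP_HUGE_SHIFT), -1, 0);
  if (rv != MAP_FAILED)
    return rv;

  /* Fallback: ordinary pages, matching the create_mspace() path above. */
  return mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
}

For 2 MB huge pages this would be called as alloc_heap (32 << 20, 2 << 20, 21);
the patch derives the log2 from clib_mem_get_default_hugepage_size () instead
of hard-coding it.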