From 6bb2db0ea860812d9c366935312e7849deca9c93 Mon Sep 17 00:00:00 2001
From: Neale Ranns
Date: Fri, 6 Aug 2021 12:24:14 +0000
Subject: [PATCH] ip: Rename the ip4 mtrie function to be 16_8_8 specific

Type: improvement

the existing mtrie functions use the existing 16_8_8 mtrie. Rename them
to make that explicit. Then we can add the 8_8_8_8 types and functions
alongside.

Signed-off-by: Neale Ranns
Change-Id: If5ce7a282d5d3742dc65bcd0523220235d9c510d
---
 src/vnet/fib/ip4_fib.c  |  12 +--
 src/vnet/fib/ip4_fib.h  |  76 +++++++-------
 src/vnet/ip/ip4_mtrie.c | 256 +++++++++++++++++++++++-------------------------
 src/vnet/ip/ip4_mtrie.h |  76 +++++++-------
 4 files changed, 202 insertions(+), 218 deletions(-)

diff --git a/src/vnet/fib/ip4_fib.c b/src/vnet/fib/ip4_fib.c
index a3010149a4d..e8ab653e53a 100644
--- a/src/vnet/fib/ip4_fib.c
+++ b/src/vnet/fib/ip4_fib.c
@@ -129,7 +129,7 @@ ip4_create_fib_with_table_id (u32 table_id,
 
     fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP4, src);
 
-    ip4_mtrie_init(&v4_fib->mtrie);
+    ip4_mtrie_16_init(&v4_fib->mtrie);
 
     /*
      * add the special entries into the new FIB
@@ -195,7 +195,7 @@ ip4_fib_table_destroy (u32 fib_index)
     }
     vec_free(fib_table->ft_src_route_counts);
 
-    ip4_mtrie_free(&v4_fib->mtrie);
+    ip4_mtrie_16_free(&v4_fib->mtrie);
 
     pool_put(ip4_main.v4_fibs, v4_fib);
     pool_put(ip4_main.fibs, fib_table);
@@ -377,7 +377,7 @@ ip4_fib_table_fwding_dpo_update (ip4_fib_t *fib,
                                  u32 len,
                                  const dpo_id_t *dpo)
 {
-    ip4_fib_mtrie_route_add(&fib->mtrie, addr, len, dpo->dpoi_index);
+    ip4_mtrie_16_route_add(&fib->mtrie, addr, len, dpo->dpoi_index);
 }
 
 void
@@ -398,7 +398,7 @@ ip4_fib_table_fwding_dpo_remove (ip4_fib_t *fib,
     cover_prefix = fib_entry_get_prefix(cover_index);
     cover_dpo = fib_entry_contribute_ip_forwarding(cover_index);
 
-    ip4_fib_mtrie_route_del(&fib->mtrie,
+    ip4_mtrie_16_route_del(&fib->mtrie,
                             addr, len, dpo->dpoi_index,
                             cover_prefix->fp_len,
                             cover_dpo->dpoi_index);
@@ -632,7 +632,7 @@ ip4_show_fib (vlib_main_t * vm,
           uword mtrie_size, hash_size;
 
-          mtrie_size = ip4_fib_mtrie_memory_usage(&fib->mtrie);
+          mtrie_size = ip4_mtrie_16_memory_usage(&fib->mtrie);
           hash_size = 0;
 
           for (i = 0; i < ARRAY_LEN (fib->fib_entry_by_dst_address); i++)
@@ -679,7 +679,7 @@ ip4_show_fib (vlib_main_t * vm,
       /* Show summary? */
       if (mtrie)
         {
-          vlib_cli_output (vm, "%U", format_ip4_fib_mtrie, &fib->mtrie, verbose);
+          vlib_cli_output (vm, "%U", format_ip4_mtrie_16, &fib->mtrie, verbose);
           continue;
         }
       if (! verbose)
diff --git a/src/vnet/fib/ip4_fib.h b/src/vnet/fib/ip4_fib.h
index dc41eb8163f..1f0c168eff3 100644
--- a/src/vnet/fib/ip4_fib.h
+++ b/src/vnet/fib/ip4_fib.h
@@ -45,7 +45,7 @@ typedef struct ip4_fib_t_
    * Mtrie for fast lookups. Hash is used to maintain overlapping prefixes.
    * First member so it's in the first cacheline.
    */
-  ip4_fib_mtrie_t mtrie;
+  ip4_mtrie_16_t mtrie;
 
   /* Hash table for each prefix length mapping. */
   uword *fib_entry_by_dst_address[33];
 
@@ -160,16 +160,16 @@ always_inline index_t
 ip4_fib_forwarding_lookup (u32 fib_index,
                            const ip4_address_t * addr)
 {
-    ip4_fib_mtrie_leaf_t leaf;
-    ip4_fib_mtrie_t * mtrie;
+    ip4_mtrie_leaf_t leaf;
+    ip4_mtrie_16_t * mtrie;
 
     mtrie = &ip4_fib_get(fib_index)->mtrie;
 
-    leaf = ip4_fib_mtrie_lookup_step_one (mtrie, addr);
-    leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 2);
-    leaf = ip4_fib_mtrie_lookup_step (mtrie, leaf, addr, 3);
+    leaf = ip4_mtrie_16_lookup_step_one (mtrie, addr);
+    leaf = ip4_mtrie_16_lookup_step (mtrie, leaf, addr, 2);
+    leaf = ip4_mtrie_16_lookup_step (mtrie, leaf, addr, 3);
 
-    return (ip4_fib_mtrie_leaf_get_adj_index(leaf));
+    return (ip4_mtrie_leaf_get_adj_index(leaf));
 }
 
 static_always_inline void
@@ -180,21 +180,21 @@ ip4_fib_forwarding_lookup_x2 (u32 fib_index0,
                               index_t *lb0,
                               index_t *lb1)
 {
-    ip4_fib_mtrie_leaf_t leaf[2];
-    ip4_fib_mtrie_t * mtrie[2];
+    ip4_mtrie_leaf_t leaf[2];
+    ip4_mtrie_16_t * mtrie[2];
 
     mtrie[0] = &ip4_fib_get(fib_index0)->mtrie;
     mtrie[1] = &ip4_fib_get(fib_index1)->mtrie;
 
-    leaf[0] = ip4_fib_mtrie_lookup_step_one (mtrie[0], addr0);
-    leaf[1] = ip4_fib_mtrie_lookup_step_one (mtrie[1], addr1);
-    leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0], addr0, 2);
-    leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1], addr1, 2);
-    leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0], addr0, 3);
-    leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1], addr1, 3);
+    leaf[0] = ip4_mtrie_16_lookup_step_one (mtrie[0], addr0);
+    leaf[1] = ip4_mtrie_16_lookup_step_one (mtrie[1], addr1);
+    leaf[0] = ip4_mtrie_16_lookup_step (mtrie[0], leaf[0], addr0, 2);
+    leaf[1] = ip4_mtrie_16_lookup_step (mtrie[1], leaf[1], addr1, 2);
+    leaf[0] = ip4_mtrie_16_lookup_step (mtrie[0], leaf[0], addr0, 3);
+    leaf[1] = ip4_mtrie_16_lookup_step (mtrie[1], leaf[1], addr1, 3);
 
-    *lb0 = ip4_fib_mtrie_leaf_get_adj_index(leaf[0]);
-    *lb1 = ip4_fib_mtrie_leaf_get_adj_index(leaf[1]);
+    *lb0 = ip4_mtrie_leaf_get_adj_index(leaf[0]);
+    *lb1 = ip4_mtrie_leaf_get_adj_index(leaf[1]);
 }
 
 static_always_inline void
@@ -211,33 +211,33 @@ ip4_fib_forwarding_lookup_x4 (u32 fib_index0,
                               index_t *lb2,
                               index_t *lb3)
 {
-    ip4_fib_mtrie_leaf_t leaf[4];
-    ip4_fib_mtrie_t * mtrie[4];
+    ip4_mtrie_leaf_t leaf[4];
+    ip4_mtrie_16_t * mtrie[4];
 
     mtrie[0] = &ip4_fib_get(fib_index0)->mtrie;
     mtrie[1] = &ip4_fib_get(fib_index1)->mtrie;
     mtrie[2] = &ip4_fib_get(fib_index2)->mtrie;
     mtrie[3] = &ip4_fib_get(fib_index3)->mtrie;
 
-    leaf[0] = ip4_fib_mtrie_lookup_step_one (mtrie[0], addr0);
-    leaf[1] = ip4_fib_mtrie_lookup_step_one (mtrie[1], addr1);
-    leaf[2] = ip4_fib_mtrie_lookup_step_one (mtrie[2], addr2);
-    leaf[3] = ip4_fib_mtrie_lookup_step_one (mtrie[3], addr3);
-
-    leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0], addr0, 2);
-    leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1], addr1, 2);
-    leaf[2] = ip4_fib_mtrie_lookup_step (mtrie[2], leaf[2], addr2, 2);
-    leaf[3] = ip4_fib_mtrie_lookup_step (mtrie[3], leaf[3], addr3, 2);
-
-    leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0], addr0, 3);
-    leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1], addr1, 3);
-    leaf[2] = ip4_fib_mtrie_lookup_step (mtrie[2], leaf[2], addr2, 3);
-    leaf[3] = ip4_fib_mtrie_lookup_step (mtrie[3], leaf[3], addr3, 3);
-
-    *lb0 = ip4_fib_mtrie_leaf_get_adj_index(leaf[0]);
-    *lb1 = ip4_fib_mtrie_leaf_get_adj_index(leaf[1]);
-    *lb2 = ip4_fib_mtrie_leaf_get_adj_index(leaf[2]);
-    *lb3 = ip4_fib_mtrie_leaf_get_adj_index(leaf[3]);
+    leaf[0] = ip4_mtrie_16_lookup_step_one (mtrie[0], addr0);
+    leaf[1] = ip4_mtrie_16_lookup_step_one (mtrie[1], addr1);
+    leaf[2] = ip4_mtrie_16_lookup_step_one (mtrie[2], addr2);
+    leaf[3] = ip4_mtrie_16_lookup_step_one (mtrie[3], addr3);
+
+    leaf[0] = ip4_mtrie_16_lookup_step (mtrie[0], leaf[0], addr0, 2);
+    leaf[1] = ip4_mtrie_16_lookup_step (mtrie[1], leaf[1], addr1, 2);
+    leaf[2] = ip4_mtrie_16_lookup_step (mtrie[2], leaf[2], addr2, 2);
+    leaf[3] = ip4_mtrie_16_lookup_step (mtrie[3], leaf[3], addr3, 2);
+
+    leaf[0] = ip4_mtrie_16_lookup_step (mtrie[0], leaf[0], addr0, 3);
+    leaf[1] = ip4_mtrie_16_lookup_step (mtrie[1], leaf[1], addr1, 3);
+    leaf[2] = ip4_mtrie_16_lookup_step (mtrie[2], leaf[2], addr2, 3);
+    leaf[3] = ip4_mtrie_16_lookup_step (mtrie[3], leaf[3], addr3, 3);
+
+    *lb0 = ip4_mtrie_leaf_get_adj_index(leaf[0]);
+    *lb1 = ip4_mtrie_leaf_get_adj_index(leaf[1]);
+    *lb2 = ip4_mtrie_leaf_get_adj_index(leaf[2]);
+    *lb3 = ip4_mtrie_leaf_get_adj_index(leaf[3]);
 }
 
 #endif
diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c
index 7bfcf986e6f..7eaac5917c4 100644
--- a/src/vnet/ip/ip4_mtrie.c
+++ b/src/vnet/ip/ip4_mtrie.c
@@ -45,10 +45,10 @@
 /**
  * Global pool of IPv4 8bit PLYs
  */
-ip4_fib_mtrie_8_ply_t *ip4_ply_pool;
+ip4_mtrie_8_ply_t *ip4_ply_pool;
 
 always_inline u32
-ip4_fib_mtrie_leaf_is_non_empty (ip4_fib_mtrie_8_ply_t * p, u8 dst_byte)
+ip4_mtrie_leaf_is_non_empty (ip4_mtrie_8_ply_t *p, u8 dst_byte)
 {
   /*
    * It's 'non-empty' if the length of the leaf stored is greater than the
@@ -60,34 +60,34 @@ ip4_fib_mtrie_leaf_is_non_empty (ip4_fib_mtrie_8_ply_t * p, u8 dst_byte)
   return (0);
 }
 
-always_inline ip4_fib_mtrie_leaf_t
-ip4_fib_mtrie_leaf_set_adj_index (u32 adj_index)
+always_inline ip4_mtrie_leaf_t
+ip4_mtrie_leaf_set_adj_index (u32 adj_index)
 {
-  ip4_fib_mtrie_leaf_t l;
+  ip4_mtrie_leaf_t l;
   l = 1 + 2 * adj_index;
-  ASSERT (ip4_fib_mtrie_leaf_get_adj_index (l) == adj_index);
+  ASSERT (ip4_mtrie_leaf_get_adj_index (l) == adj_index);
   return l;
 }
 
 always_inline u32
-ip4_fib_mtrie_leaf_is_next_ply (ip4_fib_mtrie_leaf_t n)
+ip4_mtrie_leaf_is_next_ply (ip4_mtrie_leaf_t n)
 {
   return (n & 1) == 0;
 }
 
 always_inline u32
-ip4_fib_mtrie_leaf_get_next_ply_index (ip4_fib_mtrie_leaf_t n)
+ip4_mtrie_leaf_get_next_ply_index (ip4_mtrie_leaf_t n)
 {
-  ASSERT (ip4_fib_mtrie_leaf_is_next_ply (n));
+  ASSERT (ip4_mtrie_leaf_is_next_ply (n));
   return n >> 1;
 }
 
-always_inline ip4_fib_mtrie_leaf_t
-ip4_fib_mtrie_leaf_set_next_ply_index (u32 i)
+always_inline ip4_mtrie_leaf_t
+ip4_mtrie_leaf_set_next_ply_index (u32 i)
 {
-  ip4_fib_mtrie_leaf_t l;
+  ip4_mtrie_leaf_t l;
   l = 0 + 2 * i;
-  ASSERT (ip4_fib_mtrie_leaf_get_next_ply_index (l) == i);
+  ASSERT (ip4_mtrie_leaf_get_next_ply_index (l) == i);
   return l;
 }
 
@@ -155,45 +155,43 @@ ip4_fib_mtrie_leaf_set_next_ply_index (u32 i)
 }
 
 static void
-ply_8_init (ip4_fib_mtrie_8_ply_t * p,
-            ip4_fib_mtrie_leaf_t init, uword prefix_len, u32 ply_base_len)
+ply_8_init (ip4_mtrie_8_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len,
+            u32 ply_base_len)
 {
   PLY_INIT (p, init, prefix_len, ply_base_len);
 }
 
 static void
-ply_16_init (ip4_fib_mtrie_16_ply_t * p,
-             ip4_fib_mtrie_leaf_t init, uword prefix_len)
+ply_16_init (ip4_mtrie_16_ply_t *p, ip4_mtrie_leaf_t init, uword prefix_len)
 {
   clib_memset (p->dst_address_bits_of_leaves, prefix_len,
                sizeof (p->dst_address_bits_of_leaves));
   PLY_INIT_LEAVES (p);
 }
 
-static ip4_fib_mtrie_leaf_t
-ply_create (ip4_fib_mtrie_t * m,
-            ip4_fib_mtrie_leaf_t init_leaf,
-            u32 leaf_prefix_len, u32 ply_base_len)
+static ip4_mtrie_leaf_t
+ply_create (ip4_mtrie_16_t *m, ip4_mtrie_leaf_t init_leaf, u32 leaf_prefix_len,
+            u32 ply_base_len)
 {
-  ip4_fib_mtrie_8_ply_t *p;
+  ip4_mtrie_8_ply_t *p;
 
   /* Get cache aligned ply. */
 
   pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
 
   ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
-  return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
+  return ip4_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
 }
 
-always_inline ip4_fib_mtrie_8_ply_t *
-get_next_ply_for_leaf (ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t l)
+always_inline ip4_mtrie_8_ply_t *
+get_next_ply_for_leaf (ip4_mtrie_16_t *m, ip4_mtrie_leaf_t l)
 {
-  uword n = ip4_fib_mtrie_leaf_get_next_ply_index (l);
+  uword n = ip4_mtrie_leaf_get_next_ply_index (l);
 
   return pool_elt_at_index (ip4_ply_pool, n);
 }
 
 void
-ip4_mtrie_free (ip4_fib_mtrie_t * m)
+ip4_mtrie_16_free (ip4_mtrie_16_t *m)
 {
   /* the root ply is embedded so there is nothing to do,
    * the assumption being that the IP4 FIB table has emptied the trie
@@ -203,15 +201,15 @@
   int i;
   for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
     {
-      ASSERT (!ip4_fib_mtrie_leaf_is_next_ply (m->root_ply.leaves[i]));
+      ASSERT (!ip4_mtrie_leaf_is_next_ply (m->root_ply.leaves[i]));
     }
 #endif
 }
 
 void
-ip4_mtrie_init (ip4_fib_mtrie_t * m)
+ip4_mtrie_16_init (ip4_mtrie_16_t *m)
 {
-  ply_16_init (&m->root_ply, IP4_FIB_MTRIE_LEAF_EMPTY, 0);
+  ply_16_init (&m->root_ply, IP4_MTRIE_LEAF_EMPTY, 0);
 }
 
 typedef struct
@@ -221,28 +219,26 @@ typedef struct
   u32 adj_index;
   u32 cover_address_length;
   u32 cover_adj_index;
-} ip4_fib_mtrie_set_unset_leaf_args_t;
+} ip4_mtrie_set_unset_leaf_args_t;
 
 static void
-set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
-                                 ip4_fib_mtrie_8_ply_t * ply,
-                                 ip4_fib_mtrie_leaf_t new_leaf,
+set_ply_with_more_specific_leaf (ip4_mtrie_16_t *m, ip4_mtrie_8_ply_t *ply,
+                                 ip4_mtrie_leaf_t new_leaf,
                                  uword new_leaf_dst_address_bits)
 {
-  ip4_fib_mtrie_leaf_t old_leaf;
+  ip4_mtrie_leaf_t old_leaf;
   uword i;
 
-  ASSERT (ip4_fib_mtrie_leaf_is_terminal (new_leaf));
+  ASSERT (ip4_mtrie_leaf_is_terminal (new_leaf));
 
   for (i = 0; i < ARRAY_LEN (ply->leaves); i++)
     {
       old_leaf = ply->leaves[i];
 
       /* Recurse into sub plies. */
-      if (!ip4_fib_mtrie_leaf_is_terminal (old_leaf))
+      if (!ip4_mtrie_leaf_is_terminal (old_leaf))
         {
-          ip4_fib_mtrie_8_ply_t *sub_ply =
-            get_next_ply_for_leaf (m, old_leaf);
+          ip4_mtrie_8_ply_t *sub_ply = get_next_ply_for_leaf (m, old_leaf);
           set_ply_with_more_specific_leaf (m, sub_ply, new_leaf,
                                            new_leaf_dst_address_bits);
         }
@@ -253,20 +249,19 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
         {
           clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
           ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
-          ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
+          ply->n_non_empty_leafs += ip4_mtrie_leaf_is_non_empty (ply, i);
         }
     }
 }
 
 static void
-set_leaf (ip4_fib_mtrie_t * m,
-          const ip4_fib_mtrie_set_unset_leaf_args_t * a,
+set_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a,
           u32 old_ply_index, u32 dst_address_byte_index)
 {
-  ip4_fib_mtrie_leaf_t old_leaf, new_leaf;
+  ip4_mtrie_leaf_t old_leaf, new_leaf;
   i32 n_dst_bits_next_plies;
   u8 dst_byte;
-  ip4_fib_mtrie_8_ply_t *old_ply;
+  ip4_mtrie_8_ply_t *old_ply;
 
   old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
 
@@ -295,30 +290,30 @@ set_leaf (ip4_fib_mtrie_t * m,
    * fill the buckets/slots of the ply */
   for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
     {
-      ip4_fib_mtrie_8_ply_t *new_ply;
+      ip4_mtrie_8_ply_t *new_ply;
 
       old_leaf = old_ply->leaves[i];
-      old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
+      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);
 
       if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i])
         {
          /* The new leaf is more or equally specific than the one currently
          * occupying the slot */
-          new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
+          new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);
 
          if (old_leaf_is_terminal)
            {
             /* The current leaf is terminal, we can replace it with
             * the new one */
              old_ply->n_non_empty_leafs -=
-               ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
+               ip4_mtrie_leaf_is_non_empty (old_ply, i);
 
              old_ply->dst_address_bits_of_leaves[i] = a->dst_address_length;
              clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);
 
              old_ply->n_non_empty_leafs +=
-               ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
+               ip4_mtrie_leaf_is_non_empty (old_ply, i);
 
              ASSERT (old_ply->n_non_empty_leafs <=
                      ARRAY_LEN (old_ply->leaves));
            }
@@ -350,18 +345,18 @@ set_leaf (ip4_fib_mtrie_t * m,
     {
      /* The address to insert requires us to move down at a lower level of
      * the trie - recurse on down */
-      ip4_fib_mtrie_8_ply_t *new_ply;
+      ip4_mtrie_8_ply_t *new_ply;
       u8 ply_base_len;
 
       ply_base_len = 8 * (dst_address_byte_index + 1);
 
       old_leaf = old_ply->leaves[dst_byte];
 
-      if (ip4_fib_mtrie_leaf_is_terminal (old_leaf))
+      if (ip4_mtrie_leaf_is_terminal (old_leaf))
        {
         /* There is a leaf occupying the slot. Replace it with a new ply */
          old_ply->n_non_empty_leafs -=
-           ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte);
+           ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);
 
          new_leaf =
            ply_create (m, old_leaf,
@@ -376,7 +371,7 @@ set_leaf (ip4_fib_mtrie_t * m,
          old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
 
          old_ply->n_non_empty_leafs +=
-           ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte);
+           ip4_mtrie_leaf_is_non_empty (old_ply, dst_byte);
          ASSERT (old_ply->n_non_empty_leafs >= 0);
        }
       else
@@ -387,11 +382,10 @@ set_leaf (ip4_fib_mtrie_t * m,
 }
 
 static void
-set_root_leaf (ip4_fib_mtrie_t * m,
-               const ip4_fib_mtrie_set_unset_leaf_args_t * a)
+set_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
 {
-  ip4_fib_mtrie_leaf_t old_leaf, new_leaf;
-  ip4_fib_mtrie_16_ply_t *old_ply;
+  ip4_mtrie_leaf_t old_leaf, new_leaf;
+  ip4_mtrie_16_ply_t *old_ply;
   i32 n_dst_bits_next_plies;
   u16 dst_byte;
 
@@ -420,7 +414,7 @@ set_root_leaf (ip4_fib_mtrie_t * m,
    * fill the buckets/slots of the ply */
   for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
     {
-      ip4_fib_mtrie_8_ply_t *new_ply;
+      ip4_mtrie_8_ply_t *new_ply;
       u16 slot;
 
       slot = clib_net_to_host_u16 (dst_byte);
@@ -428,14 +422,14 @@ set_root_leaf (ip4_fib_mtrie_t * m,
       slot = clib_host_to_net_u16 (slot);
 
       old_leaf = old_ply->leaves[slot];
-      old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
+      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);
 
       if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[slot])
        {
         /* The new leaf is more or equally specific than the one currently
         * occupying the slot */
-         new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
+         new_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);
 
          if (old_leaf_is_terminal)
            {
@@ -472,14 +466,14 @@ set_root_leaf (ip4_fib_mtrie_t * m,
     {
      /* The address to insert requires us to move down at a lower level of
      * the trie - recurse on down */
-      ip4_fib_mtrie_8_ply_t *new_ply;
+      ip4_mtrie_8_ply_t *new_ply;
       u8 ply_base_len;
 
       ply_base_len = 16;
 
       old_leaf = old_ply->leaves[dst_byte];
 
-      if (ip4_fib_mtrie_leaf_is_terminal (old_leaf))
+      if (ip4_mtrie_leaf_is_terminal (old_leaf))
        {
         /* There is a leaf occupying the slot. Replace it with a new ply */
          new_leaf =
@@ -499,11 +493,10 @@ set_root_leaf (ip4_fib_mtrie_t * m,
 }
 
 static uword
-unset_leaf (ip4_fib_mtrie_t * m,
-            const ip4_fib_mtrie_set_unset_leaf_args_t * a,
-            ip4_fib_mtrie_8_ply_t * old_ply, u32 dst_address_byte_index)
+unset_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a,
+            ip4_mtrie_8_ply_t *old_ply, u32 dst_address_byte_index)
 {
-  ip4_fib_mtrie_leaf_t old_leaf, del_leaf;
+  ip4_mtrie_leaf_t old_leaf, del_leaf;
   i32 n_dst_bits_next_plies;
   i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
   u8 dst_byte;
@@ -522,12 +515,12 @@ unset_leaf (ip4_fib_mtrie_t * m,
     n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
   n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);
 
-  del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
+  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);
 
   for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
     {
       old_leaf = old_ply->leaves[i];
-      old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
+      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);
 
       if (old_leaf == del_leaf ||
          (!old_leaf_is_terminal
          && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf),
@@ -535,15 +528,15 @@ unset_leaf (ip4_fib_mtrie_t * m,
                         dst_address_byte_index + 1)))
        {
          old_ply->n_non_empty_leafs -=
-           ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
+           ip4_mtrie_leaf_is_non_empty (old_ply, i);
 
-         clib_atomic_store_rel_n (&old_ply->leaves[i],
-                                  ip4_fib_mtrie_leaf_set_adj_index
-                                  (a->cover_adj_index));
+         clib_atomic_store_rel_n (
+           &old_ply->leaves[i],
+           ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
          old_ply->dst_address_bits_of_leaves[i] = a->cover_address_length;
 
          old_ply->n_non_empty_leafs +=
-           ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
+           ip4_mtrie_leaf_is_non_empty (old_ply, i);
 
          ASSERT (old_ply->n_non_empty_leafs >= 0);
          if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0)
@@ -558,7 +551,7 @@ unset_leaf (ip4_fib_mtrie_t * m,
              int ii, count = 0;
              for (ii = 0; ii < ARRAY_LEN (old_ply->leaves); ii++)
                {
-                 count += ip4_fib_mtrie_leaf_is_non_empty (old_ply, ii);
+                 count += ip4_mtrie_leaf_is_non_empty (old_ply, ii);
                }
              ASSERT (count);
            }
@@ -571,14 +564,13 @@ unset_leaf (ip4_fib_mtrie_t * m,
 }
 
 static void
-unset_root_leaf (ip4_fib_mtrie_t * m,
-                 const ip4_fib_mtrie_set_unset_leaf_args_t * a)
+unset_root_leaf (ip4_mtrie_16_t *m, const ip4_mtrie_set_unset_leaf_args_t *a)
 {
-  ip4_fib_mtrie_leaf_t old_leaf, del_leaf;
+  ip4_mtrie_leaf_t old_leaf, del_leaf;
   i32 n_dst_bits_next_plies;
   i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
   u16 dst_byte;
-  ip4_fib_mtrie_16_ply_t *old_ply;
+  ip4_mtrie_16_ply_t *old_ply;
 
   ASSERT (a->dst_address_length <= 32);
 
@@ -590,7 +582,7 @@ unset_root_leaf (ip4_fib_mtrie_t * m,
   n_dst_bits_this_ply =
     (n_dst_bits_next_plies <= 0 ? (16 - a->dst_address_length) : 0);
 
-  del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
+  del_leaf = ip4_mtrie_leaf_set_adj_index (a->adj_index);
 
  /* Starting at the value of the byte at this section of the v4 address
  * fill the buckets/slots of the ply */
@@ -603,26 +595,25 @@ unset_root_leaf (ip4_fib_mtrie_t * m,
       slot = clib_host_to_net_u16 (slot);
 
       old_leaf = old_ply->leaves[slot];
-      old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
+      old_leaf_is_terminal = ip4_mtrie_leaf_is_terminal (old_leaf);
 
       if (old_leaf == del_leaf ||
          (!old_leaf_is_terminal
          && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf), 2)))
        {
-         clib_atomic_store_rel_n (&old_ply->leaves[slot],
-                                  ip4_fib_mtrie_leaf_set_adj_index
-                                  (a->cover_adj_index));
+         clib_atomic_store_rel_n (
+           &old_ply->leaves[slot],
+           ip4_mtrie_leaf_set_adj_index (a->cover_adj_index));
          old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
        }
     }
 }
 
 void
-ip4_fib_mtrie_route_add (ip4_fib_mtrie_t * m,
-                         const ip4_address_t * dst_address,
-                         u32 dst_address_length, u32 adj_index)
+ip4_mtrie_16_route_add (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
+                        u32 dst_address_length, u32 adj_index)
 {
-  ip4_fib_mtrie_set_unset_leaf_args_t a;
+  ip4_mtrie_set_unset_leaf_args_t a;
   ip4_main_t *im = &ip4_main;
 
  /* Honor dst_address_length. Fib masks are in network byte order */
@@ -635,13 +626,11 @@ ip4_fib_mtrie_route_add (ip4_fib_mtrie_t * m,
 }
 
 void
-ip4_fib_mtrie_route_del (ip4_fib_mtrie_t * m,
-                         const ip4_address_t * dst_address,
-                         u32 dst_address_length,
-                         u32 adj_index,
-                         u32 cover_address_length, u32 cover_adj_index)
+ip4_mtrie_16_route_del (ip4_mtrie_16_t *m, const ip4_address_t *dst_address,
+                        u32 dst_address_length, u32 adj_index,
+                        u32 cover_address_length, u32 cover_adj_index)
 {
-  ip4_fib_mtrie_set_unset_leaf_args_t a;
+  ip4_mtrie_set_unset_leaf_args_t a;
   ip4_main_t *im = &ip4_main;
 
  /* Honor dst_address_length. Fib masks are in network byte order */
@@ -658,15 +647,15 @@ ip4_fib_mtrie_route_del (ip4_fib_mtrie_t * m,
 
 /* Returns number of bytes of memory used by mtrie. */
 static uword
-mtrie_ply_memory_usage (ip4_fib_mtrie_t * m, ip4_fib_mtrie_8_ply_t * p)
+mtrie_ply_memory_usage (ip4_mtrie_16_t *m, ip4_mtrie_8_ply_t *p)
 {
   uword bytes, i;
 
   bytes = sizeof (p[0]);
   for (i = 0; i < ARRAY_LEN (p->leaves); i++)
     {
-      ip4_fib_mtrie_leaf_t l = p->leaves[i];
-      if (ip4_fib_mtrie_leaf_is_next_ply (l))
+      ip4_mtrie_leaf_t l = p->leaves[i];
+      if (ip4_mtrie_leaf_is_next_ply (l))
        bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l));
     }
 
@@ -675,15 +664,15 @@ mtrie_ply_memory_usage (ip4_fib_mtrie_t * m, ip4_fib_mtrie_8_ply_t * p)
 
 /* Returns number of bytes of memory used by mtrie. */
 uword
-ip4_fib_mtrie_memory_usage (ip4_fib_mtrie_t * m)
+ip4_mtrie_16_memory_usage (ip4_mtrie_16_t *m)
 {
   uword bytes, i;
 
   bytes = sizeof (*m);
   for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
     {
-      ip4_fib_mtrie_leaf_t l = m->root_ply.leaves[i];
-      if (ip4_fib_mtrie_leaf_is_next_ply (l))
+      ip4_mtrie_leaf_t l = m->root_ply.leaves[i];
+      if (ip4_mtrie_leaf_is_next_ply (l))
        bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l));
     }
 
@@ -691,46 +680,44 @@ ip4_fib_mtrie_memory_usage (ip4_fib_mtrie_t * m)
 }
 
 static u8 *
-format_ip4_fib_mtrie_leaf (u8 * s, va_list * va)
+format_ip4_mtrie_leaf (u8 *s, va_list *va)
 {
-  ip4_fib_mtrie_leaf_t l = va_arg (*va, ip4_fib_mtrie_leaf_t);
+  ip4_mtrie_leaf_t l = va_arg (*va, ip4_mtrie_leaf_t);
 
-  if (ip4_fib_mtrie_leaf_is_terminal (l))
-    s = format (s, "lb-index %d", ip4_fib_mtrie_leaf_get_adj_index (l));
+  if (ip4_mtrie_leaf_is_terminal (l))
+    s = format (s, "lb-index %d", ip4_mtrie_leaf_get_adj_index (l));
   else
-    s = format (s, "next ply %d", ip4_fib_mtrie_leaf_get_next_ply_index (l));
+    s = format (s, "next ply %d", ip4_mtrie_leaf_get_next_ply_index (l));
   return s;
 }
 
-#define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent) \
-({ \
-  u32 a, ia_length; \
-  ip4_address_t ia; \
-  ip4_fib_mtrie_leaf_t _l = p->leaves[(_i)]; \
- \
-  a = (_base_address) + ((_a) << (32 - (_ply_max_len))); \
-  ia.as_u32 = clib_host_to_net_u32 (a); \
-  ia_length = (_p)->dst_address_bits_of_leaves[(_i)]; \
-  s = format (s, "\n%U%U %U", \
-              format_white_space, (_indent) + 4, \
-              format_ip4_address_and_length, &ia, ia_length, \
-              format_ip4_fib_mtrie_leaf, _l); \
- \
-  if (ip4_fib_mtrie_leaf_is_next_ply (_l)) \
-    s = format (s, "\n%U", \
-                format_ip4_fib_mtrie_ply, m, a, (_indent) + 8, \
-                ip4_fib_mtrie_leaf_get_next_ply_index (_l)); \
-  s; \
-})
+#define FORMAT_PLY(s, _p, _a, _i, _base_address, _ply_max_len, _indent)      \
+  ({                                                                         \
+    u32 a, ia_length;                                                        \
+    ip4_address_t ia;                                                        \
+    ip4_mtrie_leaf_t _l = p->leaves[(_i)];                                   \
+                                                                             \
+    a = (_base_address) + ((_a) << (32 - (_ply_max_len)));                   \
+    ia.as_u32 = clib_host_to_net_u32 (a);                                    \
+    ia_length = (_p)->dst_address_bits_of_leaves[(_i)];                      \
+    s = format (s, "\n%U%U %U", format_white_space, (_indent) + 4,           \
+                format_ip4_address_and_length, &ia, ia_length,              \
+                format_ip4_mtrie_leaf, _l);                                  \
+                                                                             \
+    if (ip4_mtrie_leaf_is_next_ply (_l))                                     \
+      s = format (s, "\n%U", format_ip4_mtrie_ply, m, a, (_indent) + 8,      \
+                  ip4_mtrie_leaf_get_next_ply_index (_l));                   \
+    s;                                                                       \
+  })
 
 static u8 *
-format_ip4_fib_mtrie_ply (u8 * s, va_list * va)
+format_ip4_mtrie_ply (u8 *s, va_list *va)
 {
-  ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
+  ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
   u32 base_address = va_arg (*va, u32);
   u32 indent = va_arg (*va, u32);
   u32 ply_index = va_arg (*va, u32);
-  ip4_fib_mtrie_8_ply_t *p;
+  ip4_mtrie_8_ply_t *p;
   int i;
 
   p = pool_elt_at_index (ip4_ply_pool, ply_index);
@@ -739,7 +726,7 @@ format_ip4_fib_mtrie_ply (u8 * s, va_list * va)
 
   for (i = 0; i < ARRAY_LEN (p->leaves); i++)
     {
-      if (ip4_fib_mtrie_leaf_is_non_empty (p, i))
+      if (ip4_mtrie_leaf_is_non_empty (p, i))
        {
          s = FORMAT_PLY (s, p, i, i, base_address,
                          p->dst_address_bits_base + 8, indent);
@@ -750,17 +737,16 @@ format_ip4_fib_mtrie_ply (u8 * s, va_list * va)
 }
 
 u8 *
-format_ip4_fib_mtrie (u8 * s, va_list * va)
+format_ip4_mtrie_16 (u8 *s, va_list *va)
 {
-  ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
+  ip4_mtrie_16_t *m = va_arg (*va, ip4_mtrie_16_t *);
   int verbose = va_arg (*va, int);
-  ip4_fib_mtrie_16_ply_t *p;
+  ip4_mtrie_16_ply_t *p;
   u32 base_address = 0;
   int i;
 
-  s = format (s, "%d plies, memory usage %U\n",
-              pool_elts (ip4_ply_pool),
-              format_memory_size, ip4_fib_mtrie_memory_usage (m));
+  s = format (s, "%d plies, memory usage %U\n", pool_elts (ip4_ply_pool),
+              format_memory_size, ip4_mtrie_16_memory_usage (m));
   s = format (s, "root-ply");
   p = &m->root_ply;
 
@@ -794,7 +780,7 @@ format_ip4_fib_mtrie (u8 * s, va_list * va)
 static clib_error_t *
 ip4_mtrie_module_init (vlib_main_t * vm)
 {
-  CLIB_UNUSED (ip4_fib_mtrie_8_ply_t * p);
+  CLIB_UNUSED (ip4_mtrie_8_ply_t * p);
   clib_error_t *error = NULL;
 
   /* Burn one ply so index 0 is taken */
diff --git a/src/vnet/ip/ip4_mtrie.h b/src/vnet/ip/ip4_mtrie.h
index 87e2b5892ba..332e34a75ca 100644
--- a/src/vnet/ip/ip4_mtrie.h
+++ b/src/vnet/ip/ip4_mtrie.h
@@ -49,9 +49,9 @@
    1 + 2*adj_index for terminal leaves.
    0 + 2*next_ply_index for non-terminals, i.e. PLYs
    1 => empty (adjacency index of zero is special miss adjacency). */
-typedef u32 ip4_fib_mtrie_leaf_t;
+typedef u32 ip4_mtrie_leaf_t;
 
-#define IP4_FIB_MTRIE_LEAF_EMPTY (1 + 2*0)
+#define IP4_MTRIE_LEAF_EMPTY (1 + 2 * 0)
 
 /**
  * @brief the 16 way stride that is the top PLY of the mtrie
@@ -60,14 +60,14 @@
  * the FIB is destroyed.
  */
 #define PLY_16_SIZE (1<<16)
-typedef struct ip4_fib_mtrie_16_ply_t_
+typedef struct ip4_mtrie_16_ply_t_
 {
   /**
    * The leaves/slots/buckets to be filed with leafs
   */
   union
   {
-    ip4_fib_mtrie_leaf_t leaves[PLY_16_SIZE];
+    ip4_mtrie_leaf_t leaves[PLY_16_SIZE];
 
 #ifdef CLIB_HAVE_VEC128
     u32x4 leaves_as_u32x4[PLY_16_SIZE / 4];
@@ -78,19 +78,19 @@ typedef struct ip4_fib_mtrie_16_ply_t_
    * Prefix length for terminal leaves.
   */
   u8 dst_address_bits_of_leaves[PLY_16_SIZE];
-} ip4_fib_mtrie_16_ply_t;
+} ip4_mtrie_16_ply_t;
 
 /**
  * @brief One ply of the 4 ply mtrie fib.
 */
-typedef struct ip4_fib_mtrie_8_ply_t_
+typedef struct ip4_mtrie_8_ply_t_
 {
   /**
    * The leaves/slots/buckets to be filed with leafs
   */
   union
   {
-    ip4_fib_mtrie_leaf_t leaves[256];
+    ip4_mtrie_leaf_t leaves[256];
 
 #ifdef CLIB_HAVE_VEC128
     u32x4 leaves_as_u32x4[256 / 4];
@@ -116,10 +116,9 @@ typedef struct ip4_fib_mtrie_8_ply_t_
   /* Pad to cache line boundary. */
   u8 pad[CLIB_CACHE_LINE_BYTES - 2 * sizeof (i32)];
-}
-ip4_fib_mtrie_8_ply_t;
+} ip4_mtrie_8_ply_t;
 
-STATIC_ASSERT (0 == sizeof (ip4_fib_mtrie_8_ply_t) % CLIB_CACHE_LINE_BYTES,
+STATIC_ASSERT (0 == sizeof (ip4_mtrie_8_ply_t) % CLIB_CACHE_LINE_BYTES,
               "IP4 Mtrie ply cache line");
 
 /**
@@ -133,54 +132,53 @@ typedef struct
    * 'get me the mtrie' returns the first ply, and not an indirect 'pointer'
    * to it. therefore no cacheline misses in the data-path.
   */
-  ip4_fib_mtrie_16_ply_t root_ply;
-} ip4_fib_mtrie_t;
+  ip4_mtrie_16_ply_t root_ply;
+} ip4_mtrie_16_t;
 
 /**
  * @brief Initialise an mtrie
 */
-void ip4_mtrie_init (ip4_fib_mtrie_t * m);
+void ip4_mtrie_16_init (ip4_mtrie_16_t *m);
 
 /**
  * @brief Free an mtrie, It must be emty when free'd
 */
-void ip4_mtrie_free (ip4_fib_mtrie_t * m);
+void ip4_mtrie_16_free (ip4_mtrie_16_t *m);
 
 /**
  * @brief Add a route/entry to the mtrie
 */
-void ip4_fib_mtrie_route_add (ip4_fib_mtrie_t * m,
-                              const ip4_address_t * dst_address,
-                              u32 dst_address_length, u32 adj_index);
+void ip4_mtrie_16_route_add (ip4_mtrie_16_t *m,
+                             const ip4_address_t *dst_address,
+                             u32 dst_address_length, u32 adj_index);
 
 /**
  * @brief remove a route/entry to the mtrie
 */
-void ip4_fib_mtrie_route_del (ip4_fib_mtrie_t * m,
-                              const ip4_address_t * dst_address,
-                              u32 dst_address_length,
-                              u32 adj_index,
-                              u32 cover_address_length, u32 cover_adj_index);
+void ip4_mtrie_16_route_del (ip4_mtrie_16_t *m,
+                             const ip4_address_t *dst_address,
+                             u32 dst_address_length, u32 adj_index,
+                             u32 cover_address_length, u32 cover_adj_index);
 
 /**
  * @brief return the memory used by the table
 */
-uword ip4_fib_mtrie_memory_usage (ip4_fib_mtrie_t * m);
+uword ip4_mtrie_16_memory_usage (ip4_mtrie_16_t *m);
 
 /**
  * @brief Format/display the contents of the mtrie
 */
-format_function_t format_ip4_fib_mtrie;
+format_function_t format_ip4_mtrie_16;
 
 /**
  * @brief A global pool of 8bit stride plys
 */
-extern ip4_fib_mtrie_8_ply_t *ip4_ply_pool;
+extern ip4_mtrie_8_ply_t *ip4_ply_pool;
 
 /**
  * Is the leaf terminal (i.e. an LB index) or non-terminal (i.e. a PLY index)
 */
 always_inline u32
-ip4_fib_mtrie_leaf_is_terminal (ip4_fib_mtrie_leaf_t n)
+ip4_mtrie_leaf_is_terminal (ip4_mtrie_leaf_t n)
 {
   return n & 1;
 }
@@ -189,24 +187,24 @@ ip4_fib_mtrie_leaf_is_terminal (ip4_fib_mtrie_leaf_t n)
  * From the stored slot value extract the LB index value
 */
 always_inline u32
-ip4_fib_mtrie_leaf_get_adj_index (ip4_fib_mtrie_leaf_t n)
+ip4_mtrie_leaf_get_adj_index (ip4_mtrie_leaf_t n)
 {
-  ASSERT (ip4_fib_mtrie_leaf_is_terminal (n));
+  ASSERT (ip4_mtrie_leaf_is_terminal (n));
   return n >> 1;
 }
 
 /**
  * @brief Lookup step.  Processes 1 byte of 4 byte ip4 address.
 */
-always_inline ip4_fib_mtrie_leaf_t
-ip4_fib_mtrie_lookup_step (const ip4_fib_mtrie_t * m,
-                           ip4_fib_mtrie_leaf_t current_leaf,
-                           const ip4_address_t * dst_address,
-                           u32 dst_address_byte_index)
+always_inline ip4_mtrie_leaf_t
+ip4_mtrie_16_lookup_step (const ip4_mtrie_16_t *m,
+                          ip4_mtrie_leaf_t current_leaf,
+                          const ip4_address_t *dst_address,
+                          u32 dst_address_byte_index)
 {
-  ip4_fib_mtrie_8_ply_t *ply;
+  ip4_mtrie_8_ply_t *ply;
 
-  uword current_is_terminal = ip4_fib_mtrie_leaf_is_terminal (current_leaf);
+  uword current_is_terminal = ip4_mtrie_leaf_is_terminal (current_leaf);
 
   if (!current_is_terminal)
     {
@@ -220,11 +218,11 @@ ip4_fib_mtrie_lookup_step (const ip4_fib_mtrie_t * m,
 /**
  * @brief Lookup step number 1.  Processes 2 bytes of 4 byte ip4 address.
 */
-always_inline ip4_fib_mtrie_leaf_t
-ip4_fib_mtrie_lookup_step_one (const ip4_fib_mtrie_t * m,
-                               const ip4_address_t * dst_address)
+always_inline ip4_mtrie_leaf_t
+ip4_mtrie_16_lookup_step_one (const ip4_mtrie_16_t *m,
+                              const ip4_address_t *dst_address)
 {
-  ip4_fib_mtrie_leaf_t next_leaf;
+  ip4_mtrie_leaf_t next_leaf;
 
   next_leaf = m->root_ply.leaves[dst_address->as_u16[0]];
-- 
2.16.6
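Editor's note (not part of the patch): a minimal sketch of how a caller composes the renamed 16_8_8 lookup API after this change, mirroring ip4_fib_forwarding_lookup() in ip4_fib.h above. The wrapper name example_mtrie_16_lookup and the standalone framing are illustrative assumptions; only the ip4_mtrie_16_* / ip4_mtrie_leaf_* calls come from the patch itself.

    #include <vnet/ip/ip4_mtrie.h>

    /* Walk the 16-8-8 stride trie: one 16-bit step over address bytes 0-1,
     * then one 8-bit step for each remaining byte, and finally convert the
     * terminal leaf into a load-balance (adjacency) index. */
    static inline u32
    example_mtrie_16_lookup (const ip4_mtrie_16_t *mtrie,
                             const ip4_address_t *addr)
    {
      ip4_mtrie_leaf_t leaf;

      leaf = ip4_mtrie_16_lookup_step_one (mtrie, addr);      /* bytes 0-1 */
      leaf = ip4_mtrie_16_lookup_step (mtrie, leaf, addr, 2); /* byte 2 */
      leaf = ip4_mtrie_16_lookup_step (mtrie, leaf, addr, 3); /* byte 3 */

      return ip4_mtrie_leaf_get_adj_index (leaf);
    }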