/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/ip4_fib.h: ip4 mtrie fib
 *
 * Copyright (c) 2012 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
40 #include <vnet/ip/ip.h>
41 #include <vnet/ip/ip4_mtrie.h>
42 #include <vnet/fib/ip4_fib.h>
46 * Global pool of IPv4 8bit PLYs
48 ip4_fib_mtrie_8_ply_t *ip4_ply_pool;
51 ip4_fib_mtrie_leaf_is_non_empty (ip4_fib_mtrie_8_ply_t * p, u8 dst_byte)
54 * It's 'non-empty' if the length of the leaf stored is greater than the
55 * length of a leaf in the covering ply. i.e. the leaf is more specific
56 * than it's would be cover in the covering ply
58 if (p->dst_address_bits_of_leaves[dst_byte] > p->dst_address_bits_base)
63 always_inline ip4_fib_mtrie_leaf_t
64 ip4_fib_mtrie_leaf_set_adj_index (u32 adj_index)
66 ip4_fib_mtrie_leaf_t l;
67 l = 1 + 2 * adj_index;
68 ASSERT (ip4_fib_mtrie_leaf_get_adj_index (l) == adj_index);
73 ip4_fib_mtrie_leaf_is_next_ply (ip4_fib_mtrie_leaf_t n)
79 ip4_fib_mtrie_leaf_get_next_ply_index (ip4_fib_mtrie_leaf_t n)
81 ASSERT (ip4_fib_mtrie_leaf_is_next_ply (n));
85 always_inline ip4_fib_mtrie_leaf_t
86 ip4_fib_mtrie_leaf_set_next_ply_index (u32 i)
88 ip4_fib_mtrie_leaf_t l;
90 ASSERT (ip4_fib_mtrie_leaf_get_next_ply_index (l) == i);
/* Broadcast a 32-bit init value across a u32x4.  Two variants: the
 * direct splat, and a union-based fallback for targets where
 * u32x4_splat is unavailable.  NOTE(review): the original guard was
 * lost in extraction; upstream used an ALTIVEC conditional here —
 * confirm against the repository. */
#ifndef __ALTIVEC__
#define PLY_X4_SPLAT_INIT(init_x4, init) \
  init_x4 = u32x4_splat (init);
#else
#define PLY_X4_SPLAT_INIT(init_x4, init)				\
{									\
  u32x4_union_t y;							\
  y.as_u32[0] = init;							\
  y.as_u32[1] = init;							\
  y.as_u32[2] = init;							\
  y.as_u32[3] = init;							\
  init_x4 = y.as_u32x4;							\
}
#endif
/* Fill every leaf slot of a ply with 'init'.  Vector variant writes
 * four u32x4s per iteration when 128-bit vectors are available;
 * scalar variant writes four u32s per iteration. */
#ifdef CLIB_HAVE_VEC128
#define PLY_INIT_LEAVES(p)						\
{									\
  u32x4 *l, init_x4;							\
									\
  PLY_X4_SPLAT_INIT (init_x4, init);					\
  for (l = p->leaves_as_u32x4;						\
       l < p->leaves_as_u32x4 + ARRAY_LEN (p->leaves_as_u32x4);		\
       l += 4)								\
    {									\
      l[0] = init_x4;							\
      l[1] = init_x4;							\
      l[2] = init_x4;							\
      l[3] = init_x4;							\
    }									\
}
#else
#define PLY_INIT_LEAVES(p)						\
{									\
  u32 *l;								\
									\
  for (l = p->leaves; l < p->leaves + ARRAY_LEN (p->leaves); l += 4)	\
    {									\
      l[0] = init;							\
      l[1] = init;							\
      l[2] = init;							\
      l[3] = init;							\
    }									\
}
#endif
/* Common 8-bit ply initialisation: set the non-empty count, stamp every
 * slot with the covering prefix length, record the ply's base length,
 * and fill all leaves with 'init'. */
#define PLY_INIT(p, init, prefix_len, ply_base_len)			\
{									\
  /*									\
   * A leaf is 'empty' if it represents a leaf from the covering PLY	\
   * i.e. if the prefix length of the leaf is less than or equal to	\
   * the prefix length of the PLY					\
   */									\
  p->n_non_empty_leafs = (prefix_len > ply_base_len ?			\
			  ARRAY_LEN (p->leaves) : 0);			\
  memset (p->dst_address_bits_of_leaves, prefix_len,			\
	  sizeof (p->dst_address_bits_of_leaves));			\
  p->dst_address_bits_base = ply_base_len;				\
									\
  /* Initialize leaves. */						\
  PLY_INIT_LEAVES (p);							\
}
158 ply_8_init (ip4_fib_mtrie_8_ply_t * p,
159 ip4_fib_mtrie_leaf_t init, uword prefix_len, u32 ply_base_len)
161 PLY_INIT (p, init, prefix_len, ply_base_len);
165 ply_16_init (ip4_fib_mtrie_16_ply_t * p,
166 ip4_fib_mtrie_leaf_t init, uword prefix_len)
168 memset (p->dst_address_bits_of_leaves, prefix_len,
169 sizeof (p->dst_address_bits_of_leaves));
173 static ip4_fib_mtrie_leaf_t
174 ply_create (ip4_fib_mtrie_t * m,
175 ip4_fib_mtrie_leaf_t init_leaf,
176 u32 leaf_prefix_len, u32 ply_base_len)
178 ip4_fib_mtrie_8_ply_t *p;
180 /* Get cache aligned ply. */
181 pool_get_aligned (ip4_ply_pool, p, CLIB_CACHE_LINE_BYTES);
183 ply_8_init (p, init_leaf, leaf_prefix_len, ply_base_len);
184 return ip4_fib_mtrie_leaf_set_next_ply_index (p - ip4_ply_pool);
187 always_inline ip4_fib_mtrie_8_ply_t *
188 get_next_ply_for_leaf (ip4_fib_mtrie_t * m, ip4_fib_mtrie_leaf_t l)
190 uword n = ip4_fib_mtrie_leaf_get_next_ply_index (l);
192 return pool_elt_at_index (ip4_ply_pool, n);
196 ip4_mtrie_free (ip4_fib_mtrie_t * m)
198 /* the root ply is embedded so the is nothing to do,
199 * the assumption being that the IP4 FIB table has emptied the trie
204 for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
206 ASSERT (!ip4_fib_mtrie_leaf_is_next_ply (m->root_ply.leaves[i]));
212 ip4_mtrie_init (ip4_fib_mtrie_t * m)
214 ply_16_init (&m->root_ply, IP4_FIB_MTRIE_LEAF_EMPTY, 0);
219 ip4_address_t dst_address;
220 u32 dst_address_length;
222 u32 cover_address_length;
224 } ip4_fib_mtrie_set_unset_leaf_args_t;
227 set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
228 ip4_fib_mtrie_8_ply_t * ply,
229 ip4_fib_mtrie_leaf_t new_leaf,
230 uword new_leaf_dst_address_bits)
232 ip4_fib_mtrie_leaf_t old_leaf;
235 ASSERT (ip4_fib_mtrie_leaf_is_terminal (new_leaf));
237 for (i = 0; i < ARRAY_LEN (ply->leaves); i++)
239 old_leaf = ply->leaves[i];
241 /* Recurse into sub plies. */
242 if (!ip4_fib_mtrie_leaf_is_terminal (old_leaf))
244 ip4_fib_mtrie_8_ply_t *sub_ply =
245 get_next_ply_for_leaf (m, old_leaf);
246 set_ply_with_more_specific_leaf (m, sub_ply, new_leaf,
247 new_leaf_dst_address_bits);
250 /* Replace less specific terminal leaves with new leaf. */
251 else if (new_leaf_dst_address_bits >=
252 ply->dst_address_bits_of_leaves[i])
254 __sync_val_compare_and_swap (&ply->leaves[i], old_leaf, new_leaf);
255 ASSERT (ply->leaves[i] == new_leaf);
256 ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
257 ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
263 set_leaf (ip4_fib_mtrie_t * m,
264 const ip4_fib_mtrie_set_unset_leaf_args_t * a,
265 u32 old_ply_index, u32 dst_address_byte_index)
267 ip4_fib_mtrie_leaf_t old_leaf, new_leaf;
268 i32 n_dst_bits_next_plies;
270 ip4_fib_mtrie_8_ply_t *old_ply;
272 old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
274 ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32);
275 ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));
277 /* how many bits of the destination address are in the next PLY */
278 n_dst_bits_next_plies =
279 a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
281 dst_byte = a->dst_address.as_u8[dst_address_byte_index];
283 /* Number of bits next plies <= 0 => insert leaves this ply. */
284 if (n_dst_bits_next_plies <= 0)
286 /* The mask length of the address to insert maps to this ply */
287 uword i, n_dst_bits_this_ply, old_leaf_is_terminal;
289 /* The number of bits, and hence slots/buckets, we will fill */
290 n_dst_bits_this_ply = clib_min (8, -n_dst_bits_next_plies);
291 ASSERT ((a->dst_address.as_u8[dst_address_byte_index] &
292 pow2_mask (n_dst_bits_this_ply)) == 0);
294 /* Starting at the value of the byte at this section of the v4 address
295 * fill the buckets/slots of the ply */
296 for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
298 ip4_fib_mtrie_8_ply_t *new_ply;
300 old_leaf = old_ply->leaves[i];
301 old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
303 if (a->dst_address_length >= old_ply->dst_address_bits_of_leaves[i])
305 /* The new leaf is more or equally specific than the one currently
306 * occupying the slot */
307 new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
309 if (old_leaf_is_terminal)
311 /* The current leaf is terminal, we can replace it with
313 old_ply->n_non_empty_leafs -=
314 ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
316 old_ply->dst_address_bits_of_leaves[i] =
317 a->dst_address_length;
318 __sync_val_compare_and_swap (&old_ply->leaves[i], old_leaf,
320 ASSERT (old_ply->leaves[i] == new_leaf);
322 old_ply->n_non_empty_leafs +=
323 ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
324 ASSERT (old_ply->n_non_empty_leafs <=
325 ARRAY_LEN (old_ply->leaves));
329 /* Existing leaf points to another ply. We need to place
330 * new_leaf into all more specific slots. */
331 new_ply = get_next_ply_for_leaf (m, old_leaf);
332 set_ply_with_more_specific_leaf (m, new_ply, new_leaf,
333 a->dst_address_length);
336 else if (!old_leaf_is_terminal)
338 /* The current leaf is less specific and not termial (i.e. a ply),
339 * recurse on down the trie */
340 new_ply = get_next_ply_for_leaf (m, old_leaf);
341 set_leaf (m, a, new_ply - ip4_ply_pool,
342 dst_address_byte_index + 1);
346 * the route we are adding is less specific than the leaf currently
347 * occupying this slot. leave it there
353 /* The address to insert requires us to move down at a lower level of
354 * the trie - recurse on down */
355 ip4_fib_mtrie_8_ply_t *new_ply;
358 ply_base_len = 8 * (dst_address_byte_index + 1);
360 old_leaf = old_ply->leaves[dst_byte];
362 if (ip4_fib_mtrie_leaf_is_terminal (old_leaf))
364 /* There is a leaf occupying the slot. Replace it with a new ply */
365 old_ply->n_non_empty_leafs -=
366 ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte);
368 new_leaf = ply_create (m, old_leaf,
369 clib_max (old_ply->dst_address_bits_of_leaves
370 [dst_byte], ply_base_len),
372 new_ply = get_next_ply_for_leaf (m, new_leaf);
374 /* Refetch since ply_create may move pool. */
375 old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
377 __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
379 ASSERT (old_ply->leaves[dst_byte] == new_leaf);
380 old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
382 old_ply->n_non_empty_leafs +=
383 ip4_fib_mtrie_leaf_is_non_empty (old_ply, dst_byte);
384 ASSERT (old_ply->n_non_empty_leafs >= 0);
387 new_ply = get_next_ply_for_leaf (m, old_leaf);
389 set_leaf (m, a, new_ply - ip4_ply_pool, dst_address_byte_index + 1);
394 set_root_leaf (ip4_fib_mtrie_t * m,
395 const ip4_fib_mtrie_set_unset_leaf_args_t * a)
397 ip4_fib_mtrie_leaf_t old_leaf, new_leaf;
398 ip4_fib_mtrie_16_ply_t *old_ply;
399 i32 n_dst_bits_next_plies;
402 old_ply = &m->root_ply;
404 ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32);
406 /* how many bits of the destination address are in the next PLY */
407 n_dst_bits_next_plies = a->dst_address_length - BITS (u16);
409 dst_byte = a->dst_address.as_u16[0];
411 /* Number of bits next plies <= 0 => insert leaves this ply. */
412 if (n_dst_bits_next_plies <= 0)
414 /* The mask length of the address to insert maps to this ply */
415 uword i, n_dst_bits_this_ply, old_leaf_is_terminal;
417 /* The number of bits, and hence slots/buckets, we will fill */
418 n_dst_bits_this_ply = 16 - a->dst_address_length;
419 ASSERT ((clib_host_to_net_u16 (a->dst_address.as_u16[0]) &
420 pow2_mask (n_dst_bits_this_ply)) == 0);
422 /* Starting at the value of the byte at this section of the v4 address
423 * fill the buckets/slots of the ply */
424 for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
426 ip4_fib_mtrie_8_ply_t *new_ply;
429 slot = clib_net_to_host_u16 (dst_byte);
431 slot = clib_host_to_net_u16 (slot);
433 old_leaf = old_ply->leaves[slot];
434 old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
436 if (a->dst_address_length >=
437 old_ply->dst_address_bits_of_leaves[slot])
439 /* The new leaf is more or equally specific than the one currently
440 * occupying the slot */
441 new_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
443 if (old_leaf_is_terminal)
445 /* The current leaf is terminal, we can replace it with
447 old_ply->dst_address_bits_of_leaves[slot] =
448 a->dst_address_length;
449 __sync_val_compare_and_swap (&old_ply->leaves[slot],
451 ASSERT (old_ply->leaves[slot] == new_leaf);
455 /* Existing leaf points to another ply. We need to place
456 * new_leaf into all more specific slots. */
457 new_ply = get_next_ply_for_leaf (m, old_leaf);
458 set_ply_with_more_specific_leaf (m, new_ply, new_leaf,
459 a->dst_address_length);
462 else if (!old_leaf_is_terminal)
464 /* The current leaf is less specific and not termial (i.e. a ply),
465 * recurse on down the trie */
466 new_ply = get_next_ply_for_leaf (m, old_leaf);
467 set_leaf (m, a, new_ply - ip4_ply_pool, 2);
471 * the route we are adding is less specific than the leaf currently
472 * occupying this slot. leave it there
478 /* The address to insert requires us to move down at a lower level of
479 * the trie - recurse on down */
480 ip4_fib_mtrie_8_ply_t *new_ply;
485 old_leaf = old_ply->leaves[dst_byte];
487 if (ip4_fib_mtrie_leaf_is_terminal (old_leaf))
489 /* There is a leaf occupying the slot. Replace it with a new ply */
490 new_leaf = ply_create (m, old_leaf,
491 clib_max (old_ply->dst_address_bits_of_leaves
492 [dst_byte], ply_base_len),
494 new_ply = get_next_ply_for_leaf (m, new_leaf);
496 __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
498 ASSERT (old_ply->leaves[dst_byte] == new_leaf);
499 old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
502 new_ply = get_next_ply_for_leaf (m, old_leaf);
504 set_leaf (m, a, new_ply - ip4_ply_pool, 2);
509 unset_leaf (ip4_fib_mtrie_t * m,
510 const ip4_fib_mtrie_set_unset_leaf_args_t * a,
511 ip4_fib_mtrie_8_ply_t * old_ply, u32 dst_address_byte_index)
513 ip4_fib_mtrie_leaf_t old_leaf, del_leaf;
514 i32 n_dst_bits_next_plies;
515 i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
518 ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32);
519 ASSERT (dst_address_byte_index < ARRAY_LEN (a->dst_address.as_u8));
521 n_dst_bits_next_plies =
522 a->dst_address_length - BITS (u8) * (dst_address_byte_index + 1);
524 dst_byte = a->dst_address.as_u8[dst_address_byte_index];
525 if (n_dst_bits_next_plies < 0)
526 dst_byte &= ~pow2_mask (-n_dst_bits_next_plies);
528 n_dst_bits_this_ply =
529 n_dst_bits_next_plies <= 0 ? -n_dst_bits_next_plies : 0;
530 n_dst_bits_this_ply = clib_min (8, n_dst_bits_this_ply);
532 del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
534 for (i = dst_byte; i < dst_byte + (1 << n_dst_bits_this_ply); i++)
536 old_leaf = old_ply->leaves[i];
537 old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
539 if (old_leaf == del_leaf
540 || (!old_leaf_is_terminal
541 && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf),
542 dst_address_byte_index + 1)))
544 old_ply->n_non_empty_leafs -=
545 ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
548 ip4_fib_mtrie_leaf_set_adj_index (a->cover_adj_index);
549 old_ply->dst_address_bits_of_leaves[i] =
550 clib_max (old_ply->dst_address_bits_base,
551 a->cover_address_length);
553 old_ply->n_non_empty_leafs +=
554 ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
556 ASSERT (old_ply->n_non_empty_leafs >= 0);
557 if (old_ply->n_non_empty_leafs == 0 && dst_address_byte_index > 0)
559 pool_put (ip4_ply_pool, old_ply);
560 /* Old ply was deleted. */
564 else if (dst_address_byte_index)
567 for (ii = 0; ii < ARRAY_LEN (old_ply->leaves); ii++)
569 count += ip4_fib_mtrie_leaf_is_non_empty (old_ply, ii);
577 /* Old ply was not deleted. */
582 unset_root_leaf (ip4_fib_mtrie_t * m,
583 const ip4_fib_mtrie_set_unset_leaf_args_t * a)
585 ip4_fib_mtrie_leaf_t old_leaf, del_leaf;
586 i32 n_dst_bits_next_plies;
587 i32 i, n_dst_bits_this_ply, old_leaf_is_terminal;
589 ip4_fib_mtrie_16_ply_t *old_ply;
591 ASSERT (a->dst_address_length >= 0 && a->dst_address_length <= 32);
593 old_ply = &m->root_ply;
594 n_dst_bits_next_plies = a->dst_address_length - BITS (u16);
596 dst_byte = a->dst_address.as_u16[0];
598 n_dst_bits_this_ply = (n_dst_bits_next_plies <= 0 ?
599 (16 - a->dst_address_length) : 0);
601 del_leaf = ip4_fib_mtrie_leaf_set_adj_index (a->adj_index);
603 /* Starting at the value of the byte at this section of the v4 address
604 * fill the buckets/slots of the ply */
605 for (i = 0; i < (1 << n_dst_bits_this_ply); i++)
609 slot = clib_net_to_host_u16 (dst_byte);
611 slot = clib_host_to_net_u16 (slot);
613 old_leaf = old_ply->leaves[slot];
614 old_leaf_is_terminal = ip4_fib_mtrie_leaf_is_terminal (old_leaf);
616 if (old_leaf == del_leaf
617 || (!old_leaf_is_terminal
618 && unset_leaf (m, a, get_next_ply_for_leaf (m, old_leaf), 2)))
620 old_ply->leaves[slot] =
621 ip4_fib_mtrie_leaf_set_adj_index (a->cover_adj_index);
622 old_ply->dst_address_bits_of_leaves[slot] = a->cover_address_length;
628 ip4_fib_mtrie_route_add (ip4_fib_mtrie_t * m,
629 const ip4_address_t * dst_address,
630 u32 dst_address_length, u32 adj_index)
632 ip4_fib_mtrie_set_unset_leaf_args_t a;
633 ip4_main_t *im = &ip4_main;
635 /* Honor dst_address_length. Fib masks are in network byte order */
636 a.dst_address.as_u32 = (dst_address->as_u32 &
637 im->fib_masks[dst_address_length]);
638 a.dst_address_length = dst_address_length;
639 a.adj_index = adj_index;
641 set_root_leaf (m, &a);
645 ip4_fib_mtrie_route_del (ip4_fib_mtrie_t * m,
646 const ip4_address_t * dst_address,
647 u32 dst_address_length,
649 u32 cover_address_length, u32 cover_adj_index)
651 ip4_fib_mtrie_set_unset_leaf_args_t a;
652 ip4_main_t *im = &ip4_main;
654 /* Honor dst_address_length. Fib masks are in network byte order */
655 a.dst_address.as_u32 = (dst_address->as_u32 &
656 im->fib_masks[dst_address_length]);
657 a.dst_address_length = dst_address_length;
658 a.adj_index = adj_index;
659 a.cover_adj_index = cover_adj_index;
660 a.cover_address_length = cover_address_length;
662 /* the top level ply is never removed */
663 unset_root_leaf (m, &a);
666 /* Returns number of bytes of memory used by mtrie. */
668 mtrie_ply_memory_usage (ip4_fib_mtrie_t * m, ip4_fib_mtrie_8_ply_t * p)
672 bytes = sizeof (p[0]);
673 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
675 ip4_fib_mtrie_leaf_t l = p->leaves[i];
676 if (ip4_fib_mtrie_leaf_is_next_ply (l))
677 bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l));
683 /* Returns number of bytes of memory used by mtrie. */
685 mtrie_memory_usage (ip4_fib_mtrie_t * m)
690 for (i = 0; i < ARRAY_LEN (m->root_ply.leaves); i++)
692 ip4_fib_mtrie_leaf_t l = m->root_ply.leaves[i];
693 if (ip4_fib_mtrie_leaf_is_next_ply (l))
694 bytes += mtrie_ply_memory_usage (m, get_next_ply_for_leaf (m, l));
701 format_ip4_fib_mtrie_leaf (u8 * s, va_list * va)
703 ip4_fib_mtrie_leaf_t l = va_arg (*va, ip4_fib_mtrie_leaf_t);
705 if (ip4_fib_mtrie_leaf_is_terminal (l))
706 s = format (s, "lb-index %d", ip4_fib_mtrie_leaf_get_adj_index (l));
708 s = format (s, "next ply %d", ip4_fib_mtrie_leaf_get_next_ply_index (l));
/* Format one leaf slot of a ply (address/length + leaf), recursing into
 * child plies.  Statement-expression macro: evaluates to the updated
 * format string s.  NB: references 'm' and 'p' from the caller's scope
 * in addition to its parameters. */
#define FORMAT_PLY(s, _p, _i, _base_address, _ply_max_len, _indent)	\
({									\
  u32 a, ia_length;							\
  ip4_address_t ia;							\
  ip4_fib_mtrie_leaf_t _l = p->leaves[(_i)];				\
									\
  a = (_base_address) + ((_i) << (32 - (_ply_max_len)));		\
  ia.as_u32 = clib_host_to_net_u32 (a);					\
  ia_length = (_p)->dst_address_bits_of_leaves[(_i)];			\
  s = format (s, "\n%U%20U %U",						\
	      format_white_space, (_indent) + 2,			\
	      format_ip4_address_and_length, &ia, ia_length,		\
	      format_ip4_fib_mtrie_leaf, _l);				\
									\
  if (ip4_fib_mtrie_leaf_is_next_ply (_l))				\
    s = format (s, "\n%U%U",						\
		format_white_space, (_indent) + 2,			\
		format_ip4_fib_mtrie_ply, m, a,				\
		ip4_fib_mtrie_leaf_get_next_ply_index (_l));		\
  s;									\
})
735 format_ip4_fib_mtrie_ply (u8 * s, va_list * va)
737 ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
738 u32 base_address = va_arg (*va, u32);
739 u32 ply_index = va_arg (*va, u32);
740 ip4_fib_mtrie_8_ply_t *p;
744 p = pool_elt_at_index (ip4_ply_pool, ply_index);
745 indent = format_get_indent (s);
746 s = format (s, "ply index %d, %d non-empty leaves", ply_index,
747 p->n_non_empty_leafs);
749 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
751 if (ip4_fib_mtrie_leaf_is_non_empty (p, i))
753 FORMAT_PLY (s, p, i, base_address,
754 p->dst_address_bits_base + 8, indent);
762 format_ip4_fib_mtrie (u8 * s, va_list * va)
764 ip4_fib_mtrie_t *m = va_arg (*va, ip4_fib_mtrie_t *);
765 ip4_fib_mtrie_16_ply_t *p;
766 u32 base_address = 0;
769 s = format (s, "%d plies, memory usage %U\n",
770 pool_elts (ip4_ply_pool),
771 format_memory_size, mtrie_memory_usage (m));
772 s = format (s, "root-ply");
775 for (i = 0; i < ARRAY_LEN (p->leaves); i++)
779 slot = clib_host_to_net_u16 (i);
781 if (p->dst_address_bits_of_leaves[slot] > 0)
783 FORMAT_PLY (s, p, slot, base_address, 16, 2);
790 static clib_error_t *
791 ip4_mtrie_module_init (vlib_main_t * vm)
793 /* Burn one ply so index 0 is taken */
794 CLIB_UNUSED (ip4_fib_mtrie_8_ply_t * p);
796 pool_get (ip4_ply_pool, p);
801 VLIB_INIT_FUNCTION (ip4_mtrie_module_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */