It turns out that for scalar sizes 0..24, frames are always the same
size. That range covers all current use cases, and then some, so get
rid of the frame-size hash table. The old hash-based code is preserved
under #ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES.
Change-Id: Ic005c7143c9639f77d1a0fadd2fc0e90dccb68c1
Signed-off-by: Dave Barach <[email protected]>
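
Why the 0..24 range collapses to a single frame size: the vector data
offset within a frame is the frame header plus the scalar-argument area,
rounded up to an alignment boundary, so small scalar sizes disappear into
the rounding padding. The standalone sketch below illustrates the
arithmetic; HDR_BYTES and VEC_ALIGN are hypothetical stand-ins (not
VPP's actual constants), chosen only to reproduce the 0..24 boundary
quoted above.

/* Standalone sketch (not VPP code): why scalar sizes 0..24 can share
 * one frame size.  HDR_BYTES and VEC_ALIGN are hypothetical stand-ins. */
#include <stdio.h>
#include <stdint.h>

#define HDR_BYTES 8		/* stand-in for the frame header size */
#define VEC_ALIGN 32		/* stand-in for the vector-data alignment */

static uint32_t
round_up_pow2 (uint32_t x, uint32_t pow2)
{
  return (x + pow2 - 1) & ~(pow2 - 1);
}

/* Rough analogue of a frame-size computation: header plus scalar
 * arguments, rounded up to the vector-data alignment.  The per-vector
 * portion is omitted because it is identical for every scalar size. */
static uint32_t
frame_bytes (uint32_t n_scalar_bytes)
{
  return round_up_pow2 (HDR_BYTES + n_scalar_bytes, VEC_ALIGN);
}

int
main (void)
{
  uint32_t s;
  for (s = 0; s <= 25; s++)
    printf ("scalar %2u -> %u frame bytes\n", s, frame_bytes (s));
  /* Prints the same value for scalar sizes 0..24 and a larger one for
   * 25, so a single vlib_frame_size_t entry (index 0) suffices for the
   * whole supported range. */
  return 0;
}

The ASSERT added to get_frame_size_info() below encodes the same claim:
in the default build, every requested (scalar, vector) combination must
compute the same byte count as vlib_frame_bytes (0, 4), i.e. a frame
with no scalar data and 4-byte (u32 buffer index) vector elements, which
is the single entry kept at frame_sizes index 0.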
return p;
}
-static vlib_frame_size_t *
+static inline vlib_frame_size_t *
get_frame_size_info (vlib_node_main_t * nm,
u32 n_scalar_bytes, u32 n_vector_bytes)
{
+#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
uword key = (n_scalar_bytes << 16) | n_vector_bytes;
uword *p, i;
}
return vec_elt_at_index (nm->frame_sizes, i);
+#else
+ ASSERT (vlib_frame_bytes (n_scalar_bytes, n_vector_bytes)
+ == (vlib_frame_bytes (0, 4)));
+ return vec_elt_at_index (nm->frame_sizes, 0);
+#endif
}
static u32
vlib_node_t *n;
uword ni;
+ nm->frame_sizes = vec_new (vlib_frame_size_t, 1);
+#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
nm->frame_size_hash = hash_create (0, sizeof (uword));
+#endif
nm->flags |= VLIB_NODE_MAIN_RUNTIME_STARTED;
/* Generate sibling relationships */
nm_clone->processes = vec_dup_aligned (nm->processes,
CLIB_CACHE_LINE_BYTES);
- /* zap the (per worker) frame freelists, etc */
- nm_clone->frame_sizes = 0;
+ /* Create per-thread frame freelist */
+ nm_clone->frame_sizes = vec_new (vlib_frame_size_t, 1);
+#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
+#endif
/* Packet trace buffers are guaranteed to be empty, nothing to do here */