{
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
+ if (pool_is_free_index
+ (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
+ {
+ /* this buffer's trace is gone */
+ b->flags &= ~VLIB_BUFFER_IS_TRACED;
+ return;
+ }
bool is_after_handoff = false;
if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
{
{
vlib_buffer_free (vm, to_free, vec_len (to_free));
}
+ vec_free (to_free);
}
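The guard added at the top of this hunk leans on vppinfra's pool liveness check: pool_is_free_index() reports whether the slot behind a saved index has been returned to the pool, so a stale trace handle can be detected before it is dereferenced. A minimal, self-contained sketch of that check, assuming a standalone vppinfra build (the element type and names are illustrative, not from the patch):

#include <vppinfra/mem.h>
#include <vppinfra/pool.h>

typedef struct { u32 value; } elt_t;	/* illustrative element type */

int
main (void)
{
  clib_mem_init (0, 64 << 20);	/* vppinfra heap must exist before pool_get */

  elt_t *pool = 0, *e;
  pool_get (pool, e);		/* allocate one element */
  u32 saved_index = e - pool;	/* keep the index, as the trace code does */

  ASSERT (!pool_is_free_index (pool, saved_index));

  pool_put_index (pool, saved_index);	/* element freed behind our back */
  ASSERT (pool_is_free_index (pool, saved_index)); /* stale handle detected */

  return 0;
}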
always_inline void
vec_reset_length (pool_indexes_to_free);
/* *INDENT-OFF* */
- pool_foreach_index (index, rt->pool, ({
+ pool_foreach_index (index, rt->pool) {
reass = pool_elt_at_index (rt->pool, index);
if (now > reass->last_heard + rm->timeout)
{
vec_add1 (pool_indexes_to_free, index);
}
- }));
+ }
/* *INDENT-ON* */
int *i;
if (details)
{
/* *INDENT-OFF* */
- pool_foreach (reass, rt->pool, {
+ pool_foreach (reass, rt->pool) {
vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
- });
+ }
/* *INDENT-ON* */
}
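Both converted walks above use the reworked vppinfra pool iterators, whose loop body is now a plain C block rather than a ({ ... }) statement-expression argument, which is why the trailing "}));" becomes a bare "}". A minimal sketch of the new form, assuming a standalone vppinfra build; the reass_t type and sweep_expired name are illustrative:

#include <vppinfra/mem.h>
#include <vppinfra/pool.h>

typedef struct { u32 last_heard; } reass_t;	/* illustrative type */

static void
sweep_expired (reass_t * pool, u32 now, u32 timeout)
{
  u32 *pool_indexes_to_free = 0;
  reass_t *reass;
  u32 index;
  u32 *i;

  /* new style: the loop body is an ordinary C block after the macro */
  pool_foreach_index (index, pool)
    {
      reass = pool_elt_at_index (pool, index);
      if (now > reass->last_heard + timeout)
	vec_add1 (pool_indexes_to_free, index);
    }

  /* put elements back outside the walk - puts during iteration are unsafe */
  vec_foreach (i, pool_indexes_to_free)
    {
      pool_put_index (pool, *i);
    }
  vec_free (pool_indexes_to_free);

  /* the element-wise variant uses the same block syntax */
  pool_foreach (reass, pool)
    {
      reass->last_heard = now;
    }
}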
sum_reass_n += rt->reass_n;
vlib_cli_output (vm,
"Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
(long unsigned) rm->max_reass_n);
+ vlib_cli_output (vm,
+ "Maximum configured amount of fragments "
+ "per full IP4 reassembly: %lu\n",
+ (long unsigned) rm->max_reass_len);
vlib_cli_output (vm,
"Maximum configured full IP4 reassembly timeout: %lums\n",
(long unsigned) rm->timeout_ms);
ti += 1;
b += 1;
}
- n_enq =
- vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
- frame->n_vectors, 1);
+ n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
+ thread_indices, frame->n_vectors, 1);
if (n_enq < frame->n_vectors)
vlib_node_increment_counter (vm, node->node_index,
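The reworked call reflects the updated vlib_buffer_enqueue_to_thread() signature, which now takes the node runtime so congestion drops can be charged to the calling node. A hedged sketch of a handoff dispatch function built on that signature; the function name, round-robin thread choice, frame-queue index and error index are illustrative, not taken from the patch:

#include <vlib/vlib.h>
#include <vlib/buffer_node.h>

/* illustrative; a real value comes from vlib_frame_queue_main_init() */
static u32 example_fq_index;

static uword
example_handoff_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u16 thread_indices[VLIB_FRAME_SIZE];
  u32 n_enq, i;

  /* choose a destination thread per packet; round-robin for illustration */
  for (i = 0; i < frame->n_vectors; i++)
    thread_indices[i] = i % (vlib_num_workers () + 1);

  /* new signature: the node runtime is passed so drops on congestion are
     accounted against this node */
  n_enq = vlib_buffer_enqueue_to_thread (vm, node, example_fq_index, from,
					 thread_indices, frame->n_vectors,
					 1 /* drop on congestion */);
  if (n_enq < frame->n_vectors)
    vlib_node_increment_counter (vm, node->node_index,
				 0 /* illustrative error index */,
				 frame->n_vectors - n_enq);
  return frame->n_vectors;
}

A real node would additionally need a VLIB_REGISTER_NODE registration and an initialized frame queue; both are omitted from this sketch.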