static void
ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm,
ip4_full_reass_t * reass, u32 bi,
ip4_full_reass_trace_operation_e action,
u32 size_diff, u32 thread_id_to)
}
always_inline void
-ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
+ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip4_full_reass_t *reass)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
if (now > reass->last_heard + rm->timeout)
{
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
reass = NULL;
}
first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
- FINALIZE, 0, ~0);
+ ip4_full_reass_add_trace (vm, node, reass, reass->first_bi, FINALIZE, 0,
+ ~0);
#if 0
// following code does a hexdump of packet fragments to stdout ...
do
always_inline ip4_full_reass_rc_t
ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
- ip4_full_reass_main_t * rm,
- ip4_full_reass_per_thread_t * rt,
ip4_full_reass_t * reass,
u32 prev_range_bi, u32 new_next_bi)
{
always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm,
ip4_full_reass_t * reass,
u32 prev_range_bi, u32 discard_bi)
{
u32 to_be_freed_bi = discard_bi;
if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
- RANGE_DISCARD, 0, ~0);
+ ip4_full_reass_add_trace (vm, node, reass, discard_bi, RANGE_DISCARD,
+ 0, ~0);
}
if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
{
// starting a new reassembly
rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
}
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
- ~0);
+ ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
}
*bi0 = ~0;
reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
~0 == candidate_range_bi)
{
// special case - this fragment falls beyond all known ranges
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (vm, reass,
+ prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
if (fragment_last < candidate_vnb->ip.reass.range_first)
{
// this fragment ends before candidate range without any overlap
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi,
+ *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
// this fragment is a (sub)part of existing range, ignore it
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
+ ip4_full_reass_add_trace (vm, node, reass, *bi0,
RANGE_OVERLAP, 0, ~0);
}
break;
reass->data_len -= overlap;
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass,
+ ip4_full_reass_add_trace (vm, node, reass,
candidate_range_bi,
RANGE_SHRINK, 0, ~0);
}
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi,
- *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (
+ vm, reass, prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
else
{
// special case - last range discarded
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt,
- reass,
- candidate_range_bi,
- *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (
+ vm, reass, candidate_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
{
u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
// discard candidate range, probe next range
- rc =
- ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
- prev_range_bi,
- candidate_range_bi);
+ rc = ip4_full_reass_remove_range_from_chain (
+ vm, node, reass, prev_range_bi, candidate_range_bi);
if (IP4_REASS_RC_OK != rc)
{
return rc;
else
{
// special case - last range discarded
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi,
- *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (
+ vm, reass, prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
{
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
- ~0);
+ ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
}
}
if (~0 != reass->last_packet_octet &&
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1);
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
goto next_packet;
break;
case IP4_REASS_RC_NO_BUF:
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_NO_BUF, 1);
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
goto next_packet;
break;
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_INTERNAL_ERROR,
1);
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
goto next_packet;
break;
{
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
- HANDOFF, 0,
- vnet_buffer (b0)->ip.
- reass.owner_thread_index);
+ ip4_full_reass_add_trace (
+ vm, node, NULL, bi0, HANDOFF, 0,
+ vnet_buffer (b0)->ip.reass.owner_thread_index);
}
}
else if (FEATURE == type && IP4_ERROR_NONE == error0)
return ip4_full_reass_inline (vm, node, frame, NORMAL);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_node) = {
.name = "ip4-full-reassembly",
.vector_size = sizeof (u32),
},
};
-/* *INDENT-ON* */
VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
vlib_node_runtime_t * node,
return ip4_full_reass_inline (vm, node, frame, FEATURE);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
.name = "ip4-full-reassembly-feature",
.vector_size = sizeof (u32),
[IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
.arc_name = "ip4-unicast",
.node_name = "ip4-full-reassembly-feature",
"ipsec4-input-feature"),
.runs_after = 0,
};
-/* *INDENT-ON* */
VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
vlib_node_runtime_t * node,
return ip4_full_reass_inline (vm, node, frame, CUSTOM);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
.name = "ip4-full-reassembly-custom",
.vector_size = sizeof (u32),
[IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-custom-hoff",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
/* Register reassembly on the "ip4-unicast" feature arc.
 * NOTE(review): .node_name points at the feature-mode node
 * ("ip4-full-reassembly-feature") even though this init is named for the
 * custom-mode path — confirm this is intentional and not a copy-paste of
 * the feature-mode registration above. */
VNET_FEATURE_INIT (ip4_full_reass_custom, static) = {
  .arc_name = "ip4-unicast",
  .node_name = "ip4-full-reassembly-feature",
  .runs_after = 0,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
uword
#endif /* CLIB_MARCH_VARIANT */
static uword
-ip4_full_reass_walk_expired (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * f)
+ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
+ CLIB_UNUSED (vlib_frame_t *f))
{
ip4_full_reass_main_t *rm = &ip4_full_reass_main;
uword event_type, *event_data = 0;
switch (event_type)
{
- case ~0: /* no events => timeout */
- /* nothing to do here */
- break;
+ case ~0:
+ /* no events => timeout */
+ /* fallthrough */
case IP4_EVENT_CONFIG_CHANGED:
+ /* nothing to do here */
break;
default:
clib_warning ("BUG: event type 0x%wx", event_type);
clib_spinlock_lock (&rt->lock);
vec_reset_length (pool_indexes_to_free);
- /* *INDENT-OFF* */
pool_foreach_index (index, rt->pool) {
reass = pool_elt_at_index (rt->pool, index);
if (now > reass->last_heard + rm->timeout)
vec_add1 (pool_indexes_to_free, index);
}
}
- /* *INDENT-ON* */
int *i;
- /* *INDENT-OFF* */
vec_foreach (i, pool_indexes_to_free)
{
ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
- ip4_full_reass_drop_all (vm, node, rm, reass);
- ip4_full_reass_free (rm, rt, reass);
- }
- /* *INDENT-ON* */
+ ip4_full_reass_drop_all (vm, node, reass);
+ ip4_full_reass_free (rm, rt, reass);
+ }
clib_spinlock_unlock (&rt->lock);
}
return 0;
}
-/* *INDENT-OFF* */
/* Process-type node: runs ip4_full_reass_walk_expired in the main loop to
 * reap reassembly contexts whose timeout has elapsed (see the
 * last_heard + rm->timeout check in the walk function above). */
VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
  .function = ip4_full_reass_walk_expired,
  .type = VLIB_NODE_TYPE_PROCESS,
  .error_strings = ip4_full_reass_error_strings,
};
-/* *INDENT-ON* */
static u8 *
format_ip4_full_reass_key (u8 * s, va_list * args)
clib_spinlock_lock (&rt->lock);
if (details)
{
- /* *INDENT-OFF* */
pool_foreach (reass, rt->pool) {
vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
}
- /* *INDENT-ON* */
}
sum_reass_n += rt->reass_n;
clib_spinlock_unlock (&rt->lock);
vlib_cli_output (vm,
"Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
(long unsigned) rm->max_reass_n);
+ vlib_cli_output (vm,
+ "Maximum configured amount of fragments "
+ "per full IP4 reassembly: %lu\n",
+ (long unsigned) rm->max_reass_len);
vlib_cli_output (vm,
"Maximum configured full IP4 reassembly timeout: %lums\n",
(long unsigned) rm->timeout_ms);
return 0;
}
-/* *INDENT-OFF* */
/* CLI command "show ip4-full-reassembly [details]" — dispatches to
 * show_ip4_reass, which dumps per-thread reassembly state and the
 * configured limits/timeouts. */
VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
  .path = "show ip4-full-reassembly",
  .short_help = "show ip4-full-reassembly [details]",
  .function = show_ip4_reass,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
vnet_api_error_t
ti += 1;
b += 1;
}
- n_enq =
- vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
- frame->n_vectors, 1);
+ n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
+ thread_indices, frame->n_vectors, 1);
if (n_enq < frame->n_vectors)
vlib_node_increment_counter (vm, node->node_index,
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
.name = "ip4-full-reassembly-handoff",
.vector_size = sizeof (u32),
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
vlib_node_runtime_t *
node,
{
return ip4_full_reass_handoff_node_inline (vm, node, frame, FEATURE);
}
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
.name = "ip4-full-reass-feature-hoff",
.vector_size = sizeof (u32),
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
vlib_node_runtime_t *
node,
{
return ip4_full_reass_handoff_node_inline (vm, node, frame, CUSTOM);
}
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
.name = "ip4-full-reass-custom-hoff",
.vector_size = sizeof (u32),
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
int