u32 fq_index;
u32 fq_feature_index;
+ // reference count for enabling/disabling feature - per interface
+ u32 *feature_use_refcount_per_intf;
} ip6_full_reass_main_t;
extern ip6_full_reass_main_t ip6_full_reass_main;
u32 total_data_len;
u32 thread_id;
u32 thread_id_to;
+ bool is_after_handoff;
+ ip6_header_t ip6_header;
+ ip6_frag_hdr_t ip6_frag_header;
} ip6_full_reass_trace_t;
static void
u32 indent = 0;
if (~0 != t->reass_id)
{
- s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
+ if (t->is_after_handoff)
+ {
+ s =
+ format (s, "%U\n", format_ip6_header, &t->ip6_header,
+ sizeof (t->ip6_header));
+ s =
+ format (s, " %U\n", format_ip6_frag_hdr, &t->ip6_frag_header,
+ sizeof (t->ip6_frag_header));
+ indent = 2;
+ }
+ s =
+ format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
+ t->reass_id, t->op_id);
indent = format_get_indent (s);
s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",
t->trace_range.first_bi, t->total_data_len,
ip6_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_full_reass_main_t * rm,
ip6_full_reass_t * reass, u32 bi,
+ ip6_frag_hdr_t * ip6_frag_header,
ip6_full_reass_trace_operation_e action,
u32 thread_id_to)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
+ bool is_after_handoff = false;
+ if (pool_is_free_index
+ (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
+ {
+ // this buffer's trace is gone
+ b->flags &= ~VLIB_BUFFER_IS_TRACED;
+ return;
+ }
+ if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
+ {
+ is_after_handoff = true;
+ }
ip6_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
+ t->is_after_handoff = is_after_handoff;
+ if (t->is_after_handoff)
+ {
+ clib_memcpy (&t->ip6_header, vlib_buffer_get_current (b),
+ clib_min (sizeof (t->ip6_header), b->current_length));
+ if (ip6_frag_header)
+ {
+ clib_memcpy (&t->ip6_frag_header, ip6_frag_header,
+ sizeof (t->ip6_frag_header));
+ }
+ else
+ {
+ clib_memset (&t->ip6_frag_header, 0, sizeof (t->ip6_frag_header));
+ }
+ }
if (reass)
{
t->reass_id = reass->id;
if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
{
ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
- ICMP_ERROR_RT_EXCEEDED, ~0);
+ NULL, ICMP_ERROR_RT_EXCEEDED, ~0);
}
// fragment with offset zero received - send icmp message back
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
reass = NULL;
now = vlib_time_now (vm);
- if (!clib_bihash_search_48_8
- (&rm->hash, (clib_bihash_kv_48_8_t *) kv, (clib_bihash_kv_48_8_t *) kv))
+ if (!clib_bihash_search_48_8 (&rm->hash, &kv->kv, &kv->kv))
{
- reass =
- pool_elt_at_index (rm->per_thread_data
- [kv->v.memory_owner_thread_index].pool,
- kv->v.reass_index);
if (vm->thread_index != kv->v.memory_owner_thread_index)
{
*do_handoff = 1;
- return reass;
+ return NULL;
}
+ reass =
+ pool_elt_at_index (rm->per_thread_data
+ [kv->v.memory_owner_thread_index].pool,
+ kv->v.reass_index);
+
if (now > reass->last_heard + rm->timeout)
{
ip6_full_reass_on_timeout (vm, node, rm, reass, icmp_bi);
++rt->reass_n;
}
- reass->key.as_u64[0] = ((clib_bihash_kv_48_8_t *) kv)->key[0];
- reass->key.as_u64[1] = ((clib_bihash_kv_48_8_t *) kv)->key[1];
- reass->key.as_u64[2] = ((clib_bihash_kv_48_8_t *) kv)->key[2];
- reass->key.as_u64[3] = ((clib_bihash_kv_48_8_t *) kv)->key[3];
- reass->key.as_u64[4] = ((clib_bihash_kv_48_8_t *) kv)->key[4];
- reass->key.as_u64[5] = ((clib_bihash_kv_48_8_t *) kv)->key[5];
+ reass->key.as_u64[0] = kv->kv.key[0];
+ reass->key.as_u64[1] = kv->kv.key[1];
+ reass->key.as_u64[2] = kv->kv.key[2];
+ reass->key.as_u64[3] = kv->kv.key[3];
+ reass->key.as_u64[4] = kv->kv.key[4];
+ reass->key.as_u64[5] = kv->kv.key[5];
kv->v.reass_index = (reass - rt->pool);
kv->v.memory_owner_thread_index = vm->thread_index;
reass->last_heard = now;
- int rv =
- clib_bihash_add_del_48_8 (&rm->hash, (clib_bihash_kv_48_8_t *) kv, 2);
+ int rv = clib_bihash_add_del_48_8 (&rm->hash, &kv->kv, 2);
if (rv)
{
ip6_full_reass_free (rm, rt, reass);
first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
+ ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi, NULL,
FINALIZE, ~0);
#if 0
// following code does a hexdump of packet fragments to stdout ...
// overlapping fragment - not allowed by RFC 8200
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, reass, *bi0,
+ ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, frag_hdr,
RANGE_OVERLAP, ~0);
}
ip6_full_reass_drop_all (vm, node, rm, reass);
{
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, ~0);
+ ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, frag_hdr,
+ RANGE_NEW, ~0);
}
}
if (~0 != reass->last_packet_octet &&
next0 = IP6_FULL_REASSEMBLY_NEXT_INPUT;
goto skip_reass;
}
+ vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
+ (u8 *) frag_hdr - (u8 *) ip0;
+
if (0 == ip6_frag_hdr_offset (frag_hdr))
{
// first fragment - verify upper-layer is present
next0 = IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR;
goto skip_reass;
}
- vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
- (u8 *) frag_hdr - (u8 *) ip0;
-
ip6_full_reass_kv_t kv;
u8 do_handoff = 0;
error0 = IP6_ERROR_REASS_LIMIT_REACHED;
}
- b0->error = node->errors[error0];
-
if (~0 != bi0)
{
skip_reass:
to_next[0] = bi0;
to_next += 1;
n_left_to_next -= 1;
+
+ /* bi0 might have been updated by reass_finalize, reload */
+ b0 = vlib_get_buffer (vm, bi0);
+ if (IP6_ERROR_NONE != error0)
+ {
+ b0->error = node->errors[error0];
+ }
+
if (next0 == IP6_FULL_REASSEMBLY_NEXT_HANDOFF)
{
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
- HANDOFF,
+ frag_hdr, HANDOFF,
vnet_buffer (b0)->ip.
reass.owner_thread_index);
}
}
else if (is_feature && IP6_ERROR_NONE == error0)
{
- b0 = vlib_get_buffer (vm, bi0);
vnet_feature_next (&next0, b0);
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
clib_bihash_48_8_t *new_hash;
} ip6_rehash_cb_ctx;
-static void
+static int
ip6_rehash_cb (clib_bihash_kv_48_8_t * kv, void *_ctx)
{
ip6_rehash_cb_ctx *ctx = _ctx;
{
ctx->failure = 1;
}
+ return (BIHASH_WALK_CONTINUE);
}
static void
rm->fq_feature_index =
vlib_frame_queue_main_init (ip6_full_reass_node_feature.index, 0);
+ rm->feature_use_refcount_per_intf = NULL;
return error;
}
vec_reset_length (pool_indexes_to_free);
/* *INDENT-OFF* */
- pool_foreach_index (index, rt->pool, ({
+ pool_foreach_index (index, rt->pool) {
reass = pool_elt_at_index (rt->pool, index);
if (now > reass->last_heard + rm->timeout)
{
vec_add1 (pool_indexes_to_free, index);
}
- }));
+ }
/* *INDENT-ON* */
int *i;
/* *INDENT-OFF* */
if (details)
{
/* *INDENT-OFF* */
- pool_foreach (reass, rt->pool, {
+ pool_foreach (reass, rt->pool) {
vlib_cli_output (vm, "%U", format_ip6_full_reass, vm, reass);
- });
+ }
/* *INDENT-ON* */
}
sum_reass_n += rt->reass_n;
vlib_cli_output (vm, "---------------------");
vlib_cli_output (vm, "Current IP6 reassemblies count: %lu\n",
(long unsigned) sum_reass_n);
- vlib_cli_output (vm, "Maximum configured concurrent IP6 reassemblies per "
- "worker-thread: %lu\n", (long unsigned) rm->max_reass_n);
+ vlib_cli_output (vm,
+ "Maximum configured concurrent full IP6 reassemblies per worker-thread: %lu\n",
+ (long unsigned) rm->max_reass_n);
+ vlib_cli_output (vm,
+ "Maximum configured amount of fragments "
+ "per full IP6 reassembly: %lu\n",
+ (long unsigned) rm->max_reass_len);
+ vlib_cli_output (vm,
+ "Maximum configured full IP6 reassembly timeout: %lums\n",
+ (long unsigned) rm->timeout_ms);
+ vlib_cli_output (vm,
+ "Maximum configured full IP6 reassembly expire walk interval: %lums\n",
+ (long unsigned) rm->expire_walk_interval_ms);
vlib_cli_output (vm, "Buffers in use: %lu\n",
(long unsigned) sum_buffers_n);
return 0;
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
+/**
+ * @brief Per-interface reference-counted enable/disable of full IP6
+ * reassembly.
+ *
+ * Maintains a refcount per sw_if_index; the "ip6-full-reassembly-feature"
+ * node on the "ip6-unicast" feature arc is enabled only on the 0 -> 1
+ * transition and disabled only on the 1 -> 0 transition. Intermediate
+ * calls just adjust the counter.
+ *
+ * @param sw_if_index interface whose feature refcount is adjusted
+ * @param is_enable   non-zero to take a reference, zero to release one
+ * @return result of vnet_feature_enable_disable() when the feature is
+ *         actually toggled; -1 when only the refcount changed
+ *         (NOTE(review): -1 here is a no-op, not an error - confirm
+ *         callers treat it that way)
+ */
+int
+ip6_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
+{
+ ip6_full_reass_main_t *rm = &ip6_full_reass_main;
+ // grow the per-interface refcount vector on demand so any sw_if_index
+ // is addressable (new entries are zero-initialized)
+ vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
+ if (is_enable)
+ {
+ if (!rm->feature_use_refcount_per_intf[sw_if_index])
+ {
+ // first user on this interface - turn the arc feature on
+ ++rm->feature_use_refcount_per_intf[sw_if_index];
+ return vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-full-reassembly-feature",
+ sw_if_index, 1, 0, 0);
+ }
+ ++rm->feature_use_refcount_per_intf[sw_if_index];
+ }
+ else
+ {
+ // NOTE(review): a disable while the refcount is already 0 wraps the
+ // u32 - assumes callers keep enable/disable calls balanced; confirm
+ --rm->feature_use_refcount_per_intf[sw_if_index];
+ if (!rm->feature_use_refcount_per_intf[sw_if_index])
+ // last user gone - turn the arc feature off
+ return vnet_feature_enable_disable ("ip6-unicast",
+ "ip6-full-reassembly-feature",
+ sw_if_index, 0, 0, 0);
+ }
+ return -1;
+}
+#endif
+
/*
* fd.io coding-style-patch-verification: ON
*