u32 fq_index;
u32 fq_feature_index;
+ // reference count for enabling/disabling feature - per interface
+ u32 *feature_use_refcount_per_intf;
} ip6_full_reass_main_t;
extern ip6_full_reass_main_t ip6_full_reass_main;
else
{
// overlapping fragment - not allowed by RFC 8200
- ip6_full_reass_drop_all (vm, node, rm, reass);
- ip6_full_reass_free (rm, rt, reass);
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
ip6_full_reass_add_trace (vm, node, rm, reass, *bi0,
RANGE_OVERLAP, ~0);
}
+ ip6_full_reass_drop_all (vm, node, rm, reass);
+ ip6_full_reass_free (rm, rt, reass);
*next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
*error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
return IP6_FULL_REASS_RC_OK;
reass->data_len == reass->last_packet_octet + 1)
{
*handoff_thread_idx = reass->sendout_thread_index;
+ int handoff =
+ reass->memory_owner_thread_index != reass->sendout_thread_index;
ip6_full_reass_rc_t rc =
ip6_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
is_custom_app);
- if (IP6_FULL_REASS_RC_OK == rc
- && reass->memory_owner_thread_index != reass->sendout_thread_index)
+ if (IP6_FULL_REASS_RC_OK == rc && handoff)
{
return IP6_FULL_REASS_RC_HANDOFF;
}
if (PREDICT_FALSE (do_handoff))
{
next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
- if (is_feature)
- vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
- kv.v.memory_owner_thread_index;
- else
- vnet_buffer (b0)->ip.reass.owner_thread_index =
- kv.v.memory_owner_thread_index;
+ vnet_buffer (b0)->ip.reass.owner_thread_index =
+ kv.v.memory_owner_thread_index;
}
else if (reass)
{
case IP6_FULL_REASS_RC_HANDOFF:
next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
b0 = vlib_get_buffer (vm, bi0);
- if (is_feature)
- vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
- handoff_thread_idx;
- else
- vnet_buffer (b0)->ip.reass.owner_thread_index =
- handoff_thread_idx;
+ vnet_buffer (b0)->ip.reass.owner_thread_index =
+ handoff_thread_idx;
break;
case IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS:
vlib_node_increment_counter (vm, node->node_index,
{
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- if (is_feature)
- ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
- HANDOFF,
- vnet_buffer (b0)->ip.
- reass.owner_feature_thread_index);
- else
- ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
- HANDOFF,
- vnet_buffer (b0)->ip.
- reass.owner_thread_index);
+ ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
+ HANDOFF,
+ vnet_buffer (b0)->ip.
+ reass.owner_thread_index);
}
}
else if (is_feature && IP6_ERROR_NONE == error0)
rm->fq_feature_index =
vlib_frame_queue_main_init (ip6_full_reass_node_feature.index, 0);
+ rm->feature_use_refcount_per_intf = NULL;
return error;
}
while (n_left_from > 0)
{
- ti[0] =
- (is_feature) ? vnet_buffer (b[0])->ip.
- reass.owner_feature_thread_index : vnet_buffer (b[0])->ip.
- reass.owner_thread_index;
+ ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
if (PREDICT_FALSE
((node->flags & VLIB_NODE_FLAG_TRACE)
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
+/**
+ * @brief Reference-counted enable/disable of the ip6 full-reassembly feature
+ *        on an interface.
+ *
+ * Multiple users may request the feature on the same interface; the
+ * underlying vnet feature arc is only toggled on the 0->1 and 1->0
+ * refcount transitions.
+ *
+ * @param sw_if_index interface index to operate on
+ * @param is_enable   non-zero to take a reference, zero to release one
+ * @return rv of vnet_feature_enable_disable when the feature arc state
+ *         actually changed, -1 when only the refcount was adjusted
+ */
+int
+ip6_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
+{
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
+  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
+  if (is_enable)
+    {
+      if (!rm->feature_use_refcount_per_intf[sw_if_index])
+	{
+	  ++rm->feature_use_refcount_per_intf[sw_if_index];
+	  return vnet_feature_enable_disable ("ip6-unicast",
+					      "ip6-full-reassembly-feature",
+					      sw_if_index, 1, 0, 0);
+	}
+      ++rm->feature_use_refcount_per_intf[sw_if_index];
+    }
+  else
+    {
+      /* guard the decrement: an unbalanced disable must not underflow the
+       * u32 refcount (wrapping to 0xFFFFFFFF would wedge the feature on) */
+      if (rm->feature_use_refcount_per_intf[sw_if_index])
+	--rm->feature_use_refcount_per_intf[sw_if_index];
+      if (!rm->feature_use_refcount_per_intf[sw_if_index])
+	return vnet_feature_enable_disable ("ip6-unicast",
+					    "ip6-full-reassembly-feature",
+					    sw_if_index, 0, 0, 0);
+    }
+  return -1;
+}
+#endif
+
/*
* fd.io coding-style-patch-verification: ON
*