u32 fq_index;
u32 fq_feature_index;
+ // reference count for enabling/disabling feature - per interface
+ u32 *feature_use_refcount_per_intf;
} ip4_full_reass_main_t;
extern ip4_full_reass_main_t ip4_full_reass_main;
reass->data_len == reass->last_packet_octet + 1)
{
*handoff_thread_idx = reass->sendout_thread_index;
+ int handoff =
+ reass->memory_owner_thread_index != reass->sendout_thread_index;
rc =
ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
is_custom_app);
- if (IP4_REASS_RC_OK == rc
- && reass->memory_owner_thread_index != reass->sendout_thread_index)
+ if (IP4_REASS_RC_OK == rc && handoff)
{
rc = IP4_REASS_RC_HANDOFF;
}
rm->fq_feature_index =
vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
+ rm->feature_use_refcount_per_intf = NULL;
return error;
}
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
+/**
+ * @brief Enable/disable the "ip4-full-reassembly-feature" arc on an
+ * interface with per-interface reference counting, so multiple users can
+ * share the feature without toggling it underneath each other.
+ *
+ * @param sw_if_index software interface index
+ * @param is_enable   non-zero to take a reference, zero to release one
+ * @return result of vnet_feature_enable_disable() when the feature is
+ *         actually toggled (first enable / last disable); -1 when only the
+ *         refcount changed, or on a disable with no outstanding reference
+ */
+int
+ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
+{
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
+  /* make sure the refcount vector covers this interface */
+  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
+  if (is_enable)
+    {
+      if (!rm->feature_use_refcount_per_intf[sw_if_index])
+	{
+	  ++rm->feature_use_refcount_per_intf[sw_if_index];
+	  /* first reference - actually turn the feature on */
+	  return vnet_feature_enable_disable ("ip4-unicast",
+					      "ip4-full-reassembly-feature",
+					      sw_if_index, 1, 0, 0);
+	}
+      ++rm->feature_use_refcount_per_intf[sw_if_index];
+    }
+  else
+    {
+      /* guard against u32 underflow when disable is called without a
+       * matching enable - would wrap to 0xffffffff and wedge the count */
+      if (!rm->feature_use_refcount_per_intf[sw_if_index])
+	return -1;
+      --rm->feature_use_refcount_per_intf[sw_if_index];
+      if (!rm->feature_use_refcount_per_intf[sw_if_index])
+	{
+	  /* last reference released - actually turn the feature off */
+	  return vnet_feature_enable_disable ("ip4-unicast",
+					      "ip4-full-reassembly-feature",
+					      sw_if_index, 0, 0, 0);
+	}
+    }
+  return -1;
+}
+#endif
+
/*
* fd.io coding-style-patch-verification: ON
*