if (PREDICT_FALSE (do_handoff))
{
next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
- if (is_feature)
- vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
- kv.v.memory_owner_thread_index;
- else
- vnet_buffer (b0)->ip.reass.owner_thread_index =
- kv.v.memory_owner_thread_index;
+ vnet_buffer (b0)->ip.reass.owner_thread_index =
+ kv.v.memory_owner_thread_index;
}
else if (reass)
{
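
This first hunk is the core of the change: the feature and non-feature arcs used to store the bihash owner's thread in two different metadata fields, and both now use the single owner_thread_index. A minimal standalone sketch of the resulting pattern (plain C; every name in it is invented for illustration and none of it is the VPP API):

/* Standalone sketch, invented names: one metadata field replaces the old
 * owner_feature_thread_index / owner_thread_index pair, so the store is
 * unconditional. */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t owner_thread_index;	/* single field serving both arcs */
} reass_meta_t;

static void
record_handoff (reass_meta_t * meta, uint32_t memory_owner_thread_index)
{
  /* previously: if (is_feature) store field A; else store field B */
  meta->owner_thread_index = memory_owner_thread_index;
}

int
main (void)
{
  reass_meta_t meta;
  record_handoff (&meta, 3);
  printf ("handoff to thread %u\n", meta.owner_thread_index);
  return 0;
}

Dropping the second field also drops a per-packet branch, which is presumably part of the point of the unification.
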
@@ ... @@
case IP6_FULL_REASS_RC_HANDOFF:
next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
b0 = vlib_get_buffer (vm, bi0);
- if (is_feature)
- vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
- handoff_thread_idx;
- else
- vnet_buffer (b0)->ip.reass.owner_thread_index =
- handoff_thread_idx;
+ vnet_buffer (b0)->ip.reass.owner_thread_index =
+ handoff_thread_idx;
break;
case IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS:
vlib_node_increment_counter (vm, node->node_index,
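
The same collapse repeats on the ip6_full_reass_update() return path: IP6_FULL_REASS_RC_HANDOFF signals that another worker owns the reassembly context, so the caller records the destination thread once and steers the buffer to the handoff next index. A standalone sketch of that return-code dispatch (enum values and function names below are invented, not the VPP definitions):

/* Standalone sketch, invented names: return-code-driven dispatch in the
 * caller; the RC_HANDOFF arm records the destination exactly once. */
#include <stdint.h>
#include <stdio.h>

typedef enum
{
  RC_OK = 0,
  RC_HANDOFF,			/* context owned by another thread */
  RC_TOO_MANY_FRAGMENTS,	/* per-reassembly fragment limit hit */
} reass_rc_t;

typedef enum
{
  NEXT_INPUT = 0,
  NEXT_HANDOFF,
  NEXT_DROP,
} next_t;

static next_t
dispatch (reass_rc_t rc, uint32_t handoff_thread_idx,
	  uint32_t * owner_thread_index)
{
  switch (rc)
    {
    case RC_OK:
      return NEXT_INPUT;
    case RC_HANDOFF:
      /* record the destination once; the handoff node reads it back */
      *owner_thread_index = handoff_thread_idx;
      return NEXT_HANDOFF;
    case RC_TOO_MANY_FRAGMENTS:
      return NEXT_DROP;		/* error counter bump elided here */
    }
  return NEXT_DROP;
}

int
main (void)
{
  uint32_t owner = 0;
  next_t n = dispatch (RC_HANDOFF, 2, &owner);
  printf ("next %d, owner %u\n", (int) n, owner);
  return 0;
}
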
@@ ... @@
{
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- if (is_feature)
- ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
- HANDOFF,
- vnet_buffer (b0)->ip.
- reass.owner_feature_thread_index);
- else
- ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
- HANDOFF,
- vnet_buffer (b0)->ip.
- reass.owner_thread_index);
+ ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
+ HANDOFF,
+ vnet_buffer (b0)->ip.
+ reass.owner_thread_index);
}
}
else if (is_feature && IP6_ERROR_NONE == error0)
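
Tracing follows suit: with a single field left to report, the HANDOFF trace call no longer needs an arc-dependent branch around it. A small standalone sketch (invented names, not the VPP trace machinery):

/* Standalone sketch, invented names: the trace hook takes the destination
 * thread as a plain argument, so the caller has nothing left to branch on. */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t owner_thread_index;
} reass_meta_t;

static void
add_handoff_trace (uint32_t thread_index)
{
  printf ("HANDOFF to thread %u\n", thread_index);
}

int
main (void)
{
  reass_meta_t meta = {.owner_thread_index = 5 };
  add_handoff_trace (meta.owner_thread_index);
  return 0;
}
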
@@ ... @@
while (n_left_from > 0)
{
- ti[0] =
- (is_feature) ? vnet_buffer (b[0])->ip.
- reass.owner_feature_thread_index : vnet_buffer (b[0])->ip.
- reass.owner_thread_index;
+ ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
if (PREDICT_FALSE
((node->flags & VLIB_NODE_FLAG_TRACE)
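
The consumer side in the handoff node shrinks the same way: the ternary on is_feature becomes one unconditional load per packet when filling the thread-index vector. A standalone sketch of that loop (invented names; in VPP the ti[] vector is then handed to vlib_buffer_enqueue_to_thread(), elided here):

/* Standalone sketch, invented names: fill one destination thread index per
 * packet from the unified metadata field. */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t owner_thread_index;
} reass_meta_t;

int
main (void)
{
  reass_meta_t bufs[4] = { {1}, {0}, {2}, {1} };
  uint16_t ti[4];
  int n_left_from = 4, i = 0;

  while (n_left_from > 0)
    {
      /* one unconditional load replaces the is_feature ternary */
      ti[i] = (uint16_t) bufs[i].owner_thread_index;
      i++;
      n_left_from--;
    }

  for (i = 0; i < 4; i++)
    printf ("packet %d -> thread %u\n", i, ti[i]);
  return 0;
}
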