This change is made to fix a crash, because the is_feature flag semantics
turn out to be different from "custom app code" semantics. Introduce a flag
which custom plugins/apps can use instead of tying that code to the
is_feature flag.
Change-Id: Ief5898711e68529f9306cfac54c4dc9b3650f9e3
Ticket: N/A
Type: fix
Fixes:
21aa8f1022590b8b5caf819b4bbd485de0f1dfe5
Signed-off-by: Klement Sekera <ksekera@cisco.com>
Signed-off-by: Ole Troan <ot@cisco.com>
/* in/out variables */
struct
{
/* in/out variables */
struct
{
- u32 next_index; /* index of next node - ignored if "feature" node */
- u32 error_next_index; /* index of next node if error - ignored if 'feature' node */
+ u32 next_index; /* index of next node - used by custom apps */
+ u32 error_next_index; /* index of next node if error - used by custom apps */
u16 estimated_mtu; /* estimated MTU calculated during reassembly */
u16 owner_thread_index;
};
u16 estimated_mtu; /* estimated MTU calculated during reassembly */
u16 owner_thread_index;
};
u32 trace_op_counter;
// next index - used by non-feature node
u32 next_index;
u32 trace_op_counter;
// next index - used by non-feature node
u32 next_index;
- // error next index - used by non-feature node
+ // error next index - used by custom apps (~0 if not used)
- // is_feature flag stored for non-inline code use
- bool is_feature;
// minimum fragment length for this reassembly - used to estimate MTU
u16 min_fragment_length;
// number of fragments in this reassembly
// minimum fragment length for this reassembly - used to estimate MTU
u16 min_fragment_length;
// number of fragments in this reassembly
always_inline void
ip4_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
always_inline void
ip4_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_reass_main_t * rm, ip4_reass_t * reass,
- bool is_feature)
+ ip4_reass_main_t * rm, ip4_reass_t * reass)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
range_bi = range_vnb->ip.reass.next_range_bi;
}
/* send to next_error_index */
range_bi = range_vnb->ip.reass.next_range_bi;
}
/* send to next_error_index */
+ if (~0 != reass->error_next_index)
{
u32 n_left_to_next, *to_next, next_index;
{
u32 n_left_to_next, *to_next, next_index;
static ip4_reass_t *
ip4_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
static ip4_reass_t *
ip4_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
- ip4_reass_kv_t * kv, u8 * do_handoff,
- bool is_feature)
+ ip4_reass_kv_t * kv, u8 * do_handoff)
{
ip4_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
{
ip4_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
if (now > reass->last_heard + rm->timeout)
{
if (now > reass->last_heard + rm->timeout)
{
- ip4_reass_drop_all (vm, node, rm, reass, is_feature);
+ ip4_reass_drop_all (vm, node, rm, reass);
ip4_reass_free (rm, rt, reass);
reass = NULL;
}
ip4_reass_free (rm, rt, reass);
reass = NULL;
}
reass->first_bi = ~0;
reass->last_packet_octet = ~0;
reass->data_len = 0;
reass->first_bi = ~0;
reass->last_packet_octet = ~0;
reass->data_len = 0;
- reass->is_feature = is_feature;
+ reass->next_index = ~0;
+ reass->error_next_index = ~0;
ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
{
vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
vlib_buffer_t *last_b = NULL;
{
vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
vlib_buffer_t *last_b = NULL;
#endif
}
*bi0 = reass->first_bi;
#endif
}
*bi0 = reass->first_bi;
{
*next0 = IP4_REASSEMBLY_NEXT_INPUT;
}
{
*next0 = IP4_REASSEMBLY_NEXT_INPUT;
}
ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
{
ip4_reass_rc_t rc = IP4_REASS_RC_OK;
int consumed = 0;
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
ip4_header_t *fip = vlib_buffer_get_current (fb);
vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
{
ip4_reass_rc_t rc = IP4_REASS_RC_OK;
int consumed = 0;
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
ip4_header_t *fip = vlib_buffer_get_current (fb);
vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
- reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
+ if (is_custom_app)
+ {
+ // store (error_)next_index before it's overwritten
+ reass->next_index = fvnb->ip.reass.next_index;
+ reass->error_next_index = fvnb->ip.reass.error_next_index;
+ }
const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
const u32 fragment_length =
clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
const u32 fragment_length =
clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
reass->data_len == reass->last_packet_octet + 1)
{
return ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
reass->data_len == reass->last_packet_octet + 1)
{
return ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
-ip4_reassembly_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, bool is_feature)
+ip4_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, bool is_feature,
+ bool is_custom_app)
{
u32 *from = vlib_frame_vector_args (frame);
u32 n_left_from, n_left_to_next, *to_next, next_index;
{
u32 *from = vlib_frame_vector_args (frame);
u32 n_left_from, n_left_to_next, *to_next, next_index;
if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
{
// this is a whole packet - no fragmentation
if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
{
// this is a whole packet - no fragmentation
{
next0 = IP4_REASSEMBLY_NEXT_INPUT;
}
{
next0 = IP4_REASSEMBLY_NEXT_INPUT;
}
ip4_reass_t *reass =
ip4_reass_find_or_create (vm, node, rm, rt, &kv,
ip4_reass_t *reass =
ip4_reass_find_or_create (vm, node, rm, rt, &kv,
- &do_handoff, is_feature);
if (PREDICT_FALSE (do_handoff))
{
if (PREDICT_FALSE (do_handoff))
{
{
switch (ip4_reass_update
(vm, node, rm, rt, reass, &bi0, &next0,
{
switch (ip4_reass_update
(vm, node, rm, rt, reass, &bi0, &next0,
+ &error0, is_custom_app))
{
case IP4_REASS_RC_OK:
/* nothing to do here */
{
case IP4_REASS_RC_OK:
/* nothing to do here */
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1);
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1);
- ip4_reass_drop_all (vm, node, rm, reass,
- is_feature);
+ ip4_reass_drop_all (vm, node, rm, reass);
ip4_reass_free (rm, rt, reass);
goto next_packet;
break;
ip4_reass_free (rm, rt, reass);
goto next_packet;
break;
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_NO_BUF,
1);
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_NO_BUF,
1);
- ip4_reass_drop_all (vm, node, rm, reass,
- is_feature);
+ ip4_reass_drop_all (vm, node, rm, reass);
ip4_reass_free (rm, rt, reass);
goto next_packet;
break;
ip4_reass_free (rm, rt, reass);
goto next_packet;
break;
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_INTERNAL_ERROR,
1);
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_INTERNAL_ERROR,
1);
- ip4_reass_drop_all (vm, node, rm, reass,
- is_feature);
+ ip4_reass_drop_all (vm, node, rm, reass);
ip4_reass_free (rm, rt, reass);
goto next_packet;
break;
ip4_reass_free (rm, rt, reass);
goto next_packet;
break;
VLIB_NODE_FN (ip4_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
VLIB_NODE_FN (ip4_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return ip4_reassembly_inline (vm, node, frame, false /* is_feature */ );
+ return ip4_reassembly_inline (vm, node, frame, false /* is_feature */ ,
+ false /* is_custom_app */ );
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return ip4_reassembly_inline (vm, node, frame, true /* is_feature */ );
+ return ip4_reassembly_inline (vm, node, frame, true /* is_feature */ ,
+ false /* is_custom_app */ );
vec_foreach (i, pool_indexes_to_free)
{
ip4_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
vec_foreach (i, pool_indexes_to_free)
{
ip4_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
- ip4_reass_drop_all (vm, node, rm, reass, reass->is_feature);
+ ip4_reass_drop_all (vm, node, rm, reass);
ip4_reass_free (rm, rt, reass);
}
/* *INDENT-ON* */
ip4_reass_free (rm, rt, reass);
}
/* *INDENT-ON* */
u32 data_len;
// trace operation counter
u32 trace_op_counter;
u32 data_len;
// trace operation counter
u32 trace_op_counter;
- // next index - used by non-feature node
+ // next index - used by custom apps (~0 if not set)
- // error next index - used by non-feature node
+ // error next index - used by custom apps (~0 if not set)
- // is_feature flag stored for non-inline code use
- bool is_feature;
// minimum fragment length for this reassembly - used to estimate MTU
u16 min_fragment_length;
// number of fragments for this reassembly
// minimum fragment length for this reassembly - used to estimate MTU
u16 min_fragment_length;
// number of fragments for this reassembly
always_inline void
ip6_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
always_inline void
ip6_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip6_reass_main_t * rm, ip6_reass_t * reass,
- bool is_feature)
+ ip6_reass_main_t * rm, ip6_reass_t * reass)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
range_bi = range_vnb->ip.reass.next_range_bi;
}
/* send to next_error_index */
range_bi = range_vnb->ip.reass.next_range_bi;
}
/* send to next_error_index */
+ if (~0 != reass->error_next_index)
{
u32 n_left_to_next, *to_next, next_index;
{
u32 n_left_to_next, *to_next, next_index;
always_inline void
ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_t * reass,
always_inline void
ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_t * reass,
- u32 * icmp_bi, bool is_feature)
{
if (~0 == reass->first_bi)
{
return;
}
{
if (~0 == reass->first_bi)
{
return;
}
+ if (~0 == reass->next_index) // custom apps don't want icmp
{
vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
if (0 == vnet_buffer (b)->ip.reass.fragment_first)
{
vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
if (0 == vnet_buffer (b)->ip.reass.fragment_first)
- ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+ ip6_reass_drop_all (vm, node, rm, reass);
}
always_inline ip6_reass_t *
ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
}
always_inline ip6_reass_t *
ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
- ip6_reass_kv_t * kv, u32 * icmp_bi, u8 * do_handoff,
- bool is_feature)
+ ip6_reass_kv_t * kv, u32 * icmp_bi, u8 * do_handoff)
{
ip6_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
{
ip6_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
if (now > reass->last_heard + rm->timeout)
{
if (now > reass->last_heard + rm->timeout)
{
- ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi, is_feature);
+ ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi);
ip6_reass_free (rm, rt, reass);
reass = NULL;
}
ip6_reass_free (rm, rt, reass);
reass = NULL;
}
reass->first_bi = ~0;
reass->last_packet_octet = ~0;
reass->data_len = 0;
reass->first_bi = ~0;
reass->last_packet_octet = ~0;
reass->data_len = 0;
- reass->is_feature = is_feature;
+ reass->next_index = ~0;
+ reass->error_next_index = ~0;
ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
{
*bi0 = reass->first_bi;
*error0 = IP6_ERROR_NONE;
{
*bi0 = reass->first_bi;
*error0 = IP6_ERROR_NONE;
{
*next0 = IP6_REASSEMBLY_NEXT_INPUT;
}
{
*next0 = IP6_REASSEMBLY_NEXT_INPUT;
}
ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
- ip6_frag_hdr_t * frag_hdr, bool is_feature)
+ ip6_frag_hdr_t * frag_hdr, bool is_custom_app)
{
int consumed = 0;
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
{
int consumed = 0;
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
- reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
- reass->error_next_index = fvnb->ip.reass.error_next_index; // store error_next_index before it is overwritten
+ if (is_custom_app)
+ {
+ reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
+ reass->error_next_index = fvnb->ip.reass.error_next_index; // store error_next_index before it is overwritten
+ }
fvnb->ip.reass.ip6_frag_hdr_offset =
(u8 *) frag_hdr - (u8 *) vlib_buffer_get_current (fb);
fvnb->ip.reass.ip6_frag_hdr_offset =
(u8 *) frag_hdr - (u8 *) vlib_buffer_get_current (fb);
else
{
// overlapping fragment - not allowed by RFC 8200
else
{
// overlapping fragment - not allowed by RFC 8200
- ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+ ip6_reass_drop_all (vm, node, rm, reass);
ip6_reass_free (rm, rt, reass);
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
ip6_reass_free (rm, rt, reass);
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
reass->data_len == reass->last_packet_octet + 1)
{
return ip6_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
reass->data_len == reass->last_packet_octet + 1)
{
return ip6_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
-ip6_reassembly_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, bool is_feature)
+ip6_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, bool is_feature,
+ bool is_custom_app)
{
u32 *from = vlib_frame_vector_args (frame);
u32 n_left_from, n_left_to_next, *to_next, next_index;
{
u32 *from = vlib_frame_vector_args (frame);
u32 n_left_from, n_left_to_next, *to_next, next_index;
ip6_reass_t *reass =
ip6_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
ip6_reass_t *reass =
ip6_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
- &do_handoff, is_feature);
if (PREDICT_FALSE (do_handoff))
{
if (PREDICT_FALSE (do_handoff))
{
else if (reass)
{
switch (ip6_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
else if (reass)
{
switch (ip6_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
- &error0, frag_hdr, is_feature))
+ &error0, frag_hdr, is_custom_app))
{
case IP6_REASS_RC_OK:
/* nothing to do here */
{
case IP6_REASS_RC_OK:
/* nothing to do here */
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1);
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1);
- ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+ ip6_reass_drop_all (vm, node, rm, reass);
ip6_reass_free (rm, rt, reass);
goto next_packet;
break;
case IP6_REASS_RC_NO_BUF:
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_NO_BUF, 1);
ip6_reass_free (rm, rt, reass);
goto next_packet;
break;
case IP6_REASS_RC_NO_BUF:
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_NO_BUF, 1);
- ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+ ip6_reass_drop_all (vm, node, rm, reass);
ip6_reass_free (rm, rt, reass);
goto next_packet;
break;
ip6_reass_free (rm, rt, reass);
goto next_packet;
break;
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_INTERNAL_ERROR,
1);
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_INTERNAL_ERROR,
1);
- ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+ ip6_reass_drop_all (vm, node, rm, reass);
ip6_reass_free (rm, rt, reass);
goto next_packet;
break;
ip6_reass_free (rm, rt, reass);
goto next_packet;
break;
VLIB_NODE_FN (ip6_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
VLIB_NODE_FN (ip6_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return ip6_reassembly_inline (vm, node, frame, false /* is_feature */ );
+ return ip6_reassembly_inline (vm, node, frame, false /* is_feature */ ,
+ false /* is_custom_app */ );
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return ip6_reassembly_inline (vm, node, frame, true /* is_feature */ );
+ return ip6_reassembly_inline (vm, node, frame, true /* is_feature */ ,
+ false /* is_custom_app */ );
b->flags &= ~VLIB_BUFFER_IS_TRACED;
}
}
b->flags &= ~VLIB_BUFFER_IS_TRACED;
}
}
- ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi, reass->is_feature);
+ ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
if (~0 != icmp_bi)
{
vec_add1 (vec_icmp_bi, icmp_bi);
if (~0 != icmp_bi)
{
vec_add1 (vec_icmp_bi, icmp_bi);
"Interface %s: Packet expected from interface %s "
"didn't arrive" % (dst_if.name, i.name))
"Interface %s: Packet expected from interface %s "
"didn't arrive" % (dst_if.name, i.name))
+ def test_next_header_anomaly(self):
+ """ IPv6 next header anomaly test
+
+ Test scenario:
+ - ipv6 next header field = Fragment Header (44)
+ - next header is ICMPv6 Echo Request
+ - wait for reassembly
+ """
+ pkt = (Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6, nh=44) /
+ ICMPv6EchoRequest())
+
+ self.pg0.add_stream(pkt)
+ self.pg_start()
+
+ # wait for reassembly
+ self.sleep(10)
+
def test_fib(self):
""" IPv6 FIB test
def test_fib(self):
""" IPv6 FIB test