#include <vppinfra/vec.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
-#include <vppinfra/bihash_24_8.h>
+#include <vppinfra/bihash_16_8.h>
#include <vnet/ip/ip4_reassembly.h>
#define MSEC_PER_SEC 1000
#define IP4_REASS_DEBUG_BUFFER(...)
#endif
-static vlib_node_registration_t ip4_reass_node;
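+/* return codes used where ASSERTs used to be, so that internal
+ * inconsistencies are reported to the caller instead of aborting */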
+typedef enum
+{
+ IP4_REASS_RC_OK,
+ IP4_REASS_RC_INTERNAL_ERROR,
+} ip4_reass_rc_t;
typedef struct
{
union
{
struct
{
- // align by making this 4 octets even though its a 2 octets field
u32 xx_id;
ip4_address_t src;
ip4_address_t dst;
- // align by making this 4 octets even though its a 2 octets field
- u32 frag_id;
- // align by making this 4 octets even though its a 1 octet field
- u32 proto;
- u32 unused;
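+ // the key now packs into exactly 16 octets (4+4+4+2+1+1), i.e. two u64 words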
+ u16 frag_id;
+ u8 proto;
+ u8 unused;
};
- u64 as_u64[3];
+ u64 as_u64[2];
};
} ip4_reass_key_t;
-always_inline u32
-ip4_reass_buffer_get_data_offset_no_check (vlib_buffer_t * b)
-{
- vnet_buffer_opaque_t *vnb = vnet_buffer (b);
- return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
-}
-
always_inline u32
ip4_reass_buffer_get_data_offset (vlib_buffer_t * b)
{
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
- ASSERT (vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first);
- return ip4_reass_buffer_get_data_offset_no_check (b);
+ return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
}
always_inline u16
-ip4_reass_buffer_get_data_len_no_check (vlib_buffer_t * b)
+ip4_reass_buffer_get_data_len (vlib_buffer_t * b)
{
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
(vnb->ip.reass.fragment_first + ip4_reass_buffer_get_data_offset (b)) + 1;
}
-always_inline u16
-ip4_reass_buffer_get_data_len (vlib_buffer_t * b)
-{
- vnet_buffer_opaque_t *vnb = vnet_buffer (b);
- ASSERT (vnb->ip.reass.range_last > vnb->ip.reass.fragment_first);
- return ip4_reass_buffer_get_data_len_no_check (b);
-}
-
typedef struct
{
// hash table key
u32 max_reass_n;
// IPv4 runtime
- clib_bihash_24_8_t hash;
+ clib_bihash_16_8_t hash;
// per-thread data
ip4_reass_per_thread_t *per_thread_data;
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
trace->range_first = vnb->ip.reass.range_first;
trace->range_last = vnb->ip.reass.range_last;
- trace->data_offset = ip4_reass_buffer_get_data_offset_no_check (b);
- trace->data_len = ip4_reass_buffer_get_data_len_no_check (b);
+ trace->data_offset = ip4_reass_buffer_get_data_offset (b);
+ trace->data_len = ip4_reass_buffer_get_data_len (b);
trace->range_bi = bi;
}
ip4_reass_free (ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass)
{
- clib_bihash_kv_24_8_t kv;
+ clib_bihash_kv_16_8_t kv;
kv.key[0] = reass->key.as_u64[0];
kv.key[1] = reass->key.as_u64[1];
- kv.key[2] = reass->key.as_u64[2];
- clib_bihash_add_del_24_8 (&rm->hash, &kv, 0);
+ clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
pool_put (rt->pool, reass);
--rt->reass_n;
}
{
ip4_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
- clib_bihash_kv_24_8_t kv, value;
+ clib_bihash_kv_16_8_t kv, value;
kv.key[0] = k->as_u64[0];
kv.key[1] = k->as_u64[1];
- kv.key[2] = k->as_u64[2];
- if (!clib_bihash_search_24_8 (&rm->hash, &kv, &value))
+ if (!clib_bihash_search_16_8 (&rm->hash, &kv, &value))
{
reass = pool_elt_at_index (rt->pool, value.value);
if (now > reass->last_heard + rm->timeout)
else
{
pool_get (rt->pool, reass);
- memset (reass, 0, sizeof (*reass));
+ clib_memset (reass, 0, sizeof (*reass));
reass->id =
((u64) os_get_thread_index () * 1000000000) + rt->id_counter;
++rt->id_counter;
reass->key.as_u64[0] = kv.key[0] = k->as_u64[0];
reass->key.as_u64[1] = kv.key[1] = k->as_u64[1];
- reass->key.as_u64[2] = kv.key[2] = k->as_u64[2];
kv.value = reass - rt->pool;
reass->last_heard = now;
- if (clib_bihash_add_del_24_8 (&rm->hash, &kv, 1))
+ if (clib_bihash_add_del_16_8 (&rm->hash, &kv, 1))
{
ip4_reass_free (rm, rt, reass);
reass = NULL;
return reass;
}
-always_inline void
+always_inline ip4_reass_rc_t
ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0,
- vlib_error_t * error0, u32 ** vec_drop_compress,
+ u32 * error0, u32 ** vec_drop_compress,
u32 ** vec_drop_overlap, bool is_feature)
{
- ASSERT (~0 != reass->first_bi);
vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
vlib_buffer_t *last_b = NULL;
u32 sub_chain_bi = reass->first_bi;
u32 tmp_bi = sub_chain_bi;
vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
ip4_header_t *ip = vlib_buffer_get_current (tmp);
+ vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
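+ /* sanity check (formerly ASSERTs inside the data_offset/data_len helpers):
+    the stored range must lie within the fragment it was derived from */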
+ if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) ||
+     !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
+
u32 data_len = ip4_reass_buffer_get_data_len (tmp);
u32 trim_front =
ip4_header_bytes (ip) + ip4_reass_buffer_get_data_offset (tmp);
if (tmp_bi == reass->first_bi)
{
/* first buffer - keep ip4 header */
- ASSERT (0 == ip4_reass_buffer_get_data_offset (tmp));
+ if (0 != ip4_reass_buffer_get_data_offset (tmp))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
trim_front = 0;
trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
ip4_header_bytes (ip);
- ASSERT (vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0);
+ if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
}
u32 keep_data =
vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
vec_add1 (*vec_drop_compress, tmp_bi);
++dropped_cnt;
trim_front -= tmp->current_length;
- ASSERT (tmp->flags & VLIB_BUFFER_NEXT_PRESENT);
+ if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
tmp_bi = tmp->next_buffer;
tmp = vlib_get_buffer (vm, tmp_bi);
else
{
keep_data -= tmp->current_length;
- ASSERT (tmp->flags & VLIB_BUFFER_NEXT_PRESENT);
+ if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
}
total_length += tmp->current_length;
}
else
{
vec_add1 (*vec_drop_overlap, tmp_bi);
- ASSERT (reass->first_bi != tmp_bi);
+ if (reass->first_bi == tmp_bi)
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
++dropped_cnt;
}
if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
}
while (~0 != sub_chain_bi);
- ASSERT (last_b != NULL);
+ if (!last_b)
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
- ASSERT (rt->buffers_n >= (buf_cnt - dropped_cnt));
rt->buffers_n -= buf_cnt - dropped_cnt;
- ASSERT (total_length >= first_b->current_length);
+ if (total_length < first_b->current_length)
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
total_length -= first_b->current_length;
first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
first_b->total_length_not_including_first_buffer = total_length;
ip->flags_and_fragment_offset = 0;
ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
ip->checksum = ip4_header_checksum (ip);
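+ /* buffers freed by the chain compress below are appended to
+    vec_drop_compress and decremented from buffers_n once they are enqueued
+    to drop, so count them back in here to keep the accounting balanced */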
+ u32 before = vec_len (*vec_drop_compress);
vlib_buffer_chain_compress (vm, first_b, vec_drop_compress);
+ rt->buffers_n += vec_len (*vec_drop_compress) - before;
+
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
ip4_reass_add_trace (vm, node, rm, reass, reass->first_bi, FINALIZE, 0);
*error0 = IP4_ERROR_NONE;
ip4_reass_free (rm, rt, reass);
reass = NULL;
+ return IP4_REASS_RC_OK;
}
always_inline u32
return len;
}
-always_inline void
+always_inline ip4_reass_rc_t
ip4_reass_insert_range_in_chain (vlib_main_t * vm,
ip4_reass_main_t * rm,
ip4_reass_per_thread_t * rt,
ip4_reass_t * reass,
u32 prev_range_bi, u32 new_next_bi)
{
-
vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
if (~0 != prev_range_bi)
}
reass->first_bi = new_next_bi;
}
+ vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
+ if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) ||
+     !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
reass->data_len += ip4_reass_buffer_get_data_len (new_next_b);
rt->buffers_n += ip4_reass_get_buffer_chain_length (vm, new_next_b);
+ return IP4_REASS_RC_OK;
}
-always_inline void
+always_inline ip4_reass_rc_t
ip4_reass_remove_range_from_chain (vlib_main_t * vm,
vlib_node_runtime_t * node,
ip4_reass_main_t * rm,
{
vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
- ASSERT (prev_vnb->ip.reass.next_range_bi == discard_bi);
+ if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
}
else
{
reass->first_bi = discard_vnb->ip.reass.next_range_bi;
}
+ vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
+ if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) ||
+     !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
reass->data_len -= ip4_reass_buffer_get_data_len (discard_b);
while (1)
{
break;
}
}
+ return IP4_REASS_RC_OK;
}
-always_inline void
+always_inline ip4_reass_rc_t
ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
ip4_reass_t * reass, u32 * bi0, u32 * next0,
- vlib_error_t * error0, u32 ** vec_drop_overlap,
+ u32 * error0, u32 ** vec_drop_overlap,
u32 ** vec_drop_compress, bool is_feature)
{
+ ip4_reass_rc_t rc = IP4_REASS_RC_OK;
int consumed = 0;
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
ip4_header_t *fip = vlib_buffer_get_current (fb);
- ASSERT (fb->current_length >= sizeof (*fip));
vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
- u32 fragment_first = fvnb->ip.reass.fragment_first =
- ip4_get_fragment_offset_bytes (fip);
- u32 fragment_length =
+ reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
+ const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
+ const u32 fragment_length =
clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
- u32 fragment_last = fvnb->ip.reass.fragment_last =
- fragment_first + fragment_length - 1;
+ const u32 fragment_last = fragment_first + fragment_length - 1;
+ fvnb->ip.reass.fragment_first = fragment_first;
+ fvnb->ip.reass.fragment_last = fragment_last;
int more_fragments = ip4_get_fragment_more (fip);
u32 candidate_range_bi = reass->first_bi;
u32 prev_range_bi = ~0;
- reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
fvnb->ip.reass.range_first = fragment_first;
fvnb->ip.reass.range_last = fragment_last;
fvnb->ip.reass.next_range_bi = ~0;
if (~0 == reass->first_bi)
{
// starting a new reassembly
- ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
- *bi0);
+ rc =
+ ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
+ *bi0);
+ if (IP4_REASS_RC_OK != rc)
+ {
+ return rc;
+ }
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
ip4_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0);
}
*bi0 = ~0;
reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
- return;
+ return IP4_REASS_RC_OK;
}
reass->min_fragment_length = clib_min (clib_net_to_host_u16 (fip->length),
fvnb->ip.reass.estimated_mtu);
~0 == candidate_range_bi)
{
// special case - this fragment falls beyond all known ranges
- ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ rc =
+ ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
+ prev_range_bi, *bi0);
+ if (IP4_REASS_RC_OK != rc)
+ {
+ return rc;
+ }
consumed = 1;
break;
}
if (fragment_last < candidate_vnb->ip.reass.range_first)
{
// this fragment ends before candidate range without any overlap
- ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
- *bi0);
+ rc =
+ ip4_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
+ *bi0);
+ if (IP4_REASS_RC_OK != rc)
+ {
+ return rc;
+ }
consumed = 1;
}
else
if (overlap < ip4_reass_buffer_get_data_len (candidate_b))
{
candidate_vnb->ip.reass.range_first += overlap;
- ASSERT (reass->data_len >= overlap);
+ if (reass->data_len < overlap)
+ {
+ return IP4_REASS_RC_INTERNAL_ERROR;
+ }
reass->data_len -= overlap;
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
candidate_range_bi, RANGE_SHRINK,
overlap);
}
- ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ rc =
+ ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
+ prev_range_bi, *bi0);
+ if (IP4_REASS_RC_OK != rc)
+ {
+ return rc;
+ }
consumed = 1;
}
else
else
{
// special case - last range discarded
- ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
- candidate_range_bi,
- *bi0);
+ rc =
+ ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
+ candidate_range_bi,
+ *bi0);
+ if (IP4_REASS_RC_OK != rc)
+ {
+ return rc;
+ }
consumed = 1;
}
}
{
u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
// discard candidate range, probe next range
- ip4_reass_remove_range_from_chain (vm, node, rm,
- vec_drop_overlap, reass,
- prev_range_bi,
- candidate_range_bi);
+ rc =
+ ip4_reass_remove_range_from_chain (vm, node, rm,
+ vec_drop_overlap, reass,
+ prev_range_bi,
+ candidate_range_bi);
+ if (IP4_REASS_RC_OK != rc)
+ {
+ return rc;
+ }
if (~0 != next_range_bi)
{
candidate_range_bi = next_range_bi;
else
{
// special case - last range discarded
- ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ rc =
+ ip4_reass_insert_range_in_chain (vm, rm, rt, reass,
+ prev_range_bi, *bi0);
+ if (IP4_REASS_RC_OK != rc)
+ {
+ return rc;
+ }
consumed = 1;
}
}
if (~0 != reass->last_packet_octet &&
reass->data_len == reass->last_packet_octet + 1)
{
- ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
- vec_drop_compress, vec_drop_overlap, is_feature);
+ return ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
+ vec_drop_compress, vec_drop_overlap,
+ is_feature);
}
else
{
*error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
}
}
+ return rc;
}
always_inline uword
next_index = node->cached_next_index;
static u32 *vec_drop_timeout = NULL; // indexes of buffers which timed out
static u32 *vec_drop_overlap = NULL; // indexes of buffers which were discarded due to overlap
+ static u32 *vec_drop_internal_error = NULL; // indexes of buffers which were discarded due to internal errors
static u32 *vec_drop_compress = NULL; // indexes of buffers discarded due to buffer compression
- while (n_left_from > 0 || vec_len (vec_drop_timeout) > 0 ||
- vec_len (vec_drop_overlap) > 0 || vec_len (vec_drop_compress) > 0)
+ while (n_left_from > 0 || vec_len (vec_drop_timeout) > 0
+ || vec_len (vec_drop_overlap) > 0 || vec_len (vec_drop_compress) > 0
+ || vec_len (vec_drop_internal_error) > 0)
{
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
n_left_to_next, bi,
IP4_REASSEMBLY_NEXT_DROP);
IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_timeout);
- ASSERT (rt->buffers_n > 0);
--rt->buffers_n;
}
n_left_to_next, bi,
IP4_REASSEMBLY_NEXT_DROP);
IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_duplicate_fragment);
- ASSERT (rt->buffers_n > 0);
--rt->buffers_n;
}
n_left_to_next, bi,
IP4_REASSEMBLY_NEXT_DROP);
IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_compress);
- ASSERT (rt->buffers_n > 0);
--rt->buffers_n;
}
-
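+ /* drain buffers abandoned due to internal errors, mirroring the
+    timeout/overlap/compress drop loops above */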
+ while (vec_len (vec_drop_internal_error) > 0 && n_left_to_next > 0)
+ {
+ u32 bi = vec_pop (vec_drop_internal_error);
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ b->error = node->errors[IP4_ERROR_REASS_INTERNAL_ERROR];
+ to_next[0] = bi;
+ to_next += 1;
+ n_left_to_next -= 1;
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi,
+ IP4_REASSEMBLY_NEXT_DROP);
+ IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_internal_error);
+ --rt->buffers_n;
+ }
while (n_left_from > 0 && n_left_to_next > 0)
{
u32 bi0;
}
else
{
- ip4_reass_key_t k;
- k.src.as_u32 = ip0->src_address.as_u32;
- k.dst.as_u32 = ip0->dst_address.as_u32;
- k.xx_id = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- k.frag_id = ip0->fragment_id;
- k.proto = ip0->protocol;
- k.unused = 0;
- ip4_reass_t *reass =
- ip4_reass_find_or_create (vm, rm, rt, &k, &vec_drop_timeout);
-
- if (reass)
+ const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
+ const u32 fragment_length =
+ clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
+ const u32 fragment_last = fragment_first + fragment_length - 1;
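+ // basic sanity checks on the fragment before any reassembly state is looked up or created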
+ if (fragment_first > fragment_last
+     || fragment_first + fragment_length > UINT16_MAX - 20
+     || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 octets is the minimum fragment length per RFC 791
{
- ip4_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
- &error0, &vec_drop_overlap,
- &vec_drop_compress, is_feature);
+ next0 = IP4_REASSEMBLY_NEXT_DROP;
+ error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
}
else
{
- next0 = IP4_REASSEMBLY_NEXT_DROP;
- error0 = IP4_ERROR_REASS_LIMIT_REACHED;
+ ip4_reass_key_t k;
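+ /* build the bihash key directly in its two u64 words; the layout mirrors
+    the named fields of ip4_reass_key_t above */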
+ k.as_u64[0] =
+ (u64) vnet_buffer (b0)->sw_if_index[VLIB_RX] |
+ (u64) ip0->src_address.as_u32 << 32;
+ k.as_u64[1] =
+ (u64) ip0->dst_address.as_u32 |
+ (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
+
+ ip4_reass_t *reass =
+ ip4_reass_find_or_create (vm, rm, rt, &k,
+ &vec_drop_timeout);
+
+ if (reass)
+ {
+ switch (ip4_reass_update
+ (vm, node, rm, rt, reass, &bi0, &next0, &error0,
+ &vec_drop_overlap, &vec_drop_compress,
+ is_feature))
+ {
+ case IP4_REASS_RC_OK:
+ /* nothing to do here */
+ break;
+ case IP4_REASS_RC_INTERNAL_ERROR:
+ /* drop everything and start with a clean slate */
+ ip4_reass_on_timeout (vm, rm, reass,
+ &vec_drop_internal_error);
+ ip4_reass_free (rm, rt, reass);
+ goto next_packet;
+ break;
+ }
+ }
+ else
+ {
+ next0 = IP4_REASSEMBLY_NEXT_DROP;
+ error0 = IP4_ERROR_REASS_LIMIT_REACHED;
+ }
}
b0->error = node->errors[error0];
n_left_to_next -= 1;
if (is_feature && IP4_ERROR_NONE == error0)
{
- vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX],
- &next0, b0);
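+ /* bi0 may now point to the reassembled buffer, so re-fetch b0 before
+    handing it to the feature arc */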
+ b0 = vlib_get_buffer (vm, bi0);
+ vnet_feature_next (&next0, b0);
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
}
+ next_packet:
from += 1;
n_left_from -= 1;
}
#undef _
};
-always_inline uword
+static uword
ip4_reassembly (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
VLIB_NODE_FUNCTION_MULTIARCH (ip4_reass_node, ip4_reassembly);
-always_inline uword
+static uword
ip4_reassembly_feature (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame)
{
typedef struct
{
int failure;
- clib_bihash_24_8_t *new_hash;
+ clib_bihash_16_8_t *new_hash;
} ip4_rehash_cb_ctx;
static void
-ip4_rehash_cb (clib_bihash_kv_24_8_t * kv, void *_ctx)
+ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
{
ip4_rehash_cb_ctx *ctx = _ctx;
- if (clib_bihash_add_del_24_8 (ctx->new_hash, kv, 1))
+ if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
{
ctx->failure = 1;
}
ip4_reass_main.ip4_reass_expire_node_idx,
IP4_EVENT_CONFIG_CHANGED, 0);
u32 new_nbuckets = ip4_reass_get_nbuckets ();
- if (ip4_reass_main.max_reass_n > 0 && new_nbuckets > 1 &&
- new_nbuckets != old_nbuckets)
+ if (ip4_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
{
- clib_bihash_24_8_t new_hash;
- memset (&new_hash, 0, sizeof (new_hash));
+ clib_bihash_16_8_t new_hash;
+ clib_memset (&new_hash, 0, sizeof (new_hash));
ip4_rehash_cb_ctx ctx;
ctx.failure = 0;
ctx.new_hash = &new_hash;
- clib_bihash_init_24_8 (&new_hash, "ip4-reass", new_nbuckets,
+ clib_bihash_init_16_8 (&new_hash, "ip4-reass", new_nbuckets,
new_nbuckets * 1024);
- clib_bihash_foreach_key_value_pair_24_8 (&ip4_reass_main.hash,
+ clib_bihash_foreach_key_value_pair_16_8 (&ip4_reass_main.hash,
ip4_rehash_cb, &ctx);
if (ctx.failure)
{
- clib_bihash_free_24_8 (&new_hash);
+ clib_bihash_free_16_8 (&new_hash);
return -1;
}
else
{
- clib_bihash_free_24_8 (&ip4_reass_main.hash);
- clib_memcpy (&ip4_reass_main.hash, &new_hash,
- sizeof (ip4_reass_main.hash));
+ clib_bihash_free_16_8 (&ip4_reass_main.hash);
+ clib_memcpy_fast (&ip4_reass_main.hash, &new_hash,
+ sizeof (ip4_reass_main.hash));
}
}
return 0;
rm->vlib_main = vm;
rm->vnet_main = vnet_get_main ();
- vec_validate (rm->per_thread_data, vlib_num_workers () + 1);
+ vec_validate (rm->per_thread_data, vlib_num_workers ());
ip4_reass_per_thread_t *rt;
vec_foreach (rt, rm->per_thread_data)
{
ASSERT (node);
rm->ip4_reass_expire_node_idx = node->index;
+ ip4_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
+ IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
+ IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
+
nbuckets = ip4_reass_get_nbuckets ();
- clib_bihash_init_24_8 (&rm->hash, "ip4-reass", nbuckets, nbuckets * 1024);
+ clib_bihash_init_16_8 (&rm->hash, "ip4-reass", nbuckets, nbuckets * 1024);
node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
ASSERT (node);
rm->ip4_drop_idx = node->index;
- ip4_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
- IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
- IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
-
return error;
}
uword thread_index = 0;
int index;
- const uword nthreads = os_get_nthreads ();
+ const uword nthreads = vlib_num_workers () + 1;
for (thread_index = 0; thread_index < nthreads; ++thread_index)
{
ip4_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
}
ip4_reass_on_timeout (vm, rm, reass, &vec_drop_timeout);
u32 after = vec_len (vec_drop_timeout);
- ASSERT (rt->buffers_n >= (after - before));
- rt->buffers_n -= (after - before);
+ rt->buffers_n -= (after - before);
ip4_reass_free (rm, rt, reass);
}
/* *INDENT-ON* */
n_left_to_next -= 1;
IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_timeout_walk);
}
- f->flags |= (trace_frame * VLIB_FRAME_TRACE);
+ f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
vlib_put_frame_to_node (vm, rm->ip4_drop_idx, f);
}
"fragment[%u, %u]\n",
counter, vnb->ip.reass.range_first,
vnb->ip.reass.range_last, bi,
- ip4_reass_buffer_get_data_offset_no_check (b),
- ip4_reass_buffer_get_data_len_no_check (b),
+ ip4_reass_buffer_get_data_offset (b),
+ ip4_reass_buffer_get_data_len (b),
vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
u64 sum_buffers_n = 0;
ip4_reass_t *reass;
uword thread_index;
- const uword nthreads = os_get_nthreads ();
+ const uword nthreads = vlib_num_workers () + 1;
for (thread_index = 0; thread_index < nthreads; ++thread_index)
{
ip4_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];