#include <vppinfra/vec.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
+#include <vnet/ip/ip.api_enum.h>
#include <vppinfra/fifo.h>
#include <vppinfra/bihash_16_8.h>
#include <vnet/ip/reass/ip4_full_reass.h>
#include <stddef.h>
#define MSEC_PER_SEC 1000
-#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
-#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
+#define IP4_REASS_TIMEOUT_DEFAULT_MS 200
+
+/* As there are only 1024 reassembly contexts per thread, either DDoS
+ * attacks or even a modest fraction of real flows timing out would quickly
+ * consume these contexts, exhausting the context space and leaving us
+ * unable to perform reassembly */
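+/* With the previous 10 s walk interval an expired context could linger
+ * unreclaimed for up to 10 s past the 200 ms timeout; at 50 ms, combined
+ * with the paced walk in ip4_full_reass_walk_expired, a context is
+ * reclaimed within roughly one second of expiring */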
+#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 50 // 50 ms default
#define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
-#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
+#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
#define IP4_REASS_HT_LOAD_FACTOR (0.75)
#define IP4_REASS_DEBUG_BUFFERS 0
ip4_full_reass_t *pool;
u32 reass_n;
u32 id_counter;
+  // for pacing timeout handling in the main-thread expire walk
+ u32 last_id;
clib_spinlock_t lock;
} ip4_full_reass_per_thread_t;
return ip4_full_reass_free_ctx (rt, reass);
}
+/* n_left_to_next and to_next are taken as input parameters, since this
+ * function can be called from a graph node that manages its own local
+ * copies of these variables; ignoring those and enqueuing the buffers
+ * through fresh local variables would cause either a buffer leak or
+ * corruption */
always_inline void
ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
- ip4_full_reass_t *reass, u32 offending_bi)
+ ip4_full_reass_t *reass, u32 *n_left_to_next,
+ u32 **to_next)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
vnet_buffer_opaque_t *range_vnb;
u32 *to_free = NULL;
+
while (~0 != range_bi)
{
range_b = vlib_get_buffer (vm, range_bi);
range_vnb = vnet_buffer (range_b);
- u32 bi = range_bi;
- while (~0 != bi)
+
+ if (~0 != range_bi)
{
- vec_add1 (to_free, bi);
- if (offending_bi == bi)
- {
- offending_bi = ~0;
- }
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
- {
- bi = b->next_buffer;
- b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
- }
- else
- {
- bi = ~0;
- }
+ vec_add1 (to_free, range_bi);
}
+
range_bi = range_vnb->ip.reass.next_range_bi;
}
- if (~0 != offending_bi)
- {
- vec_add1 (to_free, offending_bi);
- }
+
/* send to next_error_index */
- if (~0 != reass->error_next_index)
+ if (~0 != reass->error_next_index &&
+ reass->error_next_index < node->n_next_nodes)
{
- u32 n_left_to_next, *to_next, next_index;
+ u32 next_index;
next_index = reass->error_next_index;
u32 bi = ~0;
+ /* record number of packets sent to custom app */
+ vlib_node_increment_counter (vm, node->node_index,
+ IP4_ERROR_REASS_TO_CUSTOM_APP,
+ vec_len (to_free));
+
while (vec_len (to_free) > 0)
{
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index, *to_next,
+ (*n_left_to_next));
- while (vec_len (to_free) > 0 && n_left_to_next > 0)
+ while (vec_len (to_free) > 0 && (*n_left_to_next) > 0)
{
bi = vec_pop (to_free);
if (~0 != bi)
{
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ip4_full_reass_add_trace (vm, node, reass, bi,
+ RANGE_DISCARD, 0, ~0);
+ }
+		  (*to_next)[0] = bi;
+ (*to_next) += 1;
+ (*n_left_to_next) -= 1;
}
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_put_next_frame (vm, node, next_index, (*n_left_to_next));
}
}
-  else
-    vec_free (to_free);
+  else
+    vlib_buffer_free (vm, to_free, vec_len (to_free));
+  vec_free (to_free);
}
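+/* Walk the reassembly context and sanitize its buffer chains: break any
+ * chain that runs into the head of the next range so each range stays
+ * self-contained, and if bi0 is not already part of the reassembly,
+ * prepend it to the range list so it is dropped together with the rest
+ * of the context instead of leaking */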
+always_inline void
+sanitize_reass_buffers_add_missing (vlib_main_t *vm, ip4_full_reass_t *reass,
+ u32 *bi0)
+{
+ u32 range_bi = reass->first_bi;
+ vlib_buffer_t *range_b;
+ vnet_buffer_opaque_t *range_vnb;
+
+ while (~0 != range_bi)
+ {
+ range_b = vlib_get_buffer (vm, range_bi);
+ range_vnb = vnet_buffer (range_b);
+ u32 bi = range_bi;
+ if (~0 != bi)
+ {
+ if (bi == *bi0)
+ *bi0 = ~0;
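+	  /* walk this range's buffer chain; if the chain runs into the
+	   * head of the next range, break the link so the ranges do not
+	   * share buffers */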
+ if (range_b->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ u32 _bi = bi;
+ vlib_buffer_t *_b = vlib_get_buffer (vm, _bi);
+ while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ if (_b->next_buffer != range_vnb->ip.reass.next_range_bi)
+ {
+ _bi = _b->next_buffer;
+ _b = vlib_get_buffer (vm, _bi);
+ }
+ else
+ {
+ _b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+ break;
+ }
+ }
+ }
+ range_bi = range_vnb->ip.reass.next_range_bi;
+ }
+ }
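+  /* bi0 was not found anywhere in the reassembly: prepend it to the
+   * range list so it is dropped with the rest of the context instead of
+   * leaking */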
+ if (*bi0 != ~0)
+ {
+ vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
+ vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
+ if (~0 != reass->first_bi)
+ {
+ fvnb->ip.reass.next_range_bi = reass->first_bi;
+ reass->first_bi = *bi0;
+ }
+ else
+ {
+ reass->first_bi = *bi0;
+ fvnb->ip.reass.next_range_bi = ~0;
+ }
+ *bi0 = ~0;
+ }
+}
+
always_inline void
ip4_full_reass_init (ip4_full_reass_t * reass)
{
}
always_inline ip4_full_reass_t *
-ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm,
- ip4_full_reass_per_thread_t * rt,
- ip4_full_reass_kv_t * kv, u8 * do_handoff)
+ip4_full_reass_find_or_create (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip4_full_reass_main_t *rm,
+ ip4_full_reass_per_thread_t *rt,
+ ip4_full_reass_kv_t *kv, u8 *do_handoff,
+ u32 *n_left_to_next, u32 **to_next)
{
ip4_full_reass_t *reass;
f64 now;
if (now > reass->last_heard + rm->timeout)
{
- ip4_full_reass_drop_all (vm, node, reass, ~0);
+ vlib_node_increment_counter (vm, node->node_index,
+ IP4_ERROR_REASS_TIMEOUT, 1);
+ ip4_full_reass_drop_all (vm, node, reass, n_left_to_next, to_next);
ip4_full_reass_free (rm, rt, reass);
reass = NULL;
}
*next0 = reass->next_index;
}
vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
+
+ /* Keep track of number of successfully reassembled packets and number of
+ * fragments reassembled */
+ vlib_node_increment_counter (vm, node->node_index, IP4_ERROR_REASS_SUCCESS,
+ 1);
+
+ vlib_node_increment_counter (vm, node->node_index,
+ IP4_ERROR_REASS_FRAGMENTS_REASSEMBLED,
+ reass->fragments_n);
+
*error0 = IP4_ERROR_NONE;
ip4_full_reass_free (rm, rt, reass);
reass = NULL;
const u32 fragment_length =
clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
const u32 fragment_last = fragment_first + fragment_length - 1;
- if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0))) // 8 is minimum frag length per RFC 791
+
+ /* Keep track of received fragments */
+ vlib_node_increment_counter (vm, node->node_index,
+ IP4_ERROR_REASS_FRAGMENTS_RCVD, 1);
+
+ if (fragment_first > fragment_last ||
+ fragment_first + fragment_length > UINT16_MAX - 20 ||
+ (fragment_length < 8 && // 8 is minimum frag length per RFC 791
+ ip4_get_fragment_more (ip0)))
{
next0 = IP4_FULL_REASS_NEXT_DROP;
error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
(u64) ip0->dst_address.
as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
- ip4_full_reass_t *reass =
- ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
- &do_handoff);
+ ip4_full_reass_t *reass = ip4_full_reass_find_or_create (
+ vm, node, rm, rt, &kv, &do_handoff, &n_left_to_next, &to_next);
if (reass)
{
break;
case IP4_REASS_RC_INTERNAL_ERROR:
counter = IP4_ERROR_REASS_INTERNAL_ERROR;
+		      /* Sanitization is needed in internal error cases
+		       * only, as the incoming packet is already dropped in
+		       * the other cases; adding bi0 back to the reassembly
+		       * list also fixes buffer leaks during internal
+		       * errors.
+		       *
+		       * It also makes no sense to hand these buffers to
+		       * the custom app, since these fragments hit internal
+		       * errors */
+ sanitize_reass_buffers_add_missing (vm, reass, &bi0);
+ reass->error_next_index = ~0;
break;
}
{
vlib_node_increment_counter (vm, node->node_index, counter,
1);
- ip4_full_reass_drop_all (vm, node, reass, bi0);
+ ip4_full_reass_drop_all (vm, node, reass, &n_left_to_next,
+ &to_next);
ip4_full_reass_free (rm, rt, reass);
goto next_packet;
}
{
vnet_feature_next (&next0, b0);
}
+
+	  /* Increment the to-custom-app counter as well, since this
+	   * fragment is also being handed to the application */
+ if (CUSTOM == type)
+ {
+ vlib_node_increment_counter (
+ vm, node->node_index, IP4_ERROR_REASS_TO_CUSTOM_APP, 1);
+ }
+
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
bi0, next0);
return frame->n_vectors;
}
-static char *ip4_full_reass_error_strings[] = {
-#define _(sym, string) string,
- foreach_ip4_error
-#undef _
-};
-
VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
.name = "ip4-full-reassembly",
.vector_size = sizeof (u32),
.format_trace = format_ip4_full_reass_trace,
- .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
- .error_strings = ip4_full_reass_error_strings,
+ .n_errors = IP4_N_ERROR,
+ .error_counters = ip4_error_counters,
.n_next_nodes = IP4_FULL_REASS_N_NEXT,
.next_nodes =
{
.name = "ip4-local-full-reassembly",
.vector_size = sizeof (u32),
.format_trace = format_ip4_full_reass_trace,
- .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
- .error_strings = ip4_full_reass_error_strings,
+ .n_errors = IP4_N_ERROR,
+ .error_counters = ip4_error_counters,
.n_next_nodes = IP4_FULL_REASS_N_NEXT,
.next_nodes =
{
.name = "ip4-full-reassembly-feature",
.vector_size = sizeof (u32),
.format_trace = format_ip4_full_reass_trace,
- .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
- .error_strings = ip4_full_reass_error_strings,
+ .n_errors = IP4_N_ERROR,
+ .error_counters = ip4_error_counters,
.n_next_nodes = IP4_FULL_REASS_N_NEXT,
.next_nodes =
{
.name = "ip4-full-reassembly-custom",
.vector_size = sizeof (u32),
.format_trace = format_ip4_full_reass_trace,
- .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
- .error_strings = ip4_full_reass_error_strings,
+ .n_errors = IP4_N_ERROR,
+ .error_counters = ip4_error_counters,
.n_next_nodes = IP4_FULL_REASS_N_NEXT,
.next_nodes =
{
u32 nbuckets;
u8 i;
- nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
+ /* need more mem with more workers */
+ nbuckets = (u32) (rm->max_reass_n * (vlib_num_workers () + 1) /
+ IP4_REASS_HT_LOAD_FACTOR);
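+  /* e.g. with the default 1024 contexts and 3 workers plus main this is
+   * 1024 * 4 / 0.75 ~= 5461, rounded up to the next power of two below */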
for (i = 0; i < 31; i++)
if ((1 << i) >= nbuckets)
uword thread_index = 0;
int index;
const uword nthreads = vlib_num_workers () + 1;
+ u32 n_left_to_next, *to_next;
+
for (thread_index = 0; thread_index < nthreads; ++thread_index)
{
ip4_full_reass_per_thread_t *rt =
clib_spinlock_lock (&rt->lock);
vec_reset_length (pool_indexes_to_free);
- pool_foreach_index (index, rt->pool) {
- reass = pool_elt_at_index (rt->pool, index);
- if (now > reass->last_heard + rm->timeout)
- {
- vec_add1 (pool_indexes_to_free, index);
- }
- }
+
+      /* Pace the number of timeouts handled per thread, to avoid barrier
+       * sync issues in real-world scenarios */
+
+ u32 beg = rt->last_id;
+      /* sized so that each context is visited at least once per second */
+ u32 end =
+ beg + (IP4_REASS_MAX_REASSEMBLIES_DEFAULT *
+ IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS / MSEC_PER_SEC +
+ 1);
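+      /* with the defaults this visits 1024 * 50 / 1000 + 1 = 52 contexts
+       * per walk; at one walk every 50 ms the whole 1024-entry pool is
+       * covered in ~20 walks, i.e. about once per second */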
+ if (end > vec_len (rt->pool))
+ {
+ end = vec_len (rt->pool);
+ rt->last_id = 0;
+ }
+ else
+ {
+ rt->last_id = end;
+ }
+
+ pool_foreach_stepping_index (index, beg, end, rt->pool)
+ {
+ reass = pool_elt_at_index (rt->pool, index);
+ if (now > reass->last_heard + rm->timeout)
+ {
+ vec_add1 (pool_indexes_to_free, index);
+ }
+ }
+
+ if (vec_len (pool_indexes_to_free))
+ vlib_node_increment_counter (vm, node->node_index,
+ IP4_ERROR_REASS_TIMEOUT,
+ vec_len (pool_indexes_to_free));
int *i;
vec_foreach (i, pool_indexes_to_free)
{
ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
- ip4_full_reass_drop_all (vm, node, reass, ~0);
+ ip4_full_reass_drop_all (vm, node, reass, &n_left_to_next,
+ &to_next);
ip4_full_reass_free (rm, rt, reass);
}
}
VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
- .function = ip4_full_reass_walk_expired,
- .type = VLIB_NODE_TYPE_PROCESS,
- .name = "ip4-full-reassembly-expire-walk",
- .format_trace = format_ip4_full_reass_trace,
- .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
- .error_strings = ip4_full_reass_error_strings,
-
+ .function = ip4_full_reass_walk_expired,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "ip4-full-reassembly-expire-walk",
+ .format_trace = format_ip4_full_reass_trace,
+ .n_errors = IP4_N_ERROR,
+ .error_counters = ip4_error_counters,
};
static u8 *