X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fip%2Freass%2Fip4_full_reass.c;h=7f0b8d90c12798c6f2d630c16c1a7512da389005;hb=205ed8f8845a8ea36f38ed29df158a5a07c2e2c3;hp=b5ea0276c8813e233bd16af46e8d6883cd497493;hpb=14a7442e73721b3484fdd870464bd582a693626d;p=vpp.git

diff --git a/src/vnet/ip/reass/ip4_full_reass.c b/src/vnet/ip/reass/ip4_full_reass.c
index b5ea0276c88..7f0b8d90c12 100644
--- a/src/vnet/ip/reass/ip4_full_reass.c
+++ b/src/vnet/ip/reass/ip4_full_reass.c
@@ -23,14 +23,19 @@
 #include <vppinfra/vec.h>
 #include <vnet/vnet.h>
 #include <vnet/ip/ip.h>
+#include <vnet/ip/ip.api_enum.h>
 #include <vppinfra/fifo.h>
 #include <vppinfra/bihash_16_8.h>
 #include <vnet/ip/reass/ip4_full_reass.h>
 #include <stddef.h>

 #define MSEC_PER_SEC 1000
-#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
-#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
+#define IP4_REASS_TIMEOUT_DEFAULT_MS 200
+
+/* As there are only 1024 reassembly contexts per thread, either DDoS attacks
+ * or a fraction of genuine timeouts would quickly consume these contexts,
+ * running out of context space and leaving us unable to perform reassembly */
+#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 50 // 50 ms default
 #define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
 #define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
 #define IP4_REASS_HT_LOAD_FACTOR (0.75)
@@ -68,21 +73,19 @@ typedef enum

 typedef struct
 {
-  union
+  struct
   {
-    struct
-    {
-      u32 xx_id;
-      ip4_address_t src;
-      ip4_address_t dst;
-      u16 frag_id;
-      u8 proto;
-      u8 unused;
-    };
-    u64 as_u64[2];
+    u16 frag_id;
+    u8 proto;
+    u8 unused;
+    u32 fib_index;
+    ip4_address_t src;
+    ip4_address_t dst;
   };
 } ip4_full_reass_key_t;
+STATIC_ASSERT_SIZEOF (ip4_full_reass_key_t, 16);
+
 typedef union
 {
   struct
@@ -155,6 +158,8 @@ typedef struct
   ip4_full_reass_t *pool;
   u32 reass_n;
   u32 id_counter;
+  // for pacing the main thread timeout walk
+  u32 last_id;
   clib_spinlock_t lock;
 } ip4_full_reass_per_thread_t;
@@ -410,9 +415,8 @@ ip4_full_reass_free (ip4_full_reass_main_t * rm,
 		     ip4_full_reass_per_thread_t * rt,
 		     ip4_full_reass_t * reass)
 {
-  clib_bihash_kv_16_8_t kv;
-  kv.key[0] = reass->key.as_u64[0];
-  kv.key[1] = reass->key.as_u64[1];
+  clib_bihash_kv_16_8_t kv = {};
+  clib_memcpy_fast (&kv, &reass->key, sizeof (kv.key));
   clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
   return ip4_full_reass_free_ctx (rt, reass);
 }
@@ -423,8 +427,7 @@ ip4_full_reass_free (ip4_full_reass_main_t * rm,
 * with local variables would cause either buffer leak or corruption */
 always_inline void
 ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
-			 ip4_full_reass_t *reass, u32 *n_left_to_next,
-			 u32 **to_next)
+			 ip4_full_reass_t *reass)
 {
   u32 range_bi = reass->first_bi;
   vlib_buffer_t *range_b;
@@ -448,40 +451,23 @@ ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
   if (~0 != reass->error_next_index &&
       reass->error_next_index < node->n_next_nodes)
     {
-      u32 next_index;
-
-      next_index = reass->error_next_index;
-      u32 bi = ~0;
+      u32 n_free = vec_len (to_free);

       /* record number of packets sent to custom app */
       vlib_node_increment_counter (vm, node->node_index,
-				   IP4_ERROR_REASS_TO_CUSTOM_APP,
-				   vec_len (to_free));
-
-      while (vec_len (to_free) > 0)
-	{
-	  vlib_get_next_frame (vm, node, next_index, *to_next,
-			       (*n_left_to_next));
-
-	  while (vec_len (to_free) > 0 && (*n_left_to_next) > 0)
-	    {
-	      bi = vec_pop (to_free);
+				   IP4_ERROR_REASS_TO_CUSTOM_APP, n_free);
+
+      if (node->flags & VLIB_NODE_FLAG_TRACE)
+	for (u32 i = 0; i < n_free; i++)
+	  {
+	    vlib_buffer_t *b = vlib_get_buffer (vm, to_free[i]);
+	    if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
+	      ip4_full_reass_add_trace (vm, node, reass, to_free[i],
+					RANGE_DISCARD, 0, ~0);
+	  }

-	      if (~0 != bi)
-		{
-		  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
-		  if ((b->flags & VLIB_BUFFER_IS_TRACED))
-		    {
-		      ip4_full_reass_add_trace (vm, node, reass, bi,
-						RANGE_DISCARD, 0, ~0);
-		    }
-		  *to_next[0] = bi;
-		  (*to_next) += 1;
-		  (*n_left_to_next) -= 1;
-		}
-	    }
-	  vlib_put_next_frame (vm, node, next_index, (*n_left_to_next));
-	}
+      vlib_buffer_enqueue_to_single_next (vm, node, to_free,
+					  reass->error_next_index, n_free);
     }
   else
     {
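Note: the ip4_full_reass_drop_all () rewrite above replaces the hand-rolled
vlib_get_next_frame () / vlib_put_next_frame () loop with a single call to
vlib_buffer_enqueue_to_single_next (), which takes a flat vector of buffer
indices plus one next-node index and does all frame management internally.
A minimal sketch of the pattern (the helper name send_all_to_error_next is
illustrative, not part of this patch):

	/* enqueue every buffer in the vector to one next node; no
	 * n_left_to_next bookkeeping or partial-frame handling needed */
	static void
	send_all_to_error_next (vlib_main_t *vm, vlib_node_runtime_t *node,
				u32 *buffer_indices, u16 next_index)
	{
	  vlib_buffer_enqueue_to_single_next (vm, node, buffer_indices,
					      next_index,
					      vec_len (buffer_indices));
	}

This is also why the u32 *n_left_to_next / u32 **to_next parameters can be
dropped from the signature here and from every caller further down.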
@@ -560,8 +546,7 @@ always_inline ip4_full_reass_t *
 ip4_full_reass_find_or_create (vlib_main_t *vm, vlib_node_runtime_t *node,
			       ip4_full_reass_main_t *rm,
			       ip4_full_reass_per_thread_t *rt,
-			       ip4_full_reass_kv_t *kv, u8 *do_handoff,
-			       u32 *n_left_to_next, u32 **to_next)
+			       ip4_full_reass_kv_t *kv, u8 *do_handoff)
 {
   ip4_full_reass_t *reass;
   f64 now;
@@ -586,7 +571,7 @@ again:
     {
       vlib_node_increment_counter (vm, node->node_index,
				    IP4_ERROR_REASS_TIMEOUT, 1);
-      ip4_full_reass_drop_all (vm, node, reass, n_left_to_next, to_next);
+      ip4_full_reass_drop_all (vm, node, reass);
       ip4_full_reass_free (rm, rt, reass);
       reass = NULL;
     }
@@ -614,8 +599,7 @@ again:
       ++rt->reass_n;
     }

-  reass->key.as_u64[0] = kv->kv.key[0];
-  reass->key.as_u64[1] = kv->kv.key[1];
+  clib_memcpy_fast (&reass->key, &kv->kv.key, sizeof (reass->key));
   kv->v.reass_index = (reass - rt->pool);
   kv->v.memory_owner_thread_index = vm->thread_index;
   reass->last_heard = now;
@@ -644,7 +628,6 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
   vlib_buffer_t *last_b = NULL;
   u32 sub_chain_bi = reass->first_bi;
   u32 total_length = 0;
-  u32 buf_cnt = 0;
   do
     {
       u32 tmp_bi = sub_chain_bi;
@@ -681,7 +664,6 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
	vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
       while (1)
	{
-	  ++buf_cnt;
	  if (trim_front)
	    {
	      if (trim_front > tmp->current_length)
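Note on the key handling: with the union and its as_u64[2] view gone, both
ip4_full_reass_free () and the code above copy the key struct with
clib_memcpy_fast (). The STATIC_ASSERT_SIZEOF (ip4_full_reass_key_t, 16)
added earlier is what makes this safe: the struct must exactly fill the
16-byte key of clib_bihash_kv_16_8_t, with no padding that could leak
indeterminate bytes into hash lookups. A minimal sketch of the invariant
(the helper is illustrative only):

	#include <vppinfra/bihash_16_8.h>

	/* ip4_full_reass_key_t is asserted to be exactly 16 bytes,
	 * i.e. sizeof (kv->key), so a flat copy fully defines the key */
	static void
	reass_key_to_kv (const ip4_full_reass_key_t *key,
			 clib_bihash_kv_16_8_t *kv)
	{
	  clib_memcpy_fast (kv, key, sizeof (kv->key));
	}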
@@ -1181,212 +1163,198 @@ ip4_full_reass_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
			bool is_local)
 {
   u32 *from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, *to_next, next_index;
+  u32 n_left, n_next = 0, to_next[VLIB_FRAME_SIZE];
   ip4_full_reass_main_t *rm = &ip4_full_reass_main;
   ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
+  u16 nexts[VLIB_FRAME_SIZE];
+
   clib_spinlock_lock (&rt->lock);

-  n_left_from = frame->n_vectors;
-  next_index = node->cached_next_index;
-  while (n_left_from > 0)
+  n_left = frame->n_vectors;
+  while (n_left > 0)
     {
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-      while (n_left_from > 0 && n_left_to_next > 0)
-	{
-	  u32 bi0;
-	  vlib_buffer_t *b0;
-	  u32 next0;
-	  u32 error0 = IP4_ERROR_NONE;
+      u32 bi0;
+      vlib_buffer_t *b0;
+      u32 next0;
+      u32 error0 = IP4_ERROR_NONE;

-	  bi0 = from[0];
-	  b0 = vlib_get_buffer (vm, bi0);
+      bi0 = from[0];
+      b0 = vlib_get_buffer (vm, bi0);

-	  ip4_header_t *ip0 = vlib_buffer_get_current (b0);
-	  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
-	    {
-	      // this is a whole packet - no fragmentation
-	      if (CUSTOM != type)
-		{
-		  next0 = IP4_FULL_REASS_NEXT_INPUT;
-		}
-	      else
-		{
-		  next0 = vnet_buffer (b0)->ip.reass.next_index;
-		}
-	      ip4_full_reass_add_trace (vm, node, NULL, bi0, PASSTHROUGH, 0,
-					~0);
-	      goto packet_enqueue;
-	    }
+      ip4_header_t *ip0 = vlib_buffer_get_current (b0);
+      if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
+	{
+	  // this is a whole packet - no fragmentation
+	  if (CUSTOM != type)
+	    {
+	      next0 = IP4_FULL_REASS_NEXT_INPUT;
+	    }
+	  else
+	    {
+	      next0 = vnet_buffer (b0)->ip.reass.next_index;
+	    }
+	  ip4_full_reass_add_trace (vm, node, NULL, bi0, PASSTHROUGH, 0, ~0);
+	  goto packet_enqueue;
+	}

-	  if (is_local && !rm->is_local_reass_enabled)
-	    {
-	      next0 = IP4_FULL_REASS_NEXT_DROP;
-	      goto packet_enqueue;
-	    }
+      if (is_local && !rm->is_local_reass_enabled)
+	{
+	  next0 = IP4_FULL_REASS_NEXT_DROP;
+	  goto packet_enqueue;
+	}

-	  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
-	  const u32 fragment_length =
-	    clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
-	  const u32 fragment_last = fragment_first + fragment_length - 1;
+      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
+      const u32 fragment_length =
+	clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
+      const u32 fragment_last = fragment_first + fragment_length - 1;

-	  /* Keep track of received fragments */
-	  vlib_node_increment_counter (vm, node->node_index,
-				       IP4_ERROR_REASS_FRAGMENTS_RCVD, 1);
+      /* Keep track of received fragments */
+      vlib_node_increment_counter (vm, node->node_index,
+				   IP4_ERROR_REASS_FRAGMENTS_RCVD, 1);

-	  if (fragment_first > fragment_last ||
-	      fragment_first + fragment_length > UINT16_MAX - 20 ||
-	      (fragment_length < 8 && // 8 is minimum frag length per RFC 791
-	       ip4_get_fragment_more (ip0)))
-	    {
-	      next0 = IP4_FULL_REASS_NEXT_DROP;
-	      error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
-	      goto packet_enqueue;
-	    }
-	  ip4_full_reass_kv_t kv;
-	  u8 do_handoff = 0;
+      if (fragment_first > fragment_last ||
+	  fragment_first + fragment_length > UINT16_MAX - 20 ||
+	  (fragment_length < 8 && // 8 is minimum frag length per RFC 791
+	   ip4_get_fragment_more (ip0)))
+	{
+	  next0 = IP4_FULL_REASS_NEXT_DROP;
+	  error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
+	  goto packet_enqueue;
+	}

-	  kv.k.as_u64[0] =
-	    (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
-			   vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
-	    (u64) ip0->src_address.as_u32 << 32;
-	  kv.k.as_u64[1] =
-	    (u64) ip0->dst_address.
-	    as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
+      u32 fib_index = (vnet_buffer (b0)->sw_if_index[VLIB_TX] == (u32) ~0) ?
+			vec_elt (ip4_main.fib_index_by_sw_if_index,
+				 vnet_buffer (b0)->sw_if_index[VLIB_RX]) :
+			vnet_buffer (b0)->sw_if_index[VLIB_TX];

-	  ip4_full_reass_t *reass = ip4_full_reass_find_or_create (
-	    vm, node, rm, rt, &kv, &do_handoff, &n_left_to_next, &to_next);
+      ip4_full_reass_kv_t kv = { .k.fib_index = fib_index,
+				 .k.src.as_u32 = ip0->src_address.as_u32,
+				 .k.dst.as_u32 = ip0->dst_address.as_u32,
+				 .k.frag_id = ip0->fragment_id,
+				 .k.proto = ip0->protocol };
+      u8 do_handoff = 0;
+
+      ip4_full_reass_t *reass =
+	ip4_full_reass_find_or_create (vm, node, rm, rt, &kv, &do_handoff);

-	  if (reass)
-	    {
-	      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
-	      if (0 == fragment_first)
-		{
-		  reass->sendout_thread_index = vm->thread_index;
-		}
-	    }
+      if (reass)
+	{
+	  const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
+	  if (0 == fragment_first)
+	    {
+	      reass->sendout_thread_index = vm->thread_index;
+	    }
+	}

-	  if (PREDICT_FALSE (do_handoff))
-	    {
-	      next0 = IP4_FULL_REASS_NEXT_HANDOFF;
-	      vnet_buffer (b0)->ip.reass.owner_thread_index =
-		kv.v.memory_owner_thread_index;
-	    }
-	  else if (reass)
-	    {
-	      u32 handoff_thread_idx;
-	      u32 counter = ~0;
-	      switch (ip4_full_reass_update
-		      (vm, node, rm, rt, reass, &bi0, &next0,
-		       &error0, CUSTOM == type, &handoff_thread_idx))
-		{
-		case IP4_REASS_RC_OK:
-		  /* nothing to do here */
-		  break;
-		case IP4_REASS_RC_HANDOFF:
-		  next0 = IP4_FULL_REASS_NEXT_HANDOFF;
-		  b0 = vlib_get_buffer (vm, bi0);
-		  vnet_buffer (b0)->ip.reass.owner_thread_index =
-		    handoff_thread_idx;
-		  break;
-		case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
-		  counter = IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG;
-		  break;
-		case IP4_REASS_RC_NO_BUF:
-		  counter = IP4_ERROR_REASS_NO_BUF;
-		  break;
-		case IP4_REASS_RC_INTERNAL_ERROR:
-		  counter = IP4_ERROR_REASS_INTERNAL_ERROR;
-		  /* Sanitization is needed in internal error cases only, as
-		   * the incoming packet is already dropped in other cases,
-		   * also adding bi0 back to the reassembly list, fixes the
-		   * leaking of buffers during internal errors.
-		   *
-		   * Also it doesnt make sense to send these buffers custom
-		   * app, these fragments are with internal errors */
-		  sanitize_reass_buffers_add_missing (vm, reass, &bi0);
-		  reass->error_next_index = ~0;
-		  break;
-		}
-	      if (~0 != counter)
-		{
-		  vlib_node_increment_counter (vm, node->node_index, counter,
-					       1);
-		  ip4_full_reass_drop_all (vm, node, reass, &n_left_to_next,
-					   &to_next);
-		  ip4_full_reass_free (rm, rt, reass);
-		  goto next_packet;
-		}
-	    }
-	  else
-	    {
-	      next0 = IP4_FULL_REASS_NEXT_DROP;
-	      error0 = IP4_ERROR_REASS_LIMIT_REACHED;
-	    }
+      if (PREDICT_FALSE (do_handoff))
+	{
+	  next0 = IP4_FULL_REASS_NEXT_HANDOFF;
+	  vnet_buffer (b0)->ip.reass.owner_thread_index =
+	    kv.v.memory_owner_thread_index;
+	}
+      else if (reass)
+	{
+	  u32 handoff_thread_idx;
+	  u32 counter = ~0;
+	  switch (ip4_full_reass_update (vm, node, rm, rt, reass, &bi0,
+					 &next0, &error0, CUSTOM == type,
+					 &handoff_thread_idx))
+	    {
+	    case IP4_REASS_RC_OK:
+	      /* nothing to do here */
+	      break;
+	    case IP4_REASS_RC_HANDOFF:
+	      next0 = IP4_FULL_REASS_NEXT_HANDOFF;
+	      b0 = vlib_get_buffer (vm, bi0);
+	      vnet_buffer (b0)->ip.reass.owner_thread_index =
+		handoff_thread_idx;
+	      break;
+	    case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
+	      counter = IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG;
+	      break;
+	    case IP4_REASS_RC_NO_BUF:
+	      counter = IP4_ERROR_REASS_NO_BUF;
+	      break;
+	    case IP4_REASS_RC_INTERNAL_ERROR:
+	      counter = IP4_ERROR_REASS_INTERNAL_ERROR;
+	      /* Sanitization is needed in internal error cases only, as
+	       * the incoming packet is already dropped in other cases;
+	       * adding bi0 back to the reassembly list also fixes the
+	       * leaking of buffers during internal errors.
+	       *
+	       * It also doesn't make sense to send these buffers to the
+	       * custom app, as these fragments carry internal errors */
+	      sanitize_reass_buffers_add_missing (vm, reass, &bi0);
+	      reass->error_next_index = ~0;
+	      break;
+	    }
+	  if (~0 != counter)
+	    {
+	      vlib_node_increment_counter (vm, node->node_index, counter, 1);
+	      ip4_full_reass_drop_all (vm, node, reass);
+	      ip4_full_reass_free (rm, rt, reass);
+	      goto next_packet;
+	    }
+	}
+      else
+	{
+	  next0 = IP4_FULL_REASS_NEXT_DROP;
+	  error0 = IP4_ERROR_REASS_LIMIT_REACHED;
+	}
+    packet_enqueue:

-	packet_enqueue:
-
-	  if (bi0 != ~0)
-	    {
-	      to_next[0] = bi0;
-	      to_next += 1;
-	      n_left_to_next -= 1;
-
-	      /* bi0 might have been updated by reass_finalize, reload */
-	      b0 = vlib_get_buffer (vm, bi0);
-	      if (IP4_ERROR_NONE != error0)
-		{
-		  b0->error = node->errors[error0];
-		}
+      if (bi0 != ~0)
+	{
+	  /* bi0 might have been updated by reass_finalize, reload */
+	  b0 = vlib_get_buffer (vm, bi0);
+	  if (IP4_ERROR_NONE != error0)
+	    {
+	      b0->error = node->errors[error0];
+	    }

-	      if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
-		{
-		  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
-		    {
-		      ip4_full_reass_add_trace (
-			vm, node, NULL, bi0, HANDOFF, 0,
-			vnet_buffer (b0)->ip.reass.owner_thread_index);
-		    }
-		}
-	      else if (FEATURE == type && IP4_ERROR_NONE == error0)
-		{
-		  vnet_feature_next (&next0, b0);
-		}
+	  if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
+	    {
+	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+		{
+		  ip4_full_reass_add_trace (
+		    vm, node, NULL, bi0, HANDOFF, 0,
+		    vnet_buffer (b0)->ip.reass.owner_thread_index);
+		}
+	    }
+	  else if (FEATURE == type && IP4_ERROR_NONE == error0)
+	    {
+	      vnet_feature_next (&next0, b0);
+	    }

-	      /* Increment the counter to-custom-app also as this fragment is
-	       * also going to application */
-	      if (CUSTOM == type)
-		{
-		  vlib_node_increment_counter (
-		    vm, node->node_index, IP4_ERROR_REASS_TO_CUSTOM_APP, 1);
-		}
+	  /* Increment the to-custom-app counter as well, as this fragment
+	   * is also going to the application */
+	  if (CUSTOM == type)
+	    {
+	      vlib_node_increment_counter (vm, node->node_index,
+					   IP4_ERROR_REASS_TO_CUSTOM_APP, 1);
+	    }

-	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-					       to_next, n_left_to_next,
-					       bi0, next0);
-	      IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
-	    }
+	  to_next[n_next] = bi0;
+	  nexts[n_next] = next0;
+	  n_next++;
+	  IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
+	}

-	next_packet:
-	  from += 1;
-	  n_left_from -= 1;
-	}
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    next_packet:
+      from += 1;
+      n_left -= 1;
     }

   clib_spinlock_unlock (&rt->lock);
+
+  vlib_buffer_enqueue_to_next (vm, node, to_next, nexts, n_next);
   return frame->n_vectors;
 }

-static char *ip4_full_reass_error_strings[] = {
-#define _(sym, string) string,
-  foreach_ip4_error
-#undef _
-};
-
 VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
				     vlib_node_runtime_t * node,
				     vlib_frame_t * frame)
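Two things worth calling out in the rewritten ip4_full_reass_inline ()
above. First, buffers are now collected into flat to_next[] / nexts[]
arrays and flushed once per frame with vlib_buffer_enqueue_to_next (),
instead of being threaded through vlib_get_next_frame () bookkeeping.
Second, the reassembly key now hashes on the resolved FIB index rather
than the raw RX interface mapping alone, honoring a FIB override carried
in sw_if_index[VLIB_TX] (by VPP convention, ~0 there means no override).
A sketch of that resolution as a standalone helper (the function name is
illustrative, not part of the patch):

	/* resolve the FIB index used in the reassembly key: prefer an
	 * explicit FIB index in the VLIB_TX metadata slot, else map the
	 * RX interface through ip4_main.fib_index_by_sw_if_index */
	static u32
	reass_resolve_fib_index (vlib_buffer_t *b)
	{
	  u32 tx = vnet_buffer (b)->sw_if_index[VLIB_TX];
	  if (tx != (u32) ~0)
	    return tx;
	  return vec_elt (ip4_main.fib_index_by_sw_if_index,
			  vnet_buffer (b)->sw_if_index[VLIB_RX]);
	}

Note that frag_id is copied into the key still in network byte order,
which is fine for hashing as long as it is done consistently.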
@@ -1398,8 +1366,8 @@ VLIB_REGISTER_NODE (ip4_full_reass_node) = {
     .name = "ip4-full-reassembly",
     .vector_size = sizeof (u32),
     .format_trace = format_ip4_full_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-    .error_strings = ip4_full_reass_error_strings,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
     .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
       {
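The same error-table swap is applied to every node registration in this
file: the locally defined ip4_full_reass_error_strings[] (expanded from
foreach_ip4_error) is deleted, and each node instead references a shared
generated descriptor table, presumably the reason <vnet/ip/ip.api_enum.h>
is now included at the top. A hedged sketch of what the registrations rely
on (the exact declarations live in the generated header):

	/* assumed shape of the generated table referenced above,
	 * one vlib_error_desc_t per ip4_error_t value */
	extern vlib_error_desc_t ip4_error_counters[];

	/* each node then declares:
	 *   .n_errors = IP4_N_ERROR,
	 *   .error_counters = ip4_error_counters,
	 * instead of carrying its own per-node string array */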
@@ -1420,8 +1388,8 @@ VLIB_REGISTER_NODE (ip4_local_full_reass_node) = {
     .name = "ip4-local-full-reassembly",
     .vector_size = sizeof (u32),
     .format_trace = format_ip4_full_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-    .error_strings = ip4_full_reass_error_strings,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
     .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
       {
@@ -1444,8 +1412,8 @@ VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
     .name = "ip4-full-reassembly-feature",
     .vector_size = sizeof (u32),
     .format_trace = format_ip4_full_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-    .error_strings = ip4_full_reass_error_strings,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
     .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
       {
@@ -1456,11 +1424,11 @@ VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
 };

 VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
-    .arc_name = "ip4-unicast",
-    .node_name = "ip4-full-reassembly-feature",
-    .runs_before = VNET_FEATURES ("ip4-lookup",
-				  "ipsec4-input-feature"),
-    .runs_after = 0,
+  .arc_name = "ip4-unicast",
+  .node_name = "ip4-full-reassembly-feature",
+  .runs_before = VNET_FEATURES ("ip4-lookup", "ipsec4-input-feature",
+				"ip4-sv-reassembly-feature"),
+  .runs_after = 0,
 };

 VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
@@ -1474,8 +1442,8 @@ VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
     .name = "ip4-full-reassembly-custom",
     .vector_size = sizeof (u32),
     .format_trace = format_ip4_full_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-    .error_strings = ip4_full_reass_error_strings,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
     .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
       {
@@ -1485,15 +1453,6 @@ VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
     },
 };

-VNET_FEATURE_INIT (ip4_full_reass_custom, static) = {
-  .arc_name = "ip4-unicast",
-  .node_name = "ip4-full-reassembly-feature",
-  .runs_before = VNET_FEATURES ("ip4-lookup",
-				"ipsec4-input-feature"),
-  .runs_after = 0,
-};
-
-
 #ifndef CLIB_MARCH_VARIANT
 uword
 ip4_full_reass_custom_register_next_node (uword node_index)
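Note the deleted VNET_FEATURE_INIT (ip4_full_reass_custom) block above: it
registered the same "ip4-full-reassembly-feature" node on the same
"ip4-unicast" arc a second time, so dropping it removes a duplicate
registration, not functionality. The surviving registration also gains
"ip4-sv-reassembly-feature" in runs_before, pinning full reassembly ahead
of shallow-virtual reassembly on that arc. For reference, the general
shape of a feature registration (node names here are illustrative):

	VNET_FEATURE_INIT (example_feature, static) = {
	  .arc_name = "ip4-unicast",
	  .node_name = "example-node",
	  /* runs_before / runs_after constraints form a partial
	   * order that determines node placement on the arc */
	  .runs_before = VNET_FEATURES ("ip4-lookup"),
	};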
@@ -1689,7 +1648,6 @@ ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
   uword thread_index = 0;
   int index;
   const uword nthreads = vlib_num_workers () + 1;
-  u32 n_left_to_next, *to_next;

   for (thread_index = 0; thread_index < nthreads; ++thread_index)
     {
@@ -1699,14 +1657,33 @@ ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,

       vec_reset_length (pool_indexes_to_free);

-      pool_foreach_index (index, rt->pool)
+      /* Pace the number of timeouts handled per thread, to avoid barrier
+       * sync issues in real-world scenarios */
+
+      u32 beg = rt->last_id;
+      /* to ensure we walk every context at least once per second */
+      u32 end =
+	beg + (IP4_REASS_MAX_REASSEMBLIES_DEFAULT *
+		 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS / MSEC_PER_SEC +
+	       1);
+      if (end > vec_len (rt->pool))
	{
-	  reass = pool_elt_at_index (rt->pool, index);
-	  if (now > reass->last_heard + rm->timeout)
-	    {
-	      vec_add1 (pool_indexes_to_free, index);
-	    }
+	  end = vec_len (rt->pool);
+	  rt->last_id = 0;
	}
+      else
+	{
+	  rt->last_id = end;
+	}
+
+      pool_foreach_stepping_index (index, beg, end, rt->pool)
+	{
+	  reass = pool_elt_at_index (rt->pool, index);
+	  if (now > reass->last_heard + rm->timeout)
+	    {
+	      vec_add1 (pool_indexes_to_free, index);
+	    }
+	}

       if (vec_len (pool_indexes_to_free))
	vlib_node_increment_counter (vm, node->node_index,
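With the defaults above, the pacing window works out as follows: each
50 ms walk visits 1024 * 50 / 1000 + 1 = 52 pool entries, so the whole
1024-entry pool is covered in 20 walks, i.e. about one second, which is
what the "at least once per second" comment relies on. A self-contained
check of that arithmetic (plain C, mirroring the constants by value):

	#include <assert.h>

	int
	main (void)
	{
	  unsigned max_reass = 1024; /* IP4_REASS_MAX_REASSEMBLIES_DEFAULT */
	  unsigned walk_ms = 50; /* IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS */
	  /* entries visited per walk: 1024 * 50 / 1000 + 1 == 52 */
	  unsigned window = max_reass * walk_ms / 1000 + 1; /* MSEC_PER_SEC */
	  assert (window == 52);
	  /* ceil (1024 / 52) == 20 walks; 20 * 50 ms == 1000 ms */
	  unsigned walks = (max_reass + window - 1) / window;
	  assert (walks * walk_ms <= 1000);
	  return 0;
	}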
@@ -1716,8 +1693,7 @@ ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
	  vec_foreach (i, pool_indexes_to_free)
	  {
	    ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
-	    ip4_full_reass_drop_all (vm, node, reass, &n_left_to_next,
-				     &to_next);
+	    ip4_full_reass_drop_all (vm, node, reass);
	    ip4_full_reass_free (rm, rt, reass);
	  }

@@ -1735,13 +1711,12 @@ ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
 }

 VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
-  .function = ip4_full_reass_walk_expired,
-  .type = VLIB_NODE_TYPE_PROCESS,
-  .name = "ip4-full-reassembly-expire-walk",
-  .format_trace = format_ip4_full_reass_trace,
-  .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-  .error_strings = ip4_full_reass_error_strings,
-
+  .function = ip4_full_reass_walk_expired,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "ip4-full-reassembly-expire-walk",
+  .format_trace = format_ip4_full_reass_trace,
+  .n_errors = IP4_N_ERROR,
+  .error_counters = ip4_error_counters,
 };

 static u8 *
@@ -1749,9 +1724,8 @@ format_ip4_full_reass_key (u8 * s, va_list * args)
 {
   ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
   s =
-    format (s,
-	    "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
-	    key->xx_id, format_ip4_address, &key->src, format_ip4_address,
+    format (s, "fib_index: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
+	    key->fib_index, format_ip4_address, &key->src, format_ip4_address,
	    &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
   return s;
 }
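A final usage note on the updated formatter: the key now prints the FIB
index it hashes on, and because frag_id is stored in network byte order
the formatter converts it with clib_net_to_host_u16 () for display. A
minimal, illustrative call site (the surrounding reass variable and vm
are assumed; s is a vec-managed u8 *):

	u8 *s = format (0, "%U", format_ip4_full_reass_key, &reass->key);
	vlib_cli_output (vm, "%v", s);
	vec_free (s);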