2 * Copyright (c) 2017 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
18 * @brief IPv4 Full Reassembly.
20 * This file contains the source code for IPv4 full reassembly.
23 #include <vppinfra/vec.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vnet/ip/ip.api_enum.h>
27 #include <vppinfra/fifo.h>
28 #include <vppinfra/bihash_16_8.h>
29 #include <vnet/ip/reass/ip4_full_reass.h>
32 #define MSEC_PER_SEC 1000
33 #define IP4_REASS_TIMEOUT_DEFAULT_MS 200
35 /* With only 1024 reassembly contexts per thread, either a DDoS attack or a
36 * burst of genuine timeouts could quickly exhaust the contexts, leaving no
37 * space to perform further reassembly */
38 #define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 50 // 50 ms default
39 #define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
40 #define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
41 #define IP4_REASS_HT_LOAD_FACTOR (0.75)
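/* A rough sketch of how the defaults above interact: with at most 1024
 * contexts per thread and a 200 ms timeout, a burst of fragments with
 * unique keys can occupy every context; the 50 ms expire walk (paced
 * further below) and the timeout check done on lookup are what eventually
 * reclaim them. */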
43 #define IP4_REASS_DEBUG_BUFFERS 0
44 #if IP4_REASS_DEBUG_BUFFERS
45 #define IP4_REASS_DEBUG_BUFFER(bi, what) \
49 printf (#what "buffer %u", _bi); \
50 vlib_buffer_t *_b = vlib_get_buffer (vm, _bi); \
51 while (_b->flags & VLIB_BUFFER_NEXT_PRESENT) \
53 _bi = _b->next_buffer; \
54 printf ("[%u]", _bi); \
55 _b = vlib_get_buffer (vm, _bi); \
62 #define IP4_REASS_DEBUG_BUFFER(...)
68 IP4_REASS_RC_TOO_MANY_FRAGMENTS,
69 IP4_REASS_RC_INTERNAL_ERROR,
72 } ip4_full_reass_rc_t;
85 } ip4_full_reass_key_t;
87 STATIC_ASSERT_SIZEOF (ip4_full_reass_key_t, 16);
94 u32 memory_owner_thread_index;
97 } ip4_full_reass_val_t;
103 ip4_full_reass_key_t k;
104 ip4_full_reass_val_t v;
106 clib_bihash_kv_16_8_t kv;
107 } ip4_full_reass_kv_t;
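/* The kv union above lets the (fib_index, src, dst, frag_id, proto) key and
 * the (reass_index, memory_owner_thread_index) value be handed directly to
 * clib_bihash_search_16_8 / clib_bihash_add_del_16_8 as a
 * clib_bihash_kv_16_8_t, without any repacking. */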
110 ip4_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
112 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
113 return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
117 ip4_full_reass_buffer_get_data_len (vlib_buffer_t * b)
119 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
120 return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
121 (vnb->ip.reass.fragment_first +
122 ip4_full_reass_buffer_get_data_offset (b)) + 1;
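/* Worked example (illustrative values only): a fragment carrying packet
 * bytes [1480, 2959] whose range was later trimmed to [2000, 2959] yields
 * data_offset = 2000 - 1480 = 520 and
 * data_len = min (2959, 2959) - (1480 + 520) + 1 = 960. */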
128 ip4_full_reass_key_t key;
129 // time when last packet was received
131 // internal id of this reassembly
133 // buffer index of first buffer in this reassembly context
135 // last octet of packet, ~0 until fragment without more_fragments arrives
136 u32 last_packet_octet;
137 // length of data collected so far
139 // trace operation counter
140 u32 trace_op_counter;
141 // next index - used by non-feature node
143 // error next index - used by custom apps (~0 if not used)
144 u32 error_next_index;
145 // minimum fragment length for this reassembly - used to estimate MTU
146 u16 min_fragment_length;
147 // number of fragments in this reassembly
149 // thread owning memory for this context (whose pool contains this ctx)
150 u32 memory_owner_thread_index;
151 // thread which received fragment with offset 0 and which sends out the
152 // completed reassembly
153 u32 sendout_thread_index;
158 ip4_full_reass_t *pool;
161 // for pacing the main thread timeouts
163 clib_spinlock_t lock;
164 } ip4_full_reass_per_thread_t;
171 u32 expire_walk_interval_ms;
172 // maximum number of fragments in one reassembly
174 // maximum number of reassemblies
178 clib_bihash_16_8_t hash;
180 ip4_full_reass_per_thread_t *per_thread_data;
183 vlib_main_t *vlib_main;
185 u32 ip4_full_reass_expire_node_idx;
187 /** Worker handoff */
190 u32 fq_feature_index;
193 // reference count for enabling/disabling feature - per interface
194 u32 *feature_use_refcount_per_intf;
196 // whether local fragmented packets are reassembled or not
197 int is_local_reass_enabled;
198 } ip4_full_reass_main_t;
200 extern ip4_full_reass_main_t ip4_full_reass_main;
202 #ifndef CLIB_MARCH_VARIANT
203 ip4_full_reass_main_t ip4_full_reass_main;
204 #endif /* CLIB_MARCH_VARIANT */
208 IP4_FULL_REASS_NEXT_INPUT,
209 IP4_FULL_REASS_NEXT_DROP,
210 IP4_FULL_REASS_NEXT_HANDOFF,
211 IP4_FULL_REASS_N_NEXT,
212 } ip4_full_reass_next_t;
219 } ip4_full_reass_node_type_t;
230 } ip4_full_reass_trace_operation_e;
240 } ip4_full_reass_range_trace_t;
244 ip4_full_reass_trace_operation_e action;
246 ip4_full_reass_range_trace_t trace_range;
254 bool is_after_handoff;
255 ip4_header_t ip4_header;
256 } ip4_full_reass_trace_t;
258 extern vlib_node_registration_t ip4_full_reass_node;
259 extern vlib_node_registration_t ip4_full_reass_node_feature;
260 extern vlib_node_registration_t ip4_full_reass_node_custom;
263 ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
264 ip4_full_reass_range_trace_t * trace)
266 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
267 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
268 trace->range_first = vnb->ip.reass.range_first;
269 trace->range_last = vnb->ip.reass.range_last;
270 trace->data_offset = ip4_full_reass_buffer_get_data_offset (b);
271 trace->data_len = ip4_full_reass_buffer_get_data_len (b);
272 trace->range_bi = bi;
276 format_ip4_full_reass_range_trace (u8 * s, va_list * args)
278 ip4_full_reass_range_trace_t *trace =
279 va_arg (*args, ip4_full_reass_range_trace_t *);
281 format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
282 trace->range_last, trace->data_offset, trace->data_len,
288 format_ip4_full_reass_trace (u8 * s, va_list * args)
290 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
291 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
292 ip4_full_reass_trace_t *t = va_arg (*args, ip4_full_reass_trace_t *);
294 if (~0 != t->reass_id)
296 if (t->is_after_handoff)
299 format (s, "%U\n", format_ip4_header, &t->ip4_header,
300 sizeof (t->ip4_header));
304 format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
305 t->reass_id, t->op_id);
306 indent = format_get_indent (s);
309 "first bi: %u, data len: %u, ip/fragment[%u, %u]",
310 t->trace_range.first_bi, t->total_data_len, t->fragment_first,
316 s = format (s, "\n%Ushrink %U by %u", format_white_space, indent,
317 format_ip4_full_reass_range_trace, &t->trace_range,
321 s = format (s, "\n%Udiscard %U", format_white_space, indent,
322 format_ip4_full_reass_range_trace, &t->trace_range);
325 s = format (s, "\n%Unew %U", format_white_space, indent,
326 format_ip4_full_reass_range_trace, &t->trace_range);
329 s = format (s, "\n%Uoverlapping/ignored %U", format_white_space, indent,
330 format_ip4_full_reass_range_trace, &t->trace_range);
333 s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
337 format (s, "handoff from thread #%u to thread #%u", t->thread_id,
341 s = format (s, "passthrough - not a fragment");
348 ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
349 ip4_full_reass_t * reass, u32 bi,
350 ip4_full_reass_trace_operation_e action,
351 u32 size_diff, u32 thread_id_to)
353 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
354 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
355 if (pool_is_free_index
356 (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
358 // this buffer's trace is gone
359 b->flags &= ~VLIB_BUFFER_IS_TRACED;
362 bool is_after_handoff = false;
363 if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
365 is_after_handoff = true;
367 ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
368 t->is_after_handoff = is_after_handoff;
369 if (t->is_after_handoff)
371 clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
372 clib_min (sizeof (t->ip4_header), b->current_length));
376 t->reass_id = reass->id;
377 t->op_id = reass->trace_op_counter;
378 t->trace_range.first_bi = reass->first_bi;
379 t->total_data_len = reass->data_len;
380 ++reass->trace_op_counter;
386 t->trace_range.first_bi = 0;
387 t->total_data_len = 0;
390 ip4_full_reass_trace_details (vm, bi, &t->trace_range);
391 t->size_diff = size_diff;
392 t->thread_id = vm->thread_index;
393 t->thread_id_to = thread_id_to;
394 t->fragment_first = vnb->ip.reass.fragment_first;
395 t->fragment_last = vnb->ip.reass.fragment_last;
398 s = format (s, "%U", format_ip4_full_reass_trace, NULL, NULL, t);
399 printf ("%.*s\n", vec_len (s), s);
401 vec_reset_length (s);
406 ip4_full_reass_free_ctx (ip4_full_reass_per_thread_t * rt,
407 ip4_full_reass_t * reass)
409 pool_put (rt->pool, reass);
414 ip4_full_reass_free (ip4_full_reass_main_t * rm,
415 ip4_full_reass_per_thread_t * rt,
416 ip4_full_reass_t * reass)
418 clib_bihash_kv_16_8_t kv = {};
419 clib_memcpy_fast (&kv, &reass->key, sizeof (kv.key));
420 clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
421 return ip4_full_reass_free_ctx (rt, reass);
424 /* n_left_to_next and to_next are taken as input params because this function
425 * may be called from a graph node which manages its own local copies of these
426 * variables; ignoring those and enqueueing the buffers via fresh local
427 * variables would cause either a buffer leak or corruption */
429 ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
430 ip4_full_reass_t *reass, u32 *n_left_to_next,
433 u32 range_bi = reass->first_bi;
434 vlib_buffer_t *range_b;
435 vnet_buffer_opaque_t *range_vnb;
438 while (~0 != range_bi)
440 range_b = vlib_get_buffer (vm, range_bi);
441 range_vnb = vnet_buffer (range_b);
445 vec_add1 (to_free, range_bi);
448 range_bi = range_vnb->ip.reass.next_range_bi;
451 /* send to next_error_index */
452 if (~0 != reass->error_next_index &&
453 reass->error_next_index < node->n_next_nodes)
457 next_index = reass->error_next_index;
460 /* record number of packets sent to custom app */
461 vlib_node_increment_counter (vm, node->node_index,
462 IP4_ERROR_REASS_TO_CUSTOM_APP,
465 while (vec_len (to_free) > 0)
467 vlib_get_next_frame (vm, node, next_index, *to_next,
470 while (vec_len (to_free) > 0 && (*n_left_to_next) > 0)
472 bi = vec_pop (to_free);
476 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
477 if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
479 ip4_full_reass_add_trace (vm, node, reass, bi,
480 RANGE_DISCARD, 0, ~0);
484 (*n_left_to_next) -= 1;
487 vlib_put_next_frame (vm, node, next_index, (*n_left_to_next));
492 vlib_buffer_free (vm, to_free, vec_len (to_free));
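/* Roughly, the helper below is used after an internal error: it walks the
 * range list, clears VLIB_BUFFER_NEXT_PRESENT where a range's buffer chain
 * runs into the buffers of the following range (so each range can be freed
 * on its own), and prepends the buffer currently being processed (*bi0) so
 * that the subsequent drop frees it as well instead of leaking it. */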
498 sanitize_reass_buffers_add_missing (vlib_main_t *vm, ip4_full_reass_t *reass,
501 u32 range_bi = reass->first_bi;
502 vlib_buffer_t *range_b;
503 vnet_buffer_opaque_t *range_vnb;
505 while (~0 != range_bi)
507 range_b = vlib_get_buffer (vm, range_bi);
508 range_vnb = vnet_buffer (range_b);
514 if (range_b->flags & VLIB_BUFFER_NEXT_PRESENT)
517 vlib_buffer_t *_b = vlib_get_buffer (vm, _bi);
518 while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)
520 if (_b->next_buffer != range_vnb->ip.reass.next_range_bi)
522 _bi = _b->next_buffer;
523 _b = vlib_get_buffer (vm, _bi);
527 _b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
532 range_bi = range_vnb->ip.reass.next_range_bi;
537 vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
538 vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
539 if (~0 != reass->first_bi)
541 fvnb->ip.reass.next_range_bi = reass->first_bi;
542 reass->first_bi = *bi0;
546 reass->first_bi = *bi0;
547 fvnb->ip.reass.next_range_bi = ~0;
554 ip4_full_reass_init (ip4_full_reass_t * reass)
556 reass->first_bi = ~0;
557 reass->last_packet_octet = ~0;
559 reass->next_index = ~0;
560 reass->error_next_index = ~0;
563 always_inline ip4_full_reass_t *
564 ip4_full_reass_find_or_create (vlib_main_t *vm, vlib_node_runtime_t *node,
565 ip4_full_reass_main_t *rm,
566 ip4_full_reass_per_thread_t *rt,
567 ip4_full_reass_kv_t *kv, u8 *do_handoff,
568 u32 *n_left_to_next, u32 **to_next)
570 ip4_full_reass_t *reass;
576 now = vlib_time_now (vm);
577 if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
579 if (vm->thread_index != kv->v.memory_owner_thread_index)
585 pool_elt_at_index (rm->per_thread_data
586 [kv->v.memory_owner_thread_index].pool,
589 if (now > reass->last_heard + rm->timeout)
591 vlib_node_increment_counter (vm, node->node_index,
592 IP4_ERROR_REASS_TIMEOUT, 1);
593 ip4_full_reass_drop_all (vm, node, reass, n_left_to_next, to_next);
594 ip4_full_reass_free (rm, rt, reass);
601 reass->last_heard = now;
605 if (rt->reass_n >= rm->max_reass_n)
612 pool_get (rt->pool, reass);
613 clib_memset (reass, 0, sizeof (*reass));
614 reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
615 reass->memory_owner_thread_index = vm->thread_index;
617 ip4_full_reass_init (reass);
621 clib_memcpy_fast (&reass->key, &kv->kv.key, sizeof (reass->key));
622 kv->v.reass_index = (reass - rt->pool);
623 kv->v.memory_owner_thread_index = vm->thread_index;
624 reass->last_heard = now;
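/* is_add == 2 asks bihash to add the entry without overwriting an existing
 * one, so a non-zero rv below indicates that another worker inserted the
 * same key first. */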
626 int rv = clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 2);
629 ip4_full_reass_free_ctx (rt, reass);
631 // if another worker already created a context, work with that copy
639 always_inline ip4_full_reass_rc_t
640 ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
641 ip4_full_reass_main_t * rm,
642 ip4_full_reass_per_thread_t * rt,
643 ip4_full_reass_t * reass, u32 * bi0,
644 u32 * next0, u32 * error0, bool is_custom)
646 vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
647 vlib_buffer_t *last_b = NULL;
648 u32 sub_chain_bi = reass->first_bi;
649 u32 total_length = 0;
653 u32 tmp_bi = sub_chain_bi;
654 vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
655 ip4_header_t *ip = vlib_buffer_get_current (tmp);
656 vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
657 if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
658 !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
660 return IP4_REASS_RC_INTERNAL_ERROR;
663 u32 data_len = ip4_full_reass_buffer_get_data_len (tmp);
665 ip4_header_bytes (ip) + ip4_full_reass_buffer_get_data_offset (tmp);
667 vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
668 if (tmp_bi == reass->first_bi)
670 /* first buffer - keep ip4 header */
671 if (0 != ip4_full_reass_buffer_get_data_offset (tmp))
673 return IP4_REASS_RC_INTERNAL_ERROR;
676 trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
677 ip4_header_bytes (ip);
678 if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
680 return IP4_REASS_RC_INTERNAL_ERROR;
684 vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
690 if (trim_front > tmp->current_length)
692 /* drop whole buffer */
693 u32 to_be_freed_bi = tmp_bi;
694 trim_front -= tmp->current_length;
695 if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
697 return IP4_REASS_RC_INTERNAL_ERROR;
699 tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
700 tmp_bi = tmp->next_buffer;
701 tmp->next_buffer = 0;
702 tmp = vlib_get_buffer (vm, tmp_bi);
703 vlib_buffer_free_one (vm, to_be_freed_bi);
708 vlib_buffer_advance (tmp, trim_front);
716 last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
717 last_b->next_buffer = tmp_bi;
720 if (keep_data <= tmp->current_length)
722 tmp->current_length = keep_data;
727 keep_data -= tmp->current_length;
728 if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
730 return IP4_REASS_RC_INTERNAL_ERROR;
733 total_length += tmp->current_length;
734 if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
736 tmp_bi = tmp->next_buffer;
737 tmp = vlib_get_buffer (vm, tmp->next_buffer);
746 u32 to_be_freed_bi = tmp_bi;
747 if (reass->first_bi == tmp_bi)
749 return IP4_REASS_RC_INTERNAL_ERROR;
751 if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
753 tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
754 tmp_bi = tmp->next_buffer;
755 tmp->next_buffer = 0;
756 tmp = vlib_get_buffer (vm, tmp_bi);
757 vlib_buffer_free_one (vm, to_be_freed_bi);
761 tmp->next_buffer = 0;
762 vlib_buffer_free_one (vm, to_be_freed_bi);
768 vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
771 while (~0 != sub_chain_bi);
775 return IP4_REASS_RC_INTERNAL_ERROR;
777 last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
779 if (total_length < first_b->current_length)
781 return IP4_REASS_RC_INTERNAL_ERROR;
783 total_length -= first_b->current_length;
784 first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
785 first_b->total_length_not_including_first_buffer = total_length;
786 ip4_header_t *ip = vlib_buffer_get_current (first_b);
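/* rewrite the header of the first buffer: clear the MF flag and fragment
 * offset, set the full length and recompute the checksum so the packet
 * leaves as an ordinary, unfragmented IPv4 packet */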
787 ip->flags_and_fragment_offset = 0;
788 ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
789 ip->checksum = ip4_header_checksum (ip);
790 if (!vlib_buffer_chain_linearize (vm, first_b))
792 return IP4_REASS_RC_NO_BUF;
794 // reset to reconstruct the mbuf linking
795 first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
796 if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
798 ip4_full_reass_add_trace (vm, node, reass, reass->first_bi, FINALIZE, 0,
801 // following code does a hexdump of packet fragments to stdout ...
804 u32 bi = reass->first_bi;
808 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
809 s = format (s, "%u: %U\n", bi, format_hexdump,
810 vlib_buffer_get_current (b), b->current_length);
811 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
820 printf ("%.*s\n", vec_len (s), s);
827 *bi0 = reass->first_bi;
830 *next0 = IP4_FULL_REASS_NEXT_INPUT;
834 *next0 = reass->next_index;
836 vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
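/* the smallest fragment length seen (min_fragment_length) is exported above
 * as estimated_mtu; downstream features that may need to re-fragment the
 * packet can use it as a conservative MTU guess */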
838 /* Keep track of the number of successfully reassembled packets and the
839 * number of fragments reassembled */
840 vlib_node_increment_counter (vm, node->node_index, IP4_ERROR_REASS_SUCCESS,
843 vlib_node_increment_counter (vm, node->node_index,
844 IP4_ERROR_REASS_FRAGMENTS_REASSEMBLED,
847 *error0 = IP4_ERROR_NONE;
848 ip4_full_reass_free (rm, rt, reass);
850 return IP4_REASS_RC_OK;
853 always_inline ip4_full_reass_rc_t
854 ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
855 ip4_full_reass_t * reass,
856 u32 prev_range_bi, u32 new_next_bi)
858 vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
859 vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
860 if (~0 != prev_range_bi)
862 vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
863 vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
864 new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
865 prev_vnb->ip.reass.next_range_bi = new_next_bi;
869 if (~0 != reass->first_bi)
871 new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
873 reass->first_bi = new_next_bi;
875 vnet_buffer_opaque_t *vnb = vnet_buffer (new_next_b);
876 if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
877 !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
879 return IP4_REASS_RC_INTERNAL_ERROR;
881 reass->data_len += ip4_full_reass_buffer_get_data_len (new_next_b);
882 return IP4_REASS_RC_OK;
885 always_inline ip4_full_reass_rc_t
886 ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
887 vlib_node_runtime_t * node,
888 ip4_full_reass_t * reass,
889 u32 prev_range_bi, u32 discard_bi)
891 vlib_buffer_t *discard_b = vlib_get_buffer (vm, discard_bi);
892 vnet_buffer_opaque_t *discard_vnb = vnet_buffer (discard_b);
893 if (~0 != prev_range_bi)
895 vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
896 vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
897 if (!(prev_vnb->ip.reass.next_range_bi == discard_bi))
899 return IP4_REASS_RC_INTERNAL_ERROR;
901 prev_vnb->ip.reass.next_range_bi = discard_vnb->ip.reass.next_range_bi;
905 reass->first_bi = discard_vnb->ip.reass.next_range_bi;
907 vnet_buffer_opaque_t *vnb = vnet_buffer (discard_b);
908 if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
909 !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
911 return IP4_REASS_RC_INTERNAL_ERROR;
913 reass->data_len -= ip4_full_reass_buffer_get_data_len (discard_b);
916 u32 to_be_freed_bi = discard_bi;
917 if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
919 ip4_full_reass_add_trace (vm, node, reass, discard_bi, RANGE_DISCARD,
922 if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
924 discard_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
925 discard_bi = discard_b->next_buffer;
926 discard_b->next_buffer = 0;
927 discard_b = vlib_get_buffer (vm, discard_bi);
928 vlib_buffer_free_one (vm, to_be_freed_bi);
932 discard_b->next_buffer = 0;
933 vlib_buffer_free_one (vm, to_be_freed_bi);
937 return IP4_REASS_RC_OK;
940 always_inline ip4_full_reass_rc_t
941 ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
942 ip4_full_reass_main_t * rm,
943 ip4_full_reass_per_thread_t * rt,
944 ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
945 u32 * error0, bool is_custom, u32 * handoff_thread_idx)
947 vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
948 vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
951 // store (error_)next_index before it's overwritten
952 reass->next_index = fvnb->ip.reass.next_index;
953 reass->error_next_index = fvnb->ip.reass.error_next_index;
955 ip4_full_reass_rc_t rc = IP4_REASS_RC_OK;
957 ip4_header_t *fip = vlib_buffer_get_current (fb);
958 const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
959 const u32 fragment_length =
960 clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
961 const u32 fragment_last = fragment_first + fragment_length - 1;
962 fvnb->ip.reass.fragment_first = fragment_first;
963 fvnb->ip.reass.fragment_last = fragment_last;
964 int more_fragments = ip4_get_fragment_more (fip);
965 u32 candidate_range_bi = reass->first_bi;
966 u32 prev_range_bi = ~0;
967 fvnb->ip.reass.range_first = fragment_first;
968 fvnb->ip.reass.range_last = fragment_last;
969 fvnb->ip.reass.next_range_bi = ~0;
972 reass->last_packet_octet = fragment_last;
974 if (~0 == reass->first_bi)
976 // starting a new reassembly
978 ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi, *bi0);
979 if (IP4_REASS_RC_OK != rc)
983 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
985 ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
988 reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
989 reass->fragments_n = 1;
990 return IP4_REASS_RC_OK;
992 reass->min_fragment_length =
993 clib_min (clib_net_to_host_u16 (fip->length),
994 fvnb->ip.reass.estimated_mtu);
995 while (~0 != candidate_range_bi)
997 vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
998 vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
999 if (fragment_first > candidate_vnb->ip.reass.range_last)
1001 // this fragment starts after the candidate range
1002 prev_range_bi = candidate_range_bi;
1003 candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
1004 if (candidate_vnb->ip.reass.range_last < fragment_last &&
1005 ~0 == candidate_range_bi)
1007 // special case - this fragment falls beyond all known ranges
1008 rc = ip4_full_reass_insert_range_in_chain (vm, reass,
1009 prev_range_bi, *bi0);
1010 if (IP4_REASS_RC_OK != rc)
1019 if (fragment_last < candidate_vnb->ip.reass.range_first)
1021 // this fragment ends before candidate range without any overlap
1022 rc = ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi,
1024 if (IP4_REASS_RC_OK != rc)
1032 if (fragment_first >= candidate_vnb->ip.reass.range_first &&
1033 fragment_last <= candidate_vnb->ip.reass.range_last)
1035 // this fragment is a (sub)part of an existing range, ignore it
1036 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
1038 ip4_full_reass_add_trace (vm, node, reass, *bi0,
1039 RANGE_OVERLAP, 0, ~0);
1043 int discard_candidate = 0;
1044 if (fragment_first < candidate_vnb->ip.reass.range_first)
1047 fragment_last - candidate_vnb->ip.reass.range_first + 1;
1048 if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
1050 candidate_vnb->ip.reass.range_first += overlap;
1051 if (reass->data_len < overlap)
1053 return IP4_REASS_RC_INTERNAL_ERROR;
1055 reass->data_len -= overlap;
1056 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
1058 ip4_full_reass_add_trace (vm, node, reass,
1060 RANGE_SHRINK, 0, ~0);
1062 rc = ip4_full_reass_insert_range_in_chain (
1063 vm, reass, prev_range_bi, *bi0);
1064 if (IP4_REASS_RC_OK != rc)
1072 discard_candidate = 1;
1075 else if (fragment_last > candidate_vnb->ip.reass.range_last)
1078 candidate_vnb->ip.reass.range_last - fragment_first + 1;
1079 if (overlap < ip4_full_reass_buffer_get_data_len (candidate_b))
1081 fvnb->ip.reass.range_first += overlap;
1082 if (~0 != candidate_vnb->ip.reass.next_range_bi)
1084 prev_range_bi = candidate_range_bi;
1085 candidate_range_bi =
1086 candidate_vnb->ip.reass.next_range_bi;
1091 // special case - last range discarded
1092 rc = ip4_full_reass_insert_range_in_chain (
1093 vm, reass, candidate_range_bi, *bi0);
1094 if (IP4_REASS_RC_OK != rc)
1103 discard_candidate = 1;
1108 discard_candidate = 1;
1110 if (discard_candidate)
1112 u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
1113 // discard candidate range, probe next range
1114 rc = ip4_full_reass_remove_range_from_chain (
1115 vm, node, reass, prev_range_bi, candidate_range_bi);
1116 if (IP4_REASS_RC_OK != rc)
1120 if (~0 != next_range_bi)
1122 candidate_range_bi = next_range_bi;
1127 // special case - last range discarded
1128 rc = ip4_full_reass_insert_range_in_chain (
1129 vm, reass, prev_range_bi, *bi0);
1130 if (IP4_REASS_RC_OK != rc)
1140 ++reass->fragments_n;
1143 if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
1145 ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
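/* reassembly is complete once the collected data is contiguous and covers
 * the whole packet, i.e. data_len == last_packet_octet + 1 */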
1148 if (~0 != reass->last_packet_octet &&
1149 reass->data_len == reass->last_packet_octet + 1)
1151 *handoff_thread_idx = reass->sendout_thread_index;
1153 reass->memory_owner_thread_index != reass->sendout_thread_index;
1155 ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
1157 if (IP4_REASS_RC_OK == rc && handoff)
1159 rc = IP4_REASS_RC_HANDOFF;
1167 if (reass->fragments_n > rm->max_reass_len)
1169 rc = IP4_REASS_RC_TOO_MANY_FRAGMENTS;
1174 *next0 = IP4_FULL_REASS_NEXT_DROP;
1175 *error0 = IP4_ERROR_REASS_DUPLICATE_FRAGMENT;
1182 ip4_full_reass_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
1183 vlib_frame_t *frame, ip4_full_reass_node_type_t type,
1186 u32 *from = vlib_frame_vector_args (frame);
1187 u32 n_left_from, n_left_to_next, *to_next, next_index;
1188 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1189 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
1190 clib_spinlock_lock (&rt->lock);
1192 n_left_from = frame->n_vectors;
1193 next_index = node->cached_next_index;
1194 while (n_left_from > 0)
1196 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1198 while (n_left_from > 0 && n_left_to_next > 0)
1203 u32 error0 = IP4_ERROR_NONE;
1206 b0 = vlib_get_buffer (vm, bi0);
1208 ip4_header_t *ip0 = vlib_buffer_get_current (b0);
1209 if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
1211 // this is a whole packet - no fragmentation
1214 next0 = IP4_FULL_REASS_NEXT_INPUT;
1218 next0 = vnet_buffer (b0)->ip.reass.next_index;
1220 ip4_full_reass_add_trace (vm, node, NULL, bi0, PASSTHROUGH, 0,
1222 goto packet_enqueue;
1225 if (is_local && !rm->is_local_reass_enabled)
1227 next0 = IP4_FULL_REASS_NEXT_DROP;
1228 goto packet_enqueue;
1231 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1232 const u32 fragment_length =
1233 clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
1234 const u32 fragment_last = fragment_first + fragment_length - 1;
1236 /* Keep track of received fragments */
1237 vlib_node_increment_counter (vm, node->node_index,
1238 IP4_ERROR_REASS_FRAGMENTS_RCVD, 1);
1240 if (fragment_first > fragment_last ||
1241 fragment_first + fragment_length > UINT16_MAX - 20 ||
1242 (fragment_length < 8 && // 8 is minimum frag length per RFC 791
1243 ip4_get_fragment_more (ip0)))
1245 next0 = IP4_FULL_REASS_NEXT_DROP;
1246 error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
1247 goto packet_enqueue;
1250 u32 fib_index = vec_elt (ip4_main.fib_index_by_sw_if_index,
1251 vnet_buffer (b0)->sw_if_index[VLIB_RX]);
1253 ip4_full_reass_kv_t kv = { .k.fib_index = fib_index,
1254 .k.src.as_u32 = ip0->src_address.as_u32,
1255 .k.dst.as_u32 = ip0->dst_address.as_u32,
1256 .k.frag_id = ip0->fragment_id,
1257 .k.proto = ip0->protocol
1262 ip4_full_reass_t *reass = ip4_full_reass_find_or_create (
1263 vm, node, rm, rt, &kv, &do_handoff, &n_left_to_next, &to_next);
1267 const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
1268 if (0 == fragment_first)
1270 reass->sendout_thread_index = vm->thread_index;
1274 if (PREDICT_FALSE (do_handoff))
1276 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
1277 vnet_buffer (b0)->ip.reass.owner_thread_index =
1278 kv.v.memory_owner_thread_index;
1282 u32 handoff_thread_idx;
1284 switch (ip4_full_reass_update
1285 (vm, node, rm, rt, reass, &bi0, &next0,
1286 &error0, CUSTOM == type, &handoff_thread_idx))
1288 case IP4_REASS_RC_OK:
1289 /* nothing to do here */
1291 case IP4_REASS_RC_HANDOFF:
1292 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
1293 b0 = vlib_get_buffer (vm, bi0);
1294 vnet_buffer (b0)->ip.reass.owner_thread_index =
1297 case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
1298 counter = IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG;
1300 case IP4_REASS_RC_NO_BUF:
1301 counter = IP4_ERROR_REASS_NO_BUF;
1303 case IP4_REASS_RC_INTERNAL_ERROR:
1304 counter = IP4_ERROR_REASS_INTERNAL_ERROR;
1305 /* Sanitization is needed only in the internal error case, since
1306 * the incoming packet is already dropped in the other cases;
1307 * adding bi0 back to the reassembly list also fixes the
1308 * leaking of buffers during internal errors.
1310 * It also makes no sense to send these buffers to the custom
1311 * app, as these fragments hit internal errors */
1312 sanitize_reass_buffers_add_missing (vm, reass, &bi0);
1313 reass->error_next_index = ~0;
1319 vlib_node_increment_counter (vm, node->node_index, counter,
1321 ip4_full_reass_drop_all (vm, node, reass, &n_left_to_next,
1323 ip4_full_reass_free (rm, rt, reass);
1329 next0 = IP4_FULL_REASS_NEXT_DROP;
1330 error0 = IP4_ERROR_REASS_LIMIT_REACHED;
1340 n_left_to_next -= 1;
1342 /* bi0 might have been updated by reass_finalize, reload */
1343 b0 = vlib_get_buffer (vm, bi0);
1344 if (IP4_ERROR_NONE != error0)
1346 b0->error = node->errors[error0];
1349 if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
1351 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1353 ip4_full_reass_add_trace (
1354 vm, node, NULL, bi0, HANDOFF, 0,
1355 vnet_buffer (b0)->ip.reass.owner_thread_index);
1358 else if (FEATURE == type && IP4_ERROR_NONE == error0)
1360 vnet_feature_next (&next0, b0);
1363 /* Increment the to-custom-app counter as well, since this fragment is
1364 * also going to the application */
1367 vlib_node_increment_counter (
1368 vm, node->node_index, IP4_ERROR_REASS_TO_CUSTOM_APP, 1);
1371 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1372 to_next, n_left_to_next,
1374 IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
1382 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1385 clib_spinlock_unlock (&rt->lock);
1386 return frame->n_vectors;
1389 VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
1390 vlib_node_runtime_t * node,
1391 vlib_frame_t * frame)
1393 return ip4_full_reass_inline (vm, node, frame, NORMAL, false /* is_local */);
1396 VLIB_REGISTER_NODE (ip4_full_reass_node) = {
1397 .name = "ip4-full-reassembly",
1398 .vector_size = sizeof (u32),
1399 .format_trace = format_ip4_full_reass_trace,
1400 .n_errors = IP4_N_ERROR,
1401 .error_counters = ip4_error_counters,
1402 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
1405 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1406 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1407 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reassembly-handoff",
1412 VLIB_NODE_FN (ip4_local_full_reass_node)
1413 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
1415 return ip4_full_reass_inline (vm, node, frame, NORMAL, true /* is_local */);
1418 VLIB_REGISTER_NODE (ip4_local_full_reass_node) = {
1419 .name = "ip4-local-full-reassembly",
1420 .vector_size = sizeof (u32),
1421 .format_trace = format_ip4_full_reass_trace,
1422 .n_errors = IP4_N_ERROR,
1423 .error_counters = ip4_error_counters,
1424 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
1427 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1428 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1429 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-local-full-reassembly-handoff",
1434 VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
1435 vlib_node_runtime_t * node,
1436 vlib_frame_t * frame)
1438 return ip4_full_reass_inline (vm, node, frame, FEATURE,
1439 false /* is_local */);
1442 VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
1443 .name = "ip4-full-reassembly-feature",
1444 .vector_size = sizeof (u32),
1445 .format_trace = format_ip4_full_reass_trace,
1446 .n_errors = IP4_N_ERROR,
1447 .error_counters = ip4_error_counters,
1448 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
1451 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1452 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1453 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
1457 VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
1458 .arc_name = "ip4-unicast",
1459 .node_name = "ip4-full-reassembly-feature",
1460 .runs_before = VNET_FEATURES ("ip4-lookup",
1461 "ipsec4-input-feature"),
1465 VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
1466 vlib_node_runtime_t * node,
1467 vlib_frame_t * frame)
1469 return ip4_full_reass_inline (vm, node, frame, CUSTOM, false /* is_local */);
1472 VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
1473 .name = "ip4-full-reassembly-custom",
1474 .vector_size = sizeof (u32),
1475 .format_trace = format_ip4_full_reass_trace,
1476 .n_errors = IP4_N_ERROR,
1477 .error_counters = ip4_error_counters,
1478 .n_next_nodes = IP4_FULL_REASS_N_NEXT,
1481 [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
1482 [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
1483 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-custom-hoff",
1487 VNET_FEATURE_INIT (ip4_full_reass_custom, static) = {
1488 .arc_name = "ip4-unicast",
1489 .node_name = "ip4-full-reassembly-feature",
1490 .runs_before = VNET_FEATURES ("ip4-lookup",
1491 "ipsec4-input-feature"),
1496 #ifndef CLIB_MARCH_VARIANT
1498 ip4_full_reass_custom_register_next_node (uword node_index)
1500 return vlib_node_add_next (vlib_get_main (),
1501 ip4_full_reass_node_custom.index, node_index);
1505 ip4_full_reass_get_nbuckets ()
1507 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1511 /* need more memory with more workers */
1512 nbuckets = (u32) (rm->max_reass_n * (vlib_num_workers () + 1) /
1513 IP4_REASS_HT_LOAD_FACTOR);
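/* illustrative example with the defaults: 1024 reassemblies and e.g. 3
 * workers give 1024 * 4 / 0.75 ~= 5461 buckets, which the loop below rounds
 * up to the next power of two, 8192 */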
1515 for (i = 0; i < 31; i++)
1516 if ((1 << i) >= nbuckets)
1522 #endif /* CLIB_MARCH_VARIANT */
1526 IP4_EVENT_CONFIG_CHANGED = 1,
1527 } ip4_full_reass_event_t;
1532 clib_bihash_16_8_t *new_hash;
1533 } ip4_rehash_cb_ctx;
1535 #ifndef CLIB_MARCH_VARIANT
1537 ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
1539 ip4_rehash_cb_ctx *ctx = _ctx;
1540 if (clib_bihash_add_del_16_8 (ctx->new_hash, kv, 1))
1544 return (BIHASH_WALK_CONTINUE);
1548 ip4_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1549 u32 max_reassembly_length,
1550 u32 expire_walk_interval_ms)
1552 ip4_full_reass_main.timeout_ms = timeout_ms;
1553 ip4_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1554 ip4_full_reass_main.max_reass_n = max_reassemblies;
1555 ip4_full_reass_main.max_reass_len = max_reassembly_length;
1556 ip4_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
1560 ip4_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
1561 u32 max_reassembly_length, u32 expire_walk_interval_ms)
1563 u32 old_nbuckets = ip4_full_reass_get_nbuckets ();
1564 ip4_full_reass_set_params (timeout_ms, max_reassemblies,
1565 max_reassembly_length, expire_walk_interval_ms);
1566 vlib_process_signal_event (ip4_full_reass_main.vlib_main,
1567 ip4_full_reass_main.ip4_full_reass_expire_node_idx,
1568 IP4_EVENT_CONFIG_CHANGED, 0);
1569 u32 new_nbuckets = ip4_full_reass_get_nbuckets ();
1570 if (ip4_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
1572 clib_bihash_16_8_t new_hash;
1573 clib_memset (&new_hash, 0, sizeof (new_hash));
1574 ip4_rehash_cb_ctx ctx;
1576 ctx.new_hash = &new_hash;
1577 clib_bihash_init_16_8 (&new_hash, "ip4-dr", new_nbuckets,
1578 new_nbuckets * 1024);
1579 clib_bihash_foreach_key_value_pair_16_8 (&ip4_full_reass_main.hash,
1580 ip4_rehash_cb, &ctx);
1583 clib_bihash_free_16_8 (&new_hash);
1588 clib_bihash_free_16_8 (&ip4_full_reass_main.hash);
1589 clib_memcpy_fast (&ip4_full_reass_main.hash, &new_hash,
1590 sizeof (ip4_full_reass_main.hash));
1591 clib_bihash_copied (&ip4_full_reass_main.hash, &new_hash);
1598 ip4_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1599 u32 * max_reassembly_length,
1600 u32 * expire_walk_interval_ms)
1602 *timeout_ms = ip4_full_reass_main.timeout_ms;
1603 *max_reassemblies = ip4_full_reass_main.max_reass_n;
1604 *max_reassembly_length = ip4_full_reass_main.max_reass_len;
1605 *expire_walk_interval_ms = ip4_full_reass_main.expire_walk_interval_ms;
1609 static clib_error_t *
1610 ip4_full_reass_init_function (vlib_main_t * vm)
1612 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1613 clib_error_t *error = 0;
1619 vec_validate (rm->per_thread_data, vlib_num_workers ());
1620 ip4_full_reass_per_thread_t *rt;
1621 vec_foreach (rt, rm->per_thread_data)
1623 clib_spinlock_init (&rt->lock);
1624 pool_alloc (rt->pool, rm->max_reass_n);
1627 node = vlib_get_node_by_name (vm, (u8 *) "ip4-full-reassembly-expire-walk");
1629 rm->ip4_full_reass_expire_node_idx = node->index;
1631 ip4_full_reass_set_params (IP4_REASS_TIMEOUT_DEFAULT_MS,
1632 IP4_REASS_MAX_REASSEMBLIES_DEFAULT,
1633 IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT,
1634 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS);
1636 nbuckets = ip4_full_reass_get_nbuckets ();
1637 clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
1639 rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
1640 rm->fq_local_index =
1641 vlib_frame_queue_main_init (ip4_local_full_reass_node.index, 0);
1642 rm->fq_feature_index =
1643 vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
1644 rm->fq_custom_index =
1645 vlib_frame_queue_main_init (ip4_full_reass_node_custom.index, 0);
1647 rm->feature_use_refcount_per_intf = NULL;
1648 rm->is_local_reass_enabled = 1;
1653 VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
1654 #endif /* CLIB_MARCH_VARIANT */
1657 ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
1658 CLIB_UNUSED (vlib_frame_t *f))
1660 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1661 uword event_type, *event_data = 0;
1665 vlib_process_wait_for_event_or_clock (vm,
1667 rm->expire_walk_interval_ms /
1668 (f64) MSEC_PER_SEC);
1669 event_type = vlib_process_get_events (vm, &event_data);
1674 /* no events => timeout */
1676 case IP4_EVENT_CONFIG_CHANGED:
1677 /* nothing to do here */
1680 clib_warning ("BUG: event type 0x%wx", event_type);
1683 f64 now = vlib_time_now (vm);
1685 ip4_full_reass_t *reass;
1686 int *pool_indexes_to_free = NULL;
1688 uword thread_index = 0;
1690 const uword nthreads = vlib_num_workers () + 1;
1691 u32 n_left_to_next, *to_next;
1693 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1695 ip4_full_reass_per_thread_t *rt =
1696 &rm->per_thread_data[thread_index];
1697 clib_spinlock_lock (&rt->lock);
1699 vec_reset_length (pool_indexes_to_free);
1701 /* Pace the number of timeouts handled per thread, to avoid barrier
1702 * sync issues in real-world scenarios */
1704 u32 beg = rt->last_id;
1705 /* to ensure we walk at least once per sec per context */
1707 beg + (IP4_REASS_MAX_REASSEMBLIES_DEFAULT *
1708 IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS / MSEC_PER_SEC +
1710 if (end > vec_len (rt->pool))
1712 end = vec_len (rt->pool);
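/* with the defaults this steps through roughly 1024 * 50 / 1000 ~= 51
 * contexts per 50 ms walk, i.e. the whole 1024-entry pool about once per
 * second, matching the comment above (illustrative numbers only) */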
1720 pool_foreach_stepping_index (index, beg, end, rt->pool)
1722 reass = pool_elt_at_index (rt->pool, index);
1723 if (now > reass->last_heard + rm->timeout)
1725 vec_add1 (pool_indexes_to_free, index);
1729 if (vec_len (pool_indexes_to_free))
1730 vlib_node_increment_counter (vm, node->node_index,
1731 IP4_ERROR_REASS_TIMEOUT,
1732 vec_len (pool_indexes_to_free));
1734 vec_foreach (i, pool_indexes_to_free)
1736 ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1737 ip4_full_reass_drop_all (vm, node, reass, &n_left_to_next,
1739 ip4_full_reass_free (rm, rt, reass);
1742 clib_spinlock_unlock (&rt->lock);
1745 vec_free (pool_indexes_to_free);
1748 vec_set_len (event_data, 0);
1755 VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
1756 .function = ip4_full_reass_walk_expired,
1757 .type = VLIB_NODE_TYPE_PROCESS,
1758 .name = "ip4-full-reassembly-expire-walk",
1759 .format_trace = format_ip4_full_reass_trace,
1760 .n_errors = IP4_N_ERROR,
1761 .error_counters = ip4_error_counters,
1765 format_ip4_full_reass_key (u8 * s, va_list * args)
1767 ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
1769 format (s, "fib_index: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1770 key->fib_index, format_ip4_address, &key->src, format_ip4_address,
1771 &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1776 format_ip4_reass (u8 * s, va_list * args)
1778 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1779 ip4_full_reass_t *reass = va_arg (*args, ip4_full_reass_t *);
1781 s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
1782 "last_packet_octet: %u, trace_op_counter: %u\n",
1783 reass->id, format_ip4_full_reass_key, &reass->key,
1784 reass->first_bi, reass->data_len,
1785 reass->last_packet_octet, reass->trace_op_counter);
1787 u32 bi = reass->first_bi;
1791 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1792 vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1795 " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1796 "fragment[%u, %u]\n", counter, vnb->ip.reass.range_first,
1797 vnb->ip.reass.range_last, bi,
1798 ip4_full_reass_buffer_get_data_offset (b),
1799 ip4_full_reass_buffer_get_data_len (b),
1800 vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
1801 if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1803 bi = b->next_buffer;
1813 static clib_error_t *
1814 show_ip4_reass (vlib_main_t * vm,
1815 unformat_input_t * input,
1816 CLIB_UNUSED (vlib_cli_command_t * lmd))
1818 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1820 vlib_cli_output (vm, "---------------------");
1821 vlib_cli_output (vm, "IP4 reassembly status");
1822 vlib_cli_output (vm, "---------------------");
1823 bool details = false;
1824 if (unformat (input, "details"))
1829 u32 sum_reass_n = 0;
1830 ip4_full_reass_t *reass;
1832 const uword nthreads = vlib_num_workers () + 1;
1833 for (thread_index = 0; thread_index < nthreads; ++thread_index)
1835 ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1836 clib_spinlock_lock (&rt->lock);
1839 pool_foreach (reass, rt->pool) {
1840 vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
1843 sum_reass_n += rt->reass_n;
1844 clib_spinlock_unlock (&rt->lock);
1846 vlib_cli_output (vm, "---------------------");
1847 vlib_cli_output (vm, "Current full IP4 reassemblies count: %lu\n",
1848 (long unsigned) sum_reass_n);
1849 vlib_cli_output (vm,
1850 "Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
1851 (long unsigned) rm->max_reass_n);
1852 vlib_cli_output (vm,
1853 "Maximum configured amount of fragments "
1854 "per full IP4 reassembly: %lu\n",
1855 (long unsigned) rm->max_reass_len);
1856 vlib_cli_output (vm,
1857 "Maximum configured full IP4 reassembly timeout: %lums\n",
1858 (long unsigned) rm->timeout_ms);
1859 vlib_cli_output (vm,
1860 "Maximum configured full IP4 reassembly expire walk interval: %lums\n",
1861 (long unsigned) rm->expire_walk_interval_ms);
1865 VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
1866 .path = "show ip4-full-reassembly",
1867 .short_help = "show ip4-full-reassembly [details]",
1868 .function = show_ip4_reass,
1871 #ifndef CLIB_MARCH_VARIANT
1873 ip4_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
1875 return vnet_feature_enable_disable ("ip4-unicast",
1876 "ip4-full-reassembly-feature",
1877 sw_if_index, enable_disable, 0, 0);
1879 #endif /* CLIB_MARCH_VARIANT */
1882 #define foreach_ip4_full_reass_handoff_error \
1883 _(CONGESTION_DROP, "congestion drop")
1888 #define _(sym,str) IP4_FULL_REASS_HANDOFF_ERROR_##sym,
1889 foreach_ip4_full_reass_handoff_error
1891 IP4_FULL_REASS_HANDOFF_N_ERROR,
1892 } ip4_full_reass_handoff_error_t;
1894 static char *ip4_full_reass_handoff_error_strings[] = {
1895 #define _(sym,string) string,
1896 foreach_ip4_full_reass_handoff_error
1902 u32 next_worker_index;
1903 } ip4_full_reass_handoff_trace_t;
1906 format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
1908 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1909 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1910 ip4_full_reass_handoff_trace_t *t =
1911 va_arg (*args, ip4_full_reass_handoff_trace_t *);
1914 format (s, "ip4-full-reassembly-handoff: next-worker %d",
1915 t->next_worker_index);
1921 ip4_full_reass_handoff_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
1922 vlib_frame_t *frame,
1923 ip4_full_reass_node_type_t type,
1926 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
1928 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1929 u32 n_enq, n_left_from, *from;
1930 u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1933 from = vlib_frame_vector_args (frame);
1934 n_left_from = frame->n_vectors;
1935 vlib_get_buffers (vm, from, bufs, n_left_from);
1938 ti = thread_indices;
1945 fq_index = rm->fq_local_index;
1949 fq_index = rm->fq_index;
1953 fq_index = rm->fq_feature_index;
1956 fq_index = rm->fq_custom_index;
1959 clib_warning ("Unexpected `type' (%d)!", type);
1962 while (n_left_from > 0)
1964 ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
1967 ((node->flags & VLIB_NODE_FLAG_TRACE)
1968 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1970 ip4_full_reass_handoff_trace_t *t =
1971 vlib_add_trace (vm, node, b[0], sizeof (*t));
1972 t->next_worker_index = ti[0];
1979 n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
1980 thread_indices, frame->n_vectors, 1);
1982 if (n_enq < frame->n_vectors)
1983 vlib_node_increment_counter (vm, node->node_index,
1984 IP4_FULL_REASS_HANDOFF_ERROR_CONGESTION_DROP,
1985 frame->n_vectors - n_enq);
1986 return frame->n_vectors;
1989 VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
1990 vlib_node_runtime_t * node,
1991 vlib_frame_t * frame)
1993 return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL,
1994 false /* is_local */);
1998 VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
1999 .name = "ip4-full-reassembly-handoff",
2000 .vector_size = sizeof (u32),
2001 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
2002 .error_strings = ip4_full_reass_handoff_error_strings,
2003 .format_trace = format_ip4_full_reass_handoff_trace,
2012 VLIB_NODE_FN (ip4_local_full_reass_handoff_node)
2013 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
2015 return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL,
2016 true /* is_local */);
2019 VLIB_REGISTER_NODE (ip4_local_full_reass_handoff_node) = {
2020 .name = "ip4-local-full-reassembly-handoff",
2021 .vector_size = sizeof (u32),
2022 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
2023 .error_strings = ip4_full_reass_handoff_error_strings,
2024 .format_trace = format_ip4_full_reass_handoff_trace,
2033 VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
2034 vlib_node_runtime_t *
2036 vlib_frame_t * frame)
2038 return ip4_full_reass_handoff_node_inline (vm, node, frame, FEATURE,
2039 false /* is_local */);
2042 VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
2043 .name = "ip4-full-reass-feature-hoff",
2044 .vector_size = sizeof (u32),
2045 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
2046 .error_strings = ip4_full_reass_handoff_error_strings,
2047 .format_trace = format_ip4_full_reass_handoff_trace,
2056 VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
2057 vlib_node_runtime_t *
2059 vlib_frame_t * frame)
2061 return ip4_full_reass_handoff_node_inline (vm, node, frame, CUSTOM,
2062 false /* is_local */);
2065 VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
2066 .name = "ip4-full-reass-custom-hoff",
2067 .vector_size = sizeof (u32),
2068 .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
2069 .error_strings = ip4_full_reass_handoff_error_strings,
2070 .format_trace = format_ip4_full_reass_handoff_trace,
2079 #ifndef CLIB_MARCH_VARIANT
2081 ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
2083 ip4_full_reass_main_t *rm = &ip4_full_reass_main;
2084 vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
2087 if (!rm->feature_use_refcount_per_intf[sw_if_index])
2089 ++rm->feature_use_refcount_per_intf[sw_if_index];
2090 return vnet_feature_enable_disable ("ip4-unicast",
2091 "ip4-full-reassembly-feature",
2092 sw_if_index, 1, 0, 0);
2094 ++rm->feature_use_refcount_per_intf[sw_if_index];
2098 --rm->feature_use_refcount_per_intf[sw_if_index];
2099 if (!rm->feature_use_refcount_per_intf[sw_if_index])
2100 return vnet_feature_enable_disable ("ip4-unicast",
2101 "ip4-full-reassembly-feature",
2102 sw_if_index, 0, 0, 0);
2108 ip4_local_full_reass_enable_disable (int enable)
2112 ip4_full_reass_main.is_local_reass_enabled = 1;
2116 ip4_full_reass_main.is_local_reass_enabled = 0;
2121 ip4_local_full_reass_enabled ()
2123 return ip4_full_reass_main.is_local_reass_enabled;
2129 * fd.io coding-style-patch-verification: ON
2132 * eval: (c-set-style "gnu")