X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fip%2Freass%2Fip6_sv_reass.c;h=f18bbe0d564c147f73a4eceeaafd3574ca1df274;hb=d4ba0d1883d24cc03f5435fe71368d49e01761cb;hp=0f5e0e2997511eafd81a6110f17014bc438cbafa;hpb=364b20a6ad1676741cfc360bdaec32b199cf49d6;p=vpp.git

diff --git a/src/vnet/ip/reass/ip6_sv_reass.c b/src/vnet/ip/reass/ip6_sv_reass.c
index 0f5e0e29975..f18bbe0d564 100644
--- a/src/vnet/ip/reass/ip6_sv_reass.c
+++ b/src/vnet/ip/reass/ip6_sv_reass.c
@@ -50,7 +50,7 @@ typedef struct
 {
   ip6_address_t src;
   ip6_address_t dst;
-  u32 xx_id;
+  u32 fib_index;
   u32 frag_id;
   u8 unused[7];
   u8 proto;
@@ -96,6 +96,9 @@ typedef struct
   bool is_complete;
   // ip protocol
   u8 ip_proto;
+  u8 icmp_type_or_tcp_flags;
+  u32 tcp_ack_number;
+  u32 tcp_seq_number;
   // l4 src port
   u16 l4_src_port;
   // l4 dst port
@@ -170,6 +173,7 @@ typedef enum
   REASS_FRAGMENT_CACHE,
   REASS_FINISH,
   REASS_FRAGMENT_FORWARD,
+  REASS_PASSTHROUGH,
 } ip6_sv_reass_trace_operation_e;
 
 typedef struct
@@ -188,7 +192,10 @@ format_ip6_sv_reass_trace (u8 * s, va_list * args)
   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
   ip6_sv_reass_trace_t *t = va_arg (*args, ip6_sv_reass_trace_t *);
-  s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
+  if (REASS_PASSTHROUGH != t->action)
+    {
+      s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
+    }
   switch (t->action)
     {
     case REASS_FRAGMENT_CACHE:
@@ -206,26 +213,38 @@ format_ip6_sv_reass_trace (u8 * s, va_list * args)
                   t->ip_proto, clib_net_to_host_u16 (t->l4_src_port),
                   clib_net_to_host_u16 (t->l4_dst_port));
       break;
+    case REASS_PASSTHROUGH:
+      s = format (s, "[not-fragmented]");
+      break;
     }
   return s;
 }
 
 static void
 ip6_sv_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
-                        ip6_sv_reass_main_t * rm,
                         ip6_sv_reass_t * reass, u32 bi,
                         ip6_sv_reass_trace_operation_e action,
                         u32 ip_proto, u16 l4_src_port, u16 l4_dst_port)
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+  if (pool_is_free_index
+      (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
+    {
+      // this buffer's trace is gone
+      b->flags &= ~VLIB_BUFFER_IS_TRACED;
+      return;
+    }
   ip6_sv_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
-  t->reass_id = reass->id;
+  if (reass)
+    {
+      t->reass_id = reass->id;
+      t->op_id = reass->trace_op_counter;
+      ++reass->trace_op_counter;
+    }
   t->action = action;
-  t->op_id = reass->trace_op_counter;
   t->ip_proto = ip_proto;
   t->l4_src_port = l4_src_port;
   t->l4_dst_port = l4_dst_port;
-  ++reass->trace_op_counter;
 #if 0
   static u8 *s = NULL;
   s = format (s, "%U", format_ip6_sv_reass_trace, NULL, NULL, t);
@@ -283,17 +302,14 @@ ip6_sv_reass_init (ip6_sv_reass_t * reass)
 }
 
 always_inline ip6_sv_reass_t *
-ip6_sv_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
-                             ip6_sv_reass_main_t * rm,
-                             ip6_sv_reass_per_thread_t * rt,
-                             ip6_sv_reass_kv_t * kv, u32 * icmp_bi,
-                             u8 * do_handoff)
+ip6_sv_reass_find_or_create (vlib_main_t *vm, ip6_sv_reass_main_t *rm,
+                             ip6_sv_reass_per_thread_t *rt,
+                             ip6_sv_reass_kv_t *kv, u8 *do_handoff)
 {
   ip6_sv_reass_t *reass = NULL;
-  f64 now = vlib_time_now (rm->vlib_main);
+  f64 now = vlib_time_now (vm);
 
-  if (!clib_bihash_search_48_8
-      (&rm->hash, (clib_bihash_kv_48_8_t *) kv, (clib_bihash_kv_48_8_t *) kv))
+  if (!clib_bihash_search_48_8 (&rm->hash, &kv->kv, &kv->kv))
     {
       if (vm->thread_index != kv->v.thread_index)
        {
@@ -317,7 +333,7 @@ ip6_sv_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
   if (rt->reass_n >= rm->max_reass_n)
     {
-      reass = pool_elt_at_index (rt->pool, rt->lru_last);
+      reass = pool_elt_at_index (rt->pool, rt->lru_first);
       ip6_sv_reass_free (vm, rm, rt, reass);
     }
 
@@ -342,17 +358,17 @@ ip6_sv_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
       rt->lru_first = rt->lru_last = reass - rt->pool;
     }
 
-  reass->key.as_u64[0] = ((clib_bihash_kv_48_8_t *) kv)->key[0];
-  reass->key.as_u64[1] = ((clib_bihash_kv_48_8_t *) kv)->key[1];
-  reass->key.as_u64[2] = ((clib_bihash_kv_48_8_t *) kv)->key[2];
-  reass->key.as_u64[3] = ((clib_bihash_kv_48_8_t *) kv)->key[3];
-  reass->key.as_u64[4] = ((clib_bihash_kv_48_8_t *) kv)->key[4];
-  reass->key.as_u64[5] = ((clib_bihash_kv_48_8_t *) kv)->key[5];
+  reass->key.as_u64[0] = kv->kv.key[0];
+  reass->key.as_u64[1] = kv->kv.key[1];
+  reass->key.as_u64[2] = kv->kv.key[2];
+  reass->key.as_u64[3] = kv->kv.key[3];
+  reass->key.as_u64[4] = kv->kv.key[4];
+  reass->key.as_u64[5] = kv->kv.key[5];
   kv->v.reass_index = (reass - rt->pool);
   kv->v.thread_index = vm->thread_index;
   reass->last_heard = now;
 
-  if (clib_bihash_add_del_48_8 (&rm->hash, (clib_bihash_kv_48_8_t *) kv, 1))
+  if (clib_bihash_add_del_48_8 (&rm->hash, &kv->kv, 1))
     {
       ip6_sv_reass_free (vm, rm, rt, reass);
       reass = NULL;
@@ -362,10 +378,9 @@ ip6_sv_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
 }
 
 always_inline ip6_sv_reass_rc_t
-ip6_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
-                     ip6_sv_reass_main_t * rm, ip6_sv_reass_per_thread_t * rt,
-                     ip6_sv_reass_t * reass, u32 bi0,
-                     ip6_frag_hdr_t * frag_hdr)
+ip6_sv_reass_update (vlib_main_t *vm, vlib_node_runtime_t *node,
+                     ip6_sv_reass_main_t *rm, ip6_sv_reass_t *reass, u32 bi0,
+                     ip6_frag_hdr_t *frag_hdr)
 {
   vlib_buffer_t *fb = vlib_get_buffer (vm, bi0);
   vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
@@ -391,23 +406,18 @@ ip6_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
   fvnb->ip.reass.next_range_bi = ~0;
   if (0 == fragment_first)
     {
-      ip6_ext_header_t *ext_hdr = (void *) frag_hdr;
-      while (ip6_ext_hdr (ext_hdr->next_hdr)
-             && vlib_object_within_buffer_data (vm, fb, ext_hdr,
-                                                ext_hdr->n_data_u64s * 8))
-        {
-          ext_hdr = ip6_ext_next_header (ext_hdr);
-        }
-      reass->ip_proto = ext_hdr->next_hdr;
-      reass->l4_src_port = ip6_get_port (fip, 1, fb->current_length);
-      reass->l4_dst_port = ip6_get_port (fip, 0, fb->current_length);
-      if (!reass->l4_src_port || !reass->l4_dst_port)
+      if (!ip6_get_port
+          (vm, fb, fip, fb->current_length, &reass->ip_proto,
+           &reass->l4_src_port, &reass->l4_dst_port,
+           &reass->icmp_type_or_tcp_flags, &reass->tcp_ack_number,
+           &reass->tcp_seq_number))
        return IP6_SV_REASS_RC_UNSUPP_IP_PROTO;
+
       reass->is_complete = true;
       vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
       if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
-          ip6_sv_reass_add_trace (vm, node, rm, reass, bi0, REASS_FINISH,
+          ip6_sv_reass_add_trace (vm, node, reass, bi0, REASS_FINISH,
                                   reass->ip_proto, reass->l4_src_port,
                                   reass->l4_dst_port);
        }
@@ -417,9 +427,9 @@ ip6_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
     {
       if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
-          ip6_sv_reass_add_trace (vm, node, rm, reass, bi0,
-                                  REASS_FRAGMENT_CACHE, reass->ip_proto,
-                                  reass->l4_src_port, reass->l4_dst_port);
+          ip6_sv_reass_add_trace (vm, node, reass, bi0, REASS_FRAGMENT_CACHE,
+                                  reass->ip_proto, reass->l4_src_port,
+                                  reass->l4_dst_port);
        }
       if (vec_len (reass->cached_buffers) > rm->max_reass_len)
        {
@@ -453,7 +463,6 @@ ip6_sv_reass_verify_upper_layer_present (vlib_node_runtime_t * node,
 
 always_inline bool
 ip6_sv_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
-                                         vlib_node_runtime_t * node,
                                          vlib_buffer_t * b,
                                          ip6_frag_hdr_t * frag_hdr)
 {
@@ -475,7 +484,6 @@ ip6_sv_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
 
 always_inline bool
 ip6_sv_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
-                                        vlib_node_runtime_t * node,
                                         vlib_buffer_t * b,
                                         ip6_frag_hdr_t * frag_hdr)
 {
@@ -520,7 +528,6 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
           vlib_buffer_t *b0;
           u32 next0 = IP6_SV_REASSEMBLY_NEXT_DROP;
           u32 error0 = IP6_ERROR_NONE;
-          u32 icmp_bi = ~0;
 
           bi0 = from[0];
           b0 = vlib_get_buffer (vm, bi0);
@@ -538,14 +545,34 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
           if (!frag_hdr)
             {
               // this is a regular packet - no fragmentation
-              vnet_buffer (b0)->ip.reass.ip_proto = ip0->protocol;
-              vnet_buffer (b0)->ip.reass.l4_src_port =
-                ip6_get_port (ip0, 1, b0->current_length);
-              vnet_buffer (b0)->ip.reass.l4_dst_port =
-                ip6_get_port (ip0, 0, b0->current_length);
+              if (!ip6_get_port
+                  (vm, b0, ip0, b0->current_length,
+                   &(vnet_buffer (b0)->ip.reass.ip_proto),
+                   &(vnet_buffer (b0)->ip.reass.l4_src_port),
+                   &(vnet_buffer (b0)->ip.reass.l4_dst_port),
+                   &(vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags),
+                   &(vnet_buffer (b0)->ip.reass.tcp_ack_number),
+                   &(vnet_buffer (b0)->ip.reass.tcp_seq_number)))
+                {
+                  error0 = IP6_ERROR_REASS_UNSUPP_IP_PROTO;
+                  b0->error = node->errors[error0];
+                  next0 = IP6_SV_REASSEMBLY_NEXT_DROP;
+                  goto packet_enqueue;
+                }
+              vnet_buffer (b0)->ip.reass.is_non_first_fragment = 0;
               next0 = IP6_SV_REASSEMBLY_NEXT_INPUT;
+              if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+                {
+                  ip6_sv_reass_add_trace (
+                    vm, node, NULL, bi0, REASS_PASSTHROUGH,
+                    vnet_buffer (b0)->ip.reass.ip_proto,
+                    vnet_buffer (b0)->ip.reass.l4_src_port,
+                    vnet_buffer (b0)->ip.reass.l4_dst_port);
+                }
               goto packet_enqueue;
             }
+          vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
+            (u8 *) frag_hdr - (u8 *) ip0;
           if (0 == ip6_frag_hdr_offset (frag_hdr))
             {
               // first fragment - verify upper-layer is present
@@ -556,16 +583,12 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
                  goto packet_enqueue;
                }
             }
-          if (!ip6_sv_reass_verify_fragment_multiple_8
-              (vm, node, b0, frag_hdr)
-              || !ip6_sv_reass_verify_packet_size_lt_64k (vm, node, b0,
-                                                          frag_hdr))
+          if (!ip6_sv_reass_verify_fragment_multiple_8 (vm, b0, frag_hdr) ||
+              !ip6_sv_reass_verify_packet_size_lt_64k (vm, b0, frag_hdr))
             {
               next0 = IP6_SV_REASSEMBLY_NEXT_ICMP_ERROR;
               goto packet_enqueue;
             }
-          vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
-            (u8 *) frag_hdr - (u8 *) ip0;
 
           ip6_sv_reass_kv_t kv;
           u8 do_handoff = 0;
@@ -581,8 +604,7 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
           kv.k.as_u64[5] = ip0->protocol;
 
           ip6_sv_reass_t *reass =
-            ip6_sv_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
-                                         &do_handoff);
+            ip6_sv_reass_find_or_create (vm, rm, rt, &kv, &do_handoff);
 
           if (PREDICT_FALSE (do_handoff))
             {
@@ -596,57 +618,56 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
             {
               next0 = IP6_SV_REASSEMBLY_NEXT_DROP;
               error0 = IP6_ERROR_REASS_LIMIT_REACHED;
+              b0->error = node->errors[error0];
               goto packet_enqueue;
             }
 
           if (reass->is_complete)
             {
+              vnet_buffer (b0)->ip.reass.is_non_first_fragment =
+                ! !ip6_frag_hdr_offset (frag_hdr);
               vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
+              vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
+                reass->icmp_type_or_tcp_flags;
+              vnet_buffer (b0)->ip.reass.tcp_ack_number =
+                reass->tcp_ack_number;
+              vnet_buffer (b0)->ip.reass.tcp_seq_number =
+                reass->tcp_seq_number;
               vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
               vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
               next0 = IP6_SV_REASSEMBLY_NEXT_INPUT;
-              error0 = IP6_ERROR_NONE;
               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
-                  ip6_sv_reass_add_trace (vm, node, rm, reass, bi0,
-                                          REASS_FRAGMENT_FORWARD,
-                                          reass->ip_proto,
-                                          reass->l4_src_port,
-                                          reass->l4_dst_port);
+                  ip6_sv_reass_add_trace (
+                    vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
+                    reass->ip_proto, reass->l4_src_port, reass->l4_dst_port);
                }
               goto packet_enqueue;
             }
 
-          switch (ip6_sv_reass_update
-                  (vm, node, rm, rt, reass, bi0, frag_hdr))
+          u32 counter = ~0;
+          switch (ip6_sv_reass_update (vm, node, rm, reass, bi0, frag_hdr))
             {
             case IP6_SV_REASS_RC_OK:
               /* nothing to do here */
               break;
             case IP6_SV_REASS_RC_TOO_MANY_FRAGMENTS:
-              vlib_node_increment_counter (vm, node->node_index,
-                                           IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
-                                           1);
-              ip6_sv_reass_free (vm, rm, rt, reass);
-              goto next_packet;
+              counter = IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG;
               break;
             case IP6_SV_REASS_RC_UNSUPP_IP_PROTO:
-              vlib_node_increment_counter (vm, node->node_index,
-                                           IP6_ERROR_REASS_UNSUPP_IP_PROTO,
-                                           1);
-              ip6_sv_reass_free (vm, rm, rt, reass);
-              goto next_packet;
+              counter = IP6_ERROR_REASS_UNSUPP_IP_PROTO;
               break;
             case IP6_SV_REASS_RC_INTERNAL_ERROR:
-              vlib_node_increment_counter (vm, node->node_index,
-                                           IP6_ERROR_REASS_INTERNAL_ERROR, 1);
+              counter = IP6_ERROR_REASS_INTERNAL_ERROR;
+              break;
+            }
+          if (~0 != counter)
+            {
+              vlib_node_increment_counter (vm, node->node_index, counter, 1);
               ip6_sv_reass_free (vm, rm, rt, reass);
               goto next_packet;
-              break;
             }
-          b0->error = node->errors[error0];
-
           if (reass->is_complete)
             {
               u32 idx;
@@ -668,16 +689,25 @@
                {
                  vnet_feature_next (&next0, b0);
                }
+              frag_hdr =
+                vlib_buffer_get_current (b0) +
+                vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset;
+              vnet_buffer (b0)->ip.reass.is_non_first_fragment =
+                ! !ip6_frag_hdr_offset (frag_hdr);
               vnet_buffer (b0)->ip.reass.ip_proto = reass->ip_proto;
+              vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags =
+                reass->icmp_type_or_tcp_flags;
+              vnet_buffer (b0)->ip.reass.tcp_ack_number =
+                reass->tcp_ack_number;
+              vnet_buffer (b0)->ip.reass.tcp_seq_number =
+                reass->tcp_seq_number;
               vnet_buffer (b0)->ip.reass.l4_src_port = reass->l4_src_port;
               vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
               if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
-                  ip6_sv_reass_add_trace (vm, node, rm, reass, bi0,
-                                          REASS_FRAGMENT_FORWARD,
-                                          reass->ip_proto,
-                                          reass->l4_src_port,
-                                          reass->l4_dst_port);
+                  ip6_sv_reass_add_trace (
+                    vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
+                    reass->ip_proto, reass->l4_src_port, reass->l4_dst_port);
                }
               vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                                n_left_to_next, bi0,
@@ -699,17 +729,6 @@
           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                            n_left_to_next, bi0, next0);
 
-          if (~0 != icmp_bi)
-            {
-              next0 = IP6_SV_REASSEMBLY_NEXT_ICMP_ERROR;
-              to_next[0] = icmp_bi;
-              to_next += 1;
-              n_left_to_next -= 1;
-              vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
-                                               n_left_to_next, icmp_bi,
-                                               next0);
-            }
-
        next_packet:
           from += 1;
           n_left_from -= 1;
@@ -818,7 +837,7 @@ typedef struct
   clib_bihash_48_8_t *new_hash;
 } ip6_rehash_cb_ctx;
 
-static void
+static int
 ip6_rehash_cb (clib_bihash_kv_48_8_t * kv, void *_ctx)
 {
   ip6_rehash_cb_ctx *ctx = _ctx;
@@ -826,6 +845,7 @@ ip6_rehash_cb (clib_bihash_kv_48_8_t * kv, void *_ctx)
     {
       ctx->failure = 1;
     }
+  return (BIHASH_WALK_CONTINUE);
 }
 
 static void
@@ -931,8 +951,6 @@ ip6_sv_reass_init_function (vlib_main_t * vm)
   if ((error = vlib_call_init_function (vm, ip_main_init)))
     return error;
 
-  ip6_register_protocol (IP_PROTOCOL_IPV6_FRAGMENTATION,
-                         ip6_sv_reass_node.index);
   rm->fq_index = vlib_frame_queue_main_init (ip6_sv_reass_node.index, 0);
   rm->fq_feature_index =
@@ -947,8 +965,9 @@ VLIB_INIT_FUNCTION (ip6_sv_reass_init_function);
 #endif /* CLIB_MARCH_VARIANT */
 
 static uword
-ip6_sv_reass_walk_expired (vlib_main_t * vm,
-                           vlib_node_runtime_t * node, vlib_frame_t * f)
+ip6_sv_reass_walk_expired (vlib_main_t *vm,
+                           CLIB_UNUSED (vlib_node_runtime_t *node),
+                           CLIB_UNUSED (vlib_frame_t *f))
 {
   ip6_sv_reass_main_t *rm = &ip6_sv_reass_main;
   uword event_type, *event_data = 0;
@@ -962,10 +981,11 @@ ip6_sv_reass_walk_expired (vlib_main_t * vm,
       switch (event_type)
        {
-       case ~0:                /* no events => timeout */
-         /* nothing to do here */
-         break;
+       case ~0:
+         /* no events => timeout */
+         /* fallthrough */
        case IP6_EVENT_CONFIG_CHANGED:
+         /* nothing to do here */
          break;
        default:
          clib_warning ("BUG: event type 0x%wx", event_type);
@@ -979,7 +999,6 @@ ip6_sv_reass_walk_expired (vlib_main_t * vm,
       uword thread_index = 0;
       int index;
       const uword nthreads = vlib_num_workers () + 1;
-      u32 *vec_icmp_bi = NULL;
       for (thread_index = 0; thread_index < nthreads; ++thread_index)
        {
          ip6_sv_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
@@ -987,13 +1006,13 @@ ip6_sv_reass_walk_expired (vlib_main_t * vm,
          vec_reset_length (pool_indexes_to_free);
          /* *INDENT-OFF* */
-         pool_foreach_index (index, rt->pool, ({
+         pool_foreach_index (index, rt->pool)  {
            reass = pool_elt_at_index (rt->pool, index);
            if (now > reass->last_heard + rm->timeout)
              {
                vec_add1 (pool_indexes_to_free, index);
              }
-         }));
+         }
          /* *INDENT-ON* */
          int *i;
          /* *INDENT-OFF* */
@@ -1007,33 +1026,7 @@ ip6_sv_reass_walk_expired (vlib_main_t * vm,
          clib_spinlock_unlock (&rt->lock);
        }
 
-      while (vec_len (vec_icmp_bi) > 0)
-       {
-         vlib_frame_t *f =
-           vlib_get_frame_to_node (vm, rm->ip6_icmp_error_idx);
-         u32 *to_next = vlib_frame_vector_args (f);
-         u32 n_left_to_next = VLIB_FRAME_SIZE - f->n_vectors;
-         int trace_frame = 0;
-         while (vec_len (vec_icmp_bi) > 0 && n_left_to_next > 0)
-           {
-             u32 bi = vec_pop (vec_icmp_bi);
-             vlib_buffer_t *b = vlib_get_buffer (vm, bi);
-             if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
-               {
-                 trace_frame = 1;
-               }
-             b->error = node->errors[IP6_ERROR_REASS_TIMEOUT];
-             to_next[0] = bi;
-             ++f->n_vectors;
-             to_next += 1;
-             n_left_to_next -= 1;
-           }
-         f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
-         vlib_put_frame_to_node (vm, rm->ip6_icmp_error_idx, f);
-       }
-
       vec_free (pool_indexes_to_free);
-      vec_free (vec_icmp_bi);
       if (event_data)
        {
          _vec_len (event_data) = 0;
       }
@@ -1060,9 +1053,10 @@
 static u8 *
 format_ip6_sv_reass_key (u8 * s, va_list * args)
 {
   ip6_sv_reass_key_t *key = va_arg (*args, ip6_sv_reass_key_t *);
-  s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
-              key->xx_id, format_ip6_address, &key->src, format_ip6_address,
-              &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
+  s =
+    format (s, "fib_index: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
+            key->fib_index, format_ip6_address, &key->src, format_ip6_address,
+            &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
   return s;
 }
@@ -1120,9 +1114,9 @@ show_ip6_sv_reass (vlib_main_t * vm, unformat_input_t * input,
       if (details)
        {
          /* *INDENT-OFF* */
-         pool_foreach (reass, rt->pool, {
+         pool_foreach (reass, rt->pool) {
            vlib_cli_output (vm, "%U", format_ip6_sv_reass, vm, reass);
-         });
+         }
          /* *INDENT-ON* */
        }
       sum_reass_n += rt->reass_n;
@@ -1131,8 +1125,19 @@ show_ip6_sv_reass (vlib_main_t * vm, unformat_input_t * input,
   vlib_cli_output (vm, "---------------------");
   vlib_cli_output (vm, "Current IP6 reassemblies count: %lu\n",
                   (long unsigned) sum_reass_n);
-  vlib_cli_output (vm, "Maximum configured concurrent IP6 reassemblies per "
-                  "worker-thread: %lu\n", (long unsigned) rm->max_reass_n);
+  vlib_cli_output (vm,
+                  "Maximum configured concurrent shallow virtual IP6 reassemblies per worker-thread: %lu\n",
+                  (long unsigned) rm->max_reass_n);
+  vlib_cli_output (vm,
+                  "Maximum configured amount of fragments per shallow "
+                  "virtual IP6 reassembly: %lu\n",
+                  (long unsigned) rm->max_reass_len);
+  vlib_cli_output (vm,
+                  "Maximum configured shallow virtual IP6 reassembly timeout: %lums\n",
+                  (long unsigned) rm->timeout_ms);
+  vlib_cli_output (vm,
+                  "Maximum configured shallow virtual IP6 reassembly expire walk interval: %lums\n",
+                  (long unsigned) rm->expire_walk_interval_ms);
   vlib_cli_output (vm, "Buffers in use: %lu\n",
                   (long unsigned) sum_buffers_n);
   return 0;
@@ -1150,9 +1155,8 @@ VLIB_CLI_COMMAND (show_ip6_sv_reassembly_cmd, static) = {
 vnet_api_error_t
 ip6_sv_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
 {
-  return vnet_feature_enable_disable ("ip6-unicast",
-                                      "ip6-sv-reassembly-feature",
-                                      sw_if_index, enable_disable, 0, 0);
+  return ip6_sv_reass_enable_disable_with_refcnt (sw_if_index,
+                                                  enable_disable);
 }
 
 #endif /* CLIB_MARCH_VARIANT */
@@ -1232,9 +1236,8 @@ ip6_sv_reassembly_handoff_inline (vlib_main_t * vm,
       ti += 1;
       b += 1;
     }
 
-  n_enq =
-    vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
-                                   frame->n_vectors, 1);
+  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
+                                         thread_indices, frame->n_vectors, 1);
   if (n_enq < frame->n_vectors)
     vlib_node_increment_counter (vm, node->node_index,
@@ -1315,7 +1318,7 @@ ip6_sv_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
                                          "ip6-sv-reassembly-feature",
                                          sw_if_index, 0, 0, 0);
     }
-  return -1;
+  return 0;
 }
 
 #endif