From: Mohammed Hawari
Date: Thu, 2 Jun 2022 16:04:45 +0000 (+0200)
Subject: ip: reassembly - custom context instead of VRF
X-Git-Tag: v23.06-rc0~219
X-Git-Url: https://gerrit.fd.io/r/gitweb?a=commitdiff_plain;h=41404bf64c604565a42aa6d7e03fd7b952f1eee1;p=vpp.git

ip: reassembly - custom context instead of VRF

Change-Id: Id8d6ab96a710cdd207068cf19a6363bbcd584de4
Type: improvement
Signed-off-by: Mohammed Hawari
---

diff --git a/src/vnet/ip/reass/ip4_sv_reass.c b/src/vnet/ip/reass/ip4_sv_reass.c
index 4ef144e9bee..41077b746a3 100644
--- a/src/vnet/ip/reass/ip4_sv_reass.c
+++ b/src/vnet/ip/reass/ip4_sv_reass.c
@@ -150,6 +150,7 @@ typedef struct
   /** Worker handoff */
   u32 fq_index;
   u32 fq_feature_index;
+  u32 fq_custom_context_index;
 
   // reference count for enabling/disabling feature - per interface
   u32 *feature_use_refcount_per_intf;
@@ -457,14 +458,19 @@ l4_layer_truncated (ip4_header_t *ip)
 }
 
 always_inline uword
-ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-		     vlib_frame_t * frame, bool is_feature,
-		     bool is_output_feature, bool is_custom)
+ip4_sv_reass_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+		     vlib_frame_t *frame, bool is_feature,
+		     bool is_output_feature, bool is_custom,
+		     bool with_custom_context)
 {
   u32 *from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, *to_next, next_index;
+  u32 n_left_from, n_left_to_next, *to_next, *to_next_aux, next_index;
   ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
   ip4_sv_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
+  u32 *context;
+  if (with_custom_context)
+    context = vlib_frame_aux_args (frame);
 
   clib_spinlock_lock (&rt->lock);
   n_left_from = frame->n_vectors;
@@ -621,6 +627,8 @@ ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 	  next[0] = next0;
 	  next[1] = next1;
 	  next += 2;
+	  if (with_custom_context)
+	    context += 2;
 	}
 
       while (n_left_from > 0)
@@ -696,6 +704,8 @@ ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 	  n_left_from -= 1;
 	  next[0] = next0;
 	  next += 1;
+	  if (with_custom_context)
+	    context += 1;
 	}
 
       vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
@@ -709,7 +719,11 @@ slow_path:
 
   while (n_left_from > 0)
     {
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      if (with_custom_context)
+	vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next,
+					   to_next_aux, n_left_to_next);
+      else
+	vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
       while (n_left_from > 0 && n_left_to_next > 0)
 	{
@@ -717,6 +731,7 @@ slow_path:
 	  vlib_buffer_t *b0;
 	  u32 next0;
 	  u32 error0 = IP4_ERROR_NONE;
+	  u8 forward_context = 0;
 
 	  bi0 = from[0];
 	  b0 = vlib_get_buffer (vm, bi0);
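The hunk below is where the custom context actually replaces the VRF: the
first u64 of the 16-byte lookup key used to carry the FIB index of the RX
interface, and now carries the caller-supplied u32 when with_custom_context
is set. A minimal sketch of the two layouts (ctx, fib_index, src, dst,
frag_id and proto are illustrative stand-ins for the values read from the
aux slot and the IP header, not the literal struct fields):

    /* key packing sketch -- see ip4_sv_reass_kv_t usage in the hunk below */
    u64 k0_vrf = (u64) fib_index | (u64) src << 32; /* default nodes */
    u64 k0_ctx = (u64) ctx | (u64) src << 32;	    /* custom-context node */
    u64 k1 = (u64) dst | (u64) frag_id << 32 | (u64) proto << 48;

Fragments therefore only reassemble together when they carry the same
context value, exactly as they previously had to share a VRF.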
@@ -792,13 +807,17 @@ slow_path:
 	  ip4_sv_reass_kv_t kv;
 	  u8 do_handoff = 0;
 
-	  kv.k.as_u64[0] =
-	    (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
-			   vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
-	    (u64) ip0->src_address.as_u32 << 32;
-	  kv.k.as_u64[1] =
-	    (u64) ip0->dst_address.
-	    as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
+	  if (with_custom_context)
+	    kv.k.as_u64[0] =
+	      (u64) *context | (u64) ip0->src_address.as_u32 << 32;
+	  else
+	    kv.k.as_u64[0] =
+	      (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
+			     vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
+	      (u64) ip0->src_address.as_u32 << 32;
+	  kv.k.as_u64[1] = (u64) ip0->dst_address.as_u32 |
+			   (u64) ip0->fragment_id << 32 |
+			   (u64) ip0->protocol << 48;
 
 	  ip4_sv_reass_t *reass =
 	    ip4_sv_reass_find_or_create (vm, rm, rt, &kv, &do_handoff);
@@ -808,6 +827,8 @@ slow_path:
 	      next0 = IP4_SV_REASSEMBLY_NEXT_HANDOFF;
 	      vnet_buffer (b0)->ip.reass.owner_thread_index =
 		kv.v.thread_index;
+	      if (with_custom_context)
+		forward_context = 1;
 	      goto packet_enqueue;
 	    }
 
@@ -938,13 +959,26 @@ slow_path:
 	      b0 = vlib_get_buffer (vm, bi0);
 	      vnet_feature_next (&next0, b0);
 	    }
-	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-					   to_next, n_left_to_next,
-					   bi0, next0);
+	  if (with_custom_context && forward_context)
+	    {
+	      if (to_next_aux)
+		{
+		  to_next_aux[0] = *context;
+		  to_next_aux += 1;
+		}
+	      vlib_validate_buffer_enqueue_with_aux_x1 (
+		vm, node, next_index, to_next, to_next_aux, n_left_to_next,
+		bi0, *context, next0);
+	    }
+	  else
+	    vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					     n_left_to_next, bi0, next0);
 
 	next_packet:
 	  from += 1;
 	  n_left_from -= 1;
+	  if (with_custom_context)
+	    context += 1;
 	}
 
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
@@ -959,9 +993,9 @@ VLIB_NODE_FN (ip4_sv_reass_node) (vlib_main_t * vm,
 				  vlib_node_runtime_t * node,
 				  vlib_frame_t * frame)
 {
-  return ip4_sv_reass_inline (vm, node, frame, false /* is_feature */ ,
-			      false /* is_output_feature */ ,
-			      false /* is_custom */ );
+  return ip4_sv_reass_inline (
+    vm, node, frame, false /* is_feature */, false /* is_output_feature */,
+    false /* is_custom */, false /* with_custom_context */);
 }
 
 /* *INDENT-OFF* */
@@ -986,9 +1020,9 @@ VLIB_NODE_FN (ip4_sv_reass_node_feature) (vlib_main_t * vm,
 					  vlib_node_runtime_t * node,
 					  vlib_frame_t * frame)
 {
-  return ip4_sv_reass_inline (vm, node, frame, true /* is_feature */ ,
-			      false /* is_output_feature */ ,
-			      false /* is_custom */ );
+  return ip4_sv_reass_inline (
+    vm, node, frame, true /* is_feature */, false /* is_output_feature */,
+    false /* is_custom */, false /* with_custom_context */);
 }
 
 /* *INDENT-OFF* */
@@ -1021,9 +1055,9 @@ VLIB_NODE_FN (ip4_sv_reass_node_output_feature) (vlib_main_t * vm,
 						 vlib_node_runtime_t * node,
 						 vlib_frame_t * frame)
 {
-  return ip4_sv_reass_inline (vm, node, frame, true /* is_feature */ ,
-			      true /* is_output_feature */ ,
-			      false /* is_custom */ );
+  return ip4_sv_reass_inline (
+    vm, node, frame, true /* is_feature */, true /* is_output_feature */,
+    false /* is_custom */, false /* with_custom_context */);
 }
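The template booleans above fan out into the existing node flavours; the new
with_custom_context flavour registered below additionally declares .aux_size
on its frames, so every buffer-index slot in a frame is paired with a u32
slot. A caller feeds "ip4-sv-reassembly-custom-context" like any other next
node, only using the _with_aux enqueue so the context travels with the
buffer. A sketch, assuming the _with_aux enqueue helper from the same aux-data
series, where from, my_session_id and my_reass_next_edge are illustrative
caller-side names (and, as with the existing custom node, the caller is
expected to have set vnet_buffer (b)->ip.reass.next_index beforehand):

    /* in the caller's node function -- illustrative only */
    u32 bi0 = from[0];		    /* buffer to reassemble */
    u32 ctx0 = my_session_id;	    /* caller-defined u32, replaces the VRF */
    u16 next0 = my_reass_next_edge; /* caller's edge to the new node */
    vlib_buffer_enqueue_to_next_with_aux (vm, node, &bi0, &ctx0, &next0, 1);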
@@ -1053,7 +1087,6 @@ VNET_FEATURE_INIT (ip4_sv_reass_output_feature) = {
 };
 /* *INDENT-ON* */
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_sv_reass_custom_node) = {
   .name = "ip4-sv-reassembly-custom-next",
   .vector_size = sizeof (u32),
@@ -1069,15 +1102,39 @@ VLIB_REGISTER_NODE (ip4_sv_reass_custom_node) = {
   },
 };
-/* *INDENT-ON* */
 
 VLIB_NODE_FN (ip4_sv_reass_custom_node) (vlib_main_t * vm,
 					 vlib_node_runtime_t * node,
 					 vlib_frame_t * frame)
 {
-  return ip4_sv_reass_inline (vm, node, frame, false /* is_feature */ ,
-			      false /* is_output_feature */ ,
-			      true /* is_custom */ );
+  return ip4_sv_reass_inline (
+    vm, node, frame, false /* is_feature */, false /* is_output_feature */,
+    true /* is_custom */, false /* with_custom_context */);
+}
+
+VLIB_REGISTER_NODE (ip4_sv_reass_custom_context_node) = {
+  .name = "ip4-sv-reassembly-custom-context",
+  .vector_size = sizeof (u32),
+  .aux_size = sizeof (u32),
+  .format_trace = format_ip4_sv_reass_trace,
+  .n_errors = IP4_N_ERROR,
+  .error_counters = ip4_error_counters,
+  .n_next_nodes = IP4_SV_REASSEMBLY_N_NEXT,
+  .next_nodes =
+    {
+      [IP4_SV_REASSEMBLY_NEXT_INPUT] = "ip4-input",
+      [IP4_SV_REASSEMBLY_NEXT_DROP] = "ip4-drop",
+      [IP4_SV_REASSEMBLY_NEXT_HANDOFF] =
+	"ip4-sv-reassembly-custom-context-handoff",
+
+    },
+};
+
+VLIB_NODE_FN (ip4_sv_reass_custom_context_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return ip4_sv_reass_inline (
+    vm, node, frame, false /* is_feature */, false /* is_output_feature */,
+    true /* is_custom */, true /* with_custom_context */);
 }
 
 #ifndef CLIB_MARCH_VARIANT
@@ -1222,6 +1279,8 @@ ip4_sv_reass_init_function (vlib_main_t * vm)
   rm->fq_index = vlib_frame_queue_main_init (ip4_sv_reass_node.index, 0);
   rm->fq_feature_index =
     vlib_frame_queue_main_init (ip4_sv_reass_node_feature.index, 0);
+  rm->fq_custom_context_index =
+    vlib_frame_queue_main_init (ip4_sv_reass_custom_context_node.index, 0);
 
   rm->feature_use_refcount_per_intf = NULL;
   rm->output_feature_use_refcount_per_intf = NULL;
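A consumer chooses where packets continue after reassembly by registering its
own node as a next of the custom-context node, using the helper added at the
end of this patch. A minimal init-time sketch, in which my_feature_init,
my_main and my_node are hypothetical consumer-side names; the returned edge
index is what would later be stored in vnet_buffer (b)->ip.reass.next_index:

    #include <vnet/ip/reass/ip4_sv_reass.h>

    static clib_error_t *
    my_feature_init (vlib_main_t *vm)
    {
      /* remember the arc back into our graph after reassembly */
      my_main.reass_next_index =
	ip4_sv_reass_custom_context_register_next_node (my_node.index);
      return 0;
    }

    VLIB_INIT_FUNCTION (my_feature_init);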
@@ -1466,25 +1525,30 @@ format_ip4_sv_reass_handoff_trace (u8 * s, va_list * args)
 }
 
 always_inline uword
-ip4_sv_reass_handoff_node_inline (vlib_main_t * vm,
-				  vlib_node_runtime_t * node,
-				  vlib_frame_t * frame, bool is_feature)
+ip4_sv_reass_handoff_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+				  vlib_frame_t *frame, bool is_feature,
+				  bool is_custom_context)
 {
   ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
 
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
-  u32 n_enq, n_left_from, *from;
+  u32 n_enq, n_left_from, *from, *context;
   u16 thread_indices[VLIB_FRAME_SIZE], *ti;
   u32 fq_index;
 
   from = vlib_frame_vector_args (frame);
+  if (is_custom_context)
+    context = vlib_frame_aux_args (frame);
+
   n_left_from = frame->n_vectors;
   vlib_get_buffers (vm, from, bufs, n_left_from);
 
   b = bufs;
   ti = thread_indices;
 
-  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
+  fq_index = (is_feature) ? rm->fq_feature_index :
+			    (is_custom_context ? rm->fq_custom_context_index :
+						 rm->fq_index);
 
   while (n_left_from > 0)
     {
@@ -1503,8 +1567,12 @@ ip4_sv_reass_handoff_node_inline (vlib_main_t * vm,
       ti += 1;
       b += 1;
     }
-  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
-					 thread_indices, frame->n_vectors, 1);
+  if (is_custom_context)
+    n_enq = vlib_buffer_enqueue_to_thread_with_aux (
+      vm, node, fq_index, from, context, thread_indices, frame->n_vectors, 1);
+  else
+    n_enq = vlib_buffer_enqueue_to_thread (
+      vm, node, fq_index, from, thread_indices, frame->n_vectors, 1);
 
   if (n_enq < frame->n_vectors)
     vlib_node_increment_counter (vm, node->node_index,
@@ -1517,8 +1585,8 @@ VLIB_NODE_FN (ip4_sv_reass_handoff_node) (vlib_main_t * vm,
 					  vlib_node_runtime_t * node,
 					  vlib_frame_t * frame)
 {
-  return ip4_sv_reass_handoff_node_inline (vm, node, frame,
-					   false /* is_feature */ );
+  return ip4_sv_reass_handoff_node_inline (
+    vm, node, frame, false /* is_feature */, false /* is_custom_context */);
 }
 
@@ -1538,6 +1606,27 @@ VLIB_REGISTER_NODE (ip4_sv_reass_handoff_node) = {
 };
 /* *INDENT-ON* */
 
+VLIB_NODE_FN (ip4_sv_reass_custom_context_handoff_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return ip4_sv_reass_handoff_node_inline (
+    vm, node, frame, false /* is_feature */, true /* is_custom_context */);
+}
+
+VLIB_REGISTER_NODE (ip4_sv_reass_custom_context_handoff_node) = {
+  .name = "ip4-sv-reassembly-custom-context-handoff",
+  .vector_size = sizeof (u32),
+  .aux_size = sizeof (u32),
+  .n_errors = ARRAY_LEN(ip4_sv_reass_handoff_error_strings),
+  .error_strings = ip4_sv_reass_handoff_error_strings,
+  .format_trace = format_ip4_sv_reass_handoff_trace,
+
+  .n_next_nodes = 1,
+
+  .next_nodes = {
+    [0] = "error-drop",
+  },
+};
+
 /* *INDENT-OFF* */
 VLIB_NODE_FN (ip4_sv_reass_feature_handoff_node) (vlib_main_t * vm,
 						  vlib_node_runtime_t * node,
 						  vlib_frame_t * frame)
 {
-  return ip4_sv_reass_handoff_node_inline (vm, node, frame,
-					   true /* is_feature */ );
+  return ip4_sv_reass_handoff_node_inline (
+    vm, node, frame, true /* is_feature */, false /* is_custom_context */);
 }
 /* *INDENT-ON* */
 
@@ -1603,6 +1692,13 @@ ip4_sv_reass_custom_register_next_node (uword node_index)
 				 node_index);
 }
 
+uword
+ip4_sv_reass_custom_context_register_next_node (uword node_index)
+{
+  return vlib_node_add_next (
+    vlib_get_main (), ip4_sv_reass_custom_context_node.index, node_index);
+}
+
 int
 ip4_sv_reass_output_enable_disable_with_refcnt (u32 sw_if_index,
 						int is_enable)
diff --git a/src/vnet/ip/reass/ip4_sv_reass.h b/src/vnet/ip/reass/ip4_sv_reass.h
index e926dbeebcc..3a684eb9809 100644
--- a/src/vnet/ip/reass/ip4_sv_reass.h
+++ b/src/vnet/ip/reass/ip4_sv_reass.h
@@ -49,6 +49,7 @@ int ip4_sv_reass_output_enable_disable_with_refcnt (u32 sw_if_index,
 						    int is_enable);
 
 uword ip4_sv_reass_custom_register_next_node (uword node_index);
+uword ip4_sv_reass_custom_context_register_next_node (uword node_index);
 
 #endif /* __included_ip4_sv_reass_h__ */
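The handoff pair added above is what keeps the context coherent across
workers: when ip4_sv_reass_find_or_create decides the flow is owned by
another thread, the packet is steered to
"ip4-sv-reassembly-custom-context-handoff", and
vlib_buffer_enqueue_to_thread_with_aux carries the u32 through the frame
queue alongside each buffer index. The receiving worker needs no special
casing, because frames delivered by the frame queue carry the aux slots too,
and the node body recovers them exactly as it does for directly-enqueued
frames:

    /* receiving side, as at the top of ip4_sv_reass_inline above */
    u32 *context = 0;
    if (with_custom_context)
      context = vlib_frame_aux_args (frame); /* one u32 per vector slot */
    /* context[i] then pairs with the i-th buffer of the frame */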