/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/handoff.h>
19 #include <vnet/fib/ip4_fib.h>
20 #include <vppinfra/error.h>
22 #include <nat/nat44-ei/nat44_ei.h>
/* Per-packet trace record for the handoff nodes.
 * NOTE(review): the opening `typedef struct {` and the remaining fields
 * (trace_index, in2out, output — all read by the format function below)
 * appear to be elided from this view of the file; confirm against the
 * full source. */
  u32 next_worker_index; /* worker thread the packet is handed to */
} nat44_ei_handoff_trace_t;
/* X-macro listing the error counters shared by all three handoff nodes;
 * expanded below into both the counter-index enum and the
 * human-readable string table. */
#define foreach_nat44_ei_handoff_error                                        \
  _ (CONGESTION_DROP, "congestion drop")                                      \
  _ (SAME_WORKER, "same worker")                                              \
  _ (DO_HANDOFF, "do handoff")
/* Error counter indices generated from the X-macro above.
 * NOTE(review): the opening `typedef enum` line and a trailing
 * `#undef _` appear elided from this view. */
#define _(sym, str) NAT44_EI_HANDOFF_ERROR_##sym,
  foreach_nat44_ei_handoff_error
    NAT44_EI_HANDOFF_N_ERROR, /* total number of counters */
} nat44_ei_handoff_error_t;
/* Human-readable counter names, in the same order as the error enum;
 * referenced by the node registrations below. */
static char *nat44_ei_handoff_error_strings[] = {
#define _(sym, string) string,
  foreach_nat44_ei_handoff_error
/* NOTE(review): the closing `};` and `#undef _` appear elided from this
 * view of the file. */
/* vlib trace formatter shared by all three handoff nodes; renders one
 * nat44_ei_handoff_trace_t entry as a human-readable line.
 * NOTE(review): the `static u8 *` return-type line, the opening brace,
 * the `char *tag, *output;` declarations, the `s =` assignment on the
 * format call, `return s;` and the closing brace appear elided from
 * this view of the file. */
format_nat44_ei_handoff_trace (u8 *s, va_list *args)
  /* Standard vlib trace-formatter prologue: vm and node are unused. */
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  nat44_ei_handoff_trace_t *t = va_arg (*args, nat44_ei_handoff_trace_t *);

  /* Direction and feature-arc tags derived from the trace record. */
  tag = t->in2out ? "IN2OUT" : "OUT2IN";
  output = t->output ? "OUTPUT-FEATURE" : "";
  format (s, "NAT44_EI_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
	  tag, output, t->next_worker_index, t->trace_index);
/*
 * Shared worker-handoff implementation for the three NAT44-EI handoff
 * nodes (in2out, in2out-output, out2in).  For every packet in the frame
 * it computes the owning worker thread from the inner IPv4 header and
 * enqueues the buffer to that thread's frame queue; packets already on
 * the correct thread are counted as "same worker", the rest as
 * "do handoff", and enqueue failures as "congestion drop".
 *
 * NOTE(review): a number of lines of this function appear elided from
 * this view of the file (the trailing `u8 is_in2out` parameter and
 * opening brace, the is_in2out/is_output branch scaffolding, the
 * `ipN =` and some `ti[N] =` left-hand sides, the same_worker++ /
 * do_handoff++ increments, and the per-iteration `b`/`ti`/
 * `n_left_from` advances).  Comments below describe only the visible
 * code; confirm details against the full source.
 */
nat44_ei_worker_handoff_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
				   vlib_frame_t *frame, u8 is_output,
  /* Per-frame scratch: destination thread per packet + buffer pointers. */
  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;
  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  nat44_ei_main_t *nm = &nat44_ei_main;
  u32 fq_index, thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  vlib_get_buffers (vm, from, b, n_left_from);

  /* Select the frame queue matching the traffic direction / feature arc.
   * NOTE(review): the guarding if/else on the direction flag appears
   * elided here. */
  fq_index = is_output ? nm->fq_in2out_output_index : nm->fq_in2out_index;
  fq_index = nm->fq_out2in_index;

  /* Quad loop: classify four packets per iteration. */
  while (n_left_from >= 4)
      u32 arc_next0, arc_next1, arc_next2, arc_next3;
      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
      u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
      u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
      ip4_header_t *ip0, *ip1, *ip2, *ip3;

      /* Prefetch the next four buffers to hide memory latency. */
      if (PREDICT_TRUE (n_left_from >= 8))
	  vlib_prefetch_buffer_header (b[4], LOAD);
	  vlib_prefetch_buffer_header (b[5], LOAD);
	  vlib_prefetch_buffer_header (b[6], LOAD);
	  vlib_prefetch_buffer_header (b[7], LOAD);
	  clib_prefetch_load (&b[4]->data);
	  clib_prefetch_load (&b[5]->data);
	  clib_prefetch_load (&b[6]->data);
	  clib_prefetch_load (&b[7]->data);

      /* On the output feature arc the current data pointer sits at the
       * rewritten L2 header; the saved rewrite length skips to the
       * inner IPv4 header.  NOTE(review): the guarding `if (is_output)`
       * appears elided. */
      iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
      iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
      iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
      iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;

      /* NOTE(review): the `ip0 =` .. `ip3 =` left-hand sides appear
       * elided before these four expressions. */
	(ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) + iph_offset0);
	(ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) + iph_offset1);
	(ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) + iph_offset2);
	(ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) + iph_offset3);

      /* Resolve the next node on the feature arc and stash it in the
       * buffer so the target worker can resume the arc after handoff. */
      vnet_feature_next (&arc_next0, b[0]);
      vnet_feature_next (&arc_next1, b[1]);
      vnet_feature_next (&arc_next2, b[2]);
      vnet_feature_next (&arc_next3, b[3]);

      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
      vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
      vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
      vnet_buffer2 (b[3])->nat.arc_next = arc_next3;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      /* Map each RX interface to its IPv4 FIB for worker selection. */
      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
      rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
      rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
      rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);

      /* in2out direction: pick the worker from the inner header.
       * NOTE(review): the `ti[N] =` left-hand sides appear elided. */
	nat44_ei_get_in2out_worker_index (ip0, rx_fib_index0, is_output);
	nat44_ei_get_in2out_worker_index (ip1, rx_fib_index1, is_output);
	nat44_ei_get_in2out_worker_index (ip2, rx_fib_index2, is_output);
	nat44_ei_get_in2out_worker_index (ip3, rx_fib_index3, is_output);

      /* out2in direction: pick the worker from buffer/header state.
       * NOTE(review): the trailing argument lines of these calls appear
       * elided. */
      ti[0] = nat44_ei_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
      ti[1] = nat44_ei_get_out2in_worker_index (b[1], ip1, rx_fib_index1,
      ti[2] = nat44_ei_get_out2in_worker_index (b[2], ip2, rx_fib_index2,
      ti[3] = nat44_ei_get_out2in_worker_index (b[3], ip3, rx_fib_index3,

      /* Tally same-worker vs handed-off packets.
       * NOTE(review): the same_worker++ / do_handoff++ bodies and the
       * quad-loop advance statements appear elided. */
      if (ti[0] == thread_index)
      if (ti[1] == thread_index)
      if (ti[2] == thread_index)
      if (ti[3] == thread_index)

  /* Scalar tail loop: same classification as the quad loop, one packet
   * at a time.  NOTE(review): the local declarations, branch bodies and
   * loop-advance statements appear elided. */
  while (n_left_from > 0)
      iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
	(ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) + iph_offset0);
      vnet_feature_next (&arc_next0, b[0]);
      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
	nat44_ei_get_in2out_worker_index (ip0, rx_fib_index0, is_output);
      ti[0] = nat44_ei_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
      if (ti[0] == thread_index)

  /* Record one trace entry per traced buffer when tracing is enabled.
   * NOTE(review): the re-initialization of b/ti back to the array
   * starts before this loop appears elided; as shown, b[0]/ti[0] would
   * not walk the frame. */
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
      for (i = 0; i < frame->n_vectors; i++)
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	      nat44_ei_handoff_trace_t *t =
		vlib_add_trace (vm, node, b[0], sizeof (*t));
	      t->next_worker_index = ti[0];
	      t->trace_index = vlib_buffer_get_trace_index (b[0]);
	      t->in2out = is_in2out;
	      t->output = is_output;

  /* Hand the whole frame to the per-thread frame queues; the trailing
   * `1` requests dropping buffers that cannot be enqueued. */
  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
					 thread_indices, frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
      vlib_node_increment_counter (vm, node->node_index,
				   NAT44_EI_HANDOFF_ERROR_CONGESTION_DROP,
				   frame->n_vectors - n_enq);

  vlib_node_increment_counter (
    vm, node->node_index, NAT44_EI_HANDOFF_ERROR_SAME_WORKER, same_worker);
  vlib_node_increment_counter (vm, node->node_index,
			       NAT44_EI_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
  return frame->n_vectors;
/* in2out handoff node: is_output = 0, is_in2out = 1.
 * NOTE(review): the function body braces appear elided from this view. */
VLIB_NODE_FN (nat44_ei_in2out_worker_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
  return nat44_ei_worker_handoff_fn_inline (vm, node, frame, 0, 1);
/* in2out output-feature handoff node: is_output = 1, is_in2out = 1.
 * NOTE(review): the function body braces appear elided from this view. */
VLIB_NODE_FN (nat44_ei_in2out_output_worker_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
  return nat44_ei_worker_handoff_fn_inline (vm, node, frame, 1, 1);
/* out2in handoff node: is_output = 0, is_in2out = 0.
 * NOTE(review): the function body braces appear elided from this view. */
VLIB_NODE_FN (nat44_ei_out2in_worker_handoff_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
  return nat44_ei_worker_handoff_fn_inline (vm, node, frame, 0, 0);
/* Graph-node registration for the in2out output-feature handoff node.
 * NOTE(review): the closing `};` appears elided from this view. */
VLIB_REGISTER_NODE (nat44_ei_in2out_output_worker_handoff_node) = {
  .name = "nat44-ei-in2out-output-worker-handoff",
  .vector_size = sizeof (u32), /* frame carries buffer indices */
  .format_trace = format_nat44_ei_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nat44_ei_handoff_error_strings),
  .error_strings = nat44_ei_handoff_error_strings,
/* Graph-node registration for the in2out handoff node.
 * NOTE(review): the closing `};` appears elided from this view. */
VLIB_REGISTER_NODE (nat44_ei_in2out_worker_handoff_node) = {
  .name = "nat44-ei-in2out-worker-handoff",
  .vector_size = sizeof (u32), /* frame carries buffer indices */
  .format_trace = format_nat44_ei_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nat44_ei_handoff_error_strings),
  .error_strings = nat44_ei_handoff_error_strings,
/* Graph-node registration for the out2in handoff node.
 * NOTE(review): the closing `};` appears elided from this view. */
VLIB_REGISTER_NODE (nat44_ei_out2in_worker_handoff_node) = {
  .name = "nat44-ei-out2in-worker-handoff",
  .vector_size = sizeof (u32), /* frame carries buffer indices */
  .format_trace = format_nat44_ei_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (nat44_ei_handoff_error_strings),
  .error_strings = nat44_ei_handoff_error_strings,
/*
 * fd.io coding-style-patch-verification: ON
 * eval: (c-set-style "gnu")
 */