2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * @brief NAT44 worker handoff
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/handoff.h>
23 #include <vnet/fib/ip4_fib.h>
24 #include <vppinfra/error.h>
/* Per-packet trace record emitted by the NAT44 handoff nodes.
   NOTE(review): the opening "typedef struct {" line and the trace_index /
   in2out members (both read by format_nat44_handoff_trace below) are not
   visible in this chunk — confirm against the full file. */
29 u32 next_worker_index;	/* destination worker thread chosen for this packet */
32 } nat44_handoff_trace_t;
/* X-macro listing the node error counters; expanded once into the error
   enum and once into the matching string table below.
   - CONGESTION_DROP: packets dropped because the destination worker's
     handoff frame queue was full (see the n_enq check after
     vlib_buffer_enqueue_to_thread).
   - SAME_WORKER:     packets whose chosen worker is the current thread.
   - DO_HANDOFF:      packets actually handed to another worker.
   (No comments inside the macro body — the backslash continuations must
   stay unbroken.) */
34 #define foreach_nat44_handoff_error \
35 _(CONGESTION_DROP, "congestion drop") \
36 _(SAME_WORKER, "same worker") \
37 _(DO_HANDOFF, "do handoff")
/* Error-code enum generated from foreach_nat44_handoff_error; each entry
   becomes NAT44_HANDOFF_ERROR_<sym>.  NAT44_HANDOFF_N_ERROR is the count
   sentinel.  NOTE(review): the opening "typedef enum" line is not visible
   in this chunk. */
41 #define _(sym,str) NAT44_HANDOFF_ERROR_##sym,
42 foreach_nat44_handoff_error
44 NAT44_HANDOFF_N_ERROR,
45 } nat44_handoff_error_t;
/* Human-readable counter names, index-aligned with nat44_handoff_error_t;
   wired into the node registrations via .error_strings / .n_errors.
   NOTE(review): the closing "};" (and presumably an "#undef _") follow
   outside this chunk. */
47 static char *nat44_handoff_error_strings[] = {
48 #define _(sym,string) string,
49 foreach_nat44_handoff_error
/* Trace formatter shared by all three handoff nodes: prints the direction
   tag (IN2OUT/OUT2IN), the destination worker index and the buffer's trace
   index.  Standard vlib format callback signature: (vm, node, trace) pulled
   from the va_list.
   NOTE(review): the return-type line above, the local declaration of "tag",
   the "s = format (...)" assignment prefix and the trailing "return s;" are
   not visible in this chunk. */
55 format_nat44_handoff_trace (u8 * s, va_list * args)
57 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59 nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
/* Direction recorded at handoff time; see t->in2out assignment in the
   trace loop of nat44_worker_handoff_fn_inline. */
62 tag = t->in2out ? "IN2OUT" : "OUT2IN";
64 format (s, "NAT44_%s_WORKER_HANDOFF: next-worker %d trace index %d", tag,
65 t->next_worker_index, t->trace_index);
/* Shared worker-handoff engine for the three NAT44 handoff nodes.
 *
 * For every buffer in the frame it picks the owning worker thread via the
 * direction-specific callback (sm->worker_in2out_cb / worker_out2in_cb) and
 * enqueues the whole frame to the corresponding per-thread frame queue with
 * vlib_buffer_enqueue_to_thread.
 *
 * Parameters:
 *   is_output  - in2out only: selects fq_in2out_output_index (output
 *                feature path) instead of fq_in2out_index.
 *   is_in2out  - direction flag; in2out additionally resolves the RX FIB
 *                index from the RX sw_if_index before calling get_worker.
 *                NOTE(review): the "u8 is_in2out" parameter line itself is
 *                not visible in this chunk — confirm signature.
 *
 * Returns frame->n_vectors (all buffers accounted for; congestion drops
 * are counted, not returned).
 *
 * NOTE(review): this listing is gapped — opening/closing braces, the else
 * arm of the direction selection, the same_worker++/do_handoff++ updates,
 * buffer/ti pointer advances and loop decrements are missing between the
 * visible lines. */
71 nat44_worker_handoff_fn_inline (vlib_main_t * vm,
72 vlib_node_runtime_t * node,
73 vlib_frame_t * frame, u8 is_output,
/* thread_indices[i] receives the destination worker for buffer i; ti and b
   walk the two parallel arrays. */
76 u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;
78 u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
79 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
80 snat_main_t *sm = &snat_main;
82 snat_get_worker_function_t *get_worker;
83 u32 fq_index, thread_index = vm->thread_index;
85 from = vlib_frame_vector_args (frame);
86 n_left_from = frame->n_vectors;
88 vlib_get_buffers (vm, from, b, n_left_from);
/* Direction selection: in2out may use the output-feature queue; out2in has
   a single queue.  (The if/else lines around these are not visible.) */
92 fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
93 get_worker = sm->worker_in2out_cb;
97 fq_index = sm->fq_out2in_index;
98 get_worker = sm->worker_out2in_cb;
/* Quad-loop: classify four buffers per iteration. */
101 while (n_left_from >= 4)
103 u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
104 u32 rx_fib_index0 = 0, rx_fib_index1 = 0,
105 rx_fib_index2 = 0, rx_fib_index3 = 0;
106 ip4_header_t *ip0, *ip1, *ip2, *ip3;
/* Prefetch the next quad's headers and first data cache line while
   working on the current four buffers. */
108 if (PREDICT_TRUE (n_left_from >= 8))
110 vlib_prefetch_buffer_header (b[4], STORE);
111 vlib_prefetch_buffer_header (b[5], STORE);
112 vlib_prefetch_buffer_header (b[6], STORE);
113 vlib_prefetch_buffer_header (b[7], STORE);
114 CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
115 CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
116 CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
117 CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
120 ip0 = vlib_buffer_get_current (b[0]);
121 ip1 = vlib_buffer_get_current (b[1]);
122 ip2 = vlib_buffer_get_current (b[2]);
123 ip3 = vlib_buffer_get_current (b[3]);
/* in2out only: resolve the RX FIB index from the RX interface so the
   worker selection can be FIB-aware; out2in leaves rx_fib_index at 0. */
125 if (PREDICT_FALSE (is_in2out))
127 sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
128 sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
129 sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
130 sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
/* NOTE(review): the "rx_fib_indexN =" assignment prefixes for these four
   calls are on lines not visible in this chunk. */
133 ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
135 ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
137 ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
139 ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);
/* Record each buffer's destination worker. */
142 ti[0] = get_worker (ip0, rx_fib_index0);
143 ti[1] = get_worker (ip1, rx_fib_index1);
144 ti[2] = get_worker (ip2, rx_fib_index2);
145 ti[3] = get_worker (ip3, rx_fib_index3);
/* Tally same-worker vs handoff per buffer (the same_worker++/do_handoff++
   bodies are on lines not visible here). */
147 if (ti[0] == thread_index)
152 if (ti[1] == thread_index)
157 if (ti[2] == thread_index)
162 if (ti[3] == thread_index)
/* Single-loop: remaining 0-3 buffers, same classification as above. */
172 while (n_left_from > 0)
175 u32 rx_fib_index0 = 0;
178 ip0 = vlib_buffer_get_current (b[0]);
180 if (PREDICT_FALSE (is_in2out))
182 sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
184 ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
187 ti[0] = get_worker (ip0, rx_fib_index0);
189 if (ti[0] == thread_index)
/* Tracing pass: re-walk the frame (b/ti reset outside this view) and
   record the chosen worker for each traced buffer. */
199 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
205 for (i = 0; i < frame->n_vectors; i++)
207 if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
209 nat44_handoff_trace_t *t =
210 vlib_add_trace (vm, node, b[0], sizeof (*t));
211 t->next_worker_index = ti[0];
212 t->trace_index = vlib_buffer_get_trace_index (b[0]);
213 t->in2out = is_in2out;
/* Hand the whole frame off; final arg 1 = drop on congestion rather than
   block, so n_enq may be less than frame->n_vectors. */
223 n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
224 frame->n_vectors, 1);
226 if (n_enq < frame->n_vectors)
228 vlib_node_increment_counter (vm, node->node_index,
229 NAT44_HANDOFF_ERROR_CONGESTION_DROP,
230 frame->n_vectors - n_enq);
233 vlib_node_increment_counter (vm, node->node_index,
234 NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
235 vlib_node_increment_counter (vm, node->node_index,
236 NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
237 return frame->n_vectors;
/* in2out handoff node entry point: is_output = 0, is_in2out = 1
   (uses sm->fq_in2out_index and sm->worker_in2out_cb). */
242 VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm,
243 vlib_node_runtime_t * node,
244 vlib_frame_t * frame)
246 return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
/* Graph-node registration for the in2out handoff path.
   NOTE(review): the closing "};" and any .n_next_nodes/.next_nodes fields
   are outside this chunk. */
250 VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
251 .name = "nat44-in2out-worker-handoff",
252 .vector_size = sizeof (u32),
253 .format_trace = format_nat44_handoff_trace,
254 .type = VLIB_NODE_TYPE_INTERNAL,
255 .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
256 .error_strings = nat44_handoff_error_strings,
/* in2out output-feature handoff node entry point: is_output = 1,
   is_in2out = 1 (uses sm->fq_in2out_output_index).
   NOTE(review): the "node" parameter name on the wrapped line below is
   outside this chunk. */
264 VLIB_NODE_FN (snat_in2out_output_worker_handoff_node) (vlib_main_t * vm,
265 vlib_node_runtime_t *
267 vlib_frame_t * frame)
269 return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
/* Graph-node registration for the in2out output-feature handoff path.
   NOTE(review): closing "};" is outside this chunk. */
273 VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node) = {
274 .name = "nat44-in2out-output-worker-handoff",
275 .vector_size = sizeof (u32),
276 .format_trace = format_nat44_handoff_trace,
277 .type = VLIB_NODE_TYPE_INTERNAL,
278 .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
279 .error_strings = nat44_handoff_error_strings,
/* out2in handoff node entry point: is_output = 0, is_in2out = 0
   (uses sm->fq_out2in_index and sm->worker_out2in_cb). */
287 VLIB_NODE_FN (snat_out2in_worker_handoff_node) (vlib_main_t * vm,
288 vlib_node_runtime_t * node,
289 vlib_frame_t * frame)
291 return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
/* Graph-node registration for the out2in handoff path.
   NOTE(review): closing "};" is outside this chunk. */
295 VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node) = {
296 .name = "nat44-out2in-worker-handoff",
297 .vector_size = sizeof (u32),
298 .format_trace = format_nat44_handoff_trace,
299 .type = VLIB_NODE_TYPE_INTERNAL,
300 .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
301 .error_strings = nat44_handoff_error_strings,
310 * fd.io coding-style-patch-verification: ON
313 * eval: (c-set-style "gnu")