/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT44 worker handoff
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/handoff.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>

#include <nat/nat44-ed/nat44_ed.h>
#include <nat/nat44-ed/nat44_ed_inlines.h>

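/* Per-packet trace record captured by the worker handoff nodes. */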
typedef struct
{
  u32 next_worker_index;
  u32 trace_index;
  u8 in2out;
  u8 output;
} nat44_handoff_trace_t;

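/* Handoff node counters: congestion drops, packets already owned by the
   current worker, and packets handed off to another worker. */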
#define foreach_nat44_handoff_error                                           \
  _ (CONGESTION_DROP, "congestion drop")                                      \
  _ (SAME_WORKER, "same worker")                                              \
  _ (DO_HANDOFF, "do handoff")

typedef enum
{
#define _(sym, str) NAT44_HANDOFF_ERROR_##sym,
  foreach_nat44_handoff_error
#undef _
    NAT44_HANDOFF_N_ERROR,
} nat44_handoff_error_t;

static char *nat44_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_nat44_handoff_error
#undef _
};

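/* Format a nat44_handoff_trace_t record for packet-trace output. */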
static u8 *
format_nat44_handoff_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
  char *tag, *output;

  tag = t->in2out ? "IN2OUT" : "OUT2IN";
  output = t->output ? "OUTPUT-FEATURE" : "";
  s =
    format (s, "NAT44_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
            tag, output, t->next_worker_index, t->trace_index);

  return s;
}

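/*
 * Shared handoff logic for the NAT44-ED worker handoff nodes. For every
 * packet in the frame, compute the worker thread that should process it
 * (direction-specific helper), record the next node on the feature arc,
 * and enqueue the packet to that worker's frame queue.
 */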
static inline uword
nat44_worker_handoff_fn_inline (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                vlib_frame_t * frame, u8 is_output,
                                u8 is_in2out)
{
  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;

  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  snat_main_t *sm = &snat_main;

  u32 fq_index, thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left_from);

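  /* Pick the handoff frame queue for this direction; the in2out
     output-feature path uses its own queue. */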
  if (is_in2out)
    {
      fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
    }
  else
    {
      fq_index = sm->fq_out2in_index;
    }

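  /* Quad loop: compute the target thread index for four packets at a time,
     prefetching the next four buffers to hide memory latency. */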
  while (n_left_from >= 4)
    {
      u32 arc_next0, arc_next1, arc_next2, arc_next3;
      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
      u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
      u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
      ip4_header_t *ip0, *ip1, *ip2, *ip3;

      if (PREDICT_TRUE (n_left_from >= 8))
        {
          vlib_prefetch_buffer_header (b[4], LOAD);
          vlib_prefetch_buffer_header (b[5], LOAD);
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);
          clib_prefetch_load (&b[4]->data);
          clib_prefetch_load (&b[5]->data);
          clib_prefetch_load (&b[6]->data);
          clib_prefetch_load (&b[7]->data);
        }

      if (is_output)
        {
          iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
          iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
          iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
          iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
        }

      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
                              iph_offset0);
      ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) +
                              iph_offset1);
      ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) +
                              iph_offset2);
      ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) +
                              iph_offset3);

      vnet_feature_next (&arc_next0, b[0]);
      vnet_feature_next (&arc_next1, b[1]);
      vnet_feature_next (&arc_next2, b[2]);
      vnet_feature_next (&arc_next3, b[3]);

      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
      vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
      vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
      vnet_buffer2 (b[3])->nat.arc_next = arc_next3;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
      rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
      rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
      rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);

      if (is_in2out)
        {
          ti[0] = nat44_ed_get_in2out_worker_index (b[0], ip0, rx_fib_index0,
                                                    is_output);
          ti[1] = nat44_ed_get_in2out_worker_index (b[1], ip1, rx_fib_index1,
                                                    is_output);
          ti[2] = nat44_ed_get_in2out_worker_index (b[2], ip2, rx_fib_index2,
                                                    is_output);
          ti[3] = nat44_ed_get_in2out_worker_index (b[3], ip3, rx_fib_index3,
                                                    is_output);
        }
      else
        {
          ti[0] = nat44_ed_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
                                                    is_output);
          ti[1] = nat44_ed_get_out2in_worker_index (b[1], ip1, rx_fib_index1,
                                                    is_output);
          ti[2] = nat44_ed_get_out2in_worker_index (b[2], ip2, rx_fib_index2,
                                                    is_output);
          ti[3] = nat44_ed_get_out2in_worker_index (b[3], ip3, rx_fib_index3,
                                                    is_output);
        }

      if (ti[0] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[1] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[2] == thread_index)
        same_worker++;
      else
        do_handoff++;

      if (ti[3] == thread_index)
        same_worker++;
      else
        do_handoff++;

      b += 4;
      ti += 4;
      n_left_from -= 4;
    }

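  /* Single loop: process any remaining packets one at a time. */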
  while (n_left_from > 0)
    {
      u32 arc_next0;
      u32 sw_if_index0;
      u32 rx_fib_index0;
      u32 iph_offset0 = 0;
      ip4_header_t *ip0;

      if (is_output)
        iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;

      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
                              iph_offset0);

      vnet_feature_next (&arc_next0, b[0]);
      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);

      if (is_in2out)
        {
          ti[0] = nat44_ed_get_in2out_worker_index (b[0], ip0, rx_fib_index0,
                                                    is_output);
        }
      else
        {
          ti[0] = nat44_ed_get_out2in_worker_index (b[0], ip0, rx_fib_index0,
                                                    is_output);
        }

      if (ti[0] == thread_index)
        same_worker++;
      else
        do_handoff++;

      b += 1;
      ti += 1;
      n_left_from -= 1;
    }

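  /* Record a trace entry for each traced buffer; the loop stops at the
     first buffer that is not traced. */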
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      u32 i;
      b = bufs;
      ti = thread_indices;

      for (i = 0; i < frame->n_vectors; i++)
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              nat44_handoff_trace_t *t =
                vlib_add_trace (vm, node, b[0], sizeof (*t));
              t->next_worker_index = ti[0];
              t->trace_index = vlib_buffer_get_trace_index (b[0]);
              t->in2out = is_in2out;
              t->output = is_output;

              b += 1;
              ti += 1;
            }
          else
            break;
        }
    }

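  /* Enqueue each packet to its target worker's frame queue; anything that
     does not fit is dropped and counted as a congestion drop. */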
  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
                                         thread_indices, frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
    {
      vlib_node_increment_counter (vm, node->node_index,
                                   NAT44_HANDOFF_ERROR_CONGESTION_DROP,
                                   frame->n_vectors - n_enq);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
  return frame->n_vectors;
}

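/* Node entry points: each of the three handoff nodes calls the shared
   inline function with constant is_output / is_in2out flags. */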
VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm,
                                                vlib_node_runtime_t * node,
                                                vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
  .name = "nat44-in2out-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (snat_in2out_output_worker_handoff_node) (vlib_main_t * vm,
                                                       vlib_node_runtime_t *
                                                       node,
                                                       vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node) = {
  .name = "nat44-in2out-output-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (snat_out2in_worker_handoff_node) (vlib_main_t * vm,
                                                vlib_node_runtime_t * node,
                                                vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node) = {
  .name = "nat44-out2in-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */