nat: Final NAT44 EI/ED split patch
[vpp.git] / src / plugins / nat / nat44-ed / nat44_ed_classify.c
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief Classify for one armed NAT44 (in+out interface)
18  */
19
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/fib/ip4_fib.h>
23
24 #include <nat/nat44-ed/nat44_ed.h>
25 #include <nat/nat44-ed/nat44_ed_inlines.h>
26
/* X-macro list of per-node counters: (symbol, display string). */
#define foreach_nat44_classify_error                      \
_(NEXT_IN2OUT, "next in2out")                             \
_(NEXT_OUT2IN, "next out2in")                             \
_(FRAG_CACHED, "fragment cached")

/* Counter indices generated from the list above; N_ERROR is the count. */
typedef enum
{
#define _(sym,str) NAT44_CLASSIFY_ERROR_##sym,
  foreach_nat44_classify_error
#undef _
    NAT44_CLASSIFY_N_ERROR,
} nat44_classify_error_t;
39
/* Next-node indices for the classify nodes; N_NEXT is the count. */
typedef enum
{
  NAT44_CLASSIFY_NEXT_IN2OUT,
  NAT44_CLASSIFY_NEXT_OUT2IN,
  NAT44_CLASSIFY_NEXT_DROP,
  NAT44_CLASSIFY_N_NEXT,
} nat44_classify_next_t;
47
/* Per-packet trace record for the classify nodes. */
typedef struct
{
  u8 next_in2out;		/* 1 = sent to in2out, 0 = sent to out2in */
  u8 cached;			/* 1 = fragment was cached, not forwarded */
} nat44_classify_trace_t;
53
54 static u8 *
55 format_nat44_classify_trace (u8 * s, va_list * args)
56 {
57   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59   nat44_classify_trace_t *t = va_arg (*args, nat44_classify_trace_t *);
60   char *next;
61
62   if (t->cached)
63     s = format (s, "nat44-classify: fragment cached");
64   else
65     {
66       next = t->next_in2out ? "nat44-ed-in2out" : "nat44-ed-out2in";
67       s = format (s, "nat44-classify: next %s", next);
68     }
69
70   return s;
71 }
72
/**
 * @brief Classify packets as in2out or out2in for NAT44 handoff.
 *
 * Default direction is in2out.  A packet is redirected to out2in when its
 * destination address matches a NAT pool address, or matches the external
 * address of a static mapping whose local and external addresses differ
 * (identity mappings stay in2out).  The static-mapping lookup is done
 * first by address alone — so fragments without an L4 header can still be
 * classified — then by address + L4 destination port + protocol.
 *
 * @param vm    vlib main
 * @param node  node runtime
 * @param frame frame of buffer indices to classify
 * @return number of vectors processed
 */
static inline uword
nat44_handoff_classify_node_fn_inline (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  nat44_classify_next_t next_index;
  snat_main_t *sm = &snat_main;
  snat_static_mapping_t *m;
  u32 next_in2out = 0, next_out2in = 0;	/* per-frame direction counters */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0 = NAT_NEXT_IN2OUT_CLASSIFY;	/* default direction */
          ip4_header_t *ip0;
          snat_address_t *ap;
          clib_bihash_kv_8_8_t kv0, value0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);

          /* destination is a NAT pool address => out2in */
          vec_foreach (ap, sm->addresses)
            {
              if (ip0->dst_address.as_u32 == ap->addr.as_u32)
                {
                  next0 = NAT_NEXT_OUT2IN_CLASSIFY;
                  goto enqueue0;
                }
            }

          if (PREDICT_FALSE (pool_elts (sm->static_mappings)))
            {
              init_nat_k (&kv0, ip0->dst_address, 0, 0, 0);
              /* try to classify the fragment based on IP header alone */
              if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
                                           &kv0, &value0))
                {
                  m = pool_elt_at_index (sm->static_mappings, value0.value);
                  /* identity mapping (local == external) keeps in2out */
                  if (m->local_addr.as_u32 != m->external_addr.as_u32)
                    next0 = NAT_NEXT_OUT2IN_CLASSIFY;
                  goto enqueue0;
                }
              /* retry with L4 destination port and NAT protocol */
              init_nat_k (&kv0, ip0->dst_address,
                          vnet_buffer (b0)->ip.reass.l4_dst_port, 0,
                          ip_proto_to_nat_proto (ip0->protocol));
              if (!clib_bihash_search_8_8
                  (&sm->static_mapping_by_external, &kv0, &value0))
                {
                  m = pool_elt_at_index (sm->static_mappings, value0.value);
                  if (m->local_addr.as_u32 != m->external_addr.as_u32)
                    next0 = NAT_NEXT_OUT2IN_CLASSIFY;
                }
            }

        enqueue0:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              nat44_classify_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->cached = 0;
              t->next_in2out = next0 == NAT_NEXT_IN2OUT_CLASSIFY ? 1 : 0;
            }

          next_in2out += next0 == NAT_NEXT_IN2OUT_CLASSIFY;
          next_out2in += next0 == NAT_NEXT_OUT2IN_CLASSIFY;

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
  return frame->n_vectors;
}
175
/**
 * @brief Classify packets for NAT44 endpoint-dependent processing.
 *
 * Saves the feature-arc next index into vnet_buffer2->nat.arc_next, then
 * picks between the in2out and out2in ED fast-path nodes.  For non-ICMP
 * packets an ED 6-tuple flow-hash lookup is tried first; on a hit the
 * session index is cached on the buffer and the direction is chosen by
 * comparing the packet's key against the session's in2out flow key.
 * Otherwise (or for ICMP) the fallback rules apply: out2in when the
 * destination matches a NAT pool address or the external address of a
 * non-identity static mapping (looked up first by address alone, then by
 * address + port + protocol); in2out by default.
 *
 * @param vm    vlib main
 * @param node  node runtime
 * @param frame frame of buffer indices to classify
 * @return number of vectors processed
 */
static inline uword
nat44_ed_classify_node_fn_inline (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next;
  nat44_classify_next_t next_index;
  snat_main_t *sm = &snat_main;
  snat_static_mapping_t *m;
  u32 next_in2out = 0, next_out2in = 0;	/* per-frame direction counters */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 next0 = NAT_NEXT_IN2OUT_ED_FAST_PATH;	/* default direction */
          u32 sw_if_index0, rx_fib_index0;
          ip4_header_t *ip0;
          snat_address_t *ap;
          clib_bihash_kv_8_8_t kv0, value0;
          clib_bihash_kv_16_8_t ed_kv0, ed_value0;

          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          ip0 = vlib_buffer_get_current (b0);

          /* remember where the feature arc would send this buffer next */
          u32 arc_next;
          vnet_feature_next (&arc_next, b0);
          vnet_buffer2 (b0)->nat.arc_next = arc_next;

          /* ICMP skips the flow-hash lookup and falls through to the
             address/static-mapping rules below — presumably because its
             flow key is built differently; confirm against the ED nodes */
          if (ip0->protocol != IP_PROTOCOL_ICMP)
            {
              /* process leading fragment/whole packet (with L4 header) */
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
              rx_fib_index0 =
                fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
                                                     sw_if_index0);
              init_ed_k (&ed_kv0, ip0->src_address,
                         vnet_buffer (b0)->ip.reass.l4_src_port,
                         ip0->dst_address,
                         vnet_buffer (b0)->ip.reass.l4_dst_port,
                         rx_fib_index0, ip0->protocol);
              /* process whole packet */
              if (!clib_bihash_search_16_8 (&sm->flow_hash, &ed_kv0,
                                            &ed_value0))
                {
                  /* session hit: the session must already live on this
                     thread */
                  ASSERT (vm->thread_index ==
                          ed_value_get_thread_index (&ed_value0));
                  snat_main_per_thread_data_t *tsm =
                    &sm->per_thread_data[vm->thread_index];
                  snat_session_t *s = pool_elt_at_index (
                    tsm->sessions, ed_value_get_session_index (&ed_value0));
                  clib_bihash_kv_16_8_t i2o_kv;
                  nat_6t_flow_to_ed_k (&i2o_kv, &s->i2o);
                  /* cache the session so the fast path can skip the lookup */
                  vnet_buffer2 (b0)->nat.cached_session_index =
                    ed_value_get_session_index (&ed_value0);
                  /* packet key == session's in2out key => in2out direction */
                  if (i2o_kv.key[0] == ed_kv0.key[0] &&
                      i2o_kv.key[1] == ed_kv0.key[1])
                    {
                      next0 = NAT_NEXT_IN2OUT_ED_FAST_PATH;
                    }
                  else
                    {
                      next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                    }

                  goto enqueue0;
                }
              /* session doesn't exist so continue in code */
            }

          /* destination is a NAT pool address => out2in */
          vec_foreach (ap, sm->addresses)
            {
              if (ip0->dst_address.as_u32 == ap->addr.as_u32)
                {
                  next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                  goto enqueue0;
                }
            }

          if (PREDICT_FALSE (pool_elts (sm->static_mappings)))
            {
              init_nat_k (&kv0, ip0->dst_address, 0, 0, 0);
              /* try to classify the fragment based on IP header alone */
              if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
                                           &kv0, &value0))
                {
                  m = pool_elt_at_index (sm->static_mappings, value0.value);
                  /* identity mapping (local == external) keeps in2out */
                  if (m->local_addr.as_u32 != m->external_addr.as_u32)
                    next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                  goto enqueue0;
                }
              /* retry with L4 destination port and NAT protocol */
              init_nat_k (&kv0, ip0->dst_address,
                          vnet_buffer (b0)->ip.reass.l4_dst_port, 0,
                          ip_proto_to_nat_proto (ip0->protocol));
              if (!clib_bihash_search_8_8
                  (&sm->static_mapping_by_external, &kv0, &value0))
                {
                  m = pool_elt_at_index (sm->static_mappings, value0.value);
                  if (m->local_addr.as_u32 != m->external_addr.as_u32)
                    next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
                }
            }

        enqueue0:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
                             && (b0->flags & VLIB_BUFFER_IS_TRACED)))
            {
              nat44_classify_trace_t *t =
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->cached = 0;
              t->next_in2out = next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH ? 1 : 0;
            }

          next_in2out += next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH;
          next_out2in += next0 == NAT_NEXT_OUT2IN_ED_FAST_PATH;

          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
  vlib_node_increment_counter (vm, node->node_index,
                               NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
  return frame->n_vectors;
}
325
/* Node entry point: delegate to the shared ED classify inline. */
VLIB_NODE_FN (nat44_ed_classify_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * frame)
{
  return nat44_ed_classify_node_fn_inline (vm, node, frame);
}
332
/* Graph-node registration; shares next-node indices with "nat-default". */
VLIB_REGISTER_NODE (nat44_ed_classify_node) = {
  .name = "nat44-ed-classify",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};
340
/* Node entry point: delegate to the shared handoff classify inline. */
VLIB_NODE_FN (nat44_handoff_classify_node) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
{
  return nat44_handoff_classify_node_fn_inline (vm, node, frame);
}
347
/* Graph-node registration; shares next-node indices with "nat-default". */
VLIB_REGISTER_NODE (nat44_handoff_classify_node) = {
  .name = "nat44-handoff-classify",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_classify_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
};
355
356 /*
357  * fd.io coding-style-patch-verification: ON
358  *
359  * Local Variables:
360  * eval: (c-set-style "gnu")
361  * End:
362  */