2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * @brief Classify for one armed NAT44 (in+out interface)
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/fib/ip4_fib.h>
24 #include <nat/nat44-ed/nat44_ed.h>
25 #include <nat/nat44-ed/nat44_ed_inlines.h>
/* X-macro list of per-node error/counter strings; expanded just below into
 * the error enum (and, in the full file, presumably also into a string
 * table -- not visible in this chunk). */
27 #define foreach_nat44_classify_error \
28 _(NEXT_IN2OUT, "next in2out") \
29 _(NEXT_OUT2IN, "next out2in") \
30 _(FRAG_CACHED, "fragment cached")
/* Error-counter ids for the classify nodes.
 * NOTE(review): the `typedef enum {` opener for this enum is not visible
 * in this chunk of the listing -- lines appear to be missing. */
34 #define _(sym,str) NAT44_CLASSIFY_ERROR_##sym,
35 foreach_nat44_classify_error
37 NAT44_CLASSIFY_N_ERROR,
38 } nat44_classify_error_t;
/* Dispatch (next-node) indices for the classify nodes.
 * NOTE(review): the `typedef enum {` opener is not visible in this chunk. */
42 NAT44_CLASSIFY_NEXT_IN2OUT,
43 NAT44_CLASSIFY_NEXT_OUT2IN,
44 NAT44_CLASSIFY_NEXT_DROP,
45 NAT44_CLASSIFY_N_NEXT,
46 } nat44_classify_next_t;
/* Per-packet trace record used by format_nat44_classify_trace below.
 * NOTE(review): the struct opener and field list are not visible in this
 * chunk; the formatter reads at least a `next_in2out` flag. */
52 } nat44_classify_trace_t;
/* Trace formatter shared by both classify nodes: prints either the
 * "fragment cached" case or which NAT44-ED node the packet was steered to.
 * NOTE(review): the function's return type, braces, and the branch
 * structure selecting between the two messages are not visible in this
 * chunk of the listing. */
55 format_nat44_classify_trace (u8 * s, va_list * args)
57 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59 nat44_classify_trace_t *t = va_arg (*args, nat44_classify_trace_t *);
/* cached-fragment case */
63 s = format (s, "nat44-classify: fragment cached");
/* otherwise report the chosen direction */
66 next = t->next_in2out ? "nat44-ed-in2out" : "nat44-ed-out2in";
67 s = format (s, "nat44-classify: next %s", next);
/* nat44_handoff_classify_node_fn_inline
 * Classifier for one-armed NAT44 (same interface configured as both inside
 * and outside): decide per packet whether it continues on the in2out or the
 * out2in path.  A packet is steered out2in when its destination address is
 * one of the NAT pool addresses, or matches a static mapping whose local
 * address differs from its external address; everything else defaults to
 * in2out.
 * NOTE(review): this listing has gaps (function return type, braces,
 * buffer-dequeue boilerplate, some declarations are missing) -- the
 * comments below describe only the visible logic. */
74 nat44_handoff_classify_node_fn_inline (vlib_main_t * vm,
75 vlib_node_runtime_t * node,
78 u32 n_left_from, *from, *to_next;
79 nat44_classify_next_t next_index;
80 snat_main_t *sm = &snat_main;
81 snat_static_mapping_t *m;
/* per-frame tallies, flushed into node error counters at the end */
82 u32 next_in2out = 0, next_out2in = 0;
84 from = vlib_frame_vector_args (frame);
85 n_left_from = frame->n_vectors;
86 next_index = node->cached_next_index;
/* standard vlib node skeleton: acquire a next frame, then drain packets
 * into it one at a time */
88 while (n_left_from > 0)
92 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
94 while (n_left_from > 0 && n_left_to_next > 0)
/* default verdict: in2out */
98 u32 next0 = NAT_NEXT_IN2OUT_CLASSIFY;
101 clib_bihash_kv_8_8_t kv0, value0;
103 /* speculatively enqueue b0 to the current next frame */
111 b0 = vlib_get_buffer (vm, bi0);
112 ip0 = vlib_buffer_get_current (b0);
/* destination is one of the NAT pool addresses => returning traffic,
 * steer it out2in */
114 vec_foreach (ap, sm->addresses)
116 if (ip0->dst_address.as_u32 == ap->addr.as_u32)
118 next0 = NAT_NEXT_OUT2IN_CLASSIFY;
/* otherwise consult static mappings, cheapest key (address only) first */
123 if (PREDICT_FALSE (pool_elts (sm->static_mappings)))
125 init_nat_k (&kv0, ip0->dst_address, 0, 0, 0);
126 /* try to classify the fragment based on IP header alone */
127 if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
130 m = pool_elt_at_index (sm->static_mappings, value0.value);
/* identity mappings (local == external) stay on the in2out path */
131 if (m->local_addr.as_u32 != m->external_addr.as_u32)
132 next0 = NAT_NEXT_OUT2IN_CLASSIFY;
/* retry with the full external key: dst address + L4 dst port + proto */
135 init_nat_k (&kv0, ip0->dst_address,
136 vnet_buffer (b0)->ip.reass.l4_dst_port, 0,
137 ip_proto_to_nat_proto (ip0->protocol));
138 if (!clib_bihash_search_8_8
139 (&sm->static_mapping_by_external, &kv0, &value0))
141 m = pool_elt_at_index (sm->static_mappings, value0.value);
142 if (m->local_addr.as_u32 != m->external_addr.as_u32)
143 next0 = NAT_NEXT_OUT2IN_CLASSIFY;
/* record the verdict in the packet trace when tracing is enabled */
148 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
149 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
151 nat44_classify_trace_t *t =
152 vlib_add_trace (vm, node, b0, sizeof (*t));
154 t->next_in2out = next0 == NAT_NEXT_IN2OUT_CLASSIFY ? 1 : 0;
/* tally per-direction counts (branchless: bool sums) */
157 next_in2out += next0 == NAT_NEXT_IN2OUT_CLASSIFY;
158 next_out2in += next0 == NAT_NEXT_OUT2IN_CLASSIFY;
160 /* verify speculative enqueue, maybe switch current next frame */
161 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
162 to_next, n_left_to_next,
166 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* publish the per-frame tallies as node error counters */
169 vlib_node_increment_counter (vm, node->node_index,
170 NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
171 vlib_node_increment_counter (vm, node->node_index,
172 NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
173 return frame->n_vectors;
/* nat44_ed_classify_node_fn_inline
 * Endpoint-dependent variant of the one-armed classifier.  For non-ICMP
 * packets it first tries an exact 6-tuple lookup in the flow hash: when a
 * session already exists, direction is decided by comparing the lookup key
 * against the session's stored in2out flow key, and the session index is
 * cached in the buffer so the fast-path node can reuse it.  Only when no
 * session matches does it fall back to the same address-pool /
 * static-mapping heuristics as the handoff classifier.
 * NOTE(review): this listing has gaps (braces, buffer-dequeue boilerplate,
 * some declarations and else-branches are missing) -- the comments below
 * describe only the visible logic. */
177 nat44_ed_classify_node_fn_inline (vlib_main_t * vm,
178 vlib_node_runtime_t * node,
179 vlib_frame_t * frame)
181 u32 n_left_from, *from, *to_next;
182 nat44_classify_next_t next_index;
183 snat_main_t *sm = &snat_main;
184 snat_static_mapping_t *m;
/* per-frame tallies, flushed into node error counters at the end */
185 u32 next_in2out = 0, next_out2in = 0;
187 from = vlib_frame_vector_args (frame);
188 n_left_from = frame->n_vectors;
189 next_index = node->cached_next_index;
/* standard vlib node skeleton: acquire a next frame, then drain it */
191 while (n_left_from > 0)
195 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
197 while (n_left_from > 0 && n_left_to_next > 0)
/* default verdict: in2out fast path */
201 u32 next0 = NAT_NEXT_IN2OUT_ED_FAST_PATH;
202 u32 sw_if_index0, rx_fib_index0;
205 clib_bihash_kv_8_8_t kv0, value0;
206 clib_bihash_kv_16_8_t ed_kv0, ed_value0;
208 /* speculatively enqueue b0 to the current next frame */
216 b0 = vlib_get_buffer (vm, bi0);
217 ip0 = vlib_buffer_get_current (b0);
/* stash the feature-arc continuation so downstream NAT nodes can hand
 * the packet back to the arc when they are done */
220 vnet_feature_next (&arc_next, b0);
221 vnet_buffer2 (b0)->nat.arc_next = arc_next;
/* non-ICMP packets can be matched against existing sessions by exact
 * 6-tuple flow-hash lookup */
223 if (ip0->protocol != IP_PROTOCOL_ICMP)
225 /* process leading fragment/whole packet (with L4 header) */
226 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
228 fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
230 init_ed_k (&ed_kv0, ip0->src_address,
231 vnet_buffer (b0)->ip.reass.l4_src_port,
233 vnet_buffer (b0)->ip.reass.l4_dst_port,
234 rx_fib_index0, ip0->protocol);
235 /* process whole packet */
236 if (!clib_bihash_search_16_8 (&sm->flow_hash, &ed_kv0,
/* session found -- it must belong to this worker thread */
239 ASSERT (vm->thread_index ==
240 ed_value_get_thread_index (&ed_value0));
241 snat_main_per_thread_data_t *tsm =
242 &sm->per_thread_data[vm->thread_index];
243 snat_session_t *s = pool_elt_at_index (
244 tsm->sessions, ed_value_get_session_index (&ed_value0));
245 clib_bihash_kv_16_8_t i2o_kv;
246 nat_6t_flow_to_ed_k (&i2o_kv, &s->i2o);
/* cache the session index so the fast-path node can skip its own
 * flow-hash lookup */
247 vnet_buffer2 (b0)->nat.cached_session_index =
248 ed_value_get_session_index (&ed_value0);
/* packet key equals the session's in2out key => inside->outside;
 * otherwise it matched the out2in flow of the session */
249 if (i2o_kv.key[0] == ed_kv0.key[0] &&
250 i2o_kv.key[1] == ed_kv0.key[1])
252 next0 = NAT_NEXT_IN2OUT_ED_FAST_PATH;
256 next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
261 /* session doesn't exist so continue in code */
/* no session: destination matching a NAT pool address => out2in */
264 vec_foreach (ap, sm->addresses)
266 if (ip0->dst_address.as_u32 == ap->addr.as_u32)
268 next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
/* then consult static mappings, cheapest key (address only) first */
273 if (PREDICT_FALSE (pool_elts (sm->static_mappings)))
275 init_nat_k (&kv0, ip0->dst_address, 0, 0, 0);
276 /* try to classify the fragment based on IP header alone */
277 if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
280 m = pool_elt_at_index (sm->static_mappings, value0.value);
/* identity mappings (local == external) stay on the in2out path */
281 if (m->local_addr.as_u32 != m->external_addr.as_u32)
282 next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
/* retry with the full external key: dst address + L4 dst port + proto */
285 init_nat_k (&kv0, ip0->dst_address,
286 vnet_buffer (b0)->ip.reass.l4_dst_port, 0,
287 ip_proto_to_nat_proto (ip0->protocol));
288 if (!clib_bihash_search_8_8
289 (&sm->static_mapping_by_external, &kv0, &value0))
291 m = pool_elt_at_index (sm->static_mappings, value0.value);
292 if (m->local_addr.as_u32 != m->external_addr.as_u32)
293 next0 = NAT_NEXT_OUT2IN_ED_FAST_PATH;
/* record the verdict in the packet trace when tracing is enabled */
298 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
299 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
301 nat44_classify_trace_t *t =
302 vlib_add_trace (vm, node, b0, sizeof (*t));
304 t->next_in2out = next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH ? 1 : 0;
/* tally per-direction counts (branchless: bool sums) */
307 next_in2out += next0 == NAT_NEXT_IN2OUT_ED_FAST_PATH;
308 next_out2in += next0 == NAT_NEXT_OUT2IN_ED_FAST_PATH;
310 /* verify speculative enqueue, maybe switch current next frame */
311 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
312 to_next, n_left_to_next,
316 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* publish the per-frame tallies as node error counters */
319 vlib_node_increment_counter (vm, node->node_index,
320 NAT44_CLASSIFY_ERROR_NEXT_IN2OUT, next_in2out);
321 vlib_node_increment_counter (vm, node->node_index,
322 NAT44_CLASSIFY_ERROR_NEXT_OUT2IN, next_out2in);
323 return frame->n_vectors;
/* vlib entry point: thin wrapper delegating to the ED classify inline.
 * NOTE(review): the function braces are not visible in this chunk. */
326 VLIB_NODE_FN (nat44_ed_classify_node) (vlib_main_t * vm,
327 vlib_node_runtime_t * node,
328 vlib_frame_t * frame)
330 return nat44_ed_classify_node_fn_inline (vm, node, frame);
/* Graph-node registration.  sibling_of "nat-default" makes this node share
 * that node's next-index space, so NAT_NEXT_* values are valid here.
 * NOTE(review): the closing `};` of this initializer is not visible in
 * this chunk. */
333 VLIB_REGISTER_NODE (nat44_ed_classify_node) = {
334 .name = "nat44-ed-classify",
335 .vector_size = sizeof (u32),
336 .sibling_of = "nat-default",
337 .format_trace = format_nat44_classify_trace,
338 .type = VLIB_NODE_TYPE_INTERNAL,
/* vlib entry point: thin wrapper delegating to the handoff classify inline.
 * NOTE(review): the function braces are not visible in this chunk. */
341 VLIB_NODE_FN (nat44_handoff_classify_node) (vlib_main_t * vm,
342 vlib_node_runtime_t * node,
343 vlib_frame_t * frame)
345 return nat44_handoff_classify_node_fn_inline (vm, node, frame);
/* Graph-node registration for the handoff classifier; shares next-index
 * space with "nat-default" via sibling_of.
 * NOTE(review): the closing `};` of this initializer is not visible in
 * this chunk. */
348 VLIB_REGISTER_NODE (nat44_handoff_classify_node) = {
349 .name = "nat44-handoff-classify",
350 .vector_size = sizeof (u32),
351 .sibling_of = "nat-default",
352 .format_trace = format_nat44_classify_trace,
353 .type = VLIB_NODE_TYPE_INTERNAL,
357 * fd.io coding-style-patch-verification: ON
360 * eval: (c-set-style "gnu")