2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
17 * @brief NAT44 hairpinning
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/fib/ip4_fib.h>
24 #include <nat/nat_inlines.h>
25 #include <nat/nat_reass.h>
/* NOTE(review): this chunk is a damaged extraction — each line carries a fused
   original line number and the numbering jumps (28 missing before 29, 35-37
   missing, etc.), so the `typedef enum {` openers and several closers are not
   visible here.  Code kept byte-identical; comments only. */
/* Next-node indices for the nat44-hairpin-src graph nodes (drop, in2out,
   in2out worker-handoff, interface-output — see node registrations below). */
29 SNAT_HAIRPIN_SRC_NEXT_DROP,
30 SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT,
31 SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH,
32 SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT,
33 SNAT_HAIRPIN_SRC_N_NEXT,
34 } snat_hairpin_src_next_t;
/* Next-node indices for the hairpinning/hairpin-dst nodes (lookup, drop).
   The enum opener and the trailing "} nat_hairpin_next_t;" are outside this
   view — presumably between the missing lines. */
38 NAT_HAIRPIN_NEXT_LOOKUP,
39 NAT_HAIRPIN_NEXT_DROP,
/* X-macro defining the node error counters; only PROCESSED is declared. */
43 #define foreach_nat44_hairpin_error \
44 _(PROCESSED, "NAT44 hairpinning packets processed")
/* Expand the error list into enum values NAT44_HAIRPIN_ERROR_*; the enum
   opener and the closing #undef are among the missing lines. */
48 #define _(sym,str) NAT44_HAIRPIN_ERROR_##sym,
49 foreach_nat44_hairpin_error
51 NAT44_HAIRPIN_N_ERROR,
52 } nat44_hairpin_error_t;
/* Parallel table of human-readable error strings, indexed like the enum. */
54 static char *nat44_hairpin_error_strings[] = {
55 #define _(sym,string) string,
56 foreach_nat44_hairpin_error
/* ip4-local feature arc registration, defined elsewhere; used by
   nat44_hairpinning_fn_inline below to fetch per-buffer config data. */
60 extern vnet_feature_arc_registration_t vnet_feat_arc_ip4_local;
/* Decide whether a packet destined to dst_addr needs hairpinning: true when
   the destination is one of the NAT pool addresses or matches a static
   mapping's external address.  NOTE(review): interior lines are missing from
   this extraction (original numbering jumps 63→66, 82→88) — the function
   braces and return statements are not visible; code kept byte-identical. */
62 static_always_inline int
63 is_hairpinning (snat_main_t * sm, ip4_address_t * dst_addr)
66 clib_bihash_kv_8_8_t kv, value;
67 snat_session_key_t m_key;
/* First check: is dst_addr one of the configured NAT pool addresses? */
70 vec_foreach (ap, sm->addresses)
72 if (ap->addr.as_u32 == dst_addr->as_u32)
/* Second check: look dst_addr up among static mappings by external address.
   Port/protocol/fib fields of m_key are presumably zeroed in the missing
   lines — TODO confirm against upstream. */
77 m_key.addr.as_u32 = dst_addr->as_u32;
81 kv.key = m_key.as_u64;
82 if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value))
88 #ifndef CLIB_MARCH_VARIANT
/* TCP/UDP hairpinning: rewrite the destination address/port of a packet
   whose destination is behind the same NAT, using either a static mapping
   or an active out2in session, then fix up the IP (and TCP/UDP) checksums
   incrementally.  NOTE(review): extraction gaps — the embedded original line
   numbers jump (94→96, 133→139, 173→177, …), so braces, else-branches and
   the return statement are missing from this view; code kept byte-identical. */
90 snat_hairpinning (snat_main_t * sm,
94 tcp_header_t * tcp0, u32 proto0, int is_ed)
96 snat_session_key_t key0, sm0;
98 clib_bihash_kv_8_8_t kv0, value0;
100 u32 new_dst_addr0 = 0, old_dst_addr0, ti = 0, si;
101 u16 new_dst_port0, old_dst_port0;
/* Build the out2in lookup key from the packet's destination. */
104 key0.addr = ip0->dst_address;
105 key0.port = udp0->dst_port;
106 key0.protocol = proto0;
107 key0.fib_index = sm->outside_fib_index;
108 kv0.key = key0.as_u64;
110 /* Check if destination is static mappings */
111 if (!snat_static_mapping_match (sm, key0, &sm0, 1, 0, 0, 0, 0, 0))
113 new_dst_addr0 = sm0.addr.as_u32;
114 new_dst_port0 = sm0.port;
115 vnet_buffer (b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
117 /* or active session */
/* Pick the owning worker thread by destination port range; single-worker
   setups use index sm->num_workers. */
120 if (sm->num_workers > 1)
122 (clib_net_to_host_u16 (udp0->dst_port) -
123 1024) / sm->port_per_thread;
125 ti = sm->num_workers;
/* Endpoint-dependent path: 16-byte 5-tuple key into out2in_ed; the
   non-ED path below uses the 8-byte out2in table instead.  The if(is_ed)
   split is presumably among the missing lines — TODO confirm. */
129 clib_bihash_kv_16_8_t ed_kv, ed_value;
130 make_ed_kv (&ed_kv, &ip0->dst_address, &ip0->src_address,
131 ip0->protocol, sm->outside_fib_index, udp0->dst_port,
133 rv = clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed,
139 rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
/* Session found: rewrite destination to the session's inside tuple. */
146 s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
147 new_dst_addr0 = s0->in2out.addr.as_u32;
148 new_dst_port0 = s0->in2out.port;
149 vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
152 /* Destination is behind the same NAT, use internal address and port */
/* Incremental IP header checksum update for the dst-address rewrite. */
155 old_dst_addr0 = ip0->dst_address.as_u32;
156 ip0->dst_address.as_u32 = new_dst_addr0;
157 sum0 = ip0->checksum;
158 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
159 ip4_header_t, dst_address);
160 ip0->checksum = ip_csum_fold (sum0);
162 old_dst_port0 = tcp0->dst;
163 if (PREDICT_TRUE (new_dst_port0 != old_dst_port0))
/* Port changed: rewrite it and patch the L4 checksum (address + port). */
165 if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP))
167 tcp0->dst = new_dst_port0;
168 sum0 = tcp0->checksum;
169 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
170 ip4_header_t, dst_address);
171 sum0 = ip_csum_update (sum0, old_dst_port0, new_dst_port0,
172 ip4_header_t /* cheat */ , length);
173 tcp0->checksum = ip_csum_fold (sum0);
/* UDP branch: only the port is rewritten here (UDP checksum handling is
   among the missing lines — can't tell from this view). */
177 udp0->dst_port = new_dst_port0;
/* Port unchanged: TCP checksum still needs the address delta folded in. */
183 if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP))
185 sum0 = tcp0->checksum;
186 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
187 ip4_header_t, dst_address);
188 tcp0->checksum = ip_csum_fold (sum0);
197 #ifndef CLIB_MARCH_VARIANT
/* ICMP hairpinning.  Two cases are visible:
   (a) ICMP error messages: translate the embedded (inner) IP header's source
       address/port back to the inside tuple of the matching session, fixing
       the ICMP checksum, inner IP checksum, and inner L4 port;
   (b) ICMP echo: translate the echo identifier and the outer destination
       address via session or static mapping.
   NOTE(review): extraction gaps — original line numbers jump (199→201,
   281→285, 324→332, …); braces, declarations (e.g. ti, si, s0, sum0) and
   returns are missing from this view; code kept byte-identical. */
199 snat_icmp_hairpinning (snat_main_t * sm,
201 ip4_header_t * ip0, icmp46_header_t * icmp0, int is_ed)
203 snat_session_key_t key0;
204 clib_bihash_kv_8_8_t kv0, value0;
205 u32 old_dst_addr0, new_dst_addr0;
206 u32 old_addr0, new_addr0;
207 u16 old_port0, new_port0;
208 u16 old_checksum0, new_checksum0;
212 snat_static_mapping_t *m0;
/* --- Case (a): ICMP error message carrying an inner IP packet --- */
214 if (icmp_is_error_message (icmp0))
216 ip4_header_t *inner_ip0 = 0;
217 tcp_udp_header_t *l4_header = 0;
/* Inner IP header sits after the 8-byte ICMP header (icmp + echo part). */
219 inner_ip0 = (ip4_header_t *) ((icmp_echo_header_t *) (icmp0 + 1) + 1);
220 l4_header = ip4_next_header (inner_ip0);
221 u32 protocol = ip_proto_to_snat_proto (inner_ip0->protocol);
/* Only TCP/UDP inner payloads are handled. */
223 if (protocol != SNAT_PROTOCOL_TCP && protocol != SNAT_PROTOCOL_UDP)
/* ED path: 5-tuple session lookup in out2in_ed. */
228 clib_bihash_kv_16_8_t ed_kv, ed_value;
229 make_ed_kv (&ed_kv, &ip0->dst_address, &ip0->src_address,
230 inner_ip0->protocol, sm->outside_fib_index,
231 l4_header->src_port, l4_header->dst_port);
232 if (clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed,
/* Non-ED path: 8-byte out2in lookup keyed by the inner source port. */
239 key0.addr = ip0->dst_address;
240 key0.port = l4_header->src_port;
241 key0.protocol = protocol;
242 key0.fib_index = sm->outside_fib_index;
243 kv0.key = key0.as_u64;
244 if (clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
249 s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
250 new_dst_addr0 = s0->in2out.addr.as_u32;
251 vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
253 /* update inner source IP address */
254 old_addr0 = inner_ip0->src_address.as_u32;
255 inner_ip0->src_address.as_u32 = new_dst_addr0;
256 new_addr0 = inner_ip0->src_address.as_u32;
257 sum0 = icmp0->checksum;
258 sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
260 icmp0->checksum = ip_csum_fold (sum0);
262 /* update inner IP header checksum */
263 old_checksum0 = inner_ip0->checksum;
264 sum0 = inner_ip0->checksum;
265 sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
267 inner_ip0->checksum = ip_csum_fold (sum0);
/* The inner IP checksum itself is covered by the ICMP checksum, so fold
   the old→new checksum delta into the ICMP checksum too. */
268 new_checksum0 = inner_ip0->checksum;
269 sum0 = icmp0->checksum;
270 sum0 = ip_csum_update (sum0, old_checksum0, new_checksum0, ip4_header_t,
272 icmp0->checksum = ip_csum_fold (sum0);
274 /* update inner source port */
275 old_port0 = l4_header->src_port;
276 l4_header->src_port = s0->in2out.port;
277 new_port0 = l4_header->src_port;
278 sum0 = icmp0->checksum;
279 sum0 = ip_csum_update (sum0, old_port0, new_port0, tcp_udp_header_t,
281 icmp0->checksum = ip_csum_fold (sum0);
/* --- Case (b): non-error ICMP (echo) --- look up the outer destination
   among static mappings first (port/protocol fields presumably set in the
   missing lines 286-287 — TODO confirm). */
285 key0.addr = ip0->dst_address;
288 key0.fib_index = sm->outside_fib_index;
289 kv0.key = key0.as_u64;
291 if (clib_bihash_search_8_8
292 (&sm->static_mapping_by_external, &kv0, &value0))
/* No static mapping: try an active ICMP session keyed by echo identifier. */
296 icmp_echo_header_t *echo0 = (icmp_echo_header_t *) (icmp0 + 1);
297 u16 icmp_id0 = echo0->identifier;
298 key0.addr = ip0->dst_address;
299 key0.port = icmp_id0;
300 key0.protocol = SNAT_PROTOCOL_ICMP;
301 key0.fib_index = sm->outside_fib_index;
302 kv0.key = key0.as_u64;
/* Worker selection by ICMP id, mirroring the port-based scheme above. */
303 if (sm->num_workers > 1)
305 (clib_net_to_host_u16 (icmp_id0) -
306 1024) / sm->port_per_thread;
308 ti = sm->num_workers;
310 clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
316 pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
317 new_dst_addr0 = s0->in2out.addr.as_u32;
318 vnet_buffer (b0)->sw_if_index[VLIB_TX] =
319 s0->in2out.fib_index;
/* Rewrite the echo identifier and patch the ICMP checksum. */
320 echo0->identifier = s0->in2out.port;
321 sum0 = icmp0->checksum;
322 sum0 = ip_csum_update (sum0, icmp_id0, s0->in2out.port,
323 icmp_echo_header_t, identifier);
324 icmp0->checksum = ip_csum_fold (sum0);
/* Static mapping hit: destination becomes the mapping's local address. */
332 m0 = pool_elt_at_index (sm->static_mappings, value0.value);
334 new_dst_addr0 = m0->local_addr.as_u32;
335 if (vnet_buffer (b0)->sw_if_index[VLIB_TX] == ~0)
336 vnet_buffer (b0)->sw_if_index[VLIB_TX] = m0->fib_index;
339 /* Destination is behind the same NAT, use internal address and port */
342 old_dst_addr0 = ip0->dst_address.as_u32;
343 ip0->dst_address.as_u32 = new_dst_addr0;
344 sum0 = ip0->checksum;
345 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
346 ip4_header_t, dst_address);
347 ip0->checksum = ip_csum_fold (sum0);
353 #ifndef CLIB_MARCH_VARIANT
/* Hairpinning for non-TCP/UDP/ICMP protocols (non-ED path): address-only
   rewrite via static mapping by external address; no ports involved.
   NOTE(review): extraction gaps — `sum` declaration and the lines between
   364 and 367 (early return on lookup miss, presumably) are missing from
   this view; code kept byte-identical. */
355 nat_hairpinning_sm_unknown_proto (snat_main_t * sm,
356 vlib_buffer_t * b, ip4_header_t * ip)
358 clib_bihash_kv_8_8_t kv, value;
359 snat_static_mapping_t *m;
360 u32 old_addr, new_addr;
/* Address-only static mapping key (port/protocol/fib all zero). */
363 make_sm_kv (&kv, &ip->dst_address, 0, 0, 0);
364 if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value))
367 m = pool_elt_at_index (sm->static_mappings, value.value);
/* Rewrite destination to the mapping's local address; incremental
   IP-checksum fixup. */
369 old_addr = ip->dst_address.as_u32;
370 new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32;
372 sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address);
373 ip->checksum = ip_csum_fold (sum);
/* Only set the TX fib if the buffer doesn't already have one. */
375 if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0)
376 vnet_buffer (b)->sw_if_index[VLIB_TX] = m->fib_index;
380 #ifndef CLIB_MARCH_VARIANT
/* Hairpinning for non-TCP/UDP/ICMP protocols, endpoint-dependent path:
   prefer an active out2in_ed session (3-tuple, ports zero); fall back to an
   address-only static mapping.  NOTE(review): extraction gaps — `sum` and
   session pointer `s` declarations and several braces/returns are missing
   from this view; code kept byte-identical. */
382 nat44_ed_hairpinning_unknown_proto (snat_main_t * sm,
383 vlib_buffer_t * b, ip4_header_t * ip)
385 u32 old_addr, new_addr = 0, ti = 0;
386 clib_bihash_kv_8_8_t kv, value;
387 clib_bihash_kv_16_8_t s_kv, s_value;
388 snat_static_mapping_t *m;
391 snat_main_per_thread_data_t *tsm;
/* Worker selection via the out2in worker callback; single-worker setups
   use index sm->num_workers. */
393 if (sm->num_workers > 1)
394 ti = sm->worker_out2in_cb (ip, sm->outside_fib_index);
396 ti = sm->num_workers;
397 tsm = &sm->per_thread_data[ti];
399 old_addr = ip->dst_address.as_u32;
/* Session lookup: dst/src/protocol, ports zeroed for unknown protocols. */
400 make_ed_kv (&s_kv, &ip->dst_address, &ip->src_address, ip->protocol,
401 sm->outside_fib_index, 0, 0);
402 if (clib_bihash_search_16_8 (&tsm->out2in_ed, &s_kv, &s_value))
/* Miss: fall back to address-only static mapping. */
404 make_sm_kv (&kv, &ip->dst_address, 0, 0, 0);
405 if (clib_bihash_search_8_8
406 (&sm->static_mapping_by_external, &kv, &value))
409 m = pool_elt_at_index (sm->static_mappings, value.value);
410 if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0)
411 vnet_buffer (b)->sw_if_index[VLIB_TX] = m->fib_index;
412 new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32;
/* Hit: rewrite to the session's inside address/fib. */
416 s = pool_elt_at_index (sm->per_thread_data[ti].sessions, s_value.value);
417 if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0)
418 vnet_buffer (b)->sw_if_index[VLIB_TX] = s->in2out.fib_index;
419 new_addr = ip->dst_address.as_u32 = s->in2out.addr.as_u32;
/* Incremental IP-checksum fixup for the dst-address rewrite. */
422 sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address);
423 ip->checksum = ip_csum_fold (sum);
427 #ifndef CLIB_MARCH_VARIANT
/* Hairpinning for reassembled/fragmented packets: same lookup strategy as
   snat_hairpinning (static mapping first, then active session), but ports
   come in as explicit sport/dport parameters and the L4 port rewrite is only
   applied on the first fragment (non-first fragments carry no L4 header).
   NOTE(review): extraction gaps — original numbering jumps (429→432,
   457→459, 517→521, …); braces, udp0/tcp0/sum0 declarations and the return
   are missing from this view; code kept byte-identical. */
429 nat44_reass_hairpinning (snat_main_t * sm,
432 u16 sport, u16 dport, u32 proto0, int is_ed)
434 snat_session_key_t key0, sm0;
436 clib_bihash_kv_8_8_t kv0, value0;
438 u32 new_dst_addr0 = 0, old_dst_addr0, ti = 0, si;
439 u16 new_dst_port0, old_dst_port0;
/* out2in key from the packet destination; key0.port presumably set from
   dport in missing line 445 — TODO confirm. */
444 key0.addr = ip0->dst_address;
446 key0.protocol = proto0;
447 key0.fib_index = sm->outside_fib_index;
448 kv0.key = key0.as_u64;
450 udp0 = ip4_next_header (ip0);
452 /* Check if destination is static mappings */
453 if (!snat_static_mapping_match (sm, key0, &sm0, 1, 0, 0, 0, 0, 0))
455 new_dst_addr0 = sm0.addr.as_u32;
456 new_dst_port0 = sm0.port;
457 vnet_buffer (b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
459 /* or active sessions */
/* Worker selection by destination port range (same scheme as
   snat_hairpinning). */
462 if (sm->num_workers > 1)
464 (clib_net_to_host_u16 (udp0->dst_port) -
465 1024) / sm->port_per_thread;
467 ti = sm->num_workers;
/* ED path: 5-tuple lookup; non-ED path: 8-byte out2in lookup. */
471 clib_bihash_kv_16_8_t ed_kv, ed_value;
472 make_ed_kv (&ed_kv, &ip0->dst_address, &ip0->src_address,
473 ip0->protocol, sm->outside_fib_index, udp0->dst_port,
475 rv = clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed,
481 rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
487 s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
488 new_dst_addr0 = s0->in2out.addr.as_u32;
489 new_dst_port0 = s0->in2out.port;
490 vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
494 /* Destination is behind the same NAT, use internal address and port */
/* Incremental IP-checksum fixup for the dst-address rewrite. */
497 old_dst_addr0 = ip0->dst_address.as_u32;
498 ip0->dst_address.as_u32 = new_dst_addr0;
499 sum0 = ip0->checksum;
500 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
501 ip4_header_t, dst_address);
502 ip0->checksum = ip_csum_fold (sum0);
504 old_dst_port0 = dport;
/* Only the first fragment carries the L4 header, so the port rewrite is
   gated on ip4_is_first_fragment(). */
505 if (PREDICT_TRUE (new_dst_port0 != old_dst_port0 &&
506 ip4_is_first_fragment (ip0)))
508 if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP))
510 tcp0 = ip4_next_header (ip0);
511 tcp0->dst = new_dst_port0;
512 sum0 = tcp0->checksum;
513 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
514 ip4_header_t, dst_address);
515 sum0 = ip_csum_update (sum0, old_dst_port0, new_dst_port0,
516 ip4_header_t /* cheat */ , length);
517 tcp0->checksum = ip_csum_fold (sum0);
521 udp0->dst_port = new_dst_port0;
/* Port unchanged (or non-first fragment): still fold the address delta
   into the TCP checksum. */
527 if (PREDICT_TRUE (proto0 == SNAT_PROTOCOL_TCP))
529 tcp0 = ip4_next_header (ip0);
530 sum0 = tcp0->checksum;
531 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
532 ip4_header_t, dst_address);
533 tcp0->checksum = ip_csum_fold (sum0);
/* Shared dispatch loop for the nat44-hairpinning / nat44-ed-hairpinning
   graph nodes (is_ed selects the flavor): standard single-buffer vlib loop
   that runs snat_hairpinning() on each packet and forwards to ip4-lookup.
   NOTE(review): extraction gaps — the per-buffer declarations (b0, bi0,
   ip0, udp0, tcp0, next0, proto0) and the speculative-enqueue bookkeeping
   lines are missing from this view; code kept byte-identical. */
541 nat44_hairpinning_fn_inline (vlib_main_t * vm,
542 vlib_node_runtime_t * node,
543 vlib_frame_t * frame, int is_ed)
545 u32 n_left_from, *from, *to_next, stats_node_index;
546 nat_hairpin_next_t next_index;
547 u32 pkts_processed = 0;
548 snat_main_t *sm = &snat_main;
549 vnet_feature_main_t *fm = &feature_main;
550 u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index;
551 vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];
/* Error counters are attributed to the ED or non-ED stats node. */
553 stats_node_index = is_ed ? sm->ed_hairpinning_node_index :
554 sm->hairpinning_node_index;
555 from = vlib_frame_vector_args (frame);
556 n_left_from = frame->n_vectors;
557 next_index = node->cached_next_index;
559 while (n_left_from > 0)
563 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
565 while (n_left_from > 0 && n_left_to_next > 0)
575 /* speculatively enqueue b0 to the current next frame */
583 b0 = vlib_get_buffer (vm, bi0);
584 ip0 = vlib_buffer_get_current (b0);
/* udp0/tcp0 alias the same L4 header; proto0 selects which is used. */
585 udp0 = ip4_next_header (ip0);
586 tcp0 = (tcp_header_t *) udp0;
588 proto0 = ip_proto_to_snat_proto (ip0->protocol);
/* Advance the ip4-local feature arc config for this buffer. */
590 vnet_get_config_data (&cm->config_main, &b0->current_config_index,
593 if (snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0, is_ed))
594 next0 = NAT_HAIRPIN_NEXT_LOOKUP;
596 pkts_processed += next0 != NAT_HAIRPIN_NEXT_DROP;
598 /* verify speculative enqueue, maybe switch current next frame */
599 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
600 to_next, n_left_to_next,
604 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
607 vlib_node_increment_counter (vm, stats_node_index,
608 NAT44_HAIRPIN_ERROR_PROCESSED, pkts_processed);
609 return frame->n_vectors;
/* nat44-hairpinning node: thin wrapper dispatching to the shared inline
   with is_ed = 0 (non-endpoint-dependent NAT44). */
612 VLIB_NODE_FN (nat44_hairpinning_node) (vlib_main_t * vm,
613 vlib_node_runtime_t * node,
614 vlib_frame_t * frame)
616 return nat44_hairpinning_fn_inline (vm, node, frame, 0);
/* Node registration.  NOTE(review): the ".next_nodes = {" opener (original
   line 627) and closing braces are missing from this extraction. */
620 VLIB_REGISTER_NODE (nat44_hairpinning_node) = {
621 .name = "nat44-hairpinning",
622 .vector_size = sizeof (u32),
623 .type = VLIB_NODE_TYPE_INTERNAL,
624 .n_errors = ARRAY_LEN(nat44_hairpin_error_strings),
625 .error_strings = nat44_hairpin_error_strings,
626 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
628 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
629 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
/* nat44-ed-hairpinning node: same dispatch loop with is_ed = 1
   (endpoint-dependent NAT44). */
634 VLIB_NODE_FN (nat44_ed_hairpinning_node) (vlib_main_t * vm,
635 vlib_node_runtime_t * node,
636 vlib_frame_t * frame)
638 return nat44_hairpinning_fn_inline (vm, node, frame, 1);
/* Node registration.  NOTE(review): ".next_nodes = {" opener and closing
   braces missing from this extraction. */
642 VLIB_REGISTER_NODE (nat44_ed_hairpinning_node) = {
643 .name = "nat44-ed-hairpinning",
644 .vector_size = sizeof (u32),
645 .type = VLIB_NODE_TYPE_INTERNAL,
646 .n_errors = ARRAY_LEN(nat44_hairpin_error_strings),
647 .error_strings = nat44_hairpin_error_strings,
648 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
650 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
651 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
/* Shared loop for the nat44-hairpin-dst / nat44-ed-hairpin-dst nodes:
   detect hairpinned destinations (is_hairpinning), run the protocol-specific
   rewrite (TCP/UDP, ICMP, or unknown-proto), tag the buffer with
   SNAT_FLAG_HAIRPINNING for the hairpin-src node, then send to ip4-lookup.
   NOTE(review): extraction gaps — per-buffer declarations (b0, bi0, ip0,
   next0, proto0) and enqueue bookkeeping are missing from this view; the
   ED/non-ED split around lines 719-722 is also partly missing; code kept
   byte-identical. */
657 snat_hairpin_dst_fn_inline (vlib_main_t * vm,
658 vlib_node_runtime_t * node,
659 vlib_frame_t * frame, int is_ed)
661 u32 n_left_from, *from, *to_next, stats_node_index;
662 nat_hairpin_next_t next_index;
663 u32 pkts_processed = 0;
664 snat_main_t *sm = &snat_main;
666 stats_node_index = is_ed ? sm->ed_hairpin_dst_node_index :
667 sm->hairpin_dst_node_index;
669 from = vlib_frame_vector_args (frame);
670 n_left_from = frame->n_vectors;
671 next_index = node->cached_next_index;
673 while (n_left_from > 0)
677 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
679 while (n_left_from > 0 && n_left_to_next > 0)
687 /* speculatively enqueue b0 to the current next frame */
695 b0 = vlib_get_buffer (vm, bi0);
696 next0 = NAT_HAIRPIN_NEXT_LOOKUP;
697 ip0 = vlib_buffer_get_current (b0);
699 proto0 = ip_proto_to_snat_proto (ip0->protocol);
/* Clear any stale hairpin flag before classification. */
701 vnet_buffer (b0)->snat.flags = 0;
702 if (PREDICT_FALSE (is_hairpinning (sm, &ip0->dst_address)))
/* Dispatch by protocol to the matching rewrite helper. */
704 if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
706 udp_header_t *udp0 = ip4_next_header (ip0);
707 tcp_header_t *tcp0 = (tcp_header_t *) udp0;
709 snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0, is_ed);
711 else if (proto0 == SNAT_PROTOCOL_ICMP)
713 icmp46_header_t *icmp0 = ip4_next_header (ip0);
715 snat_icmp_hairpinning (sm, b0, ip0, icmp0, is_ed);
/* Unknown protocol: ED and non-ED variants (the if(is_ed)/else around
   these two calls is among the missing lines — TODO confirm). */
720 nat44_ed_hairpinning_unknown_proto (sm, b0, ip0);
722 nat_hairpinning_sm_unknown_proto (sm, b0, ip0);
/* Mark the packet so nat44-hairpin-src re-NATs it on the way out. */
725 vnet_buffer (b0)->snat.flags = SNAT_FLAG_HAIRPINNING;
728 pkts_processed += next0 != NAT_HAIRPIN_NEXT_DROP;
730 /* verify speculative enqueue, maybe switch current next frame */
731 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
732 to_next, n_left_to_next,
736 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
739 vlib_node_increment_counter (vm, stats_node_index,
740 NAT44_HAIRPIN_ERROR_PROCESSED, pkts_processed);
741 return frame->n_vectors;
/* nat44-hairpin-dst node: wrapper over the shared dst loop, is_ed = 0. */
744 VLIB_NODE_FN (snat_hairpin_dst_node) (vlib_main_t * vm,
745 vlib_node_runtime_t * node,
746 vlib_frame_t * frame)
748 return snat_hairpin_dst_fn_inline (vm, node, frame, 0);
/* Node registration.  NOTE(review): ".next_nodes = {" opener and closing
   braces missing from this extraction. */
752 VLIB_REGISTER_NODE (snat_hairpin_dst_node) = {
753 .name = "nat44-hairpin-dst",
754 .vector_size = sizeof (u32),
755 .type = VLIB_NODE_TYPE_INTERNAL,
756 .n_errors = ARRAY_LEN(nat44_hairpin_error_strings),
757 .error_strings = nat44_hairpin_error_strings,
758 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
760 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
761 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
/* nat44-ed-hairpin-dst node: wrapper over the shared dst loop, is_ed = 1. */
766 VLIB_NODE_FN (nat44_ed_hairpin_dst_node) (vlib_main_t * vm,
767 vlib_node_runtime_t * node,
768 vlib_frame_t * frame)
770 return snat_hairpin_dst_fn_inline (vm, node, frame, 1);
/* Node registration.  NOTE(review): ".next_nodes = {" opener and closing
   braces missing from this extraction. */
774 VLIB_REGISTER_NODE (nat44_ed_hairpin_dst_node) = {
775 .name = "nat44-ed-hairpin-dst",
776 .vector_size = sizeof (u32),
777 .type = VLIB_NODE_TYPE_INTERNAL,
778 .n_errors = ARRAY_LEN(nat44_hairpin_error_strings),
779 .error_strings = nat44_hairpin_error_strings,
780 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
782 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
783 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
/* Shared loop for the nat44-hairpin-src / nat44-ed-hairpin-src output-
   feature nodes: packets previously tagged SNAT_FLAG_HAIRPINNING that are
   leaving via a NAT inside interface are redirected back into in2out (or
   the in2out worker-handoff node when multiple workers exist) so the source
   gets NATted too.  NOTE(review): extraction gaps — per-buffer declarations
   (b0, bi0, next0, sw_if_index0, interface iterator i) and several
   braces/else branches are missing from this view; code kept byte-identical. */
789 snat_hairpin_src_fn_inline (vlib_main_t * vm,
790 vlib_node_runtime_t * node,
791 vlib_frame_t * frame, int is_ed)
793 u32 n_left_from, *from, *to_next, stats_node_index;
794 snat_hairpin_src_next_t next_index;
795 u32 pkts_processed = 0;
796 snat_main_t *sm = &snat_main;
798 stats_node_index = is_ed ? sm->ed_hairpin_src_node_index :
799 sm->hairpin_src_node_index;
801 from = vlib_frame_vector_args (frame);
802 n_left_from = frame->n_vectors;
803 next_index = node->cached_next_index;
805 while (n_left_from > 0)
809 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
811 while (n_left_from > 0 && n_left_to_next > 0)
819 /* speculatively enqueue b0 to the current next frame */
827 b0 = vlib_get_buffer (vm, bi0);
828 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
/* Default: continue along the output feature arc. */
829 vnet_feature_next (&next0, b0);
/* Scan configured output-feature interfaces for a matching inside one. */
832 pool_foreach (i, sm->output_feature_interfaces,
834 /* Only packets from NAT inside interface */
835 if ((nat_interface_is_inside(i)) && (sw_if_index0 == i->sw_if_index))
837 if (PREDICT_FALSE ((vnet_buffer (b0)->snat.flags) &
838 SNAT_FLAG_HAIRPINNING))
/* Multi-worker: hand off; otherwise go straight to in2out-output. */
840 if (PREDICT_TRUE (sm->num_workers > 1))
841 next0 = SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH;
843 next0 = SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT;
850 pkts_processed += next0 != SNAT_HAIRPIN_SRC_NEXT_DROP;
852 /* verify speculative enqueue, maybe switch current next frame */
853 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
854 to_next, n_left_to_next,
858 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
861 vlib_node_increment_counter (vm, stats_node_index,
862 NAT44_HAIRPIN_ERROR_PROCESSED, pkts_processed);
863 return frame->n_vectors;
/* nat44-hairpin-src node: wrapper over the shared src loop, is_ed = 0. */
866 VLIB_NODE_FN (snat_hairpin_src_node) (vlib_main_t * vm,
867 vlib_node_runtime_t * node,
868 vlib_frame_t * frame)
870 return snat_hairpin_src_fn_inline (vm, node, frame, 0);
/* Node registration; next nodes route matched packets back into the
   non-ED in2out-output path.  NOTE(review): ".next_nodes = {" opener and
   closing braces missing from this extraction. */
874 VLIB_REGISTER_NODE (snat_hairpin_src_node) = {
875 .name = "nat44-hairpin-src",
876 .vector_size = sizeof (u32),
877 .type = VLIB_NODE_TYPE_INTERNAL,
878 .n_errors = ARRAY_LEN(nat44_hairpin_error_strings),
879 .error_strings = nat44_hairpin_error_strings,
880 .n_next_nodes = SNAT_HAIRPIN_SRC_N_NEXT,
882 [SNAT_HAIRPIN_SRC_NEXT_DROP] = "error-drop",
883 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT] = "nat44-in2out-output",
884 [SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT] = "interface-output",
885 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH] = "nat44-in2out-output-worker-handoff",
/* nat44-ed-hairpin-src node: wrapper over the shared src loop, is_ed = 1. */
890 VLIB_NODE_FN (nat44_ed_hairpin_src_node) (vlib_main_t * vm,
891 vlib_node_runtime_t * node,
892 vlib_frame_t * frame)
894 return snat_hairpin_src_fn_inline (vm, node, frame, 1);
/* Node registration; IN2OUT routes to the ED in2out-output node, while the
   worker-handoff target is the non-ED handoff node as written — presumably
   intentional upstream, but worth confirming.  NOTE(review): ".next_nodes
   = {" opener and closing braces missing from this extraction. */
898 VLIB_REGISTER_NODE (nat44_ed_hairpin_src_node) = {
899 .name = "nat44-ed-hairpin-src",
900 .vector_size = sizeof (u32),
901 .type = VLIB_NODE_TYPE_INTERNAL,
902 .n_errors = ARRAY_LEN(nat44_hairpin_error_strings),
903 .error_strings = nat44_hairpin_error_strings,
904 .n_next_nodes = SNAT_HAIRPIN_SRC_N_NEXT,
906 [SNAT_HAIRPIN_SRC_NEXT_DROP] = "error-drop",
907 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT] = "nat44-ed-in2out-output",
908 [SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT] = "interface-output",
909 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH] = "nat44-in2out-output-worker-handoff",
915 * fd.io coding-style-patch-verification: ON
918 * eval: (c-set-style "gnu")