/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT44 hairpinning
 */
20 #include <vlib/vlib.h>
21 #include <vnet/vnet.h>
22 #include <vnet/fib/ip4_fib.h>
24 #include <nat/nat_inlines.h>
/* Next-node indices for the nat44-hairpin-src / nat44-ed-hairpin-src
 * output-feature nodes. */
typedef enum
{
  SNAT_HAIRPIN_SRC_NEXT_DROP,
  SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT,
  SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH,
  SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT,
  SNAT_HAIRPIN_SRC_N_NEXT,
} snat_hairpin_src_next_t;
37 NAT_HAIRPIN_NEXT_LOOKUP,
38 NAT_HAIRPIN_NEXT_DROP,
48 } nat_hairpin_trace_t;
51 format_nat_hairpin_trace (u8 * s, va_list * args)
53 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
54 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
55 nat_hairpin_trace_t *t = va_arg (*args, nat_hairpin_trace_t *);
58 format (s, "new dst addr %U port %u fib-index %u", format_ip4_address,
59 &t->addr, clib_net_to_host_u16 (t->port), t->fib_index);
60 if (~0 == t->session_index)
62 s = format (s, " is-static-mapping");
66 s = format (s, " session-index %u", t->session_index);
72 extern vnet_feature_arc_registration_t vnet_feat_arc_ip4_local;
74 static_always_inline int
75 is_hairpinning (snat_main_t * sm, ip4_address_t * dst_addr)
78 clib_bihash_kv_8_8_t kv, value;
81 vec_foreach (ap, sm->addresses)
83 if (ap->addr.as_u32 == dst_addr->as_u32)
88 init_nat_k (&kv, *dst_addr, 0, 0, 0);
89 if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value))
95 #ifndef CLIB_MARCH_VARIANT
/*
 * snat_hairpinning:
 * For a TCP/UDP packet sent to a NAT external address, rewrite the
 * destination to the mapped inside address/port so two inside hosts can
 * reach each other via their outside addresses.  Match order: static
 * mapping first, then active out2in session (endpoint-dependent table
 * when is_ed is set).  Per the comment at original line 163, it returns 0
 * when nothing changed; the success return is not visible in this chunk.
 *
 * NOTE(review): this chunk is a mangled extraction — braces, else
 * branches, returns and some declarations are missing, and every line
 * carries a stray original line number.  Comments describe visible logic
 * only; confirm against the pristine file.
 */
97 snat_hairpinning (vlib_main_t * vm, vlib_node_runtime_t * node,
98 snat_main_t * sm, vlib_buffer_t * b0, ip4_header_t * ip0,
99 udp_header_t * udp0, tcp_header_t * tcp0, u32 proto0,
100 int is_ed, int do_trace)
/* s0 = matched session; kv0/value0 = 8x8 bihash probe */
102 snat_session_t *s0 = NULL;
103 clib_bihash_kv_8_8_t kv0, value0;
/* ti = owning thread index, si = session index (~0 = none) */
105 u32 new_dst_addr0 = 0, old_dst_addr0, ti = 0, si = ~0;
106 u16 new_dst_port0 = ~0, old_dst_port0;
108 ip4_address_t sm0_addr;
/* remember TX sw_if_index to detect "nothing changed" below */
111 u32 old_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
112 /* Check if destination is static mappings */
113 if (!snat_static_mapping_match
114 (sm, ip0->dst_address, udp0->dst_port, sm->outside_fib_index, proto0,
115 &sm0_addr, &sm0_port, &sm0_fib_index, 1, 0, 0, 0, 0, 0, 0))
/* static mapping hit: rewrite to its local address/port and fib */
117 new_dst_addr0 = sm0_addr.as_u32;
118 new_dst_port0 = sm0_port;
119 vnet_buffer (b0)->sw_if_index[VLIB_TX] = sm0_fib_index;
121 /* or active session */
/* pick the owning worker from the dst port; ports are striped
   per thread with the first 1024 reserved */
124 if (sm->num_workers > 1)
126 (clib_net_to_host_u16 (udp0->dst_port) -
127 1024) / sm->port_per_thread;
129 ti = sm->num_workers;
/* endpoint-dependent path: 6-tuple lookup in out2in_ed */
133 clib_bihash_kv_16_8_t ed_kv, ed_value;
134 init_ed_k (&ed_kv, ip0->dst_address, udp0->dst_port,
135 ip0->src_address, udp0->src_port, sm->outside_fib_index,
137 rv = clib_bihash_search_16_8 (&sm->out2in_ed, &ed_kv, &ed_value);
138 ASSERT (ti == ed_value_get_thread_index (&ed_value));
139 si = ed_value_get_session_index (&ed_value);
/* endpoint-independent path: per-thread out2in table */
144 init_nat_k (&kv0, ip0->dst_address, udp0->dst_port,
145 sm->outside_fib_index, proto0);
147 clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
/* session hit: rewrite to its inside address/port/fib */
157 s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
158 new_dst_addr0 = s0->in2out.addr.as_u32;
159 new_dst_port0 = s0->in2out.port;
160 vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
163 /* Check if anything has changed and if not, then return 0. This
164 helps avoid infinite loop, repeating the three nodes
165 nat44-hairpinning-->ip4-lookup-->ip4-local, in case nothing has
167 old_dst_addr0 = ip0->dst_address.as_u32;
168 old_dst_port0 = tcp0->dst;
169 if (new_dst_addr0 == old_dst_addr0
170 && new_dst_port0 == old_dst_port0
171 && vnet_buffer (b0)->sw_if_index[VLIB_TX] == old_sw_if_index)
174 /* Destination is behind the same NAT, use internal address and port */
/* patch the IP dst address; incremental (RFC 1624 style) checksum fix */
177 old_dst_addr0 = ip0->dst_address.as_u32;
178 ip0->dst_address.as_u32 = new_dst_addr0;
179 sum0 = ip0->checksum;
180 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
181 ip4_header_t, dst_address);
182 ip0->checksum = ip_csum_fold (sum0);
184 old_dst_port0 = tcp0->dst;
185 if (PREDICT_TRUE (new_dst_port0 != old_dst_port0))
/* port changed: TCP checksum needs both the pseudo-header address
   delta and the port delta; the "cheat" reuses the ip4 length field
   offset for a generic 16-bit update */
187 if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP))
189 tcp0->dst = new_dst_port0;
190 sum0 = tcp0->checksum;
191 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
192 ip4_header_t, dst_address);
193 sum0 = ip_csum_update (sum0, old_dst_port0, new_dst_port0,
194 ip4_header_t /* cheat */ , length);
195 tcp0->checksum = ip_csum_fold (sum0);
/* UDP branch: rewrite port only (UDP checksum handling not
   visible in this chunk) */
199 udp0->dst_port = new_dst_port0;
/* port unchanged: only the address moved; fix TCP checksum for it */
205 if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP))
207 sum0 = tcp0->checksum;
208 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
209 ip4_header_t, dst_address);
210 tcp0->checksum = ip_csum_fold (sum0);
/* optional packet trace: record the rewrite result; session_index ~0
   marks a static-mapping match for the formatter */
218 if (do_trace && PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
219 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
221 nat_hairpin_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
222 t->addr.as_u32 = new_dst_addr0;
223 t->port = new_dst_port0;
224 t->fib_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
227 t->session_index = si;
231 t->session_index = ~0;
238 #ifndef CLIB_MARCH_VARIANT
/*
 * snat_icmp_hairpinning:
 * Hairpin an ICMP packet addressed to a NAT external address.  Two cases:
 *  (1) ICMP error message: the embedded (inner) IP/L4 header is the
 *      original inside->outside packet; look up the session by the inner
 *      headers and rewrite the inner source address/port plus the three
 *      affected checksums (ICMP, inner IP, ICMP again for the inner-IP
 *      checksum change).
 *  (2) ICMP echo: look up by the echo identifier; rewrite the identifier
 *      from the matched session, else fall back to a static mapping.
 * Finally the outer destination address is rewritten and the IP checksum
 * incrementally fixed.
 *
 * NOTE(review): mangled extraction — braces, else branches, returns and
 * some declarations (ti, si, s0, sum0) are missing, and each line carries
 * a stray original line number.  Comments describe visible logic only.
 */
240 snat_icmp_hairpinning (snat_main_t * sm,
242 ip4_header_t * ip0, icmp46_header_t * icmp0, int is_ed)
244 clib_bihash_kv_8_8_t kv0, value0;
245 u32 old_dst_addr0, new_dst_addr0;
246 u32 old_addr0, new_addr0;
247 u16 old_port0, new_port0;
248 u16 old_checksum0, new_checksum0;
252 snat_static_mapping_t *m0;
/* --- case 1: ICMP error carrying the original inner packet --- */
254 if (icmp_type_is_error_message
255 (vnet_buffer (b0)->ip.reass.icmp_type_or_tcp_flags))
257 ip4_header_t *inner_ip0 = 0;
258 tcp_udp_header_t *l4_header = 0;
/* inner IP header sits after the ICMP header + echo-sized field */
260 inner_ip0 = (ip4_header_t *) ((icmp_echo_header_t *) (icmp0 + 1) + 1);
261 l4_header = ip4_next_header (inner_ip0);
262 u32 protocol = ip_proto_to_nat_proto (inner_ip0->protocol);
/* only TCP/UDP inner payloads are handled */
264 if (protocol != NAT_PROTOCOL_TCP && protocol != NAT_PROTOCOL_UDP)
/* ed mode: 6-tuple session lookup keyed by outer dst + inner ports */
269 clib_bihash_kv_16_8_t ed_kv, ed_value;
270 init_ed_k (&ed_kv, ip0->dst_address, l4_header->src_port,
271 ip0->src_address, l4_header->dst_port,
272 sm->outside_fib_index, inner_ip0->protocol);
273 if (clib_bihash_search_16_8 (&sm->out2in_ed, &ed_kv, &ed_value))
275 ASSERT (ti == ed_value_get_thread_index (&ed_value));
276 si = ed_value_get_session_index (&ed_value);
/* ei mode: per-thread out2in lookup by outer dst + inner src port */
280 init_nat_k (&kv0, ip0->dst_address, l4_header->src_port,
281 sm->outside_fib_index, protocol);
282 if (clib_bihash_search_8_8
283 (&sm->per_thread_data[ti].out2in, &kv0, &value0))
287 s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
288 new_dst_addr0 = s0->in2out.addr.as_u32;
289 vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
291 /* update inner source IP address */
292 old_addr0 = inner_ip0->src_address.as_u32;
293 inner_ip0->src_address.as_u32 = new_dst_addr0;
294 new_addr0 = inner_ip0->src_address.as_u32;
/* inner payload is covered by the ICMP checksum, so fix it too */
295 sum0 = icmp0->checksum;
296 sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
298 icmp0->checksum = ip_csum_fold (sum0);
300 /* update inner IP header checksum */
301 old_checksum0 = inner_ip0->checksum;
302 sum0 = inner_ip0->checksum;
303 sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
305 inner_ip0->checksum = ip_csum_fold (sum0);
/* the inner-IP checksum change itself shifts the ICMP checksum */
306 new_checksum0 = inner_ip0->checksum;
307 sum0 = icmp0->checksum;
308 sum0 = ip_csum_update (sum0, old_checksum0, new_checksum0, ip4_header_t,
310 icmp0->checksum = ip_csum_fold (sum0);
312 /* update inner source port */
313 old_port0 = l4_header->src_port;
314 l4_header->src_port = s0->in2out.port;
315 new_port0 = l4_header->src_port;
316 sum0 = icmp0->checksum;
317 sum0 = ip_csum_update (sum0, old_port0, new_port0, tcp_udp_header_t,
319 icmp0->checksum = ip_csum_fold (sum0);
/* --- case 2: non-error ICMP (echo) --- */
/* address-only probe of static mappings for the outer dst */
323 init_nat_k (&kv0, ip0->dst_address, 0, sm->outside_fib_index, 0);
324 if (clib_bihash_search_8_8
325 (&sm->static_mapping_by_external, &kv0, &value0))
/* no static mapping: try a session keyed by the echo identifier */
329 icmp_echo_header_t *echo0 = (icmp_echo_header_t *) (icmp0 + 1);
330 u16 icmp_id0 = echo0->identifier;
331 init_nat_k (&kv0, ip0->dst_address, icmp_id0,
332 sm->outside_fib_index, NAT_PROTOCOL_ICMP);
/* owning worker derived from the identifier (port striping) */
333 if (sm->num_workers > 1)
335 (clib_net_to_host_u16 (icmp_id0) -
336 1024) / sm->port_per_thread;
338 ti = sm->num_workers;
340 clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
/* session hit: rewrite identifier and fix ICMP checksum */
346 pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
347 new_dst_addr0 = s0->in2out.addr.as_u32;
348 vnet_buffer (b0)->sw_if_index[VLIB_TX] =
349 s0->in2out.fib_index;
350 echo0->identifier = s0->in2out.port;
351 sum0 = icmp0->checksum;
352 sum0 = ip_csum_update (sum0, icmp_id0, s0->in2out.port,
353 icmp_echo_header_t, identifier);
354 icmp0->checksum = ip_csum_fold (sum0);
/* static mapping hit: use its local address; only set TX fib if
   not already chosen */
362 m0 = pool_elt_at_index (sm->static_mappings, value0.value);
364 new_dst_addr0 = m0->local_addr.as_u32;
365 if (vnet_buffer (b0)->sw_if_index[VLIB_TX] == ~0)
366 vnet_buffer (b0)->sw_if_index[VLIB_TX] = m0->fib_index;
369 /* Destination is behind the same NAT, use internal address and port */
/* rewrite outer dst address; incremental IP checksum fix */
372 old_dst_addr0 = ip0->dst_address.as_u32;
373 ip0->dst_address.as_u32 = new_dst_addr0;
374 sum0 = ip0->checksum;
375 sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
376 ip4_header_t, dst_address);
377 ip0->checksum = ip_csum_fold (sum0);
383 #ifndef CLIB_MARCH_VARIANT
385 nat_hairpinning_sm_unknown_proto (snat_main_t * sm,
386 vlib_buffer_t * b, ip4_header_t * ip)
388 clib_bihash_kv_8_8_t kv, value;
389 snat_static_mapping_t *m;
390 u32 old_addr, new_addr;
393 init_nat_k (&kv, ip->dst_address, 0, 0, 0);
394 if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv, &value))
397 m = pool_elt_at_index (sm->static_mappings, value.value);
399 old_addr = ip->dst_address.as_u32;
400 new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32;
402 sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address);
403 ip->checksum = ip_csum_fold (sum);
405 if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0)
406 vnet_buffer (b)->sw_if_index[VLIB_TX] = m->fib_index;
410 #ifndef CLIB_MARCH_VARIANT
412 nat44_ed_hairpinning_unknown_proto (snat_main_t * sm,
413 vlib_buffer_t * b, ip4_header_t * ip)
415 u32 old_addr, new_addr = 0, ti = 0;
416 clib_bihash_kv_8_8_t kv, value;
417 clib_bihash_kv_16_8_t s_kv, s_value;
418 snat_static_mapping_t *m;
422 if (sm->num_workers > 1)
423 ti = sm->worker_out2in_cb (b, ip, sm->outside_fib_index, 0);
425 ti = sm->num_workers;
427 old_addr = ip->dst_address.as_u32;
428 init_ed_k (&s_kv, ip->dst_address, 0, ip->src_address, 0,
429 sm->outside_fib_index, ip->protocol);
430 if (clib_bihash_search_16_8 (&sm->out2in_ed, &s_kv, &s_value))
432 init_nat_k (&kv, ip->dst_address, 0, 0, 0);
433 if (clib_bihash_search_8_8
434 (&sm->static_mapping_by_external, &kv, &value))
437 m = pool_elt_at_index (sm->static_mappings, value.value);
438 if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0)
439 vnet_buffer (b)->sw_if_index[VLIB_TX] = m->fib_index;
440 new_addr = ip->dst_address.as_u32 = m->local_addr.as_u32;
444 ASSERT (ti == ed_value_get_thread_index (&s_value));
446 pool_elt_at_index (sm->per_thread_data[ti].sessions,
447 ed_value_get_session_index (&s_value));
448 if (vnet_buffer (b)->sw_if_index[VLIB_TX] == ~0)
449 vnet_buffer (b)->sw_if_index[VLIB_TX] = s->in2out.fib_index;
450 new_addr = ip->dst_address.as_u32 = s->in2out.addr.as_u32;
453 sum = ip_csum_update (sum, old_addr, new_addr, ip4_header_t, dst_address);
454 ip->checksum = ip_csum_fold (sum);
/*
 * nat44_hairpinning_fn_inline:
 * Node function body shared by nat44-hairpinning and nat44-ed-hairpinning
 * (is_ed selects the session-table flavor).  Sits on the ip4-local feature
 * arc: for each buffer it calls snat_hairpinning() on the TCP/UDP headers
 * and continues the feature walk via vnet_get_config_data, defaulting to
 * ip4-lookup.
 *
 * NOTE(review): mangled extraction — the standard vlib double-loop
 * boilerplate (buffer index fetch, to_next bookkeeping, several argument
 * lines) is partially missing and each line carries a stray original line
 * number.  Comments describe visible logic only.
 */
459 nat44_hairpinning_fn_inline (vlib_main_t * vm,
460 vlib_node_runtime_t * node,
461 vlib_frame_t * frame, int is_ed)
463 u32 n_left_from, *from, *to_next;
464 nat_hairpin_next_t next_index;
465 snat_main_t *sm = &snat_main;
/* locate the ip4-local feature arc config to continue the arc walk */
466 vnet_feature_main_t *fm = &feature_main;
467 u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index;
468 vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc_index];
470 from = vlib_frame_vector_args (frame);
471 n_left_from = frame->n_vectors;
472 next_index = node->cached_next_index;
/* standard vlib dispatch double loop */
474 while (n_left_from > 0)
478 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
480 while (n_left_from > 0 && n_left_to_next > 0)
491 /* speculatively enqueue b0 to the current next frame */
499 b0 = vlib_get_buffer (vm, bi0);
500 ip0 = vlib_buffer_get_current (b0);
/* L4 header follows IP; same offset reused for TCP view */
501 udp0 = ip4_next_header (ip0);
502 tcp0 = (tcp_header_t *) udp0;
503 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
505 proto0 = ip_proto_to_nat_proto (ip0->protocol);
/* advance this buffer's position on the feature arc */
507 vnet_get_config_data (&cm->config_main, &b0->current_config_index,
/* perform the actual destination rewrite (tracing enabled by caller
   flags; argument list partially elided here) */
511 (vm, node, sm, b0, ip0, udp0, tcp0, proto0, is_ed,
513 next0 = NAT_HAIRPIN_NEXT_LOOKUP;
/* count every non-dropped packet against the RX interface */
515 if (next0 != NAT_HAIRPIN_NEXT_DROP)
517 vlib_increment_simple_counter (&sm->counters.hairpinning,
518 vm->thread_index, sw_if_index0,
522 /* verify speculative enqueue, maybe switch current next frame */
523 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
524 to_next, n_left_to_next,
528 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
531 return frame->n_vectors;
534 VLIB_NODE_FN (nat44_hairpinning_node) (vlib_main_t * vm,
535 vlib_node_runtime_t * node,
536 vlib_frame_t * frame)
538 return nat44_hairpinning_fn_inline (vm, node, frame, 0);
542 VLIB_REGISTER_NODE (nat44_hairpinning_node) = {
543 .name = "nat44-hairpinning",
544 .vector_size = sizeof (u32),
545 .type = VLIB_NODE_TYPE_INTERNAL,
546 .format_trace = format_nat_hairpin_trace,
547 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
549 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
550 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
555 VLIB_NODE_FN (nat44_ed_hairpinning_node) (vlib_main_t * vm,
556 vlib_node_runtime_t * node,
557 vlib_frame_t * frame)
559 return nat44_hairpinning_fn_inline (vm, node, frame, 1);
563 VLIB_REGISTER_NODE (nat44_ed_hairpinning_node) = {
564 .name = "nat44-ed-hairpinning",
565 .vector_size = sizeof (u32),
566 .type = VLIB_NODE_TYPE_INTERNAL,
567 .format_trace = format_nat_hairpin_trace,
568 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
570 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
571 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
/*
 * snat_hairpin_dst_fn_inline:
 * Node body shared by nat44-hairpin-dst / nat44-ed-hairpin-dst.  For each
 * buffer whose destination is a NAT pool address or static-mapping
 * external address (is_hairpinning), dispatch on protocol to the matching
 * rewrite helper, then tag the buffer with SNAT_FLAG_HAIRPINNING so the
 * hairpin-src node can detect it on output.  All packets continue to
 * ip4-lookup.
 *
 * NOTE(review): mangled extraction — the vlib double-loop boilerplate,
 * several braces/else lines and the unknown-proto is_ed branch structure
 * are partially missing; each line carries a stray original line number.
 */
577 snat_hairpin_dst_fn_inline (vlib_main_t * vm,
578 vlib_node_runtime_t * node,
579 vlib_frame_t * frame, int is_ed)
581 u32 n_left_from, *from, *to_next;
582 nat_hairpin_next_t next_index;
583 snat_main_t *sm = &snat_main;
585 from = vlib_frame_vector_args (frame);
586 n_left_from = frame->n_vectors;
587 next_index = node->cached_next_index;
/* standard vlib dispatch double loop */
589 while (n_left_from > 0)
593 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
595 while (n_left_from > 0 && n_left_to_next > 0)
604 /* speculatively enqueue b0 to the current next frame */
612 b0 = vlib_get_buffer (vm, bi0);
613 next0 = NAT_HAIRPIN_NEXT_LOOKUP;
614 ip0 = vlib_buffer_get_current (b0);
615 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
617 proto0 = ip_proto_to_nat_proto (ip0->protocol);
/* clear per-buffer NAT flags before classification */
619 vnet_buffer (b0)->snat.flags = 0;
620 if (PREDICT_FALSE (is_hairpinning (sm, &ip0->dst_address)))
/* protocol dispatch: TCP/UDP, ICMP, then unknown protocols */
622 if (proto0 == NAT_PROTOCOL_TCP || proto0 == NAT_PROTOCOL_UDP)
624 udp_header_t *udp0 = ip4_next_header (ip0);
625 tcp_header_t *tcp0 = (tcp_header_t *) udp0;
627 snat_hairpinning (vm, node, sm, b0, ip0, udp0, tcp0, proto0,
628 is_ed, 1 /* do_trace */ );
630 else if (proto0 == NAT_PROTOCOL_ICMP)
632 icmp46_header_t *icmp0 = ip4_next_header (ip0);
634 snat_icmp_hairpinning (sm, b0, ip0, icmp0, is_ed);
/* unknown protocol: ed vs ei helper (selecting branch lines
   elided in this chunk; presumably keyed on is_ed — confirm) */
639 nat44_ed_hairpinning_unknown_proto (sm, b0, ip0);
641 nat_hairpinning_sm_unknown_proto (sm, b0, ip0);
/* mark so nat44-hairpin-src re-NATs this packet on output */
644 vnet_buffer (b0)->snat.flags = SNAT_FLAG_HAIRPINNING;
648 if (next0 != NAT_HAIRPIN_NEXT_DROP)
650 vlib_increment_simple_counter (&sm->counters.hairpinning,
651 vm->thread_index, sw_if_index0,
655 /* verify speculative enqueue, maybe switch current next frame */
656 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
657 to_next, n_left_to_next,
661 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
664 return frame->n_vectors;
667 VLIB_NODE_FN (snat_hairpin_dst_node) (vlib_main_t * vm,
668 vlib_node_runtime_t * node,
669 vlib_frame_t * frame)
671 return snat_hairpin_dst_fn_inline (vm, node, frame, 0);
675 VLIB_REGISTER_NODE (snat_hairpin_dst_node) = {
676 .name = "nat44-hairpin-dst",
677 .vector_size = sizeof (u32),
678 .type = VLIB_NODE_TYPE_INTERNAL,
679 .format_trace = format_nat_hairpin_trace,
680 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
682 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
683 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
688 VLIB_NODE_FN (nat44_ed_hairpin_dst_node) (vlib_main_t * vm,
689 vlib_node_runtime_t * node,
690 vlib_frame_t * frame)
692 return snat_hairpin_dst_fn_inline (vm, node, frame, 1);
696 VLIB_REGISTER_NODE (nat44_ed_hairpin_dst_node) = {
697 .name = "nat44-ed-hairpin-dst",
698 .vector_size = sizeof (u32),
699 .type = VLIB_NODE_TYPE_INTERNAL,
700 .format_trace = format_nat_hairpin_trace,
701 .n_next_nodes = NAT_HAIRPIN_N_NEXT,
703 [NAT_HAIRPIN_NEXT_DROP] = "error-drop",
704 [NAT_HAIRPIN_NEXT_LOOKUP] = "ip4-lookup",
/*
 * snat_hairpin_src_fn_inline:
 * Output-feature node body shared by nat44-hairpin-src /
 * nat44-ed-hairpin-src.  On the output path of a NAT inside interface,
 * packets previously tagged SNAT_FLAG_HAIRPINNING by the hairpin-dst node
 * are redirected back into in2out-output processing (worker handoff first
 * when multiple workers exist); all other packets follow the normal
 * feature arc via vnet_feature_next.
 *
 * NOTE(review): mangled extraction — the vlib double-loop boilerplate,
 * braces and else lines are partially missing; each line carries a stray
 * original line number.  Comments describe visible logic only.
 */
710 snat_hairpin_src_fn_inline (vlib_main_t * vm,
711 vlib_node_runtime_t * node,
712 vlib_frame_t * frame, int is_ed)
714 u32 n_left_from, *from, *to_next;
715 snat_hairpin_src_next_t next_index;
716 snat_main_t *sm = &snat_main;
718 from = vlib_frame_vector_args (frame);
719 n_left_from = frame->n_vectors;
720 next_index = node->cached_next_index;
/* standard vlib dispatch double loop */
722 while (n_left_from > 0)
726 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
728 while (n_left_from > 0 && n_left_to_next > 0)
736 /* speculatively enqueue b0 to the current next frame */
744 b0 = vlib_get_buffer (vm, bi0);
745 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
/* default: continue along the output feature arc */
746 vnet_feature_next (&next0, b0);
/* scan configured output-feature interfaces for this sw_if_index */
749 pool_foreach (i, sm->output_feature_interfaces)
751 /* Only packets from NAT inside interface */
752 if ((nat_interface_is_inside(i)) && (sw_if_index0 == i->sw_if_index))
/* hairpinned packet: send back through in2out-output;
   hand off to the owning worker when multi-worker */
754 if (PREDICT_FALSE ((vnet_buffer (b0)->snat.flags) &
755 SNAT_FLAG_HAIRPINNING))
757 if (PREDICT_TRUE (sm->num_workers > 1))
758 next0 = SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH;
760 next0 = SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT;
767 if (next0 != SNAT_HAIRPIN_SRC_NEXT_DROP)
769 vlib_increment_simple_counter (&sm->counters.hairpinning,
770 vm->thread_index, sw_if_index0,
774 /* verify speculative enqueue, maybe switch current next frame */
775 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
776 to_next, n_left_to_next,
780 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
783 return frame->n_vectors;
786 VLIB_NODE_FN (snat_hairpin_src_node) (vlib_main_t * vm,
787 vlib_node_runtime_t * node,
788 vlib_frame_t * frame)
790 return snat_hairpin_src_fn_inline (vm, node, frame, 0);
794 VLIB_REGISTER_NODE (snat_hairpin_src_node) = {
795 .name = "nat44-hairpin-src",
796 .vector_size = sizeof (u32),
797 .type = VLIB_NODE_TYPE_INTERNAL,
798 .n_next_nodes = SNAT_HAIRPIN_SRC_N_NEXT,
800 [SNAT_HAIRPIN_SRC_NEXT_DROP] = "error-drop",
801 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT] = "nat44-in2out-output",
802 [SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT] = "interface-output",
803 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH] = "nat44-in2out-output-worker-handoff",
808 VLIB_NODE_FN (nat44_ed_hairpin_src_node) (vlib_main_t * vm,
809 vlib_node_runtime_t * node,
810 vlib_frame_t * frame)
812 return snat_hairpin_src_fn_inline (vm, node, frame, 1);
816 VLIB_REGISTER_NODE (nat44_ed_hairpin_src_node) = {
817 .name = "nat44-ed-hairpin-src",
818 .vector_size = sizeof (u32),
819 .type = VLIB_NODE_TYPE_INTERNAL,
820 .n_next_nodes = SNAT_HAIRPIN_SRC_N_NEXT,
822 [SNAT_HAIRPIN_SRC_NEXT_DROP] = "error-drop",
823 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT] = "nat44-ed-in2out-output",
824 [SNAT_HAIRPIN_SRC_NEXT_INTERFACE_OUTPUT] = "interface-output",
825 [SNAT_HAIRPIN_SRC_NEXT_SNAT_IN2OUT_WH] = "nat44-in2out-output-worker-handoff",
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */