2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vnet/pg/pg.h>
19 #include <vnet/handoff.h>
21 #include <vnet/ip/ip.h>
22 #include <vnet/ethernet/ethernet.h>
23 #include <vnet/fib/ip4_fib.h>
24 #include <snat/snat.h>
25 #include <snat/snat_ipfix_logging.h>
27 #include <vppinfra/hash.h>
28 #include <vppinfra/error.h>
29 #include <vppinfra/elog.h>
36 } snat_in2out_trace_t;
39 u32 next_worker_index;
41 } snat_in2out_worker_handoff_trace_t;
43 /* packet trace format function */
/* Packet trace formatter shared by the in2out fast- and slow-path nodes. */
static u8 * format_snat_in2out_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
snat_in2out_trace_t * t = va_arg (*args, snat_in2out_trace_t *);
/* Label the record with the node variant that captured it. */
tag = t->is_slow_path ? "SNAT_IN2OUT_SLOW_PATH" : "SNAT_IN2OUT_FAST_PATH";
s = format (s, "%s: sw_if_index %d, next index %d, session %d", tag,
t->sw_if_index, t->next_index, t->session_index);
59 static u8 * format_snat_in2out_fast_trace (u8 * s, va_list * args)
61 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
62 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
63 snat_in2out_trace_t * t = va_arg (*args, snat_in2out_trace_t *);
65 s = format (s, "SANT_IN2OUT_FAST: sw_if_index %d, next index %d",
66 t->sw_if_index, t->next_index);
/* Packet trace formatter for the worker-handoff node: reports whether the
 * packet stayed on the same worker or was handed to another worker index. */
static u8 * format_snat_in2out_worker_handoff_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
snat_in2out_worker_handoff_trace_t * t =
va_arg (*args, snat_in2out_worker_handoff_trace_t *);
m = t->do_handoff ? "next worker" : "same worker";
s = format (s, "SNAT_IN2OUT_WORKER_HANDOFF: %s %d", m, t->next_worker_index);
/* Node registrations (defined near the bottom of this file); declared here
 * so node indices can be referenced before the registrations appear. */
vlib_node_registration_t snat_in2out_node;
vlib_node_registration_t snat_in2out_slowpath_node;
vlib_node_registration_t snat_in2out_fast_node;
vlib_node_registration_t snat_in2out_worker_handoff_node;
/* Error/stat counters for the in2out nodes: _(symbol, description). */
#define foreach_snat_in2out_error \
_(UNSUPPORTED_PROTOCOL, "Unsupported protocol") \
_(IN2OUT_PACKETS, "Good in2out packets processed") \
_(OUT_OF_PORTS, "Out of ports") \
_(BAD_OUTSIDE_FIB, "Outside VRF ID not found") \
_(BAD_ICMP_TYPE, "icmp type not echo-request") \
_(NO_TRANSLATION, "No translation")
/* Expand the list into SNAT_IN2OUT_ERROR_* counter indices. */
#define _(sym,str) SNAT_IN2OUT_ERROR_##sym,
foreach_snat_in2out_error
} snat_in2out_error_t;
/* Descriptions shown in error counters, indexed by snat_in2out_error_t. */
static char * snat_in2out_error_strings[] = {
#define _(sym,string) string,
foreach_snat_in2out_error
/* Next-node dispositions used by the in2out graph nodes. */
SNAT_IN2OUT_NEXT_LOOKUP,
SNAT_IN2OUT_NEXT_DROP,
SNAT_IN2OUT_NEXT_SLOW_PATH,
} snat_in2out_next_t;
119 * @brief Check if packet should be translated
* Packets aimed at outside interface and external address with active session
* should be translated.
124 * @param sm SNAT main
125 * @param rt SNAT runtime data
126 * @param sw_if_index0 index of the inside interface
127 * @param ip0 IPv4 header
128 * @param proto0 SNAT protocol
129 * @param rx_fib_index0 RX FIB index
131 * @returns 0 if packet should be translated otherwise 1
/* See the doxygen block above: returns 0 when the packet should be
 * translated, non-zero when it should be left alone. */
snat_not_translate (snat_main_t * sm, snat_runtime_t * rt, u32 sw_if_index0,
ip4_header_t * ip0, u32 proto0, u32 rx_fib_index0)
ip4_address_t * first_int_addr;
udp_header_t * udp0 = ip4_next_header (ip0);
snat_session_key_t key0, sm0;
clib_bihash_kv_8_8_t kv0, value0;
fib_node_index_t fei = FIB_NODE_INDEX_INVALID;
.fp_proto = FIB_PROTOCOL_IP4,
.ip4.as_u32 = ip0->dst_address.as_u32,
/* Refresh the per-node cache of the RX interface's first IPv4 address. */
if (PREDICT_FALSE(rt->cached_sw_if_index != sw_if_index0))
ip4_interface_first_address (sm->ip4_main, sw_if_index0,
0 /* just want the address */);
rt->cached_sw_if_index = sw_if_index0;
rt->cached_ip4_address = first_int_addr->as_u32;
rt->cached_ip4_address = 0;
/* Don't NAT packet aimed at the intfc address */
if (PREDICT_FALSE(ip0->dst_address.as_u32 == rt->cached_ip4_address))
/* Key an out2in lookup on the packet's destination address/port. */
key0.addr = ip0->dst_address;
key0.port = udp0->dst_port;
key0.protocol = proto0;
key0.fib_index = sm->outside_fib_index;
kv0.key = key0.as_u64;
/* NAT packet aimed at an external address if it */
/* has active sessions */
if (clib_bihash_search_8_8 (&sm->out2in, &kv0, &value0))
/* or matches a static mapping */
if (!snat_static_mapping_match(sm, key0, &sm0, 1))
/* Resolve destination in the RX FIB; fall back to the outside FIB
 * when the route does not resolve to an interface. */
fei = fib_table_lookup (rx_fib_index0, &pfx);
if (FIB_NODE_INDEX_INVALID != fei)
u32 sw_if_index = fib_entry_get_resolving_interface (fei);
if (sw_if_index == ~0)
fei = fib_table_lookup (sm->outside_fib_index, &pfx);
if (FIB_NODE_INDEX_INVALID != fei)
sw_if_index = fib_entry_get_resolving_interface (fei);
/* Translate if the resolving interface is a configured outside intfc. */
pool_foreach (i, sm->interfaces,
/* NAT packet aimed at outside interface */
if ((i->is_inside == 0) && (sw_if_index == i->sw_if_index))
/*
 * Slow path: create (or recycle) a NAT session for a new in2out flow.
 *
 * Looks up / creates the per-source "user" record, recycles the user's
 * least-recently-used dynamic session when over quota, matches a static
 * mapping or allocates an outside address/port, then installs the
 * in2out / out2in hash entries and the worker-by-out lookup, and emits
 * IPFIX session-create logging.  Returns the next-node index, or
 * SNAT_IN2OUT_NEXT_DROP on failure (bad outside FIB, out of ports).
 */
static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0,
snat_session_key_t * key0,
snat_session_t ** sessionp,
vlib_node_runtime_t * node,
snat_user_key_t user_key;
clib_bihash_kv_8_8_t kv0, value0;
u32 oldest_per_user_translation_list_index;
dlist_elt_t * oldest_per_user_translation_list_elt;
dlist_elt_t * per_user_translation_list_elt;
dlist_elt_t * per_user_list_head_elt;
snat_session_key_t key1;
u32 address_index = ~0;
u32 outside_fib_index;
snat_worker_key_t worker_by_out_key;
/* Map the configured outside VRF id to a FIB index; drop if unknown. */
p = hash_get (sm->ip4_main->fib_index_by_table_id, sm->outside_vrf_id);
b0->error = node->errors[SNAT_IN2OUT_ERROR_BAD_OUTSIDE_FIB];
return SNAT_IN2OUT_NEXT_DROP;
outside_fib_index = p[0];
key1.protocol = key0->protocol;
user_key.addr = ip0->src_address;
user_key.fib_index = rx_fib_index0;
kv0.key = user_key.as_u64;
/* Ever heard of the "user" = src ip4 address before? */
if (clib_bihash_search_8_8 (&sm->user_hash, &kv0, &value0))
/* no, make a new one */
pool_get (sm->per_thread_data[cpu_index].users, u);
memset (u, 0, sizeof (*u));
u->addr = ip0->src_address;
pool_get (sm->per_thread_data[cpu_index].list_pool, per_user_list_head_elt);
u->sessions_per_user_list_head_index = per_user_list_head_elt -
sm->per_thread_data[cpu_index].list_pool;
clib_dlist_init (sm->per_thread_data[cpu_index].list_pool,
u->sessions_per_user_list_head_index);
kv0.value = u - sm->per_thread_data[cpu_index].users;
clib_bihash_add_del_8_8 (&sm->user_hash, &kv0, 1 /* is_add */);
u = pool_elt_at_index (sm->per_thread_data[cpu_index].users,
/* Over quota? Recycle the least recently used dynamic translation */
if (u->nsessions >= sm->max_translations_per_user)
/* Remove the oldest dynamic translation */
oldest_per_user_translation_list_index =
clib_dlist_remove_head (sm->per_thread_data[cpu_index].list_pool,
u->sessions_per_user_list_head_index);
ASSERT (oldest_per_user_translation_list_index != ~0);
/* add it back to the end of the LRU list */
clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool,
u->sessions_per_user_list_head_index,
oldest_per_user_translation_list_index);
/* Get the list element */
oldest_per_user_translation_list_elt =
pool_elt_at_index (sm->per_thread_data[cpu_index].list_pool,
oldest_per_user_translation_list_index);
/* Get the session index from the list element */
session_index = oldest_per_user_translation_list_elt->value;
/* Get the session */
s = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions,
} while (snat_is_session_static (s));
/* Remove in2out, out2in keys */
kv0.key = s->in2out.as_u64;
if (clib_bihash_add_del_8_8 (&sm->in2out, &kv0, 0 /* is_add */))
clib_warning ("in2out key delete failed");
kv0.key = s->out2in.as_u64;
if (clib_bihash_add_del_8_8 (&sm->out2in, &kv0, 0 /* is_add */))
clib_warning ("out2in key delete failed");
/* Log the recycled session's deletion before reusing it. */
snat_ipfix_logging_nat44_ses_delete(s->in2out.addr.as_u32,
s->out2in.addr.as_u32,
s->in2out.fib_index);
snat_free_outside_address_and_port
(sm, &s->out2in, s->outside_address_index);
s->outside_address_index = ~0;
if (snat_alloc_outside_address_and_port (sm, &key1, &address_index))
b0->error = node->errors[SNAT_IN2OUT_ERROR_OUT_OF_PORTS];
return SNAT_IN2OUT_NEXT_DROP;
s->outside_address_index = address_index;
u8 static_mapping = 1;
/* First try to match static mapping by local address and port */
if (snat_static_mapping_match (sm, *key0, &key1, 0))
/* Try to create dynamic translation */
if (snat_alloc_outside_address_and_port (sm, &key1, &address_index))
b0->error = node->errors[SNAT_IN2OUT_ERROR_OUT_OF_PORTS];
return SNAT_IN2OUT_NEXT_DROP;
/* Create a new session */
pool_get (sm->per_thread_data[cpu_index].sessions, s);
memset (s, 0, sizeof (*s));
s->outside_address_index = address_index;
u->nstaticsessions++;
s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING;
/* Create list elts */
pool_get (sm->per_thread_data[cpu_index].list_pool,
per_user_translation_list_elt);
clib_dlist_init (sm->per_thread_data[cpu_index].list_pool,
per_user_translation_list_elt -
sm->per_thread_data[cpu_index].list_pool);
per_user_translation_list_elt->value =
s - sm->per_thread_data[cpu_index].sessions;
s->per_user_index = per_user_translation_list_elt -
sm->per_thread_data[cpu_index].list_pool;
s->per_user_list_head_index = u->sessions_per_user_list_head_index;
clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool,
s->per_user_list_head_index,
per_user_translation_list_elt -
sm->per_thread_data[cpu_index].list_pool);
s->out2in.protocol = key0->protocol;
s->out2in.fib_index = outside_fib_index;
/* Add to translation hashes */
kv0.key = s->in2out.as_u64;
kv0.value = s - sm->per_thread_data[cpu_index].sessions;
if (clib_bihash_add_del_8_8 (&sm->in2out, &kv0, 1 /* is_add */))
clib_warning ("in2out key add failed");
kv0.key = s->out2in.as_u64;
kv0.value = s - sm->per_thread_data[cpu_index].sessions;
if (clib_bihash_add_del_8_8 (&sm->out2in, &kv0, 1 /* is_add */))
clib_warning ("out2in key add failed");
/* Add to translated packets worker lookup */
worker_by_out_key.addr = s->out2in.addr;
worker_by_out_key.port = s->out2in.port;
worker_by_out_key.fib_index = s->out2in.fib_index;
kv0.key = worker_by_out_key.as_u64;
kv0.value = cpu_index;
clib_bihash_add_del_8_8 (&sm->worker_by_out, &kv0, 1);
/* Log the newly created session. */
snat_ipfix_logging_nat44_ses_create(s->in2out.addr.as_u32,
s->out2in.addr.as_u32,
s->in2out.fib_index);
/*
 * In2out translation for ICMP: the echo identifier plays the role of the
 * L4 port.  Anything other than an echo-request is dropped with the
 * BAD_ICMP_TYPE error.  On a session miss, either punts to the normal
 * (non-translating) path or creates a session via slow_path().
 */
static inline u32 icmp_in2out_slow_path (snat_main_t *sm,
icmp46_header_t * icmp0,
vlib_node_runtime_t * node,
snat_session_key_t key0;
icmp_echo_header_t *echo0;
clib_bihash_kv_8_8_t kv0, value0;
u32 new_addr0, old_addr0;
u16 old_id0, new_id0;
snat_runtime_t * rt = (snat_runtime_t *)node->runtime_data;
if (PREDICT_FALSE(icmp0->type != ICMP4_echo_request))
b0->error = node->errors[SNAT_IN2OUT_ERROR_BAD_ICMP_TYPE];
return SNAT_IN2OUT_NEXT_DROP;
/* Echo header immediately follows the ICMP header. */
echo0 = (icmp_echo_header_t *)(icmp0+1);
key0.addr = ip0->src_address;
key0.port = echo0->identifier;
key0.protocol = SNAT_PROTOCOL_ICMP;
key0.fib_index = rx_fib_index0;
kv0.key = key0.as_u64;
/* Session miss: decide not-translate vs. create via slow path. */
if (clib_bihash_search_8_8 (&sm->in2out, &kv0, &value0))
if (PREDICT_FALSE(snat_not_translate(sm, rt, sw_if_index0, ip0,
IP_PROTOCOL_ICMP, rx_fib_index0)))
next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0,
&s0, node, next0, cpu_index);
if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions,
/* Rewrite source address and incrementally fix the IP checksum. */
old_addr0 = ip0->src_address.as_u32;
ip0->src_address = s0->out2in.addr;
new_addr0 = ip0->src_address.as_u32;
vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
sum0 = ip0->checksum;
sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
src_address /* changed member */);
ip0->checksum = ip_csum_fold (sum0);
/* Rewrite the echo identifier and fix the ICMP checksum likewise. */
old_id0 = echo0->identifier;
new_id0 = s0->out2in.port;
echo0->identifier = new_id0;
sum0 = icmp0->checksum;
sum0 = ip_csum_update (sum0, old_id0, new_id0, icmp_echo_header_t,
icmp0->checksum = ip_csum_fold (sum0);
/* Accounting. */
s0->last_heard = now;
s0->total_bytes += vlib_buffer_length_in_chain (sm->vlib_main, b0);
/* Per-user LRU list maintenance for dynamic translations */
if (!snat_is_session_static (s0))
clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool,
clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool,
s0->per_user_list_head_index,
504 * Hairpinning allows two endpoints on the internal side of the NAT to
505 * communicate even if they only use each other's external IP addresses
508 * @param sm SNAT main.
509 * @param b0 Vlib buffer.
510 * @param ip0 IP header.
511 * @param udp0 UDP header.
512 * @param tcp0 TCP header.
513 * @param proto0 SNAT protocol.
/* See the doxygen block above: rewrite the destination of a translated
 * packet whose destination is another inside host's external address,
 * so two inside endpoints can talk via their external addresses. */
snat_hairpinning (snat_main_t *sm,
snat_session_key_t key0, sm0;
snat_worker_key_t k0;
clib_bihash_kv_8_8_t kv0, value0;
u32 new_dst_addr0 = 0, old_dst_addr0, ti = 0, si;
u16 new_dst_port0, old_dst_port0;
key0.addr = ip0->dst_address;
key0.port = udp0->dst_port;
key0.protocol = proto0;
key0.fib_index = sm->outside_fib_index;
kv0.key = key0.as_u64;
/* Check if destination is in active sessions */
if (clib_bihash_search_8_8 (&sm->out2in, &kv0, &value0))
/* or static mappings */
if (!snat_static_mapping_match(sm, key0, &sm0, 1))
new_dst_addr0 = sm0.addr.as_u32;
new_dst_port0 = sm0.port;
vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
/* Multi-worker: find which thread owns the destination session. */
if (sm->num_workers > 1)
k0.addr = ip0->dst_address;
k0.port = udp0->dst_port;
k0.fib_index = sm->outside_fib_index;
if (clib_bihash_search_8_8 (&sm->worker_by_out, &kv0, &value0))
ti = sm->num_workers;
s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
new_dst_addr0 = s0->in2out.addr.as_u32;
new_dst_port0 = s0->in2out.port;
vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
/* Destination is behind the same NAT, use internal address and port */
old_dst_addr0 = ip0->dst_address.as_u32;
ip0->dst_address.as_u32 = new_dst_addr0;
sum0 = ip0->checksum;
sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
ip4_header_t, dst_address);
ip0->checksum = ip_csum_fold (sum0);
old_dst_port0 = tcp0->ports.dst;
if (PREDICT_TRUE(new_dst_port0 != old_dst_port0))
if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP))
tcp0->ports.dst = new_dst_port0;
sum0 = tcp0->checksum;
sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
ip4_header_t, dst_address);
sum0 = ip_csum_update (sum0, old_dst_port0, new_dst_port0,
ip4_header_t /* cheat */, length);
tcp0->checksum = ip_csum_fold(sum0);
/* UDP: no checksum update here, only the port rewrite. */
udp0->dst_port = new_dst_port0;
/*
 * Shared worker for the snat-in2out fast and slow path nodes.
 *
 * is_slow_path selects which node's stats counter to bump and whether
 * unknown protocols / ICMP are handled inline (slow path) or punted to
 * the snat-in2out-slowpath node (fast path).  Processes two buffers per
 * iteration in the main loop, then drains singles.
 */
snat_in2out_node_fn_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame, int is_slow_path)
u32 n_left_from, * from, * to_next;
snat_in2out_next_t next_index;
u32 pkts_processed = 0;
snat_main_t * sm = &snat_main;
snat_runtime_t * rt = (snat_runtime_t *)node->runtime_data;
f64 now = vlib_time_now (vm);
u32 stats_node_index;
u32 cpu_index = os_get_cpu_number ();
/* Attribute the packet counter to whichever node variant this is. */
stats_node_index = is_slow_path ? snat_in2out_slowpath_node.index :
snat_in2out_node.index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
while (n_left_from > 0)
vlib_get_next_frame (vm, node, next_index,
to_next, n_left_to_next);
/* Dual-buffer loop: handle b0 and b1 per iteration. */
while (n_left_from >= 4 && n_left_to_next >= 2)
vlib_buffer_t * b0, * b1;
u32 sw_if_index0, sw_if_index1;
ip4_header_t * ip0, * ip1;
ip_csum_t sum0, sum1;
u32 new_addr0, old_addr0, new_addr1, old_addr1;
u16 old_port0, new_port0, old_port1, new_port1;
udp_header_t * udp0, * udp1;
tcp_header_t * tcp0, * tcp1;
icmp46_header_t * icmp0, * icmp1;
snat_session_key_t key0, key1;
u32 rx_fib_index0, rx_fib_index1;
snat_session_t * s0 = 0, * s1 = 0;
clib_bihash_kv_8_8_t kv0, value0, kv1, value1;
/* Prefetch next iteration. */
vlib_buffer_t * p2, * p3;
p2 = vlib_get_buffer (vm, from[2]);
p3 = vlib_get_buffer (vm, from[3]);
vlib_prefetch_buffer_header (p2, LOAD);
vlib_prefetch_buffer_header (p3, LOAD);
CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
/* speculatively enqueue b0 and b1 to the current next frame */
to_next[0] = bi0 = from[0];
to_next[1] = bi1 = from[1];
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
ip0 = vlib_buffer_get_current (b0);
udp0 = ip4_next_header (ip0);
tcp0 = (tcp_header_t *) udp0;
icmp0 = (icmp46_header_t *) udp0;
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
next0 = next1 = SNAT_IN2OUT_NEXT_LOOKUP;
proto0 = ip_proto_to_snat_proto (ip0->protocol);
/* Next configured feature, probably ip4-lookup */
if (PREDICT_FALSE (proto0 == ~0))
/* Slow path handles ICMP inline; fast path punts it below. */
if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
next0 = icmp_in2out_slow_path
(sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0,
node, next0, now, cpu_index);
if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP))
next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
key0.addr = ip0->src_address;
key0.port = udp0->src_port;
key0.protocol = proto0;
key0.fib_index = rx_fib_index0;
kv0.key = key0.as_u64;
if (PREDICT_FALSE (clib_bihash_search_8_8 (&sm->in2out, &kv0, &value0) != 0))
if (PREDICT_FALSE(snat_not_translate(sm, rt, sw_if_index0, ip0,
proto0, rx_fib_index0)))
next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0,
&s0, node, next0, cpu_index);
if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions,
/* Rewrite source address; incremental IP checksum update. */
old_addr0 = ip0->src_address.as_u32;
ip0->src_address = s0->out2in.addr;
new_addr0 = ip0->src_address.as_u32;
vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
sum0 = ip0->checksum;
sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
src_address /* changed member */);
ip0->checksum = ip_csum_fold (sum0);
if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP))
old_port0 = tcp0->ports.src;
tcp0->ports.src = s0->out2in.port;
new_port0 = tcp0->ports.src;
sum0 = tcp0->checksum;
sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
dst_address /* changed member */);
sum0 = ip_csum_update (sum0, old_port0, new_port0,
ip4_header_t /* cheat */,
length /* changed member */);
tcp0->checksum = ip_csum_fold(sum0);
old_port0 = udp0->src_port;
udp0->src_port = s0->out2in.port;
/* Handle inside-to-inside traffic via external addresses. */
snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0);
/* Accounting */
s0->last_heard = now;
s0->total_bytes += vlib_buffer_length_in_chain (vm, b0);
/* Per-user LRU list maintenance for dynamic translation */
if (!snat_is_session_static (s0))
clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool,
clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool,
s0->per_user_list_head_index,
if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
&& (b0->flags & VLIB_BUFFER_IS_TRACED)))
snat_in2out_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
t->is_slow_path = is_slow_path;
t->sw_if_index = sw_if_index0;
t->next_index = next0;
t->session_index = ~0;
t->session_index = s0 - sm->per_thread_data[cpu_index].sessions;
pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP;
/* Second buffer of the pair: same processing as b0. */
ip1 = vlib_buffer_get_current (b1);
udp1 = ip4_next_header (ip1);
tcp1 = (tcp_header_t *) udp1;
icmp1 = (icmp46_header_t *) udp1;
sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_RX];
rx_fib_index1 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
proto1 = ip_proto_to_snat_proto (ip1->protocol);
/* Next configured feature, probably ip4-lookup */
if (PREDICT_FALSE (proto1 == ~0))
if (PREDICT_FALSE (proto1 == SNAT_PROTOCOL_ICMP))
next1 = icmp_in2out_slow_path
(sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node,
next1, now, cpu_index);
if (PREDICT_FALSE (proto1 == ~0 || proto1 == SNAT_PROTOCOL_ICMP))
next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
key1.addr = ip1->src_address;
key1.port = udp1->src_port;
key1.protocol = proto1;
key1.fib_index = rx_fib_index1;
kv1.key = key1.as_u64;
if (PREDICT_FALSE(clib_bihash_search_8_8 (&sm->in2out, &kv1, &value1) != 0))
if (PREDICT_FALSE(snat_not_translate(sm, rt, sw_if_index1, ip1,
proto1, rx_fib_index1)))
next1 = slow_path (sm, b1, ip1, rx_fib_index1, &key1,
&s1, node, next1, cpu_index);
if (PREDICT_FALSE (next1 == SNAT_IN2OUT_NEXT_DROP))
next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
s1 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions,
old_addr1 = ip1->src_address.as_u32;
ip1->src_address = s1->out2in.addr;
new_addr1 = ip1->src_address.as_u32;
vnet_buffer(b1)->sw_if_index[VLIB_TX] = s1->out2in.fib_index;
sum1 = ip1->checksum;
sum1 = ip_csum_update (sum1, old_addr1, new_addr1,
src_address /* changed member */);
ip1->checksum = ip_csum_fold (sum1);
if (PREDICT_TRUE(proto1 == SNAT_PROTOCOL_TCP))
old_port1 = tcp1->ports.src;
tcp1->ports.src = s1->out2in.port;
new_port1 = tcp1->ports.src;
sum1 = tcp1->checksum;
sum1 = ip_csum_update (sum1, old_addr1, new_addr1,
dst_address /* changed member */);
sum1 = ip_csum_update (sum1, old_port1, new_port1,
ip4_header_t /* cheat */,
length /* changed member */);
tcp1->checksum = ip_csum_fold(sum1);
old_port1 = udp1->src_port;
udp1->src_port = s1->out2in.port;
snat_hairpinning (sm, b1, ip1, udp1, tcp1, proto1);
/* Accounting */
s1->last_heard = now;
s1->total_bytes += vlib_buffer_length_in_chain (vm, b1);
/* Per-user LRU list maintenance for dynamic translation */
if (!snat_is_session_static (s1))
clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool,
clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool,
s1->per_user_list_head_index,
if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
&& (b1->flags & VLIB_BUFFER_IS_TRACED)))
snat_in2out_trace_t *t =
vlib_add_trace (vm, node, b1, sizeof (*t));
t->sw_if_index = sw_if_index1;
t->next_index = next1;
t->session_index = ~0;
t->session_index = s1 - sm->per_thread_data[cpu_index].sessions;
pkts_processed += next1 != SNAT_IN2OUT_NEXT_DROP;
/* verify speculative enqueues, maybe switch current next frame */
vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
to_next, n_left_to_next,
bi0, bi1, next0, next1);
/* Single-buffer loop for the remainder. */
while (n_left_from > 0 && n_left_to_next > 0)
u32 new_addr0, old_addr0;
u16 old_port0, new_port0;
icmp46_header_t * icmp0;
snat_session_key_t key0;
snat_session_t * s0 = 0;
clib_bihash_kv_8_8_t kv0, value0;
/* speculatively enqueue b0 to the current next frame */
b0 = vlib_get_buffer (vm, bi0);
next0 = SNAT_IN2OUT_NEXT_LOOKUP;
ip0 = vlib_buffer_get_current (b0);
udp0 = ip4_next_header (ip0);
tcp0 = (tcp_header_t *) udp0;
icmp0 = (icmp46_header_t *) udp0;
sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
proto0 = ip_proto_to_snat_proto (ip0->protocol);
/* Next configured feature, probably ip4-lookup */
if (PREDICT_FALSE (proto0 == ~0))
if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
next0 = icmp_in2out_slow_path
(sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node,
next0, now, cpu_index);
if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP))
next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
key0.addr = ip0->src_address;
key0.port = udp0->src_port;
key0.protocol = proto0;
key0.fib_index = rx_fib_index0;
kv0.key = key0.as_u64;
if (clib_bihash_search_8_8 (&sm->in2out, &kv0, &value0))
if (PREDICT_FALSE(snat_not_translate(sm, rt, sw_if_index0, ip0,
proto0, rx_fib_index0)))
next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0,
&s0, node, next0, cpu_index);
if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions,
old_addr0 = ip0->src_address.as_u32;
ip0->src_address = s0->out2in.addr;
new_addr0 = ip0->src_address.as_u32;
vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
sum0 = ip0->checksum;
sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
src_address /* changed member */);
ip0->checksum = ip_csum_fold (sum0);
if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP))
old_port0 = tcp0->ports.src;
tcp0->ports.src = s0->out2in.port;
new_port0 = tcp0->ports.src;
sum0 = tcp0->checksum;
sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
dst_address /* changed member */);
sum0 = ip_csum_update (sum0, old_port0, new_port0,
ip4_header_t /* cheat */,
length /* changed member */);
tcp0->checksum = ip_csum_fold(sum0);
old_port0 = udp0->src_port;
udp0->src_port = s0->out2in.port;
snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0);
/* Accounting */
s0->last_heard = now;
s0->total_bytes += vlib_buffer_length_in_chain (vm, b0);
/* Per-user LRU list maintenance for dynamic translation */
if (!snat_is_session_static (s0))
clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool,
s0->per_user_index);
clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool,
s0->per_user_list_head_index,
s0->per_user_index);
if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
&& (b0->flags & VLIB_BUFFER_IS_TRACED)))
snat_in2out_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
t->is_slow_path = is_slow_path;
t->sw_if_index = sw_if_index0;
t->next_index = next0;
t->session_index = ~0;
t->session_index = s0 - sm->per_thread_data[cpu_index].sessions;
pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP;
/* verify speculative enqueue, maybe switch current next frame */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* Bump the good-packets counter on the owning node. */
vlib_node_increment_counter (vm, stats_node_index,
SNAT_IN2OUT_ERROR_IN2OUT_PACKETS,
return frame->n_vectors;
/* Fast path node entry point: shared worker with is_slow_path = 0. */
snat_in2out_fast_path_fn (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
return snat_in2out_node_fn_inline (vm, node, frame, 0 /* is_slow_path */);
/* Registration of the snat-in2out fast path graph node. */
VLIB_REGISTER_NODE (snat_in2out_node) = {
.function = snat_in2out_fast_path_fn,
.name = "snat-in2out",
.vector_size = sizeof (u32),
.format_trace = format_snat_in2out_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = ARRAY_LEN(snat_in2out_error_strings),
.error_strings = snat_in2out_error_strings,
.runtime_data_bytes = sizeof (snat_runtime_t),
.n_next_nodes = SNAT_IN2OUT_N_NEXT,
/* edit / add dispositions here */
[SNAT_IN2OUT_NEXT_DROP] = "error-drop",
[SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
[SNAT_IN2OUT_NEXT_SLOW_PATH] = "snat-in2out-slowpath",
1147 VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_node, snat_in2out_fast_path_fn);
/* Slow path node entry point: shared worker with is_slow_path = 1. */
snat_in2out_slow_path_fn (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
return snat_in2out_node_fn_inline (vm, node, frame, 1 /* is_slow_path */);
/* Registration of the snat-in2out-slowpath graph node. */
VLIB_REGISTER_NODE (snat_in2out_slowpath_node) = {
.function = snat_in2out_slow_path_fn,
.name = "snat-in2out-slowpath",
.vector_size = sizeof (u32),
.format_trace = format_snat_in2out_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = ARRAY_LEN(snat_in2out_error_strings),
.error_strings = snat_in2out_error_strings,
.runtime_data_bytes = sizeof (snat_runtime_t),
.n_next_nodes = SNAT_IN2OUT_N_NEXT,
/* edit / add dispositions here */
[SNAT_IN2OUT_NEXT_DROP] = "error-drop",
[SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
[SNAT_IN2OUT_NEXT_SLOW_PATH] = "snat-in2out-slowpath",
1179 VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_slowpath_node, snat_in2out_slow_path_fn);
/*
 * Distribute in2out packets across worker threads.
 *
 * Each source (addr, fib) "user" is pinned to a worker: a miss in
 * worker_by_in assigns one round-robin and records it; a hit reuses the
 * recorded worker.  Packets owned by another worker are handed off via
 * per-worker frame queues; packets for this worker go straight to the
 * snat-in2out node.
 */
snat_in2out_worker_handoff_fn (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
snat_main_t *sm = &snat_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
u32 n_left_from, *from, *to_next = 0;
/* Per-thread handoff state, lazily initialized below. */
static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index;
static __thread vlib_frame_queue_t **congested_handoff_queue_by_worker_index
vlib_frame_queue_elt_t *hf = 0;
vlib_frame_t *f = 0;
u32 n_left_to_next_worker = 0, *to_next_worker = 0;
u32 next_worker_index = 0;
u32 current_worker_index = ~0;
u32 cpu_index = os_get_cpu_number ();
ASSERT (vec_len (sm->workers));
if (PREDICT_FALSE (handoff_queue_elt_by_worker_index == 0))
vec_validate (handoff_queue_elt_by_worker_index, tm->n_vlib_mains - 1);
vec_validate_init_empty (congested_handoff_queue_by_worker_index,
sm->first_worker_index + sm->num_workers - 1,
(vlib_frame_queue_t *) (~0));
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
while (n_left_from > 0)
snat_user_key_t key0;
clib_bihash_kv_8_8_t kv0, value0;
b0 = vlib_get_buffer (vm, bi0);
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index(sw_if_index0);
ip0 = vlib_buffer_get_current (b0);
key0.addr = ip0->src_address;
key0.fib_index = rx_fib_index0;
kv0.key = key0.as_u64;
/* Ever heard of the "user" before? */
if (clib_bihash_search_8_8 (&sm->worker_by_in, &kv0, &value0))
/* No, assign next available worker (RR) */
next_worker_index = sm->first_worker_index;
if (vec_len (sm->workers))
next_worker_index +=
sm->workers[sm->next_worker++ % _vec_len (sm->workers)];
/* add non-translated packets worker lookup */
kv0.value = next_worker_index;
clib_bihash_add_del_8_8 (&sm->worker_by_in, &kv0, 1);
next_worker_index = value0.value;
if (PREDICT_FALSE (next_worker_index != cpu_index))
/* Switching target workers: flush the previous queue element. */
if (next_worker_index != current_worker_index)
hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
hf = vlib_get_worker_handoff_queue_elt (sm->fq_in2out_index,
handoff_queue_elt_by_worker_index);
n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
to_next_worker = &hf->buffer_index[hf->n_vectors];
current_worker_index = next_worker_index;
/* enqueue to correct worker thread */
to_next_worker[0] = bi0;
n_left_to_next_worker--;
if (n_left_to_next_worker == 0)
hf->n_vectors = VLIB_FRAME_SIZE;
vlib_put_frame_queue_elt (hf);
current_worker_index = ~0;
handoff_queue_elt_by_worker_index[next_worker_index] = 0;
/* if this is 1st frame */
f = vlib_get_frame_to_node (vm, snat_in2out_node.index);
to_next = vlib_frame_vector_args (f);
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
&& (b0->flags & VLIB_BUFFER_IS_TRACED)))
snat_in2out_worker_handoff_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
t->next_worker_index = next_worker_index;
t->do_handoff = do_handoff;
vlib_put_frame_to_node (vm, snat_in2out_node.index, f);
hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
/* Ship frames to the worker nodes */
for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
if (handoff_queue_elt_by_worker_index[i])
hf = handoff_queue_elt_by_worker_index[i];
* It works better to let the handoff node
* rate-adapt, always ship the handoff queue element.
if (1 || hf->n_vectors == hf->last_n_vectors)
vlib_put_frame_queue_elt (hf);
handoff_queue_elt_by_worker_index[i] = 0;
hf->last_n_vectors = hf->n_vectors;
congested_handoff_queue_by_worker_index[i] =
(vlib_frame_queue_t *) (~0);
current_worker_index = ~0;
return frame->n_vectors;
/* Graph-node registration for the in2out worker handoff node: classifies
 * packets by inside user and hands them off to the owning worker thread. */
1348 VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
1349 .function = snat_in2out_worker_handoff_fn,
1350 .name = "snat-in2out-worker-handoff",
1351 .vector_size = sizeof (u32),
1352 .format_trace = format_snat_in2out_worker_handoff_trace,
1353 .type = VLIB_NODE_TYPE_INTERNAL,
/* Generate per-architecture (SSE/AVX...) variants of the node function. */
1362 VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_worker_handoff_node, snat_in2out_worker_handoff_fn);
/* Translate the source address (and ICMP echo identifier) of an inside->
 * outside ICMP packet using a configured static mapping only -- no dynamic
 * session is created on this path.
 *
 * Returns the next-node index for b0; SNAT_IN2OUT_NEXT_DROP when no static
 * mapping matches and the packet is not eligible for translation. */
1364 static inline u32 icmp_in2out_static_map (snat_main_t *sm,
1367 icmp46_header_t * icmp0,
1369 vlib_node_runtime_t * node,
1373 snat_session_key_t key0, sm0;
1374 icmp_echo_header_t *echo0;
1375 u32 new_addr0, old_addr0;
1376 u16 old_id0, new_id0;
1378 snat_runtime_t * rt = (snat_runtime_t *)node->runtime_data;
/* ICMP echo header immediately follows the icmp46 header. */
1380 echo0 = (icmp_echo_header_t *)(icmp0+1);
/* Static-mapping lookup key: inside src address + echo identifier + FIB. */
1382 key0.addr = ip0->src_address;
1383 key0.port = echo0->identifier;
1384 key0.fib_index = rx_fib_index0;
1386 if (snat_static_mapping_match(sm, key0, &sm0, 0))
/* No mapping: drop unless the packet is explicitly exempt from
 * translation (snat_not_translate). */
1388 if (PREDICT_FALSE(snat_not_translate(sm, rt, sw_if_index0, ip0,
1389 IP_PROTOCOL_ICMP, rx_fib_index0)))
1392 b0->error = node->errors[SNAT_IN2OUT_ERROR_NO_TRANSLATION];
1393 return SNAT_IN2OUT_NEXT_DROP;
/* Rewrite source address with the mapped outside address and steer the
 * packet into the mapping's FIB for the onward lookup. */
1396 new_addr0 = sm0.addr.as_u32;
1398 vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
1399 old_addr0 = ip0->src_address.as_u32;
1400 ip0->src_address.as_u32 = new_addr0;
/* Incremental IP header checksum update (RFC 1624 style). */
1402 sum0 = ip0->checksum;
1403 sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
1405 src_address /* changed member */);
1406 ip0->checksum = ip_csum_fold (sum0);
/* NOTE(review): new_id0 is presumably set from sm0.port in a line not
 * visible in this excerpt -- confirm before relying on this comparison. */
1408 if (PREDICT_FALSE(new_id0 != echo0->identifier))
1410 old_id0 = echo0->identifier;
1411 echo0->identifier = new_id0;
/* Incrementally patch the ICMP checksum for the identifier change. */
1413 sum0 = icmp0->checksum;
1414 sum0 = ip_csum_update (sum0, old_id0, new_id0, icmp_echo_header_t,
1416 icmp0->checksum = ip_csum_fold (sum0);
/* snat-in2out-fast node function: static-mapping-only fast path.
 *
 * For each packet, rewrites the inside source address (and, when the
 * mapping specifies one, the source port / ICMP echo identifier) using
 * configured static mappings; no dynamic sessions are created.  Packets
 * with an unknown protocol or no matching mapping are dropped with
 * SNAT_IN2OUT_ERROR_NO_TRANSLATION.  Returns the number of packets
 * processed in the frame.
 *
 * FIX(review): the port-rewrite guard compared new_port0 against
 * udp0->dst_port; since it is the SOURCE port being rewritten, the
 * comparison must be against udp0->src_port (as in upstream VPP).
 * Otherwise the rewrite branch is taken/skipped based on the wrong
 * field, leaving the source port untranslated whenever it happens to
 * equal the destination port mismatch condition. */
1423 snat_in2out_fast_static_map_fn (vlib_main_t * vm,
1424 vlib_node_runtime_t * node,
1425 vlib_frame_t * frame)
1427 u32 n_left_from, * from, * to_next;
1428 snat_in2out_next_t next_index;
1429 u32 pkts_processed = 0;
1430 snat_main_t * sm = &snat_main;
1431 snat_runtime_t * rt = (snat_runtime_t *)node->runtime_data;
1432 u32 stats_node_index;
1434 stats_node_index = snat_in2out_fast_node.index;
1436 from = vlib_frame_vector_args (frame);
1437 n_left_from = frame->n_vectors;
1438 next_index = node->cached_next_index;
1440 while (n_left_from > 0)
1444 vlib_get_next_frame (vm, node, next_index,
1445 to_next, n_left_to_next);
1447 while (n_left_from > 0 && n_left_to_next > 0)
1455 u32 new_addr0, old_addr0;
1456 u16 old_port0, new_port0;
1457 udp_header_t * udp0;
1458 tcp_header_t * tcp0;
1459 icmp46_header_t * icmp0;
1460 snat_session_key_t key0, sm0;
1464 /* speculatively enqueue b0 to the current next frame */
1470 n_left_to_next -= 1;
1472 b0 = vlib_get_buffer (vm, bi0);
1473 next0 = SNAT_IN2OUT_NEXT_LOOKUP;
/* L4 header aliases: same offset, interpreted per protocol below. */
1475 ip0 = vlib_buffer_get_current (b0);
1476 udp0 = ip4_next_header (ip0);
1477 tcp0 = (tcp_header_t *) udp0;
1478 icmp0 = (icmp46_header_t *) udp0;
1480 sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
1481 rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index(sw_if_index0);
1483 proto0 = ip_proto_to_snat_proto (ip0->protocol);
/* Unknown protocol: cannot be translated on this path. */
1485 if (PREDICT_FALSE (proto0 == ~0))
/* ICMP takes its own static-map translation helper. */
1488 if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
1490 if (PREDICT_FALSE(snat_not_translate(sm, rt, sw_if_index0, ip0,
1491 proto0, rx_fib_index0)))
1494 next0 = icmp_in2out_static_map
1495 (sm, b0, ip0, icmp0, sw_if_index0, node, next0, rx_fib_index0);
/* TCP/UDP: static-mapping lookup on inside src addr/port/FIB. */
1499 key0.addr = ip0->src_address;
1500 key0.port = udp0->src_port;
1501 key0.fib_index = rx_fib_index0;
1503 if (snat_static_mapping_match(sm, key0, &sm0, 0))
1505 b0->error = node->errors[SNAT_IN2OUT_ERROR_NO_TRANSLATION];
1506 next0 = SNAT_IN2OUT_NEXT_DROP;
/* Rewrite source address; steer into the mapping's FIB. */
1510 new_addr0 = sm0.addr.as_u32;
1511 new_port0 = sm0.port;
1512 vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
1513 old_addr0 = ip0->src_address.as_u32;
1514 ip0->src_address.as_u32 = new_addr0;
/* Incremental IP header checksum update (RFC 1624 style). */
1516 sum0 = ip0->checksum;
1517 sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
1519 src_address /* changed member */);
1520 ip0->checksum = ip_csum_fold (sum0);
/* FIXED: compare against the SOURCE port (was udp0->dst_port). */
1522 if (PREDICT_FALSE(new_port0 != udp0->src_port))
1524 if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP))
1526 old_port0 = tcp0->ports.src;
1527 tcp0->ports.src = new_port0;
/* Patch TCP checksum for both the pseudo-header address change
 * and the port change; member offsets are a documented cheat --
 * only parity of the offset matters to ip_csum_update. */
1529 sum0 = tcp0->checksum;
1530 sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
1532 dst_address /* changed member */);
1533 sum0 = ip_csum_update (sum0, old_port0, new_port0,
1534 ip4_header_t /* cheat */,
1535 length /* changed member */);
1536 tcp0->checksum = ip_csum_fold(sum0);
/* UDP: rewrite the port (checksum handling not shown here). */
1540 old_port0 = udp0->src_port;
1541 udp0->src_port = new_port0;
/* Port unchanged: TCP checksum still needs the address update. */
1547 if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP))
1549 sum0 = tcp0->checksum;
1550 sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
1552 dst_address /* changed member */);
1553 tcp0->checksum = ip_csum_fold(sum0);
/* Handle inside->inside (hairpin) traffic through the mapping. */
1558 snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0);
1561 if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
1562 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1564 snat_in2out_trace_t *t =
1565 vlib_add_trace (vm, node, b0, sizeof (*t));
1566 t->sw_if_index = sw_if_index0;
1567 t->next_index = next0;
/* Count only packets that were not dropped. */
1570 pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP;
1572 /* verify speculative enqueue, maybe switch current next frame */
1573 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1574 to_next, n_left_to_next,
1578 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1581 vlib_node_increment_counter (vm, stats_node_index,
1582 SNAT_IN2OUT_ERROR_IN2OUT_PACKETS,
1584 return frame->n_vectors;
/* Graph-node registration for the static-mapping-only fast path. */
1588 VLIB_REGISTER_NODE (snat_in2out_fast_node) = {
1589 .function = snat_in2out_fast_static_map_fn,
1590 .name = "snat-in2out-fast",
1591 .vector_size = sizeof (u32),
1592 .format_trace = format_snat_in2out_fast_trace,
1593 .type = VLIB_NODE_TYPE_INTERNAL,
1595 .n_errors = ARRAY_LEN(snat_in2out_error_strings),
1596 .error_strings = snat_in2out_error_strings,
1598 .runtime_data_bytes = sizeof (snat_runtime_t),
1600 .n_next_nodes = SNAT_IN2OUT_N_NEXT,
1602 /* edit / add dispositions here */
1604 [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
1605 [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
1606 [SNAT_IN2OUT_NEXT_SLOW_PATH] = "snat-in2out-slowpath",
/* Generate per-architecture (SSE/AVX...) variants of the node function. */
1610 VLIB_NODE_FUNCTION_MULTIARCH (snat_in2out_fast_node, snat_in2out_fast_static_map_fn);