nat: twice-nat static mapping pool address

diff --git a/src/plugins/nat/in2out.c b/src/plugins/nat/in2out.c
index a448867..8e2c5fe 100644
--- a/src/plugins/nat/in2out.c
+++ b/src/plugins/nat/in2out.c
@@ -35,6 +35,7 @@
 #include <vppinfra/hash.h>
 #include <vppinfra/error.h>
 #include <vppinfra/elog.h>
+#include <nat/lib/nat_inlines.h>
 
 typedef struct
 {
@@ -42,6 +43,7 @@ typedef struct
   u32 next_index;
   u32 session_index;
   u32 is_slow_path;
+  u32 is_hairpinning;
 } snat_in2out_trace_t;
 
 /* packet trace format function */
@@ -57,6 +59,10 @@ format_snat_in2out_trace (u8 * s, va_list * args)
 
   s = format (s, "%s: sw_if_index %d, next index %d, session %d", tag,
              t->sw_if_index, t->next_index, t->session_index);
+  if (t->is_hairpinning)
+    {
+      s = format (s, ", with-hairpinning");
+    }
 
   return s;
 }
@@ -76,20 +82,12 @@ format_snat_in2out_fast_trace (u8 * s, va_list * args)
 
 #define foreach_snat_in2out_error                       \
 _(UNSUPPORTED_PROTOCOL, "unsupported protocol")         \
-_(IN2OUT_PACKETS, "good in2out packets processed")      \
 _(OUT_OF_PORTS, "out of ports")                         \
 _(BAD_OUTSIDE_FIB, "outside VRF ID not found")          \
 _(BAD_ICMP_TYPE, "unsupported ICMP type")               \
 _(NO_TRANSLATION, "no translation")                     \
 _(MAX_SESSIONS_EXCEEDED, "maximum sessions exceeded")   \
-_(DROP_FRAGMENT, "drop fragment")                       \
-_(TCP_PACKETS, "TCP packets")                           \
-_(UDP_PACKETS, "UDP packets")                           \
-_(ICMP_PACKETS, "ICMP packets")                         \
-_(OTHER_PACKETS, "other protocol packets")              \
-_(FRAGMENTS, "fragments")                               \
-_(CACHED_FRAGMENTS, "cached fragments")                 \
-_(PROCESSED_FRAGMENTS, "processed fragments")
+_(CANNOT_CREATE_USER, "cannot create NAT user")
 
 typedef enum
 {
@@ -120,14 +118,10 @@ snat_not_translate (snat_main_t * sm, vlib_node_runtime_t * node,
                    u32 rx_fib_index0, u32 thread_index)
 {
   udp_header_t *udp0 = ip4_next_header (ip0);
-  snat_session_key_t key0, sm0;
   clib_bihash_kv_8_8_t kv0, value0;
 
-  key0.addr = ip0->dst_address;
-  key0.port = udp0->dst_port;
-  key0.protocol = proto0;
-  key0.fib_index = sm->outside_fib_index;
-  kv0.key = key0.as_u64;
+  init_nat_k (&kv0, ip0->dst_address, udp0->dst_port, sm->outside_fib_index,
+             proto0);
 
   /* NAT packet aimed at external address if */
   /* has active sessions */
@@ -135,7 +129,13 @@ snat_not_translate (snat_main_t * sm, vlib_node_runtime_t * node,
                              &value0))
     {
       /* or is static mappings */
-      if (!snat_static_mapping_match (sm, key0, &sm0, 1, 0, 0, 0, 0, 0))
+      ip4_address_t placeholder_addr;
+      u16 placeholder_port;
+      u32 placeholder_fib_index;
+      if (!snat_static_mapping_match
+         (sm, ip0->dst_address, udp0->dst_port, sm->outside_fib_index,
+          proto0, &placeholder_addr, &placeholder_port,
+          &placeholder_fib_index, 1, 0, 0, 0, 0, 0, 0))
        return 0;
     }
   else
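
/*
 * Illustrative sketch, not part of the commit: the hunks above and below drop
 * the open-coded snat_session_key_t packing (addr/port/protocol/fib_index
 * assigned field by field, then kv0.key = key0.as_u64) in favour of the
 * init_nat_k() helper pulled in from <nat/lib/nat_inlines.h>.  A minimal
 * standalone sketch of that kind of packing, assuming a layout along the
 * lines of the old snat_session_key_t (IPv4 address, L4 port, a few protocol
 * bits and the FIB index folded into the 64-bit bihash key).
 * demo_nat_key_t / demo_init_nat_k are hypothetical stand-ins, not the
 * plugin's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

typedef union
{
  struct
  {
    uint32_t addr;		/* IPv4 address (network byte order) */
    uint16_t port;		/* L4 port or ICMP identifier */
    uint16_t protocol:3;	/* NAT protocol enum */
    uint16_t fib_index:13;	/* VRF / FIB table index */
  };
  uint64_t as_u64;		/* what lands in clib_bihash_kv_8_8_t.key */
} demo_nat_key_t;

static inline uint64_t
demo_init_nat_k (uint32_t addr, uint16_t port, uint32_t fib_index,
		 uint8_t proto)
{
  demo_nat_key_t k;
  k.as_u64 = 0;
  k.addr = addr;
  k.port = port;
  k.protocol = proto & 0x7;
  k.fib_index = fib_index & 0x1fff;
  return k.as_u64;
}

int
main (void)
{
  /* 192.0.2.1:1024, protocol 1, FIB 0 -> one 8-byte hash key */
  printf ("key = 0x%016llx\n",
	  (unsigned long long) demo_init_nat_k (0xc0000201, 1024, 0, 1));
  return 0;
}
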
@@ -153,26 +153,20 @@ nat_not_translate_output_feature (snat_main_t * sm, ip4_header_t * ip0,
                                  u32 proto0, u16 src_port, u16 dst_port,
                                  u32 thread_index, u32 sw_if_index)
 {
-  snat_session_key_t key0;
   clib_bihash_kv_8_8_t kv0, value0;
   snat_interface_t *i;
 
   /* src NAT check */
-  key0.addr = ip0->src_address;
-  key0.port = src_port;
-  key0.protocol = proto0;
-  key0.fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
-  kv0.key = key0.as_u64;
+  init_nat_k (&kv0, ip0->src_address, src_port,
+             ip4_fib_table_get_index_for_sw_if_index (sw_if_index), proto0);
 
   if (!clib_bihash_search_8_8
       (&sm->per_thread_data[thread_index].out2in, &kv0, &value0))
     return 1;
 
   /* dst NAT check */
-  key0.addr = ip0->dst_address;
-  key0.port = dst_port;
-  key0.protocol = proto0;
-  kv0.key = key0.as_u64;
+  init_nat_k (&kv0, ip0->dst_address, dst_port,
+             ip4_fib_table_get_index_for_sw_if_index (sw_if_index), proto0);
   if (!clib_bihash_search_8_8
       (&sm->per_thread_data[thread_index].in2out, &kv0, &value0))
     {
@@ -206,30 +200,30 @@ nat44_i2o_is_idle_session_cb (clib_bihash_kv_8_8_t * kv, void *arg)
   sess_timeout_time = s->last_heard + (f64) nat44_session_get_timeout (sm, s);
   if (ctx->now >= sess_timeout_time)
     {
-      s_kv.key = s->out2in.as_u64;
+      init_nat_o2i_k (&s_kv, s);
       if (clib_bihash_add_del_8_8 (&tsm->out2in, &s_kv, 0))
        nat_elog_warn ("out2in key del failed");
 
       snat_ipfix_logging_nat44_ses_delete (ctx->thread_index,
                                           s->in2out.addr.as_u32,
                                           s->out2in.addr.as_u32,
-                                          s->in2out.protocol,
+                                          s->nat_proto,
                                           s->in2out.port,
                                           s->out2in.port,
                                           s->in2out.fib_index);
 
       nat_syslog_nat44_apmdel (s->user_index, s->in2out.fib_index,
                               &s->in2out.addr, s->in2out.port,
-                              &s->out2in.addr, s->out2in.port,
-                              s->in2out.protocol);
+                              &s->out2in.addr, s->out2in.port, s->nat_proto);
 
       nat_ha_sdel (&s->out2in.addr, s->out2in.port, &s->ext_host_addr,
-                  s->ext_host_port, s->out2in.protocol, s->out2in.fib_index,
+                  s->ext_host_port, s->nat_proto, s->out2in.fib_index,
                   ctx->thread_index);
 
       if (!snat_is_session_static (s))
        snat_free_outside_address_and_port (sm->addresses, ctx->thread_index,
-                                           &s->out2in);
+                                           &s->out2in.addr,
+                                           s->out2in.port, s->nat_proto);
 
       nat44_delete_session (sm, s, ctx->thread_index);
       return 1;
@@ -242,15 +236,16 @@ nat44_i2o_is_idle_session_cb (clib_bihash_kv_8_8_t * kv, void *arg)
 static u32
 slow_path (snat_main_t * sm, vlib_buffer_t * b0,
           ip4_header_t * ip0,
+          ip4_address_t i2o_addr,
+          u16 i2o_port,
           u32 rx_fib_index0,
-          snat_session_key_t * key0,
+          nat_protocol_t nat_proto,
           snat_session_t ** sessionp,
           vlib_node_runtime_t * node, u32 next0, u32 thread_index, f64 now)
 {
   snat_user_t *u;
   snat_session_t *s = 0;
   clib_bihash_kv_8_8_t kv0;
-  snat_session_key_t key1;
   u8 is_sm = 0;
   nat_outside_fib_t *outside_fib;
   fib_node_index_t fei = FIB_NODE_INDEX_INVALID;
@@ -263,24 +258,29 @@ slow_path (snat_main_t * sm, vlib_buffer_t * b0,
                },
   };
   nat44_is_idle_session_ctx_t ctx0;
+  ip4_address_t sm_addr;
+  u16 sm_port;
+  u32 sm_fib_index;
 
   if (PREDICT_FALSE (nat44_maximum_sessions_exceeded (sm, thread_index)))
     {
       b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED];
-      nat_ipfix_logging_max_sessions (thread_index, sm->max_translations);
+      nat_ipfix_logging_max_sessions (thread_index,
+                                     sm->max_translations_per_thread);
       nat_elog_notice ("maximum sessions exceeded");
       return SNAT_IN2OUT_NEXT_DROP;
     }
 
-  key1.protocol = key0->protocol;
-
   /* First try to match static mapping by local address and port */
   if (snat_static_mapping_match
-      (sm, *key0, &key1, 0, 0, 0, 0, 0, &identity_nat))
+      (sm, i2o_addr, i2o_port, rx_fib_index0, nat_proto, &sm_addr,
+       &sm_port, &sm_fib_index, 0, 0, 0, 0, 0, &identity_nat, 0))
     {
       /* Try to create dynamic translation */
       if (snat_alloc_outside_address_and_port (sm->addresses, rx_fib_index0,
-                                              thread_index, &key1,
+                                              thread_index,
+                                              nat_proto,
+                                              &sm_addr, &sm_port,
                                               sm->port_per_thread,
                                               sm->per_thread_data
                                               [thread_index].snat_thread_index))
@@ -304,7 +304,7 @@ slow_path (snat_main_t * sm, vlib_buffer_t * b0,
                              thread_index);
   if (!u)
     {
-      nat_elog_warn ("create NAT user failed");
+      b0->error = node->errors[SNAT_IN2OUT_ERROR_CANNOT_CREATE_USER];
       return SNAT_IN2OUT_NEXT_DROP;
     }
 
@@ -319,9 +319,12 @@ slow_path (snat_main_t * sm, vlib_buffer_t * b0,
   if (is_sm)
     s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING;
   user_session_increment (sm, u, is_sm);
-  s->in2out = *key0;
-  s->out2in = key1;
-  s->out2in.protocol = key0->protocol;
+  s->in2out.addr = i2o_addr;
+  s->in2out.port = i2o_port;
+  s->in2out.fib_index = rx_fib_index0;
+  s->nat_proto = nat_proto;
+  s->out2in.addr = sm_addr;
+  s->out2in.port = sm_port;
   s->out2in.fib_index = sm->outside_fib_index;
   switch (vec_len (sm->outside_fibs))
     {
@@ -355,16 +358,13 @@ slow_path (snat_main_t * sm, vlib_buffer_t * b0,
   /* Add to translation hashes */
   ctx0.now = now;
   ctx0.thread_index = thread_index;
-  kv0.key = s->in2out.as_u64;
-  kv0.value = s - sm->per_thread_data[thread_index].sessions;
+  init_nat_i2o_kv (&kv0, s, s - sm->per_thread_data[thread_index].sessions);
   if (clib_bihash_add_or_overwrite_stale_8_8
       (&sm->per_thread_data[thread_index].in2out, &kv0,
        nat44_i2o_is_idle_session_cb, &ctx0))
     nat_elog_notice ("in2out key add failed");
 
-  kv0.key = s->out2in.as_u64;
-  kv0.value = s - sm->per_thread_data[thread_index].sessions;
-
+  init_nat_o2i_kv (&kv0, s, s - sm->per_thread_data[thread_index].sessions);
   if (clib_bihash_add_or_overwrite_stale_8_8
       (&sm->per_thread_data[thread_index].out2in, &kv0,
        nat44_o2i_is_idle_session_cb, &ctx0))
@@ -374,30 +374,28 @@ slow_path (snat_main_t * sm, vlib_buffer_t * b0,
   snat_ipfix_logging_nat44_ses_create (thread_index,
                                       s->in2out.addr.as_u32,
                                       s->out2in.addr.as_u32,
-                                      s->in2out.protocol,
+                                      s->nat_proto,
                                       s->in2out.port,
                                       s->out2in.port, s->in2out.fib_index);
 
   nat_syslog_nat44_apmadd (s->user_index, s->in2out.fib_index,
                           &s->in2out.addr, s->in2out.port, &s->out2in.addr,
-                          s->out2in.port, s->in2out.protocol);
+                          s->out2in.port, s->nat_proto);
 
   nat_ha_sadd (&s->in2out.addr, s->in2out.port, &s->out2in.addr,
               s->out2in.port, &s->ext_host_addr, s->ext_host_port,
               &s->ext_host_nat_addr, s->ext_host_nat_port,
-              s->in2out.protocol, s->in2out.fib_index, s->flags,
-              thread_index, 0);
+              s->nat_proto, s->in2out.fib_index, s->flags, thread_index, 0);
 
   return next0;
 }
 
 #ifndef CLIB_MARCH_VARIANT
-static_always_inline
-  snat_in2out_error_t icmp_get_key (vlib_buffer_t * b, ip4_header_t * ip0,
-                                   snat_session_key_t * p_key0)
+static_always_inline snat_in2out_error_t
+icmp_get_key (vlib_buffer_t * b, ip4_header_t * ip0,
+             ip4_address_t * addr, u16 * port, nat_protocol_t * nat_proto)
 {
   icmp46_header_t *icmp0;
-  snat_session_key_t key0;
   icmp_echo_header_t *echo0, *inner_echo0 = 0;
   ip4_header_t *inner_ip0 = 0;
   void *l4_header = 0;
@@ -409,32 +407,31 @@ static_always_inline
   if (!icmp_type_is_error_message
       (vnet_buffer (b)->ip.reass.icmp_type_or_tcp_flags))
     {
-      key0.protocol = NAT_PROTOCOL_ICMP;
-      key0.addr = ip0->src_address;
-      key0.port = vnet_buffer (b)->ip.reass.l4_src_port;       // TODO fixme should this be dst port?
+      *nat_proto = NAT_PROTOCOL_ICMP;
+      *addr = ip0->src_address;
+      *port = vnet_buffer (b)->ip.reass.l4_src_port;
     }
   else
     {
       inner_ip0 = (ip4_header_t *) (echo0 + 1);
       l4_header = ip4_next_header (inner_ip0);
-      key0.protocol = ip_proto_to_nat_proto (inner_ip0->protocol);
-      key0.addr = inner_ip0->dst_address;
-      switch (key0.protocol)
+      *nat_proto = ip_proto_to_nat_proto (inner_ip0->protocol);
+      *addr = inner_ip0->dst_address;
+      switch (*nat_proto)
        {
        case NAT_PROTOCOL_ICMP:
          inner_icmp0 = (icmp46_header_t *) l4_header;
          inner_echo0 = (icmp_echo_header_t *) (inner_icmp0 + 1);
-         key0.port = inner_echo0->identifier;
+         *port = inner_echo0->identifier;
          break;
        case NAT_PROTOCOL_UDP:
        case NAT_PROTOCOL_TCP:
-         key0.port = ((tcp_udp_header_t *) l4_header)->dst_port;
+         *port = ((tcp_udp_header_t *) l4_header)->dst_port;
          break;
        default:
          return SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL;
        }
     }
-  *p_key0 = key0;
   return -1;                   /* success */
 }
 
@@ -456,47 +453,40 @@ static_always_inline
 u32
 icmp_match_in2out_slow (snat_main_t * sm, vlib_node_runtime_t * node,
                        u32 thread_index, vlib_buffer_t * b0,
-                       ip4_header_t * ip0, u8 * p_proto,
-                       snat_session_key_t * p_value,
-                       u8 * p_dont_translate, void *d, void *e)
+                       ip4_header_t * ip0, ip4_address_t * addr, u16 * port,
+                       u32 * fib_index, nat_protocol_t * proto, void *d,
+                       void *e, u8 * dont_translate)
 {
   snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
   u32 sw_if_index0;
-  u32 rx_fib_index0;
-  snat_session_key_t key0;
   snat_session_t *s0 = 0;
-  u8 dont_translate = 0;
   clib_bihash_kv_8_8_t kv0, value0;
   u32 next0 = ~0;
   int err;
   vlib_main_t *vm = vlib_get_main ();
+  *dont_translate = 0;
 
   sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
+  *fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
 
-  err = icmp_get_key (b0, ip0, &key0);
+  err = icmp_get_key (b0, ip0, addr, port, proto);
   if (err != -1)
     {
       b0->error = node->errors[err];
       next0 = SNAT_IN2OUT_NEXT_DROP;
       goto out;
     }
-  key0.fib_index = rx_fib_index0;
-
-  kv0.key = key0.as_u64;
 
+  init_nat_k (&kv0, *addr, *port, *fib_index, *proto);
   if (clib_bihash_search_8_8 (&tsm->in2out, &kv0, &value0))
     {
       if (vnet_buffer (b0)->sw_if_index[VLIB_TX] != ~0)
        {
-         if (PREDICT_FALSE (nat_not_translate_output_feature (sm, ip0,
-                                                              key0.protocol,
-                                                              key0.port,
-                                                              key0.port,
-                                                              thread_index,
-                                                              sw_if_index0)))
+         if (PREDICT_FALSE
+             (nat_not_translate_output_feature
+              (sm, ip0, *proto, *port, *port, thread_index, sw_if_index0)))
            {
-             dont_translate = 1;
+             *dont_translate = 1;
              goto out;
            }
        }
@@ -504,10 +494,9 @@ icmp_match_in2out_slow (snat_main_t * sm, vlib_node_runtime_t * node,
        {
          if (PREDICT_FALSE (snat_not_translate (sm, node, sw_if_index0,
                                                 ip0, NAT_PROTOCOL_ICMP,
-                                                rx_fib_index0,
-                                                thread_index)))
+                                                *fib_index, thread_index)))
            {
-             dont_translate = 1;
+             *dont_translate = 1;
              goto out;
            }
        }
@@ -521,15 +510,16 @@ icmp_match_in2out_slow (snat_main_t * sm, vlib_node_runtime_t * node,
          goto out;
        }
 
-      next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, &s0, node, next0,
-                        thread_index, vlib_time_now (vm));
+      next0 =
+       slow_path (sm, b0, ip0, *addr, *port, *fib_index, *proto, &s0, node,
+                  next0, thread_index, vlib_time_now (vm));
 
       if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
        goto out;
 
       if (!s0)
        {
-         dont_translate = 1;
+         *dont_translate = 1;
          goto out;
        }
     }
@@ -552,12 +542,14 @@ icmp_match_in2out_slow (snat_main_t * sm, vlib_node_runtime_t * node,
     }
 
 out:
-  *p_proto = key0.protocol;
   if (s0)
-    *p_value = s0->out2in;
-  *p_dont_translate = dont_translate;
+    {
+      *addr = s0->out2in.addr;
+      *port = s0->out2in.port;
+      *fib_index = s0->out2in.fib_index;
+    }
   if (d)
-    *(snat_session_t **) d = s0;
+    *(snat_session_t **) (d) = s0;
   return next0;
 }
 #endif
@@ -580,39 +572,40 @@ out:
 u32
 icmp_match_in2out_fast (snat_main_t * sm, vlib_node_runtime_t * node,
                        u32 thread_index, vlib_buffer_t * b0,
-                       ip4_header_t * ip0, u8 * p_proto,
-                       snat_session_key_t * p_value,
-                       u8 * p_dont_translate, void *d, void *e)
+                       ip4_header_t * ip0, ip4_address_t * addr, u16 * port,
+                       u32 * fib_index, nat_protocol_t * proto, void *d,
+                       void *e, u8 * dont_translate)
 {
   u32 sw_if_index0;
-  u32 rx_fib_index0;
-  snat_session_key_t key0;
-  snat_session_key_t sm0;
-  u8 dont_translate = 0;
   u8 is_addr_only;
   u32 next0 = ~0;
   int err;
+  *dont_translate = 0;
 
   sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-  rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
+  *fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
 
-  err = icmp_get_key (b0, ip0, &key0);
+  err = icmp_get_key (b0, ip0, addr, port, proto);
   if (err != -1)
     {
       b0->error = node->errors[err];
       next0 = SNAT_IN2OUT_NEXT_DROP;
-      goto out2;
+      goto out;
     }
-  key0.fib_index = rx_fib_index0;
+
+  ip4_address_t sm_addr;
+  u16 sm_port;
+  u32 sm_fib_index;
 
   if (snat_static_mapping_match
-      (sm, key0, &sm0, 0, &is_addr_only, 0, 0, 0, 0))
+      (sm, *addr, *port, *fib_index, *proto, &sm_addr, &sm_port,
+       &sm_fib_index, 0, &is_addr_only, 0, 0, 0, 0, 0))
     {
       if (PREDICT_FALSE (snat_not_translate_fast (sm, node, sw_if_index0, ip0,
                                                  IP_PROTOCOL_ICMP,
-                                                 rx_fib_index0)))
+                                                 *fib_index)))
        {
-         dont_translate = 1;
+         *dont_translate = 1;
          goto out;
        }
 
@@ -641,10 +634,6 @@ icmp_match_in2out_fast (snat_main_t * sm, vlib_node_runtime_t * node,
     }
 
 out:
-  *p_value = sm0;
-out2:
-  *p_proto = key0.protocol;
-  *p_dont_translate = dont_translate;
   return next0;
 }
 #endif
@@ -661,8 +650,10 @@ icmp_in2out (snat_main_t * sm,
             u32 next0, u32 thread_index, void *d, void *e)
 {
   vlib_main_t *vm = vlib_get_main ();
-  snat_session_key_t sm0;
-  u8 protocol;
+  ip4_address_t addr;
+  u16 port;
+  u32 fib_index;
+  nat_protocol_t protocol;
   icmp_echo_header_t *echo0, *inner_echo0 = 0;
   ip4_header_t *inner_ip0;
   void *l4_header = 0;
@@ -677,9 +668,9 @@ icmp_in2out (snat_main_t * sm,
 
   echo0 = (icmp_echo_header_t *) (icmp0 + 1);
 
-  next0_tmp = sm->icmp_match_in2out_cb (sm, node, thread_index, b0, ip0,
-                                       &protocol, &sm0, &dont_translate, d,
-                                       e);
+  next0_tmp =
+    sm->icmp_match_in2out_cb (sm, node, thread_index, b0, ip0, &addr, &port,
+                             &fib_index, &protocol, d, e, &dont_translate);
   if (next0_tmp != ~0)
     next0 = next0_tmp;
   if (next0 == SNAT_IN2OUT_NEXT_DROP || dont_translate)
@@ -702,7 +693,7 @@ icmp_in2out (snat_main_t * sm,
     }
 
   old_addr0 = ip0->src_address.as_u32;
-  new_addr0 = ip0->src_address.as_u32 = sm0.addr.as_u32;
+  new_addr0 = ip0->src_address.as_u32 = addr.as_u32;
 
   sum0 = ip0->checksum;
   sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
@@ -716,11 +707,11 @@ icmp_in2out (snat_main_t * sm,
 
       if (!icmp_type_is_error_message (icmp0->type))
        {
-         new_id0 = sm0.port;
+         new_id0 = port;
          if (PREDICT_FALSE (new_id0 != echo0->identifier))
            {
              old_id0 = echo0->identifier;
-             new_id0 = sm0.port;
+             new_id0 = port;
              echo0->identifier = new_id0;
 
              sum0 = icmp0->checksum;
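
/*
 * Illustrative sketch, not part of the commit: the surrounding hunks only
 * change which variables feed the checksum fixups (sm0.port / sm0.addr
 * become the plain port / addr outputs of the match callback); the
 * ip_csum_update() / ip_csum_fold() pattern itself stays as it was.  For
 * reference, a standalone sketch of the RFC 1624 incremental-update
 * arithmetic that pattern relies on.  demo_csum_update() and
 * demo_csum_fold() are simplified stand-ins, not the vnet implementations.
 */
#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit running sum back into 16 bits (ones'-complement carries). */
static uint16_t
demo_csum_fold (uint32_t sum)
{
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) sum;
}

/*
 * RFC 1624: with old checksum HC and a 16-bit field changing from m to m',
 * HC' = ~(~HC + ~m + m').  A 32-bit address change is simply two such
 * 16-bit updates.
 */
static uint16_t
demo_csum_update (uint16_t old_csum, uint16_t old_field, uint16_t new_field)
{
  uint32_t sum = (uint16_t) ~ old_csum;
  sum += (uint16_t) ~ old_field;
  sum += new_field;
  return (uint16_t) ~ demo_csum_fold (sum);
}

int
main (void)
{
  /* e.g. rewriting an ICMP echo identifier from 0x1234 to 0xbeef */
  uint16_t before = 0xabcd;
  uint16_t after = demo_csum_update (before, 0x1234, 0xbeef);
  printf ("checksum 0x%04x -> 0x%04x\n", before, after);
  return 0;
}
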
@@ -743,7 +734,7 @@ icmp_in2out (snat_main_t * sm,
 
          /* update inner destination IP address */
          old_addr0 = inner_ip0->dst_address.as_u32;
-         inner_ip0->dst_address = sm0.addr;
+         inner_ip0->dst_address = addr;
          new_addr0 = inner_ip0->dst_address.as_u32;
          sum0 = icmp0->checksum;
          sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
@@ -770,7 +761,7 @@ icmp_in2out (snat_main_t * sm,
              inner_echo0 = (icmp_echo_header_t *) (inner_icmp0 + 1);
 
              old_id0 = inner_echo0->identifier;
-             new_id0 = sm0.port;
+             new_id0 = port;
              inner_echo0->identifier = new_id0;
 
              sum0 = icmp0->checksum;
@@ -782,7 +773,7 @@ icmp_in2out (snat_main_t * sm,
            case NAT_PROTOCOL_UDP:
            case NAT_PROTOCOL_TCP:
              old_id0 = ((tcp_udp_header_t *) l4_header)->dst_port;
-             new_id0 = sm0.port;
+             new_id0 = port;
              ((tcp_udp_header_t *) l4_header)->dst_port = new_id0;
 
              sum0 = icmp0->checksum;
@@ -798,10 +789,9 @@ icmp_in2out (snat_main_t * sm,
 
   if (vnet_buffer (b0)->sw_if_index[VLIB_TX] == ~0)
     {
-      if (sm->deterministic ||
-         0 != snat_icmp_hairpinning (sm, b0, ip0, icmp0,
+      if (0 != snat_icmp_hairpinning (sm, b0, ip0, icmp0,
                                      sm->endpoint_dependent))
-       vnet_buffer (b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
+       vnet_buffer (b0)->sw_if_index[VLIB_TX] = fib_index;
     }
 
 out:
@@ -844,15 +834,10 @@ nat_in2out_sm_unknown_proto (snat_main_t * sm,
 {
   clib_bihash_kv_8_8_t kv, value;
   snat_static_mapping_t *m;
-  snat_session_key_t m_key;
   u32 old_addr, new_addr;
   ip_csum_t sum;
 
-  m_key.addr = ip->src_address;
-  m_key.port = 0;
-  m_key.protocol = 0;
-  m_key.fib_index = rx_fib_index;
-  kv.key = m_key.as_u64;
+  init_nat_k (&kv, ip->src_address, 0, rx_fib_index, 0);
   if (clib_bihash_search_8_8 (&sm->static_mapping_by_local, &kv, &value))
     return 1;
 
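
/*
 * Illustrative sketch, not part of the commit: the large hunk below rewrites
 * snat_in2out_node_fn_inline() from the old vlib_get_next_frame() /
 * speculative-enqueue loop to the newer style that grabs every buffer
 * pointer up front with vlib_get_buffers() and records one next index per
 * packet in a nexts[] array, while the per-node scalar counters
 * (tcp/udp/icmp/other/fragments, pkts_processed) are replaced by
 * per-interface simple counters under sm->counters.fastpath / .slowpath.
 * The final batch hand-off lies beyond the truncated portion shown here
 * (presumably vlib_buffer_enqueue_to_next()).  Below, a framework-free
 * analogue of that dispatch shape only: two packets per iteration, prefetch
 * of the following pair, one next index per packet; all types and
 * process_one() are placeholders, and the hand-off is reduced to a plain
 * loop.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FRAME_SIZE 256	/* stand-in for VLIB_FRAME_SIZE */

typedef struct
{
  uint32_t data;
} demo_buffer_t;		/* stand-in for vlib_buffer_t */

/* Placeholder for the per-packet NAT work; returns a next-node index. */
static uint16_t
process_one (demo_buffer_t * b)
{
  return b->data & 1;		/* pretend: odd payloads drop, even forward */
}

static void
demo_node_fn (demo_buffer_t ** bufs, uint32_t n_packets)
{
  demo_buffer_t **b = bufs;
  uint16_t nexts[DEMO_FRAME_SIZE], *next = nexts;
  uint32_t n_left_from = n_packets;

  /* dual loop: two packets per iteration, prefetch the following pair */
  while (n_left_from >= 2)
    {
      if (n_left_from >= 4)
	{
	  __builtin_prefetch (b[2], 0 /* read */ , 3);
	  __builtin_prefetch (b[3], 0 /* read */ , 3);
	}
      next[0] = process_one (b[0]);
      next[1] = process_one (b[1]);
      b += 2;
      next += 2;
      n_left_from -= 2;
    }

  /* single loop for the remainder */
  while (n_left_from > 0)
    {
      next[0] = process_one (b[0]);
      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  /* hand each packet to its chosen next node in one batch */
  for (uint32_t i = 0; i < n_packets; i++)
    printf ("packet %u -> next %u\n", i, nexts[i]);
}

int
main (void)
{
  demo_buffer_t pkts[5] = { {10}, {11}, {12}, {13}, {14} };
  demo_buffer_t *ptrs[5] =
    { &pkts[0], &pkts[1], &pkts[2], &pkts[3], &pkts[4] };
  demo_node_fn (ptrs, 5);
  return 0;
}
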
@@ -881,760 +866,768 @@ snat_in2out_node_fn_inline (vlib_main_t * vm,
                            vlib_frame_t * frame, int is_slow_path,
                            int is_output_feature)
 {
-  u32 n_left_from, *from, *to_next;
-  snat_in2out_next_t next_index;
-  u32 pkts_processed = 0;
+  u32 n_left_from, *from;
   snat_main_t *sm = &snat_main;
   f64 now = vlib_time_now (vm);
-  u32 stats_node_index;
   u32 thread_index = vm->thread_index;
-  u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
-    0, fragments = 0;
-
-  stats_node_index = is_slow_path ? sm->in2out_slowpath_node_index :
-    sm->in2out_node_index;
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
-  next_index = node->cached_next_index;
 
-  while (n_left_from > 0)
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+  vlib_get_buffers (vm, from, b, n_left_from);
+
+  while (n_left_from >= 2)
     {
-      u32 n_left_to_next;
+      vlib_buffer_t *b0, *b1;
+      u32 next0, next1;
+      u32 sw_if_index0, sw_if_index1;
+      ip4_header_t *ip0, *ip1;
+      ip_csum_t sum0, sum1;
+      u32 new_addr0, old_addr0, new_addr1, old_addr1;
+      u16 old_port0, new_port0, old_port1, new_port1;
+      udp_header_t *udp0, *udp1;
+      tcp_header_t *tcp0, *tcp1;
+      icmp46_header_t *icmp0, *icmp1;
+      u32 rx_fib_index0, rx_fib_index1;
+      u32 proto0, proto1;
+      snat_session_t *s0 = 0, *s1 = 0;
+      clib_bihash_kv_8_8_t kv0, value0, kv1, value1;
+      u32 iph_offset0 = 0, iph_offset1 = 0;
+
+      b0 = *b;
+      b++;
+      b1 = *b;
+      b++;
+
+      /* Prefetch next iteration. */
+      if (PREDICT_TRUE (n_left_from >= 4))
+       {
+         vlib_buffer_t *p2, *p3;
 
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+         p2 = *b;
+         p3 = *(b + 1);
 
-      while (n_left_from >= 4 && n_left_to_next >= 2)
-       {
-         u32 bi0, bi1;
-         vlib_buffer_t *b0, *b1;
-         u32 next0, next1;
-         u32 sw_if_index0, sw_if_index1;
-         ip4_header_t *ip0, *ip1;
-         ip_csum_t sum0, sum1;
-         u32 new_addr0, old_addr0, new_addr1, old_addr1;
-         u16 old_port0, new_port0, old_port1, new_port1;
-         udp_header_t *udp0, *udp1;
-         tcp_header_t *tcp0, *tcp1;
-         icmp46_header_t *icmp0, *icmp1;
-         snat_session_key_t key0, key1;
-         u32 rx_fib_index0, rx_fib_index1;
-         u32 proto0, proto1;
-         snat_session_t *s0 = 0, *s1 = 0;
-         clib_bihash_kv_8_8_t kv0, value0, kv1, value1;
-         u32 iph_offset0 = 0, iph_offset1 = 0;
-
-         /* Prefetch next iteration. */
-         {
-           vlib_buffer_t *p2, *p3;
-
-           p2 = vlib_get_buffer (vm, from[2]);
-           p3 = vlib_get_buffer (vm, from[3]);
-
-           vlib_prefetch_buffer_header (p2, LOAD);
-           vlib_prefetch_buffer_header (p3, LOAD);
-
-           CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
-           CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
-         }
-
-         /* speculatively enqueue b0 and b1 to the current next frame */
-         to_next[0] = bi0 = from[0];
-         to_next[1] = bi1 = from[1];
-         from += 2;
-         to_next += 2;
-         n_left_from -= 2;
-         n_left_to_next -= 2;
+         vlib_prefetch_buffer_header (p2, LOAD);
+         vlib_prefetch_buffer_header (p3, LOAD);
 
-         b0 = vlib_get_buffer (vm, bi0);
-         b1 = vlib_get_buffer (vm, bi1);
+         CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
+         CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, LOAD);
+       }
 
-         if (is_output_feature)
-           iph_offset0 = vnet_buffer (b0)->ip.reass.save_rewrite_length;
+      if (is_output_feature)
+       iph_offset0 = vnet_buffer (b0)->ip.reass.save_rewrite_length;
 
-         ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
-                                 iph_offset0);
+      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
+                             iph_offset0);
 
-         udp0 = ip4_next_header (ip0);
-         tcp0 = (tcp_header_t *) udp0;
-         icmp0 = (icmp46_header_t *) udp0;
+      udp0 = ip4_next_header (ip0);
+      tcp0 = (tcp_header_t *) udp0;
+      icmp0 = (icmp46_header_t *) udp0;
 
-         sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-         rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
-                                  sw_if_index0);
+      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
+                              sw_if_index0);
 
-         next0 = next1 = SNAT_IN2OUT_NEXT_LOOKUP;
+      next0 = next1 = SNAT_IN2OUT_NEXT_LOOKUP;
 
-         if (PREDICT_FALSE (ip0->ttl == 1))
-           {
-             vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-             icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded,
-                                          ICMP4_time_exceeded_ttl_exceeded_in_transit,
-                                          0);
-             next0 = SNAT_IN2OUT_NEXT_ICMP_ERROR;
-             goto trace00;
-           }
+      if (PREDICT_FALSE (ip0->ttl == 1))
+       {
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+         icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded,
+                                      ICMP4_time_exceeded_ttl_exceeded_in_transit,
+                                      0);
+         next0 = SNAT_IN2OUT_NEXT_ICMP_ERROR;
+         goto trace00;
+       }
 
-         proto0 = ip_proto_to_nat_proto (ip0->protocol);
+      proto0 = ip_proto_to_nat_proto (ip0->protocol);
 
-         /* Next configured feature, probably ip4-lookup */
-         if (is_slow_path)
+      /* Next configured feature, probably ip4-lookup */
+      if (is_slow_path)
+       {
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
            {
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
+             if (nat_in2out_sm_unknown_proto (sm, b0, ip0, rx_fib_index0))
                {
-                 if (nat_in2out_sm_unknown_proto
-                     (sm, b0, ip0, rx_fib_index0))
-                   {
-                     next0 = SNAT_IN2OUT_NEXT_DROP;
-                     b0->error =
-                       node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL];
-                   }
-                 other_packets++;
-                 goto trace00;
+                 next0 = SNAT_IN2OUT_NEXT_DROP;
+                 b0->error =
+                   node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL];
                }
+             vlib_increment_simple_counter (is_slow_path ? &sm->
+                                            counters.slowpath.in2out.
+                                            other : &sm->counters.fastpath.
+                                            in2out.other, thread_index,
+                                            sw_if_index0, 1);
+             goto trace00;
+           }
 
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
-               {
-                 next0 = icmp_in2out_slow_path
-                   (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0,
-                    node, next0, now, thread_index, &s0);
-                 icmp_packets++;
-                 goto trace00;
-               }
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
+           {
+             next0 = icmp_in2out_slow_path
+               (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0,
+                node, next0, now, thread_index, &s0);
+             vlib_increment_simple_counter (is_slow_path ? &sm->
+                                            counters.slowpath.in2out.
+                                            icmp : &sm->counters.fastpath.
+                                            in2out.icmp, thread_index,
+                                            sw_if_index0, 1);
+             goto trace00;
            }
-         else
+       }
+      else
+       {
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
            {
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
-               {
-                 next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace00;
-               }
-
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
-               {
-                 next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace00;
-               }
+             next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace00;
            }
 
-         key0.addr = ip0->src_address;
-         key0.port = vnet_buffer (b0)->ip.reass.l4_src_port;
-         key0.protocol = proto0;
-         key0.fib_index = rx_fib_index0;
-
-         kv0.key = key0.as_u64;
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
+           {
+             next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace00;
+           }
+       }
 
-         if (PREDICT_FALSE
-             (clib_bihash_search_8_8
-              (&sm->per_thread_data[thread_index].in2out, &kv0,
-               &value0) != 0))
+      init_nat_k (&kv0, ip0->src_address,
+                 vnet_buffer (b0)->ip.reass.l4_src_port, rx_fib_index0,
+                 proto0);
+      if (PREDICT_FALSE
+         (clib_bihash_search_8_8
+          (&sm->per_thread_data[thread_index].in2out, &kv0, &value0) != 0))
+       {
+         if (is_slow_path)
            {
-             if (is_slow_path)
+             if (is_output_feature)
                {
-                 if (is_output_feature)
-                   {
-                     if (PREDICT_FALSE
-                         (nat_not_translate_output_feature
-                          (sm, ip0, proto0,
-                           vnet_buffer (b0)->ip.reass.l4_src_port,
-                           vnet_buffer (b0)->ip.reass.l4_dst_port,
-                           thread_index, sw_if_index0)))
-                       goto trace00;
-
-                     /*
-                      * Send DHCP packets to the ipv4 stack, or we won't
-                      * be able to use dhcp client on the outside interface
-                      */
-                     if (PREDICT_FALSE
-                         (proto0 == NAT_PROTOCOL_UDP
-                          && (vnet_buffer (b0)->ip.reass.l4_dst_port ==
-                              clib_host_to_net_u16
-                              (UDP_DST_PORT_dhcp_to_server))
-                          && ip0->dst_address.as_u32 == 0xffffffff))
-                       goto trace00;
-                   }
-                 else
-                   {
-                     if (PREDICT_FALSE
-                         (snat_not_translate
-                          (sm, node, sw_if_index0, ip0, proto0,
-                           rx_fib_index0, thread_index)))
-                       goto trace00;
-                   }
-
-                 next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0,
-                                    &s0, node, next0, thread_index, now);
-                 if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
+                 if (PREDICT_FALSE
+                     (nat_not_translate_output_feature
+                      (sm, ip0, proto0,
+                       vnet_buffer (b0)->ip.reass.l4_src_port,
+                       vnet_buffer (b0)->ip.reass.l4_dst_port,
+                       thread_index, sw_if_index0)))
                    goto trace00;
 
-                 if (PREDICT_FALSE (!s0))
+                 /*
+                  * Send DHCP packets to the ipv4 stack, or we won't
+                  * be able to use dhcp client on the outside interface
+                  */
+                 if (PREDICT_FALSE
+                     (proto0 == NAT_PROTOCOL_UDP
+                      && (vnet_buffer (b0)->ip.reass.l4_dst_port ==
+                          clib_host_to_net_u16
+                          (UDP_DST_PORT_dhcp_to_server))
+                      && ip0->dst_address.as_u32 == 0xffffffff))
                    goto trace00;
                }
              else
                {
-                 next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace00;
+                 if (PREDICT_FALSE
+                     (snat_not_translate
+                      (sm, node, sw_if_index0, ip0, proto0,
+                       rx_fib_index0, thread_index)))
+                   goto trace00;
                }
+
+             next0 = slow_path (sm, b0, ip0,
+                                ip0->src_address,
+                                vnet_buffer (b0)->ip.reass.l4_src_port,
+                                rx_fib_index0,
+                                proto0, &s0, node, next0, thread_index, now);
+             if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
+               goto trace00;
+
+             if (PREDICT_FALSE (!s0))
+               goto trace00;
            }
          else
-           s0 =
-             pool_elt_at_index (sm->per_thread_data[thread_index].sessions,
-                                value0.value);
+           {
+             next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace00;
+           }
+       }
+      else
+       s0 =
+         pool_elt_at_index (sm->per_thread_data[thread_index].sessions,
+                            value0.value);
 
-         b0->flags |= VNET_BUFFER_F_IS_NATED;
+      b0->flags |= VNET_BUFFER_F_IS_NATED;
 
-         old_addr0 = ip0->src_address.as_u32;
-         ip0->src_address = s0->out2in.addr;
-         new_addr0 = ip0->src_address.as_u32;
-         if (!is_output_feature)
-           vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
+      old_addr0 = ip0->src_address.as_u32;
+      ip0->src_address = s0->out2in.addr;
+      new_addr0 = ip0->src_address.as_u32;
+      if (!is_output_feature)
+       vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
 
-         sum0 = ip0->checksum;
-         sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
-                                ip4_header_t,
-                                src_address /* changed member */ );
-         ip0->checksum = ip_csum_fold (sum0);
+      sum0 = ip0->checksum;
+      sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
+                            ip4_header_t, src_address /* changed member */ );
+      ip0->checksum = ip_csum_fold (sum0);
 
 
-         if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP))
+      if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP))
+       {
+         if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
            {
-             if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
-               {
-                 old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
-                 new_port0 = udp0->src_port = s0->out2in.port;
-                 sum0 = tcp0->checksum;
-                 sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
-                                        ip4_header_t,
-                                        dst_address /* changed member */ );
-                 sum0 = ip_csum_update (sum0, old_port0, new_port0,
-                                        ip4_header_t /* cheat */ ,
-                                        length /* changed member */ );
-                 mss_clamping (sm, tcp0, &sum0);
-                 tcp0->checksum = ip_csum_fold (sum0);
-               }
-             tcp_packets++;
+             old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
+             new_port0 = udp0->src_port = s0->out2in.port;
+             sum0 = tcp0->checksum;
+             sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
+                                    ip4_header_t,
+                                    dst_address /* changed member */ );
+             sum0 = ip_csum_update (sum0, old_port0, new_port0,
+                                    ip4_header_t /* cheat */ ,
+                                    length /* changed member */ );
+             mss_clamping (sm->mss_clamping, tcp0, &sum0);
+             tcp0->checksum = ip_csum_fold (sum0);
            }
-         else
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.tcp : &sm->
+                                        counters.fastpath.in2out.tcp,
+                                        thread_index, sw_if_index0, 1);
+       }
+      else
+       {
+         if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
            {
-             if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
+             udp0->src_port = s0->out2in.port;
+             if (PREDICT_FALSE (udp0->checksum))
                {
-                 udp0->src_port = s0->out2in.port;
-                 if (PREDICT_FALSE (udp0->checksum))
-                   {
-                     old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
-                     new_port0 = udp0->src_port;
-                     sum0 = udp0->checksum;
-                     sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, dst_address      /* changed member */
-                       );
-                     sum0 =
-                       ip_csum_update (sum0, old_port0, new_port0,
-                                       ip4_header_t /* cheat */ ,
-                                       length /* changed member */ );
-                     udp0->checksum = ip_csum_fold (sum0);
-                   }
+                 old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
+                 new_port0 = udp0->src_port;
+                 sum0 = udp0->checksum;
+                 sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, dst_address  /* changed member */
+                   );
+                 sum0 =
+                   ip_csum_update (sum0, old_port0, new_port0,
+                                   ip4_header_t /* cheat */ ,
+                                   length /* changed member */ );
+                 udp0->checksum = ip_csum_fold (sum0);
                }
-             udp_packets++;
            }
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.udp : &sm->
+                                        counters.fastpath.in2out.udp,
+                                        thread_index, sw_if_index0, 1);
+       }
 
-         /* Accounting */
-         nat44_session_update_counters (s0, now,
-                                        vlib_buffer_length_in_chain (vm, b0),
-                                        thread_index);
-         /* Per-user LRU list maintenance */
-         nat44_session_update_lru (sm, s0, thread_index);
-       trace00:
+      /* Accounting */
+      nat44_session_update_counters (s0, now,
+                                    vlib_buffer_length_in_chain (vm, b0),
+                                    thread_index);
+      /* Per-user LRU list maintenance */
+      nat44_session_update_lru (sm, s0, thread_index);
+    trace00:
 
-         if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
-                            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
-           {
-             snat_in2out_trace_t *t =
-               vlib_add_trace (vm, node, b0, sizeof (*t));
-             t->is_slow_path = is_slow_path;
-             t->sw_if_index = sw_if_index0;
-             t->next_index = next0;
-             t->session_index = ~0;
-             if (s0)
-               t->session_index =
-                 s0 - sm->per_thread_data[thread_index].sessions;
-           }
+      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+                        && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+       {
+         snat_in2out_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+         t->is_slow_path = is_slow_path;
+         t->sw_if_index = sw_if_index0;
+         t->next_index = next0;
+         t->session_index = ~0;
+         if (s0)
+           t->session_index =
+             s0 - sm->per_thread_data[thread_index].sessions;
+       }
 
-         pkts_processed += next0 == SNAT_IN2OUT_NEXT_LOOKUP;
+      if (next0 == SNAT_IN2OUT_NEXT_DROP)
+       {
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.
+                                        drops : &sm->counters.fastpath.
+                                        in2out.drops, thread_index,
+                                        sw_if_index0, 1);
+       }
 
-         if (is_output_feature)
-           iph_offset1 = vnet_buffer (b1)->ip.reass.save_rewrite_length;
+      if (is_output_feature)
+       iph_offset1 = vnet_buffer (b1)->ip.reass.save_rewrite_length;
 
-         ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b1) +
-                                 iph_offset1);
+      ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b1) +
+                             iph_offset1);
 
-         udp1 = ip4_next_header (ip1);
-         tcp1 = (tcp_header_t *) udp1;
-         icmp1 = (icmp46_header_t *) udp1;
+      udp1 = ip4_next_header (ip1);
+      tcp1 = (tcp_header_t *) udp1;
+      icmp1 = (icmp46_header_t *) udp1;
 
-         sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
-         rx_fib_index1 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
-                                  sw_if_index1);
+      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+      rx_fib_index1 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
+                              sw_if_index1);
 
-         if (PREDICT_FALSE (ip1->ttl == 1))
-           {
-             vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-             icmp4_error_set_vnet_buffer (b1, ICMP4_time_exceeded,
-                                          ICMP4_time_exceeded_ttl_exceeded_in_transit,
-                                          0);
-             next1 = SNAT_IN2OUT_NEXT_ICMP_ERROR;
-             goto trace01;
-           }
+      if (PREDICT_FALSE (ip1->ttl == 1))
+       {
+         vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+         icmp4_error_set_vnet_buffer (b1, ICMP4_time_exceeded,
+                                      ICMP4_time_exceeded_ttl_exceeded_in_transit,
+                                      0);
+         next1 = SNAT_IN2OUT_NEXT_ICMP_ERROR;
+         goto trace01;
+       }
 
-         proto1 = ip_proto_to_nat_proto (ip1->protocol);
+      proto1 = ip_proto_to_nat_proto (ip1->protocol);
 
-         /* Next configured feature, probably ip4-lookup */
-         if (is_slow_path)
+      /* Next configured feature, probably ip4-lookup */
+      if (is_slow_path)
+       {
+         if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_OTHER))
            {
-             if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_OTHER))
+             if (nat_in2out_sm_unknown_proto (sm, b1, ip1, rx_fib_index1))
                {
-                 if (nat_in2out_sm_unknown_proto
-                     (sm, b1, ip1, rx_fib_index1))
-                   {
-                     next1 = SNAT_IN2OUT_NEXT_DROP;
-                     b1->error =
-                       node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL];
-                   }
-                 other_packets++;
-                 goto trace01;
+                 next1 = SNAT_IN2OUT_NEXT_DROP;
+                 b1->error =
+                   node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL];
                }
+             vlib_increment_simple_counter (is_slow_path ? &sm->
+                                            counters.slowpath.in2out.
+                                            other : &sm->counters.fastpath.
+                                            in2out.other, thread_index,
+                                            sw_if_index1, 1);
+             goto trace01;
+           }
 
-             if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_ICMP))
-               {
-                 next1 = icmp_in2out_slow_path
-                   (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node,
-                    next1, now, thread_index, &s1);
-                 icmp_packets++;
-                 goto trace01;
-               }
+         if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_ICMP))
+           {
+             next1 = icmp_in2out_slow_path
+               (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node,
+                next1, now, thread_index, &s1);
+             vlib_increment_simple_counter (is_slow_path ? &sm->
+                                            counters.slowpath.in2out.
+                                            icmp : &sm->counters.fastpath.
+                                            in2out.icmp, thread_index,
+                                            sw_if_index1, 1);
+             goto trace01;
            }
-         else
+       }
+      else
+       {
+         if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_OTHER))
            {
-             if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_OTHER))
-               {
-                 next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace01;
-               }
-
-             if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_ICMP))
-               {
-                 next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace01;
-               }
+             next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace01;
            }
 
-         key1.addr = ip1->src_address;
-         key1.port = vnet_buffer (b1)->ip.reass.l4_src_port;
-         key1.protocol = proto1;
-         key1.fib_index = rx_fib_index1;
-
-         kv1.key = key1.as_u64;
+         if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_ICMP))
+           {
+             next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace01;
+           }
+       }
 
-         if (PREDICT_FALSE
-             (clib_bihash_search_8_8
-              (&sm->per_thread_data[thread_index].in2out, &kv1,
-               &value1) != 0))
+      init_nat_k (&kv1, ip1->src_address,
+                 vnet_buffer (b1)->ip.reass.l4_src_port, rx_fib_index1,
+                 proto1);
+      if (PREDICT_FALSE
+         (clib_bihash_search_8_8
+          (&sm->per_thread_data[thread_index].in2out, &kv1, &value1) != 0))
+       {
+         if (is_slow_path)
            {
-             if (is_slow_path)
+             if (is_output_feature)
                {
-                 if (is_output_feature)
-                   {
-                     if (PREDICT_FALSE
-                         (nat_not_translate_output_feature
-                          (sm, ip1, proto1,
-                           vnet_buffer (b1)->ip.reass.l4_src_port,
-                           vnet_buffer (b1)->ip.reass.l4_dst_port,
-                           thread_index, sw_if_index1)))
-                       goto trace01;
-
-                     /*
-                      * Send DHCP packets to the ipv4 stack, or we won't
-                      * be able to use dhcp client on the outside interface
-                      */
-                     if (PREDICT_FALSE
-                         (proto1 == NAT_PROTOCOL_UDP
-                          && (vnet_buffer (b1)->ip.reass.l4_dst_port ==
-                              clib_host_to_net_u16
-                              (UDP_DST_PORT_dhcp_to_server))
-                          && ip1->dst_address.as_u32 == 0xffffffff))
-                       goto trace01;
-                   }
-                 else
-                   {
-                     if (PREDICT_FALSE
-                         (snat_not_translate
-                          (sm, node, sw_if_index1, ip1, proto1,
-                           rx_fib_index1, thread_index)))
-                       goto trace01;
-                   }
-
-                 next1 = slow_path (sm, b1, ip1, rx_fib_index1, &key1,
-                                    &s1, node, next1, thread_index, now);
-                 if (PREDICT_FALSE (next1 == SNAT_IN2OUT_NEXT_DROP))
+                 if (PREDICT_FALSE
+                     (nat_not_translate_output_feature
+                      (sm, ip1, proto1,
+                       vnet_buffer (b1)->ip.reass.l4_src_port,
+                       vnet_buffer (b1)->ip.reass.l4_dst_port,
+                       thread_index, sw_if_index1)))
                    goto trace01;
 
-                 if (PREDICT_FALSE (!s1))
+                 /*
+                  * Send DHCP packets to the ipv4 stack, or we won't
+                  * be able to use dhcp client on the outside interface
+                  */
+                 if (PREDICT_FALSE
+                     (proto1 == NAT_PROTOCOL_UDP
+                      && (vnet_buffer (b1)->ip.reass.l4_dst_port ==
+                          clib_host_to_net_u16
+                          (UDP_DST_PORT_dhcp_to_server))
+                      && ip1->dst_address.as_u32 == 0xffffffff))
                    goto trace01;
                }
              else
                {
-                 next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace01;
+                 if (PREDICT_FALSE
+                     (snat_not_translate
+                      (sm, node, sw_if_index1, ip1, proto1,
+                       rx_fib_index1, thread_index)))
+                   goto trace01;
                }
+
+             next1 =
+               slow_path (sm, b1, ip1, ip1->src_address,
+                          vnet_buffer (b1)->ip.reass.l4_src_port,
+                          rx_fib_index1, proto1, &s1, node, next1,
+                          thread_index, now);
+             if (PREDICT_FALSE (next1 == SNAT_IN2OUT_NEXT_DROP))
+               goto trace01;
+
+             if (PREDICT_FALSE (!s1))
+               goto trace01;
            }
          else
-           s1 =
-             pool_elt_at_index (sm->per_thread_data[thread_index].sessions,
-                                value1.value);
+           {
+             next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace01;
+           }
+       }
+      else
+       s1 =
+         pool_elt_at_index (sm->per_thread_data[thread_index].sessions,
+                            value1.value);
 
-         b1->flags |= VNET_BUFFER_F_IS_NATED;
+      b1->flags |= VNET_BUFFER_F_IS_NATED;
 
-         old_addr1 = ip1->src_address.as_u32;
-         ip1->src_address = s1->out2in.addr;
-         new_addr1 = ip1->src_address.as_u32;
-         if (!is_output_feature)
-           vnet_buffer (b1)->sw_if_index[VLIB_TX] = s1->out2in.fib_index;
+      old_addr1 = ip1->src_address.as_u32;
+      ip1->src_address = s1->out2in.addr;
+      new_addr1 = ip1->src_address.as_u32;
+      if (!is_output_feature)
+       vnet_buffer (b1)->sw_if_index[VLIB_TX] = s1->out2in.fib_index;
 
-         sum1 = ip1->checksum;
-         sum1 = ip_csum_update (sum1, old_addr1, new_addr1,
-                                ip4_header_t,
-                                src_address /* changed member */ );
-         ip1->checksum = ip_csum_fold (sum1);
+      sum1 = ip1->checksum;
+      sum1 = ip_csum_update (sum1, old_addr1, new_addr1,
+                            ip4_header_t, src_address /* changed member */ );
+      ip1->checksum = ip_csum_fold (sum1);
 
-         if (PREDICT_TRUE (proto1 == NAT_PROTOCOL_TCP))
+      if (PREDICT_TRUE (proto1 == NAT_PROTOCOL_TCP))
+       {
+         if (!vnet_buffer (b1)->ip.reass.is_non_first_fragment)
            {
-             if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
-               {
-                 old_port1 = vnet_buffer (b1)->ip.reass.l4_src_port;
-                 new_port1 = udp1->src_port = s1->out2in.port;
-                 sum1 = tcp1->checksum;
-                 sum1 = ip_csum_update (sum1, old_addr1, new_addr1,
-                                        ip4_header_t,
-                                        dst_address /* changed member */ );
-                 sum1 = ip_csum_update (sum1, old_port1, new_port1,
-                                        ip4_header_t /* cheat */ ,
-                                        length /* changed member */ );
-                 mss_clamping (sm, tcp1, &sum1);
-                 tcp1->checksum = ip_csum_fold (sum1);
-               }
-             tcp_packets++;
+             old_port1 = vnet_buffer (b1)->ip.reass.l4_src_port;
+             new_port1 = udp1->src_port = s1->out2in.port;
+             sum1 = tcp1->checksum;
+             sum1 = ip_csum_update (sum1, old_addr1, new_addr1,
+                                    ip4_header_t,
+                                    dst_address /* changed member */ );
+             sum1 = ip_csum_update (sum1, old_port1, new_port1,
+                                    ip4_header_t /* cheat */ ,
+                                    length /* changed member */ );
+             mss_clamping (sm->mss_clamping, tcp1, &sum1);
+             tcp1->checksum = ip_csum_fold (sum1);
            }
-         else
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.tcp : &sm->
+                                        counters.fastpath.in2out.tcp,
+                                        thread_index, sw_if_index1, 1);
+       }
+      else
+       {
+         if (!vnet_buffer (b1)->ip.reass.is_non_first_fragment)
            {
-             if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
+             udp1->src_port = s1->out2in.port;
+             if (PREDICT_FALSE (udp1->checksum))
                {
-                 udp1->src_port = s1->out2in.port;
-                 if (PREDICT_FALSE (udp1->checksum))
-                   {
-                     old_port1 = vnet_buffer (b1)->ip.reass.l4_src_port;
-                     new_port1 = udp1->src_port;
-                     sum1 = udp1->checksum;
-                     sum1 = ip_csum_update (sum1, old_addr1, new_addr1, ip4_header_t, dst_address      /* changed member */
-                       );
-                     sum1 =
-                       ip_csum_update (sum1, old_port1, new_port1,
-                                       ip4_header_t /* cheat */ ,
-                                       length /* changed member */ );
-                     udp1->checksum = ip_csum_fold (sum1);
-                   }
+                 old_port1 = vnet_buffer (b1)->ip.reass.l4_src_port;
+                 new_port1 = udp1->src_port;
+                 sum1 = udp1->checksum;
+                 sum1 = ip_csum_update (sum1, old_addr1, new_addr1, ip4_header_t, dst_address  /* changed member */
+                   );
+                 sum1 =
+                   ip_csum_update (sum1, old_port1, new_port1,
+                                   ip4_header_t /* cheat */ ,
+                                   length /* changed member */ );
+                 udp1->checksum = ip_csum_fold (sum1);
                }
-             udp_packets++;
-           }
-
-         /* Accounting */
-         nat44_session_update_counters (s1, now,
-                                        vlib_buffer_length_in_chain (vm, b1),
-                                        thread_index);
-         /* Per-user LRU list maintenance */
-         nat44_session_update_lru (sm, s1, thread_index);
-       trace01:
-
-         if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
-                            && (b1->flags & VLIB_BUFFER_IS_TRACED)))
-           {
-             snat_in2out_trace_t *t =
-               vlib_add_trace (vm, node, b1, sizeof (*t));
-             t->sw_if_index = sw_if_index1;
-             t->next_index = next1;
-             t->session_index = ~0;
-             if (s1)
-               t->session_index =
-                 s1 - sm->per_thread_data[thread_index].sessions;
            }
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.udp : &sm->
+                                        counters.fastpath.in2out.udp,
+                                        thread_index, sw_if_index1, 1);
+       }
 
-         pkts_processed += next1 == SNAT_IN2OUT_NEXT_LOOKUP;
+      /* Accounting */
+      nat44_session_update_counters (s1, now,
+                                    vlib_buffer_length_in_chain (vm, b1),
+                                    thread_index);
+      /* Per-user LRU list maintenance */
+      nat44_session_update_lru (sm, s1, thread_index);
+    trace01:
 
-         /* verify speculative enqueues, maybe switch current next frame */
-         vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
-                                          to_next, n_left_to_next,
-                                          bi0, bi1, next0, next1);
+      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+                        && (b1->flags & VLIB_BUFFER_IS_TRACED)))
+       {
+         snat_in2out_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
+         t->sw_if_index = sw_if_index1;
+         t->next_index = next1;
+         t->session_index = ~0;
+         if (s1)
+           t->session_index =
+             s1 - sm->per_thread_data[thread_index].sessions;
        }
 
-      while (n_left_from > 0 && n_left_to_next > 0)
+      if (next1 == SNAT_IN2OUT_NEXT_DROP)
        {
-         u32 bi0;
-         vlib_buffer_t *b0;
-         u32 next0;
-         u32 sw_if_index0;
-         ip4_header_t *ip0;
-         ip_csum_t sum0;
-         u32 new_addr0, old_addr0;
-         u16 old_port0, new_port0;
-         udp_header_t *udp0;
-         tcp_header_t *tcp0;
-         icmp46_header_t *icmp0;
-         snat_session_key_t key0;
-         u32 rx_fib_index0;
-         u32 proto0;
-         snat_session_t *s0 = 0;
-         clib_bihash_kv_8_8_t kv0, value0;
-         u32 iph_offset0 = 0;
-
-         /* speculatively enqueue b0 to the current next frame */
-         bi0 = from[0];
-         to_next[0] = bi0;
-         from += 1;
-         to_next += 1;
-         n_left_from -= 1;
-         n_left_to_next -= 1;
-
-         b0 = vlib_get_buffer (vm, bi0);
-         next0 = SNAT_IN2OUT_NEXT_LOOKUP;
-
-         if (is_output_feature)
-           iph_offset0 = vnet_buffer (b0)->ip.reass.save_rewrite_length;
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.
+                                        drops : &sm->counters.fastpath.
+                                        in2out.drops, thread_index,
+                                        sw_if_index1, 1);
+       }
 
-         ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
-                                 iph_offset0);
+      n_left_from -= 2;
+      next[0] = next0;
+      next[1] = next1;
+      next += 2;
+    }
 
-         udp0 = ip4_next_header (ip0);
-         tcp0 = (tcp_header_t *) udp0;
-         icmp0 = (icmp46_header_t *) udp0;
+  while (n_left_from > 0)
+    {
+      vlib_buffer_t *b0;
+      u32 next0;
+      u32 sw_if_index0;
+      ip4_header_t *ip0;
+      ip_csum_t sum0;
+      u32 new_addr0, old_addr0;
+      u16 old_port0, new_port0;
+      udp_header_t *udp0;
+      tcp_header_t *tcp0;
+      icmp46_header_t *icmp0;
+      u32 rx_fib_index0;
+      u32 proto0;
+      snat_session_t *s0 = 0;
+      clib_bihash_kv_8_8_t kv0, value0;
+      u32 iph_offset0 = 0;
+
+      b0 = *b;
+      b++;
+      next0 = SNAT_IN2OUT_NEXT_LOOKUP;
+
+      if (is_output_feature)
+       iph_offset0 = vnet_buffer (b0)->ip.reass.save_rewrite_length;
+
+      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
+                             iph_offset0);
+
+      udp0 = ip4_next_header (ip0);
+      tcp0 = (tcp_header_t *) udp0;
+      icmp0 = (icmp46_header_t *) udp0;
+
+      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
+                              sw_if_index0);
+
+      if (PREDICT_FALSE (ip0->ttl == 1))
+       {
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+         icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded,
+                                      ICMP4_time_exceeded_ttl_exceeded_in_transit,
+                                      0);
+         next0 = SNAT_IN2OUT_NEXT_ICMP_ERROR;
+         goto trace0;
+       }
 
-         sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-         rx_fib_index0 = vec_elt (sm->ip4_main->fib_index_by_sw_if_index,
-                                  sw_if_index0);
+      proto0 = ip_proto_to_nat_proto (ip0->protocol);
 
-         if (PREDICT_FALSE (ip0->ttl == 1))
+      /* Next configured feature, probably ip4-lookup */
+      if (is_slow_path)
+       {
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
            {
-             vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-             icmp4_error_set_vnet_buffer (b0, ICMP4_time_exceeded,
-                                          ICMP4_time_exceeded_ttl_exceeded_in_transit,
-                                          0);
-             next0 = SNAT_IN2OUT_NEXT_ICMP_ERROR;
+             if (nat_in2out_sm_unknown_proto (sm, b0, ip0, rx_fib_index0))
+               {
+                 next0 = SNAT_IN2OUT_NEXT_DROP;
+                 b0->error =
+                   node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL];
+               }
+             vlib_increment_simple_counter (is_slow_path ? &sm->
+                                            counters.slowpath.in2out.
+                                            other : &sm->counters.fastpath.
+                                            in2out.other, thread_index,
+                                            sw_if_index0, 1);
              goto trace0;
            }
 
-         proto0 = ip_proto_to_nat_proto (ip0->protocol);
-
-         /* Next configured feature, probably ip4-lookup */
-         if (is_slow_path)
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
            {
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
-               {
-                 if (nat_in2out_sm_unknown_proto
-                     (sm, b0, ip0, rx_fib_index0))
-                   {
-                     next0 = SNAT_IN2OUT_NEXT_DROP;
-                     b0->error =
-                       node->errors[SNAT_IN2OUT_ERROR_UNSUPPORTED_PROTOCOL];
-                   }
-                 other_packets++;
-                 goto trace0;
-               }
-
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
-               {
-                 next0 = icmp_in2out_slow_path
-                   (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node,
-                    next0, now, thread_index, &s0);
-                 icmp_packets++;
-                 goto trace0;
-               }
+             next0 = icmp_in2out_slow_path
+               (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node,
+                next0, now, thread_index, &s0);
+             vlib_increment_simple_counter (is_slow_path ? &sm->
+                                            counters.slowpath.in2out.
+                                            icmp : &sm->counters.fastpath.
+                                            in2out.icmp, thread_index,
+                                            sw_if_index0, 1);
+             goto trace0;
            }
-         else
+       }
+      else
+       {
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
            {
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
-               {
-                 next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace0;
-               }
-
-             if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
-               {
-                 next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace0;
-               }
+             next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace0;
            }
 
-         key0.addr = ip0->src_address;
-         key0.port = vnet_buffer (b0)->ip.reass.l4_src_port;
-         key0.protocol = proto0;
-         key0.fib_index = rx_fib_index0;
+         if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
+           {
+             next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace0;
+           }
+       }
 
-         kv0.key = key0.as_u64;
+      init_nat_k (&kv0, ip0->src_address,
+                 vnet_buffer (b0)->ip.reass.l4_src_port, rx_fib_index0,
+                 proto0);
 
-         if (clib_bihash_search_8_8
-             (&sm->per_thread_data[thread_index].in2out, &kv0, &value0))
+      if (clib_bihash_search_8_8
+         (&sm->per_thread_data[thread_index].in2out, &kv0, &value0))
+       {
+         if (is_slow_path)
            {
-             if (is_slow_path)
+             if (is_output_feature)
                {
-                 if (is_output_feature)
-                   {
-                     if (PREDICT_FALSE
-                         (nat_not_translate_output_feature
-                          (sm, ip0, proto0,
-                           vnet_buffer (b0)->ip.reass.l4_src_port,
-                           vnet_buffer (b0)->ip.reass.l4_dst_port,
-                           thread_index, sw_if_index0)))
-                       goto trace0;
-
-                     /*
-                      * Send DHCP packets to the ipv4 stack, or we won't
-                      * be able to use dhcp client on the outside interface
-                      */
-                     if (PREDICT_FALSE
-                         (proto0 == NAT_PROTOCOL_UDP
-                          && (vnet_buffer (b0)->ip.reass.l4_dst_port ==
-                              clib_host_to_net_u16
-                              (UDP_DST_PORT_dhcp_to_server))
-                          && ip0->dst_address.as_u32 == 0xffffffff))
-                       goto trace0;
-                   }
-                 else
-                   {
-                     if (PREDICT_FALSE
-                         (snat_not_translate
-                          (sm, node, sw_if_index0, ip0, proto0,
-                           rx_fib_index0, thread_index)))
-                       goto trace0;
-                   }
-
-                 next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0,
-                                    &s0, node, next0, thread_index, now);
-
-                 if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
+                 if (PREDICT_FALSE
+                     (nat_not_translate_output_feature
+                      (sm, ip0, proto0,
+                       vnet_buffer (b0)->ip.reass.l4_src_port,
+                       vnet_buffer (b0)->ip.reass.l4_dst_port,
+                       thread_index, sw_if_index0)))
                    goto trace0;
 
-                 if (PREDICT_FALSE (!s0))
+                 /*
+                  * Send DHCP packets to the ipv4 stack, or we won't
+                  * be able to use the dhcp client on the outside interface
+                  */
+                 if (PREDICT_FALSE
+                     (proto0 == NAT_PROTOCOL_UDP
+                      && (vnet_buffer (b0)->ip.reass.l4_dst_port ==
+                          clib_host_to_net_u16
+                          (UDP_DST_PORT_dhcp_to_server))
+                      && ip0->dst_address.as_u32 == 0xffffffff))
                    goto trace0;
                }
              else
                {
-                 next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
-                 goto trace0;
+                 if (PREDICT_FALSE
+                     (snat_not_translate
+                      (sm, node, sw_if_index0, ip0, proto0, rx_fib_index0,
+                       thread_index)))
+                   goto trace0;
                }
+
+             next0 =
+               slow_path (sm, b0, ip0, ip0->src_address,
+                          vnet_buffer (b0)->ip.reass.l4_src_port,
+                          rx_fib_index0, proto0, &s0, node, next0,
+                          thread_index, now);
+
+             if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
+               goto trace0;
+
+             if (PREDICT_FALSE (!s0))
+               goto trace0;
            }
          else
-           s0 =
-             pool_elt_at_index (sm->per_thread_data[thread_index].sessions,
-                                value0.value);
+           {
+             next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+             goto trace0;
+           }
+       }
+      else
+       s0 =
+         pool_elt_at_index (sm->per_thread_data[thread_index].sessions,
+                            value0.value);
 
-         b0->flags |= VNET_BUFFER_F_IS_NATED;
+      b0->flags |= VNET_BUFFER_F_IS_NATED;
 
-         old_addr0 = ip0->src_address.as_u32;
-         ip0->src_address = s0->out2in.addr;
-         new_addr0 = ip0->src_address.as_u32;
-         if (!is_output_feature)
-           vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
+      old_addr0 = ip0->src_address.as_u32;
+      ip0->src_address = s0->out2in.addr;
+      new_addr0 = ip0->src_address.as_u32;
+      if (!is_output_feature)
+       vnet_buffer (b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
 
-         sum0 = ip0->checksum;
-         sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
-                                ip4_header_t,
-                                src_address /* changed member */ );
-         ip0->checksum = ip_csum_fold (sum0);
+      sum0 = ip0->checksum;
+      sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
+                            ip4_header_t, src_address /* changed member */ );
+      ip0->checksum = ip_csum_fold (sum0);
 
-         if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP))
+      if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP))
+       {
+         if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
            {
-             if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
-               {
-                 old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
-                 new_port0 = udp0->src_port = s0->out2in.port;
-                 sum0 = tcp0->checksum;
-                 sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
-                                        ip4_header_t,
-                                        dst_address /* changed member */ );
-                 sum0 = ip_csum_update (sum0, old_port0, new_port0,
-                                        ip4_header_t /* cheat */ ,
-                                        length /* changed member */ );
-                 mss_clamping (sm, tcp0, &sum0);
-                 tcp0->checksum = ip_csum_fold (sum0);
-               }
-             tcp_packets++;
+             old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
+             new_port0 = udp0->src_port = s0->out2in.port;
+             sum0 = tcp0->checksum;
+             sum0 =
+               ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
+                               dst_address /* changed member */ );
+             sum0 =
+               ip_csum_update (sum0, old_port0, new_port0,
+                               ip4_header_t /* cheat */ ,
+                               length /* changed member */ );
+             mss_clamping (sm->mss_clamping, tcp0, &sum0);
+             tcp0->checksum = ip_csum_fold (sum0);
            }
-         else
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.tcp : &sm->
+                                        counters.fastpath.in2out.tcp,
+                                        thread_index, sw_if_index0, 1);
+       }
+      else
+       {
+         if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
            {
-             if (!vnet_buffer (b0)->ip.reass.is_non_first_fragment)
+             udp0->src_port = s0->out2in.port;
+             if (PREDICT_FALSE (udp0->checksum))
                {
-                 udp0->src_port = s0->out2in.port;
-                 if (PREDICT_FALSE (udp0->checksum))
-                   {
-                     old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
-                     new_port0 = udp0->src_port;
-                     sum0 = udp0->checksum;
-                     sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t, dst_address      /* changed member */
-                       );
-                     sum0 =
-                       ip_csum_update (sum0, old_port0, new_port0,
-                                       ip4_header_t /* cheat */ ,
-                                       length /* changed member */ );
-                     udp0->checksum = ip_csum_fold (sum0);
-                   }
+                 old_port0 = vnet_buffer (b0)->ip.reass.l4_src_port;
+                 new_port0 = udp0->src_port;
+                 sum0 = udp0->checksum;
+                 sum0 =
+                   ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
+                                   dst_address /* changed member */ );
+                 sum0 =
+                   ip_csum_update (sum0, old_port0, new_port0,
+                                   ip4_header_t /* cheat */ ,
+                                   length /* changed member */ );
+                 udp0->checksum = ip_csum_fold (sum0);
                }
-             udp_packets++;
            }
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.udp : &sm->
+                                        counters.fastpath.in2out.udp,
+                                        thread_index, sw_if_index0, 1);
+       }
 
-         /* Accounting */
-         nat44_session_update_counters (s0, now,
-                                        vlib_buffer_length_in_chain (vm, b0),
-                                        thread_index);
-         /* Per-user LRU list maintenance */
-         nat44_session_update_lru (sm, s0, thread_index);
-
-       trace0:
-         if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
-                            && (b0->flags & VLIB_BUFFER_IS_TRACED)))
-           {
-             snat_in2out_trace_t *t =
-               vlib_add_trace (vm, node, b0, sizeof (*t));
-             t->is_slow_path = is_slow_path;
-             t->sw_if_index = sw_if_index0;
-             t->next_index = next0;
-             t->session_index = ~0;
-             if (s0)
-               t->session_index =
-                 s0 - sm->per_thread_data[thread_index].sessions;
-           }
+      /* Accounting */
+      nat44_session_update_counters (s0, now,
+                                    vlib_buffer_length_in_chain (vm, b0),
+                                    thread_index);
+      /* Per-user LRU list maintenance */
+      nat44_session_update_lru (sm, s0, thread_index);
 
-         pkts_processed += next0 == SNAT_IN2OUT_NEXT_LOOKUP;
+    trace0:
+      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+                        && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+       {
+         snat_in2out_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+         t->is_slow_path = is_slow_path;
+         t->sw_if_index = sw_if_index0;
+         t->next_index = next0;
+         t->session_index = ~0;
+         if (s0)
+           t->session_index =
+             s0 - sm->per_thread_data[thread_index].sessions;
+       }
 
-         /* verify speculative enqueue, maybe switch current next frame */
-         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                          to_next, n_left_to_next,
-                                          bi0, next0);
+      if (next0 == SNAT_IN2OUT_NEXT_DROP)
+       {
+         vlib_increment_simple_counter (is_slow_path ? &sm->
+                                        counters.slowpath.in2out.
+                                        drops : &sm->counters.fastpath.
+                                        in2out.drops, thread_index,
+                                        sw_if_index0, 1);
        }
 
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      n_left_from--;
+      next[0] = next0;
+      next++;
     }
 
-  vlib_node_increment_counter (vm, stats_node_index,
-                              SNAT_IN2OUT_ERROR_IN2OUT_PACKETS,
-                              pkts_processed);
-  vlib_node_increment_counter (vm, stats_node_index,
-                              SNAT_IN2OUT_ERROR_TCP_PACKETS, tcp_packets);
-  vlib_node_increment_counter (vm, stats_node_index,
-                              SNAT_IN2OUT_ERROR_UDP_PACKETS, udp_packets);
-  vlib_node_increment_counter (vm, stats_node_index,
-                              SNAT_IN2OUT_ERROR_ICMP_PACKETS, icmp_packets);
-  vlib_node_increment_counter (vm, stats_node_index,
-                              SNAT_IN2OUT_ERROR_OTHER_PACKETS,
-                              other_packets);
-  vlib_node_increment_counter (vm, stats_node_index,
-                              SNAT_IN2OUT_ERROR_FRAGMENTS, fragments);
-
+  vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts,
+                              frame->n_vectors);
   return frame->n_vectors;
 }
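
The rewritten loops above switch this node from the speculative vlib_validate_buffer_enqueue_x1/x2 scheme to collecting one next index per packet and enqueueing the whole frame at once. A minimal sketch of that dispatch pattern, with a placeholder node body (the function name and the per-packet work are illustrative, not part of this plugin):

  #include <vlib/vlib.h>
  #include <vlib/buffer_node.h>
  #include <vnet/buffer.h>

  /* Sketch of the dispatch pattern the rewritten node uses: resolve buffer
   * pointers up front, record one next index per packet, and hand the whole
   * frame over in a single vlib_buffer_enqueue_to_next() call instead of the
   * old speculative vlib_validate_buffer_enqueue_x1/x2 scheme.  The function
   * name and the per-packet body are placeholders, not part of the plugin. */
  static uword
  example_inline_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * frame)
  {
    u32 *from = vlib_frame_vector_args (frame);
    u32 n_left_from = frame->n_vectors;
    vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
    u16 nexts[VLIB_FRAME_SIZE], *next = nexts;

    vlib_get_buffers (vm, from, bufs, n_left_from);

    while (n_left_from > 0)
      {
        /* the per-packet translation above would run here; this sketch only
         * marks the buffer and keeps the configured next index 0 */
        b[0]->flags |= VNET_BUFFER_F_IS_NATED;
        next[0] = 0;

        b += 1;
        next += 1;
        n_left_from -= 1;
      }

    vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
    return frame->n_vectors;
  }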
 
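Both code paths above patch the IPv4 and L4 checksums incrementally with ip_csum_update()/ip_csum_fold() rather than recomputing them from scratch. A minimal sketch of that rewrite for just the source address, under the assumption that nothing else in the header changes (the helper name is illustrative, not part of this plugin):

  #include <vnet/ip/ip.h>

  /* Illustrative helper: rewrite the IPv4 source address and patch the
   * header checksum incrementally, exactly as the sum0/sum1 handling above
   * does before also fixing up the TCP or UDP checksum. */
  static_always_inline void
  nat_rewrite_ip4_src (ip4_header_t * ip, ip4_address_t new_src)
  {
    ip_csum_t sum = ip->checksum;
    u32 old_addr = ip->src_address.as_u32;

    ip->src_address = new_src;

    sum = ip_csum_update (sum, old_addr, ip->src_address.as_u32,
                          ip4_header_t, src_address /* changed member */ );
    ip->checksum = ip_csum_fold (sum);
  }

The /* cheat */ comments in the node reuse a 16-bit ip4_header_t member of the right size and offset parity so the same macro can also fold the port rewrite into the TCP or UDP checksum.
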
@@ -1772,11 +1765,8 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
 {
   u32 n_left_from, *from, *to_next;
   snat_in2out_next_t next_index;
-  u32 pkts_processed = 0;
   snat_main_t *sm = &snat_main;
-  u32 stats_node_index;
-
-  stats_node_index = sm->in2out_fast_node_index;
+  int is_hairpinning = 0;
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
@@ -1801,9 +1791,12 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
          udp_header_t *udp0;
          tcp_header_t *tcp0;
          icmp46_header_t *icmp0;
-         snat_session_key_t key0, sm0;
          u32 proto0;
          u32 rx_fib_index0;
+         ip4_address_t sm0_addr;
+         u16 sm0_port;
+         u32 sm0_fib_index;
+
 
          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
@@ -1847,21 +1840,18 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
              goto trace0;
            }
 
-         key0.addr = ip0->src_address;
-         key0.protocol = proto0;
-         key0.port = udp0->src_port;
-         key0.fib_index = rx_fib_index0;
-
-         if (snat_static_mapping_match (sm, key0, &sm0, 0, 0, 0, 0, 0, 0))
+         if (snat_static_mapping_match
+             (sm, ip0->src_address, udp0->src_port, rx_fib_index0, proto0,
+              &sm0_addr, &sm0_port, &sm0_fib_index, 0, 0, 0, 0, 0, 0, 0))
            {
              b0->error = node->errors[SNAT_IN2OUT_ERROR_NO_TRANSLATION];
              next0 = SNAT_IN2OUT_NEXT_DROP;
              goto trace0;
            }
 
-         new_addr0 = sm0.addr.as_u32;
-         new_port0 = sm0.port;
-         vnet_buffer (b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
+         new_addr0 = sm0_addr.as_u32;
+         new_port0 = sm0_port;
+         vnet_buffer (b0)->sw_if_index[VLIB_TX] = sm0_fib_index;
          old_addr0 = ip0->src_address.as_u32;
          ip0->src_address.as_u32 = new_addr0;
 
@@ -1885,7 +1875,7 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
                  sum0 = ip_csum_update (sum0, old_port0, new_port0,
                                         ip4_header_t /* cheat */ ,
                                         length /* changed member */ );
-                 mss_clamping (sm, tcp0, &sum0);
+                 mss_clamping (sm->mss_clamping, tcp0, &sum0);
                  tcp0->checksum = ip_csum_fold (sum0);
                }
              else if (udp0->checksum)
@@ -1908,7 +1898,7 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
                  sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
                                         ip4_header_t,
                                         dst_address /* changed member */ );
-                 mss_clamping (sm, tcp0, &sum0);
+                 mss_clamping (sm->mss_clamping, tcp0, &sum0);
                  tcp0->checksum = ip_csum_fold (sum0);
                }
              else if (udp0->checksum)
@@ -1922,7 +1912,9 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
            }
 
          /* Hairpinning */
-         snat_hairpinning (sm, b0, ip0, udp0, tcp0, proto0, 0);
+         snat_hairpinning (vm, node, sm, b0, ip0, udp0, tcp0, proto0, 0,
+                           0 /* do_trace */ );
+         is_hairpinning = 1;
 
        trace0:
          if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
@@ -1932,9 +1924,16 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
                vlib_add_trace (vm, node, b0, sizeof (*t));
              t->sw_if_index = sw_if_index0;
              t->next_index = next0;
+             t->is_hairpinning = is_hairpinning;
            }
 
-         pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP;
+         if (next0 != SNAT_IN2OUT_NEXT_DROP)
+           {
+
+             vlib_increment_simple_counter (&sm->counters.fastpath.
+                                            in2out.other, vm->thread_index,
+                                            sw_if_index0, 1);
+           }
 
          /* verify speculative enqueue, maybe switch current next frame */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
@@ -1945,9 +1944,6 @@ VLIB_NODE_FN (snat_in2out_fast_node) (vlib_main_t * vm,
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
 
-  vlib_node_increment_counter (vm, stats_node_index,
-                              SNAT_IN2OUT_ERROR_IN2OUT_PACKETS,
-                              pkts_processed);
   return frame->n_vectors;
 }
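
Like the main node, the fast node now feeds per-interface simple counters in place of the removed SNAT_IN2OUT_ERROR_* packet tallies. A minimal sketch of that counting pattern, assuming a counter layout like the sm->counters fields used in this diff (the helper name is hypothetical):

  #include <vlib/vlib.h>
  #include <vlib/counter.h>

  /* Hypothetical helper mirroring the calls above: bump a per-interface
   * in2out counter.  vlib_increment_simple_counter() takes the counter,
   * the calling thread index, the counter index (here the RX sw_if_index)
   * and the increment, in that order. */
  static_always_inline void
  nat_count_in2out (vlib_simple_counter_main_t * fastpath_cm,
                    vlib_simple_counter_main_t * slowpath_cm,
                    int is_slow_path, u32 thread_index, u32 sw_if_index)
  {
    vlib_increment_simple_counter (is_slow_path ? slowpath_cm : fastpath_cm,
                                   thread_index, sw_if_index, 1);
  }

The counter index is the RX sw_if_index, so the statistics are reported per interface; note that vlib_increment_simple_counter() expects the thread index before the counter index, which is the argument order used at the other call sites in this diff.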