nat: fix counters increment for output feature (18/33018/8)
author     Alexander Chernavin <achernavin@netgate.com>
           Tue, 6 Jul 2021 10:08:26 +0000 (06:08 -0400)
committer  Matthew Smith <mgsmith@netgate.com>
           Tue, 17 Aug 2021 21:32:14 +0000 (21:32 +0000)
Type: fix

The NAT plugin stores per-interface packet counters; each counter is a
vector indexed by interface index. When an interface is assigned a NAT
role, the counters are validated (grown) to be long enough for that
interface index.
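
For reference, a VPP simple counter keeps one vector per worker thread,
and an index must be validated (the vectors grown) before it is safe to
increment. A minimal sketch of that pattern, assuming the counter shown
is one of the NAT per-protocol counters from the diff below:

    /* At NAT role assignment (sketch): make sw_if_index a valid slot in
       every per-thread vector of the counter, and clear it. */
    vlib_validate_simple_counter (&sm->counters.fastpath.in2out.tcp,
                                  sw_if_index);
    vlib_zero_simple_counter (&sm->counters.fastpath.in2out.tcp,
                              sw_if_index);

    /* In the datapath: bump the slot for the chosen interface. */
    vlib_increment_simple_counter (&sm->counters.fastpath.in2out.tcp,
                                   thread_index, sw_if_index, 1);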

When a packet traverses NAT in2out and the output feature is disabled,
the appropriate counters are updated using the RX interface index. In
this case, translation happens on the inside interface, whose index was
ensured to be valid in all of the counters during NAT role assignment.

When a packet traverses NAT in2out and the output feature is enabled,
the counters are also updated using the RX interface index. In this
case, however, translation happens on the outside interface, and the
packet could have been received on any interface, including one with no
NAT role assigned. If that interface's index is greater than the
greatest index validated in the counters, the counter update writes to
memory that does not belong to the counter vector. As a result, a crash
will occur at some point.
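
With the output feature enabled, the old code effectively performed the
increment sketched below (illustrative, using one of the counters from
the diff); if rx_sw_if_index was never validated, the write lands past
the end of the per-thread counter vector:

    /* Old behaviour (sketch): rx_sw_if_index may have no NAT role and
       may exceed the validated length of the counter vector. */
    u32 rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
    vlib_increment_simple_counter (&sm->counters.fastpath.in2out.tcp,
                                   thread_index, rx_sw_if_index, 1);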

With this change, use the TX interface index to update the counters
when the output feature is enabled. The TX interface is the interface
where translation actually happens, and its index is always valid in
the counters (see the condensed sketch below).
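
Condensed, the per-buffer index selection applied in the in2out nodes
below boils down to the following, where "counter" stands for any of
the per-protocol counters:

    /* Sketch of the selection done for each buffer. */
    u32 cntr_sw_if_index0 = is_output_feature ?
      vnet_buffer (b0)->sw_if_index[VLIB_TX] :  /* output feature: count on TX */
      vnet_buffer (b0)->sw_if_index[VLIB_RX];   /* plain in2out: count on RX */

    vlib_increment_simple_counter (counter, thread_index,
                                   cntr_sw_if_index0, 1);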

Signed-off-by: Alexander Chernavin <achernavin@netgate.com>
Change-Id: I53a52af949fe96419e1b5fef4134ab4062198f51

src/plugins/nat/nat44-ed/nat44_ed_in2out.c
src/plugins/nat/nat44-ei/nat44_ei_hairpinning.c
src/plugins/nat/nat44-ei/nat44_ei_in2out.c
test/test_nat44_ed.py

index 0065d77..79c03bd 100644
--- a/src/plugins/nat/nat44-ed/nat44_ed_in2out.c
+++ b/src/plugins/nat/nat44-ed/nat44_ed_in2out.c
@@ -988,7 +988,9 @@ nat44_ed_in2out_fast_path_node_fn_inline (vlib_main_t *vm,
   while (n_left_from > 0)
     {
       vlib_buffer_t *b0;
-      u32 sw_if_index0, rx_fib_index0, iph_offset0 = 0;
+      u32 rx_sw_if_index0, rx_fib_index0, iph_offset0 = 0;
+      u32 tx_sw_if_index0;
+      u32 cntr_sw_if_index0;
       nat_protocol_t proto0;
       ip4_header_t *ip0;
       snat_session_t *s0 = 0;
@@ -1023,9 +1025,12 @@ nat44_ed_in2out_fast_path_node_fn_inline (vlib_main_t *vm,
       ip0 =
        (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) + iph_offset0);
 
-      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-      rx_fib_index0 =
-       fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, sw_if_index0);
+      rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      tx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+      cntr_sw_if_index0 =
+       is_output_feature ? tx_sw_if_index0 : rx_sw_if_index0;
+      rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+                                                          rx_sw_if_index0);
       lookup.fib_index = rx_fib_index0;
 
       if (PREDICT_FALSE (!is_output_feature && ip0->ttl == 1))
@@ -1196,20 +1201,20 @@ nat44_ed_in2out_fast_path_node_fn_inline (vlib_main_t *vm,
        {
        case NAT_PROTOCOL_TCP:
          vlib_increment_simple_counter (&sm->counters.fastpath.in2out.tcp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
          nat44_set_tcp_session_state_i2o (sm, now, s0, b0, thread_index);
          break;
        case NAT_PROTOCOL_UDP:
          vlib_increment_simple_counter (&sm->counters.fastpath.in2out.udp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
          break;
        case NAT_PROTOCOL_ICMP:
          vlib_increment_simple_counter (&sm->counters.fastpath.in2out.icmp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
          break;
        case NAT_PROTOCOL_OTHER:
          vlib_increment_simple_counter (&sm->counters.fastpath.in2out.other,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
          break;
        }
 
@@ -1227,7 +1232,7 @@ nat44_ed_in2out_fast_path_node_fn_inline (vlib_main_t *vm,
        {
          nat_in2out_ed_trace_t *t =
            vlib_add_trace (vm, node, b0, sizeof (*t));
-         t->sw_if_index = sw_if_index0;
+         t->sw_if_index = rx_sw_if_index0;
          t->next_index = next[0];
          t->is_slow_path = 0;
          t->translation_error = translation_error;
@@ -1250,7 +1255,7 @@ nat44_ed_in2out_fast_path_node_fn_inline (vlib_main_t *vm,
       if (next[0] == NAT_NEXT_DROP)
        {
          vlib_increment_simple_counter (&sm->counters.fastpath.in2out.drops,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
        }
 
       n_left_from--;
@@ -1285,7 +1290,9 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
   while (n_left_from > 0)
     {
       vlib_buffer_t *b0;
-      u32 sw_if_index0, rx_fib_index0, iph_offset0 = 0;
+      u32 rx_sw_if_index0, rx_fib_index0, iph_offset0 = 0;
+      u32 tx_sw_if_index0;
+      u32 cntr_sw_if_index0;
       nat_protocol_t proto0;
       ip4_header_t *ip0;
       udp_header_t *udp0;
@@ -1304,9 +1311,12 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
       ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
                              iph_offset0);
 
-      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-      rx_fib_index0 =
-       fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4, sw_if_index0);
+      rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      tx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+      cntr_sw_if_index0 =
+       is_output_feature ? tx_sw_if_index0 : rx_sw_if_index0;
+      rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+                                                          rx_sw_if_index0);
 
       if (PREDICT_FALSE (!is_output_feature && ip0->ttl == 1))
        {
@@ -1342,14 +1352,14 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
            }
 
          vlib_increment_simple_counter (&sm->counters.slowpath.in2out.other,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
          goto trace0;
        }
 
       if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
        {
          next[0] = icmp_in2out_ed_slow_path (
-           sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, next[0],
+           sm, b0, ip0, icmp0, rx_sw_if_index0, rx_fib_index0, node, next[0],
            now, thread_index, proto0, &s0, is_multi_worker);
          if (NAT_NEXT_DROP != next[0] && s0 &&
              NAT_ED_TRNSL_ERR_SUCCESS !=
@@ -1364,7 +1374,7 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
            }
 
          vlib_increment_simple_counter (&sm->counters.slowpath.in2out.icmp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
          goto trace0;
        }
 
@@ -1394,8 +1404,7 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
              if (PREDICT_FALSE (nat44_ed_not_translate_output_feature (
                    sm, b0, ip0, vnet_buffer (b0)->ip.reass.l4_src_port,
                    vnet_buffer (b0)->ip.reass.l4_dst_port, thread_index,
-                   sw_if_index0, vnet_buffer (b0)->sw_if_index[VLIB_TX], now,
-                   is_multi_worker)))
+                   rx_sw_if_index0, tx_sw_if_index0, now, is_multi_worker)))
                goto trace0;
 
              /*
@@ -1412,8 +1421,8 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
          else
            {
              if (PREDICT_FALSE (nat44_ed_not_translate (
-                   vm, sm, node, sw_if_index0, b0, ip0, proto0, rx_fib_index0,
-                   thread_index)))
+                   vm, sm, node, rx_sw_if_index0, b0, ip0, proto0,
+                   rx_fib_index0, thread_index)))
                goto trace0;
            }
 
@@ -1447,13 +1456,13 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
       if (PREDICT_TRUE (proto0 == NAT_PROTOCOL_TCP))
        {
          vlib_increment_simple_counter (&sm->counters.slowpath.in2out.tcp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
          nat44_set_tcp_session_state_i2o (sm, now, s0, b0, thread_index);
        }
       else
        {
          vlib_increment_simple_counter (&sm->counters.slowpath.in2out.udp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
        }
 
       /* Accounting */
@@ -1469,7 +1478,7 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
        {
          nat_in2out_ed_trace_t *t =
            vlib_add_trace (vm, node, b0, sizeof (*t));
-         t->sw_if_index = sw_if_index0;
+         t->sw_if_index = rx_sw_if_index0;
          t->next_index = next[0];
          t->is_slow_path = 1;
          t->translation_error = translation_error;
@@ -1492,7 +1501,7 @@ nat44_ed_in2out_slow_path_node_fn_inline (vlib_main_t *vm,
       if (next[0] == NAT_NEXT_DROP)
        {
          vlib_increment_simple_counter (&sm->counters.slowpath.in2out.drops,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
        }
 
       n_left_from--;
index a049e46..c3d3cfb 100644
--- a/src/plugins/nat/nat44-ei/nat44_ei_hairpinning.c
+++ b/src/plugins/nat/nat44-ei/nat44_ei_hairpinning.c
@@ -428,7 +428,8 @@ VLIB_NODE_FN (nat44_ei_hairpin_src_node)
          vlib_buffer_t *b0;
          u32 next0;
          nat44_ei_interface_t *i;
-         u32 sw_if_index0;
+         u32 rx_sw_if_index0;
+         u32 tx_sw_if_index0;
 
          /* speculatively enqueue b0 to the current next frame */
          bi0 = from[0];
@@ -439,13 +440,14 @@ VLIB_NODE_FN (nat44_ei_hairpin_src_node)
          n_left_to_next -= 1;
 
          b0 = vlib_get_buffer (vm, bi0);
-         sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+         rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+         tx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
 
          pool_foreach (i, nm->output_feature_interfaces)
            {
              /* Only packets from NAT inside interface */
              if ((nat44_ei_interface_is_inside (i)) &&
-                 (sw_if_index0 == i->sw_if_index))
+                 (rx_sw_if_index0 == i->sw_if_index))
                {
                  if (PREDICT_FALSE ((vnet_buffer (b0)->snat.flags) &
                                     NAT44_EI_FLAG_HAIRPINNING))
@@ -470,8 +472,9 @@ VLIB_NODE_FN (nat44_ei_hairpin_src_node)
 
          if (next0 != NAT44_EI_HAIRPIN_SRC_NEXT_DROP)
            {
-             vlib_increment_simple_counter (
-               &nm->counters.hairpinning, vm->thread_index, sw_if_index0, 1);
+             vlib_increment_simple_counter (&nm->counters.hairpinning,
+                                            vm->thread_index, tx_sw_if_index0,
+                                            1);
            }
 
          /* verify speculative enqueue, maybe switch current next frame */
index 7ac1a92..1e28ed6 100644
--- a/src/plugins/nat/nat44-ei/nat44_ei_in2out.c
+++ b/src/plugins/nat/nat44-ei/nat44_ei_in2out.c
@@ -934,7 +934,9 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
     {
       vlib_buffer_t *b0, *b1;
       u32 next0, next1;
-      u32 sw_if_index0, sw_if_index1;
+      u32 rx_sw_if_index0, rx_sw_if_index1;
+      u32 tx_sw_if_index0, tx_sw_if_index1;
+      u32 cntr_sw_if_index0, cntr_sw_if_index1;
       ip4_header_t *ip0, *ip1;
       ip_csum_t sum0, sum1;
       u32 new_addr0, old_addr0, new_addr1, old_addr1;
@@ -978,9 +980,12 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       tcp0 = (tcp_header_t *) udp0;
       icmp0 = (icmp46_header_t *) udp0;
 
-      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      tx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+      cntr_sw_if_index0 =
+       is_output_feature ? tx_sw_if_index0 : rx_sw_if_index0;
       rx_fib_index0 =
-       vec_elt (nm->ip4_main->fib_index_by_sw_if_index, sw_if_index0);
+       vec_elt (nm->ip4_main->fib_index_by_sw_if_index, rx_sw_if_index0);
 
       next0 = next1 = NAT44_EI_IN2OUT_NEXT_LOOKUP;
 
@@ -1010,19 +1015,19 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              vlib_increment_simple_counter (
                is_slow_path ? &nm->counters.slowpath.in2out.other :
                               &nm->counters.fastpath.in2out.other,
-               thread_index, sw_if_index0, 1);
+               thread_index, cntr_sw_if_index0, 1);
              goto trace00;
            }
 
          if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
            {
              next0 = nat44_ei_icmp_in2out_slow_path (
-               nm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, next0,
-               now, thread_index, &s0);
+               nm, b0, ip0, icmp0, rx_sw_if_index0, rx_fib_index0, node,
+               next0, now, thread_index, &s0);
              vlib_increment_simple_counter (
                is_slow_path ? &nm->counters.slowpath.in2out.icmp :
                               &nm->counters.fastpath.in2out.icmp,
-               thread_index, sw_if_index0, 1);
+               thread_index, cntr_sw_if_index0, 1);
              goto trace00;
            }
        }
@@ -1055,7 +1060,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                        nm, ip0, proto0,
                        vnet_buffer (b0)->ip.reass.l4_src_port,
                        vnet_buffer (b0)->ip.reass.l4_dst_port, thread_index,
-                       sw_if_index0)))
+                       rx_sw_if_index0)))
                    goto trace00;
 
                  /*
@@ -1073,7 +1078,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              else
                {
                  if (PREDICT_FALSE (nat44_ei_not_translate (
-                       nm, node, sw_if_index0, ip0, proto0, rx_fib_index0,
+                       nm, node, rx_sw_if_index0, ip0, proto0, rx_fib_index0,
                        thread_index)))
                    goto trace00;
                }
@@ -1131,7 +1136,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (is_slow_path ?
                                           &nm->counters.slowpath.in2out.tcp :
                                           &nm->counters.fastpath.in2out.tcp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
        }
       else
        {
@@ -1155,7 +1160,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (is_slow_path ?
                                           &nm->counters.slowpath.in2out.udp :
                                           &nm->counters.fastpath.in2out.udp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
        }
 
       /* Accounting */
@@ -1171,7 +1176,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          nat44_ei_in2out_trace_t *t =
            vlib_add_trace (vm, node, b0, sizeof (*t));
          t->is_slow_path = is_slow_path;
-         t->sw_if_index = sw_if_index0;
+         t->sw_if_index = rx_sw_if_index0;
          t->next_index = next0;
          t->session_index = ~0;
          if (s0)
@@ -1183,7 +1188,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (
            is_slow_path ? &nm->counters.slowpath.in2out.drops :
                           &nm->counters.fastpath.in2out.drops,
-           thread_index, sw_if_index0, 1);
+           thread_index, cntr_sw_if_index0, 1);
        }
 
       if (is_output_feature)
@@ -1196,9 +1201,12 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       tcp1 = (tcp_header_t *) udp1;
       icmp1 = (icmp46_header_t *) udp1;
 
-      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+      rx_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+      tx_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+      cntr_sw_if_index1 =
+       is_output_feature ? tx_sw_if_index1 : rx_sw_if_index1;
       rx_fib_index1 =
-       vec_elt (nm->ip4_main->fib_index_by_sw_if_index, sw_if_index1);
+       vec_elt (nm->ip4_main->fib_index_by_sw_if_index, rx_sw_if_index1);
 
       if (PREDICT_FALSE (ip1->ttl == 1))
        {
@@ -1226,19 +1234,19 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              vlib_increment_simple_counter (
                is_slow_path ? &nm->counters.slowpath.in2out.other :
                               &nm->counters.fastpath.in2out.other,
-               thread_index, sw_if_index1, 1);
+               thread_index, cntr_sw_if_index1, 1);
              goto trace01;
            }
 
          if (PREDICT_FALSE (proto1 == NAT_PROTOCOL_ICMP))
            {
              next1 = nat44_ei_icmp_in2out_slow_path (
-               nm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node, next1,
-               now, thread_index, &s1);
+               nm, b1, ip1, icmp1, rx_sw_if_index1, rx_fib_index1, node,
+               next1, now, thread_index, &s1);
              vlib_increment_simple_counter (
                is_slow_path ? &nm->counters.slowpath.in2out.icmp :
                               &nm->counters.fastpath.in2out.icmp,
-               thread_index, sw_if_index1, 1);
+               thread_index, cntr_sw_if_index1, 1);
              goto trace01;
            }
        }
@@ -1271,7 +1279,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                        nm, ip1, proto1,
                        vnet_buffer (b1)->ip.reass.l4_src_port,
                        vnet_buffer (b1)->ip.reass.l4_dst_port, thread_index,
-                       sw_if_index1)))
+                       rx_sw_if_index1)))
                    goto trace01;
 
                  /*
@@ -1289,7 +1297,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              else
                {
                  if (PREDICT_FALSE (nat44_ei_not_translate (
-                       nm, node, sw_if_index1, ip1, proto1, rx_fib_index1,
+                       nm, node, rx_sw_if_index1, ip1, proto1, rx_fib_index1,
                        thread_index)))
                    goto trace01;
                }
@@ -1346,7 +1354,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (is_slow_path ?
                                           &nm->counters.slowpath.in2out.tcp :
                                           &nm->counters.fastpath.in2out.tcp,
-                                        thread_index, sw_if_index1, 1);
+                                        thread_index, cntr_sw_if_index1, 1);
        }
       else
        {
@@ -1370,7 +1378,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (is_slow_path ?
                                           &nm->counters.slowpath.in2out.udp :
                                           &nm->counters.fastpath.in2out.udp,
-                                        thread_index, sw_if_index1, 1);
+                                        thread_index, cntr_sw_if_index1, 1);
        }
 
       /* Accounting */
@@ -1385,7 +1393,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          nat44_ei_in2out_trace_t *t =
            vlib_add_trace (vm, node, b1, sizeof (*t));
-         t->sw_if_index = sw_if_index1;
+         t->sw_if_index = rx_sw_if_index1;
          t->next_index = next1;
          t->session_index = ~0;
          if (s1)
@@ -1397,7 +1405,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (
            is_slow_path ? &nm->counters.slowpath.in2out.drops :
                           &nm->counters.fastpath.in2out.drops,
-           thread_index, sw_if_index1, 1);
+           thread_index, cntr_sw_if_index1, 1);
        }
 
       n_left_from -= 2;
@@ -1410,7 +1418,9 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
     {
       vlib_buffer_t *b0;
       u32 next0;
-      u32 sw_if_index0;
+      u32 rx_sw_if_index0;
+      u32 tx_sw_if_index0;
+      u32 cntr_sw_if_index0;
       ip4_header_t *ip0;
       ip_csum_t sum0;
       u32 new_addr0, old_addr0;
@@ -1438,9 +1448,12 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       tcp0 = (tcp_header_t *) udp0;
       icmp0 = (icmp46_header_t *) udp0;
 
-      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      rx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+      tx_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+      cntr_sw_if_index0 =
+       is_output_feature ? tx_sw_if_index0 : rx_sw_if_index0;
       rx_fib_index0 =
-       vec_elt (nm->ip4_main->fib_index_by_sw_if_index, sw_if_index0);
+       vec_elt (nm->ip4_main->fib_index_by_sw_if_index, rx_sw_if_index0);
 
       if (PREDICT_FALSE (ip0->ttl == 1))
        {
@@ -1468,19 +1481,19 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              vlib_increment_simple_counter (
                is_slow_path ? &nm->counters.slowpath.in2out.other :
                               &nm->counters.fastpath.in2out.other,
-               thread_index, sw_if_index0, 1);
+               thread_index, cntr_sw_if_index0, 1);
              goto trace0;
            }
 
          if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_ICMP))
            {
              next0 = nat44_ei_icmp_in2out_slow_path (
-               nm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, next0,
-               now, thread_index, &s0);
+               nm, b0, ip0, icmp0, rx_sw_if_index0, rx_fib_index0, node,
+               next0, now, thread_index, &s0);
              vlib_increment_simple_counter (
                is_slow_path ? &nm->counters.slowpath.in2out.icmp :
                               &nm->counters.fastpath.in2out.icmp,
-               thread_index, sw_if_index0, 1);
+               thread_index, cntr_sw_if_index0, 1);
              goto trace0;
            }
        }
@@ -1513,7 +1526,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                        nm, ip0, proto0,
                        vnet_buffer (b0)->ip.reass.l4_src_port,
                        vnet_buffer (b0)->ip.reass.l4_dst_port, thread_index,
-                       sw_if_index0)))
+                       rx_sw_if_index0)))
                    goto trace0;
 
                  /*
@@ -1531,7 +1544,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              else
                {
                  if (PREDICT_FALSE (nat44_ei_not_translate (
-                       nm, node, sw_if_index0, ip0, proto0, rx_fib_index0,
+                       nm, node, rx_sw_if_index0, ip0, proto0, rx_fib_index0,
                        thread_index)))
                    goto trace0;
                }
@@ -1590,7 +1603,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (is_slow_path ?
                                           &nm->counters.slowpath.in2out.tcp :
                                           &nm->counters.fastpath.in2out.tcp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
        }
       else
        {
@@ -1615,7 +1628,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (is_slow_path ?
                                           &nm->counters.slowpath.in2out.udp :
                                           &nm->counters.fastpath.in2out.udp,
-                                        thread_index, sw_if_index0, 1);
+                                        thread_index, cntr_sw_if_index0, 1);
        }
 
       /* Accounting */
@@ -1631,7 +1644,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          nat44_ei_in2out_trace_t *t =
            vlib_add_trace (vm, node, b0, sizeof (*t));
          t->is_slow_path = is_slow_path;
-         t->sw_if_index = sw_if_index0;
+         t->sw_if_index = rx_sw_if_index0;
          t->next_index = next0;
          t->session_index = ~0;
          if (s0)
@@ -1643,7 +1656,7 @@ nat44_ei_in2out_node_fn_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          vlib_increment_simple_counter (
            is_slow_path ? &nm->counters.slowpath.in2out.drops :
                           &nm->counters.fastpath.in2out.drops,
-           thread_index, sw_if_index0, 1);
+           thread_index, cntr_sw_if_index0, 1);
        }
 
       n_left_from--;
index b11e3c0..258bee3 100644
--- a/test/test_nat44_ed.py
+++ b/test/test_nat44_ed.py
@@ -3681,7 +3681,7 @@ class TestNAT44EDMW(TestNAT44ED):
             capture = self.pg8.get_capture(len(pkts))
             self.verify_capture_out(capture, ignore_port=True)
 
-            if_idx = self.pg7.sw_if_index
+            if_idx = self.pg8.sw_if_index
             cnt = self.statistics['/nat44-ed/in2out/slowpath/tcp']
             self.assertEqual(cnt[:, if_idx].sum() - tcpn[:, if_idx].sum(), 2)
             cnt = self.statistics['/nat44-ed/in2out/slowpath/udp']