ipsec: add per-SA error counters 73/37673/15
author Arthur de Kerhor <arthurdekerhor@gmail.com>
Wed, 16 Nov 2022 18:12:05 +0000 (19:12 +0100)
committer Benoît Ganne <bganne@cisco.com>
Thu, 23 Mar 2023 08:58:55 +0000 (08:58 +0000)
Error counters are maintained on a per-node basis. For IPsec it is
useful to also track the errors that occurred per SA, so this change adds
one simple counter per SA error type, exposed under /net/ipsec/sa/err/ in
the stats segment.

Type: feature
Change-Id: Iabcdcb439f67ad3c6c202b36ffc44ab39abac1bc
Signed-off-by: Arthur de Kerhor <arthurdekerhor@gmail.com>
15 files changed:
src/vnet/ipsec/ah.h
src/vnet/ipsec/ah_decrypt.c
src/vnet/ipsec/ah_encrypt.c
src/vnet/ipsec/esp.h
src/vnet/ipsec/esp_decrypt.c
src/vnet/ipsec/esp_encrypt.c
src/vnet/ipsec/ipsec.h
src/vnet/ipsec/ipsec_cli.c
src/vnet/ipsec/ipsec_format.c
src/vnet/ipsec/ipsec_sa.c
src/vnet/ipsec/ipsec_sa.h
test/template_ipsec.py
test/test_ipsec_spd_fp_input.py
test/test_ipsec_tun_if_esp.py
test/vpp_ipsec.py
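
The hunks below all follow the same pattern: the single ipsec_sa_lost_counters is replaced by an array of simple counters, one per SA error type, and the drop paths of the AH/ESP nodes charge the matching entry to the SA that owned the packet. A minimal sketch of that pattern, using the same counter API calls the patch uses (the two helper names here are illustrative, not part of the patch):

/* sketch only; assumes the usual vlib/vnet headers */

/* one simple-counter main per SA error type (sized by IPSEC_SA_N_ERRORS) */
vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];

/* when an SA is created, make room for its index in every error counter */
static void
sa_err_counters_validate (u32 sa_index)
{
  for (int i = 0; i < IPSEC_SA_N_ERRORS; i++)
    {
      vlib_validate_simple_counter (&ipsec_sa_err_counters[i], sa_index);
      vlib_zero_simple_counter (&ipsec_sa_err_counters[i], sa_index);
    }
}

/* on a drop, charge the error to the offending SA */
static void
sa_err_count_one (u32 thread_index, u32 sa_err, u32 sa_index)
{
  vlib_increment_simple_counter (&ipsec_sa_err_counters[sa_err],
				 thread_index, sa_index, 1);
}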

src/vnet/ipsec/ah.h
index d0b4c21..ae4cd0b 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <vnet/ip/ip.h>
 #include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
 
 typedef struct
 {
@@ -43,6 +44,58 @@ typedef CLIB_PACKED (struct {
 }) ip6_and_ah_header_t;
 /* *INDENT-ON* */
 
+always_inline u32
+ah_encrypt_err_to_sa_err (u32 err)
+{
+  switch (err)
+    {
+    case AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+    case AH_ENCRYPT_ERROR_SEQ_CYCLED:
+      return IPSEC_SA_ERROR_SEQ_CYCLED;
+    }
+  return ~0;
+}
+
+always_inline u32
+ah_decrypt_err_to_sa_err (u32 err)
+{
+  switch (err)
+    {
+    case AH_DECRYPT_ERROR_DECRYPTION_FAILED:
+      return IPSEC_SA_ERROR_DECRYPTION_FAILED;
+    case AH_DECRYPT_ERROR_INTEG_ERROR:
+      return IPSEC_SA_ERROR_INTEG_ERROR;
+    case AH_DECRYPT_ERROR_NO_TAIL_SPACE:
+      return IPSEC_SA_ERROR_NO_TAIL_SPACE;
+    case AH_DECRYPT_ERROR_DROP_FRAGMENTS:
+      return IPSEC_SA_ERROR_DROP_FRAGMENTS;
+    case AH_DECRYPT_ERROR_REPLAY:
+      return IPSEC_SA_ERROR_REPLAY;
+    }
+  return ~0;
+}
+
+always_inline void
+ah_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+                          u32 thread_index, u32 err, u16 index, u16 *nexts,
+                          u16 drop_next, u32 sa_index)
+{
+  ipsec_set_next_index (b, node, thread_index, err,
+                       ah_encrypt_err_to_sa_err (err), index, nexts,
+                       drop_next, sa_index);
+}
+
+always_inline void
+ah_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+                          u32 thread_index, u32 err, u16 index, u16 *nexts,
+                          u16 drop_next, u32 sa_index)
+{
+  ipsec_set_next_index (b, node, thread_index, err,
+                       ah_decrypt_err_to_sa_err (err), index, nexts,
+                       drop_next, sa_index);
+}
+
 always_inline u8
 ah_calc_icv_padding_len (u8 icv_size, int is_ipv6)
 {
src/vnet/ipsec/ah_decrypt.c
index c9209d6..ce4610d 100644 (file)
@@ -23,7 +23,6 @@
 #include <vnet/ipsec/esp.h>
 #include <vnet/ipsec/ah.h>
 #include <vnet/ipsec/ipsec_io.h>
-#include <vnet/ipsec/ipsec.api_enum.h>
 
 #define foreach_ah_decrypt_next                 \
   _(DROP, "error-drop")                         \
@@ -104,8 +103,9 @@ ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
-         b[bi]->error = node->errors[AH_DECRYPT_ERROR_INTEG_ERROR];
-         nexts[bi] = AH_DECRYPT_NEXT_DROP;
+         ah_decrypt_set_next_index (
+           b[bi], node, vm->thread_index, AH_DECRYPT_ERROR_INTEG_ERROR, bi,
+           nexts, AH_DECRYPT_NEXT_DROP, vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -145,8 +145,7 @@ ah_decrypt_inline (vlib_main_t * vm,
        {
          if (current_sa_index != ~0)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                            current_sa_index,
-                                            current_sa_pkts,
+                                            current_sa_index, current_sa_pkts,
                                             current_sa_bytes);
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = ipsec_sa_get (current_sa_index);
@@ -190,8 +189,9 @@ ah_decrypt_inline (vlib_main_t * vm,
        {
          if (ip4_is_fragment (ih4))
            {
-             b[0]->error = node->errors[AH_DECRYPT_ERROR_DROP_FRAGMENTS];
-             next[0] = AH_DECRYPT_NEXT_DROP;
+             ah_decrypt_set_next_index (
+               b[0], node, vm->thread_index, AH_DECRYPT_ERROR_DROP_FRAGMENTS,
+               0, next, AH_DECRYPT_NEXT_DROP, current_sa_index);
              goto next;
            }
          pd->ip_hdr_size = ip4_header_bytes (ih4);
@@ -204,8 +204,9 @@ ah_decrypt_inline (vlib_main_t * vm,
       if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
                                               &pd->seq_hi))
        {
-         b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
-         next[0] = AH_DECRYPT_NEXT_DROP;
+         ah_decrypt_set_next_index (b[0], node, vm->thread_index,
+                                    AH_DECRYPT_ERROR_REPLAY, 0, next,
+                                    AH_DECRYPT_NEXT_DROP, current_sa_index);
          goto next;
        }
 
@@ -220,8 +221,9 @@ ah_decrypt_inline (vlib_main_t * vm,
                             pd->current_data + b[0]->current_length
                             + sizeof (u32) > buffer_data_size))
            {
-             b[0]->error = node->errors[AH_DECRYPT_ERROR_NO_TAIL_SPACE];
-             next[0] = AH_DECRYPT_NEXT_DROP;
+             ah_decrypt_set_next_index (
+               b[0], node, vm->thread_index, AH_DECRYPT_ERROR_NO_TAIL_SPACE,
+               0, next, AH_DECRYPT_NEXT_DROP, current_sa_index);
              goto next;
            }
 
@@ -307,14 +309,16 @@ ah_decrypt_inline (vlib_main_t * vm,
          if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi,
                                                   true, NULL))
            {
-             b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
-             next[0] = AH_DECRYPT_NEXT_DROP;
+             ah_decrypt_set_next_index (b[0], node, vm->thread_index,
+                                        AH_DECRYPT_ERROR_REPLAY, 0, next,
+                                        AH_DECRYPT_NEXT_DROP, pd->sa_index);
              goto trace;
            }
          n_lost = ipsec_sa_anti_replay_advance (sa0, thread_index, pd->seq,
                                                 pd->seq_hi);
-         vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, thread_index,
-                                       pd->sa_index);
+         vlib_prefetch_simple_counter (
+           &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
+           pd->sa_index);
        }
 
       u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
@@ -330,8 +334,10 @@ ah_decrypt_inline (vlib_main_t * vm,
            next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
          else
            {
-             b[0]->error = node->errors[AH_DECRYPT_ERROR_DECRYPTION_FAILED];
-             next[0] = AH_DECRYPT_NEXT_DROP;
+             ah_decrypt_set_next_index (b[0], node, vm->thread_index,
+                                        AH_DECRYPT_ERROR_DECRYPTION_FAILED, 0,
+                                        next, AH_DECRYPT_NEXT_DROP,
+                                        pd->sa_index);
              goto trace;
            }
        }
@@ -382,8 +388,9 @@ ah_decrypt_inline (vlib_main_t * vm,
        }
 
       if (PREDICT_FALSE (n_lost))
-       vlib_increment_simple_counter (&ipsec_sa_lost_counters, thread_index,
-                                      pd->sa_index, n_lost);
+       vlib_increment_simple_counter (
+         &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
+         pd->sa_index, n_lost);
 
       vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
     trace:
src/vnet/ipsec/ah_encrypt.c
index 7116a16..e2d17d4 100644 (file)
@@ -81,8 +81,10 @@ ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
-         b[bi]->error = node->errors[AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
-         nexts[bi] = AH_ENCRYPT_NEXT_DROP;
+         ah_encrypt_set_next_index (b[bi], node, vm->thread_index,
+                                    AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR, bi,
+                                    nexts, AH_ENCRYPT_NEXT_DROP,
+                                    vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -153,13 +155,14 @@ ah_encrypt_inline (vlib_main_t * vm,
        {
          if (current_sa_index != ~0)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                            current_sa_index,
-                                            current_sa_pkts,
+                                            current_sa_index, current_sa_pkts,
                                             current_sa_bytes);
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = ipsec_sa_get (current_sa_index);
 
          current_sa_bytes = current_sa_pkts = 0;
+         vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+                                         current_sa_index);
        }
 
       pd->sa_index = current_sa_index;
@@ -183,7 +186,9 @@ ah_encrypt_inline (vlib_main_t * vm,
 
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
-         b[0]->error = node->errors[AH_ENCRYPT_ERROR_SEQ_CYCLED];
+         ah_encrypt_set_next_index (b[0], node, vm->thread_index,
+                                    AH_ENCRYPT_ERROR_SEQ_CYCLED, 0, next,
+                                    AH_ENCRYPT_NEXT_DROP, current_sa_index);
          pd->skip = 1;
          goto next;
        }
src/vnet/ipsec/esp.h
index 8d7e056..05773a2 100644 (file)
@@ -18,6 +18,7 @@
 #include <vnet/ip/ip.h>
 #include <vnet/crypto/crypto.h>
 #include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
 
 typedef struct
 {
@@ -141,33 +142,96 @@ esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
     }
 }
 
-/* Special case to drop or hand off packets for sync/async modes.
- *
- * Different than sync mode, async mode only enqueue drop or hand-off packets
- * to next nodes.
- */
+always_inline u32
+esp_encrypt_err_to_sa_err (u32 err)
+{
+  switch (err)
+    {
+    case ESP_ENCRYPT_ERROR_HANDOFF:
+      return IPSEC_SA_ERROR_HANDOFF;
+    case ESP_ENCRYPT_ERROR_SEQ_CYCLED:
+      return IPSEC_SA_ERROR_SEQ_CYCLED;
+    case ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+    case ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL:
+      return IPSEC_SA_ERROR_CRYPTO_QUEUE_FULL;
+    case ESP_ENCRYPT_ERROR_NO_BUFFERS:
+      return IPSEC_SA_ERROR_NO_BUFFERS;
+    case ESP_ENCRYPT_ERROR_NO_ENCRYPTION:
+      return IPSEC_SA_ERROR_NO_ENCRYPTION;
+    }
+  return ~0;
+}
+
+always_inline u32
+esp_decrypt_err_to_sa_err (u32 err)
+{
+  switch (err)
+    {
+    case ESP_DECRYPT_ERROR_HANDOFF:
+      return IPSEC_SA_ERROR_HANDOFF;
+    case ESP_DECRYPT_ERROR_DECRYPTION_FAILED:
+      return IPSEC_SA_ERROR_DECRYPTION_FAILED;
+    case ESP_DECRYPT_ERROR_INTEG_ERROR:
+      return IPSEC_SA_ERROR_INTEG_ERROR;
+    case ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR:
+      return IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR;
+    case ESP_DECRYPT_ERROR_REPLAY:
+      return IPSEC_SA_ERROR_REPLAY;
+    case ESP_DECRYPT_ERROR_RUNT:
+      return IPSEC_SA_ERROR_RUNT;
+    case ESP_DECRYPT_ERROR_NO_BUFFERS:
+      return IPSEC_SA_ERROR_NO_BUFFERS;
+    case ESP_DECRYPT_ERROR_OVERSIZED_HEADER:
+      return IPSEC_SA_ERROR_OVERSIZED_HEADER;
+    case ESP_DECRYPT_ERROR_NO_TAIL_SPACE:
+      return IPSEC_SA_ERROR_NO_TAIL_SPACE;
+    case ESP_DECRYPT_ERROR_TUN_NO_PROTO:
+      return IPSEC_SA_ERROR_TUN_NO_PROTO;
+    case ESP_DECRYPT_ERROR_UNSUP_PAYLOAD:
+      return IPSEC_SA_ERROR_UNSUP_PAYLOAD;
+    }
+  return ~0;
+}
+
+always_inline void
+esp_encrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+                           u32 thread_index, u32 err, u16 index, u16 *nexts,
+                           u16 drop_next, u32 sa_index)
+{
+  ipsec_set_next_index (b, node, thread_index, err,
+                       esp_encrypt_err_to_sa_err (err), index, nexts,
+                       drop_next, sa_index);
+}
+
 always_inline void
-esp_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node, u32 err,
-                   u16 index, u16 *nexts, u16 drop_next)
+esp_decrypt_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+                           u32 thread_index, u32 err, u16 index, u16 *nexts,
+                           u16 drop_next, u32 sa_index)
 {
-  nexts[index] = drop_next;
-  b->error = node->errors[err];
+  ipsec_set_next_index (b, node, thread_index, err,
+                       esp_decrypt_err_to_sa_err (err), index, nexts,
+                       drop_next, sa_index);
 }
 
 /* when submitting a frame is failed, drop all buffers in the frame */
 always_inline u32
 esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
-                                vlib_node_runtime_t *node, u32 err, u16 index,
-                                u32 *from, u16 *nexts, u16 drop_next_index)
+                                vlib_node_runtime_t *node, u32 err,
+                                u32 ipsec_sa_err, u16 index, u32 *from,
+                                u16 *nexts, u16 drop_next_index)
 {
+  vlib_buffer_t *b;
   u32 n_drop = f->n_elts;
   u32 *bi = f->buffer_indices;
 
   while (n_drop--)
     {
       from[index] = bi[0];
-      esp_set_next_index (vlib_get_buffer (vm, bi[0]), node, err, index, nexts,
-                         drop_next_index);
+      b = vlib_get_buffer (vm, bi[0]);
+      ipsec_set_next_index (b, node, vm->thread_index, err, ipsec_sa_err,
+                           index, nexts, drop_next_index,
+                           vnet_buffer (b)->ipsec.sad_index);
       bi++;
       index++;
     }
src/vnet/ipsec/esp_decrypt.c
index 306fb7d..1bcc65c 100644 (file)
@@ -23,7 +23,6 @@
 #include <vnet/ipsec/esp.h>
 #include <vnet/ipsec/ipsec_io.h>
 #include <vnet/ipsec/ipsec_tun.h>
-#include <vnet/ipsec/ipsec.api_enum.h>
 
 #include <vnet/gre/packet.h>
 
@@ -114,8 +113,9 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-         b[bi]->error = node->errors[err];
-         nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+         esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+                                     nexts, ESP_DECRYPT_NEXT_DROP,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -146,8 +146,9 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-         b[bi]->error = node->errors[err];
-         nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+         esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+                                     nexts, ESP_DECRYPT_NEXT_DROP,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -525,8 +526,9 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
                                       payload, pd->current_length,
                                       &op->digest, &op->n_chunks, 0) < 0)
            {
-             b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
-             next[0] = ESP_DECRYPT_NEXT_DROP;
+             esp_decrypt_set_next_index (
+               b, node, vm->thread_index, ESP_DECRYPT_ERROR_NO_BUFFERS, 0,
+               next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
              return;
            }
        }
@@ -721,7 +723,7 @@ out:
 }
 
 static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
+esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
                         const u16 *next_by_next_header,
                         const esp_decrypt_packet_data_t *pd,
                         const esp_decrypt_packet_data2_t *pd2,
@@ -760,16 +762,17 @@ esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
   if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
                                           NULL))
     {
-      b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
-      next[0] = ESP_DECRYPT_NEXT_DROP;
+      esp_decrypt_set_next_index (b, node, vm->thread_index,
+                                 ESP_DECRYPT_ERROR_REPLAY, 0, next,
+                                 ESP_DECRYPT_NEXT_DROP, pd->sa_index);
       return;
     }
 
   u64 n_lost =
     ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);
 
-  vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
-                               pd->sa_index);
+  vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+                               vm->thread_index, pd->sa_index);
 
   if (pd->is_chain)
     {
@@ -918,8 +921,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              break;
            default:
-             b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
-             next[0] = ESP_DECRYPT_NEXT_DROP;
+             esp_decrypt_set_next_index (
+               b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
+               next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
              break;
            }
        }
@@ -932,8 +936,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
        }
       else
        {
-         next[0] = ESP_DECRYPT_NEXT_DROP;
-         b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+         esp_decrypt_set_next_index (b, node, vm->thread_index,
+                                     ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
+                                     ESP_DECRYPT_NEXT_DROP, pd->sa_index);
          return;
        }
 
@@ -973,8 +978,10 @@ esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
                      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                                 &ip4->src_address))
                    {
-                     next[0] = ESP_DECRYPT_NEXT_DROP;
-                     b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+                     esp_decrypt_set_next_index (
+                       b, node, vm->thread_index,
+                       ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+                       ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                    }
                }
              else if (next_header == IP_PROTOCOL_IPV6)
@@ -988,8 +995,10 @@ esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
                      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                                 &ip6->src_address))
                    {
-                     next[0] = ESP_DECRYPT_NEXT_DROP;
-                     b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+                     esp_decrypt_set_next_index (
+                       b, node, vm->thread_index,
+                       ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+                       ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                    }
                }
            }
@@ -997,8 +1006,8 @@ esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
     }
 
   if (PREDICT_FALSE (n_lost))
-    vlib_increment_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
-                                  pd->sa_index, n_lost);
+    vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+                                  vm->thread_index, pd->sa_index, n_lost);
 }
 
 always_inline uword
@@ -1066,8 +1075,9 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (n_bufs == 0)
        {
          err = ESP_DECRYPT_ERROR_NO_BUFFERS;
-         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                             ESP_DECRYPT_NEXT_DROP);
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_DROP,
+                                     vnet_buffer (b[0])->ipsec.sad_index);
          goto next;
        }
 
@@ -1075,12 +1085,13 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                            current_sa_index,
-                                            current_sa_pkts,
+                                            current_sa_index, current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;
 
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
+         vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+                                         current_sa_index);
          sa0 = ipsec_sa_get (current_sa_index);
 
          /* fetch the second cacheline ASAP */
@@ -1105,8 +1116,9 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_DECRYPT_ERROR_HANDOFF;
-         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                             ESP_DECRYPT_NEXT_HANDOFF);
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
+                                     current_sa_index);
          goto next;
        }
 
@@ -1144,16 +1156,18 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                               &pd->seq_hi))
        {
          err = ESP_DECRYPT_ERROR_REPLAY;
-         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                             ESP_DECRYPT_NEXT_DROP);
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_DROP,
+                                     current_sa_index);
          goto next;
        }
 
       if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          err = ESP_DECRYPT_ERROR_RUNT;
-         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                             ESP_DECRYPT_NEXT_DROP);
+         esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, ESP_DECRYPT_NEXT_DROP,
+                                     current_sa_index);
          goto next;
        }
 
@@ -1182,8 +1196,9 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
            async_next_node);
          if (ESP_DECRYPT_ERROR_RX_PKTS != err)
            {
-             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                                 ESP_DECRYPT_NEXT_DROP);
+             esp_decrypt_set_next_index (
+               b[0], node, thread_index, err, n_noop, noop_nexts,
+               ESP_DECRYPT_NEXT_DROP, current_sa_index);
            }
        }
       else
@@ -1233,7 +1248,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          n_noop += esp_async_recycle_failed_submit (
            vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
-           n_noop, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
+           IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
+           ESP_DECRYPT_NEXT_DROP);
          vnet_crypto_async_reset_frame (*async_frame);
          vnet_crypto_async_free_frame (vm, *async_frame);
        }
src/vnet/ipsec/esp_encrypt.c
index aa0fb0a..88e93b9 100644 (file)
@@ -254,8 +254,10 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
-         b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
-         nexts[bi] = drop_next;
+         esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+                                     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+                                     bi, nexts, drop_next,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -282,8 +284,10 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
-         b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
-         nexts[bi] = drop_next;
+         esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+                                     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+                                     bi, nexts, drop_next,
+                                     vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
       op++;
@@ -659,8 +663,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
            {
              err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
-             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                                 drop_next);
+             noop_nexts[n_noop] = drop_next;
+             b[0]->error = node->errors[err];
              goto trace;
            }
        }
@@ -670,10 +674,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (sa_index0 != current_sa_index)
        {
          if (current_sa_packets)
-           vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
-                                            current_sa_index,
-                                            current_sa_packets,
-                                            current_sa_bytes);
+           vlib_increment_combined_counter (
+             &ipsec_sa_counters, thread_index, current_sa_index,
+             current_sa_packets, current_sa_bytes);
          current_sa_packets = current_sa_bytes = 0;
 
          sa0 = ipsec_sa_get (sa_index0);
@@ -683,14 +686,18 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                             !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
            {
              err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
-             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                                 drop_next);
+             esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                         n_noop, noop_nexts, drop_next,
+                                         sa_index0);
              goto trace;
            }
+         current_sa_index = sa_index0;
+         vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+                                         current_sa_index);
+
          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);
 
-         current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
          esp_align = sa0->esp_block_align;
          icv_sz = sa0->integ_icv_size;
@@ -711,8 +718,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_ENCRYPT_ERROR_HANDOFF;
-         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                             handoff_next);
+         esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, handoff_next,
+                                     current_sa_index);
          goto trace;
        }
 
@@ -721,7 +729,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (n_bufs == 0)
        {
          err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
-         esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+         esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, drop_next, current_sa_index);
          goto trace;
        }
 
@@ -735,7 +744,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
-         esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+         esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+                                     noop_nexts, drop_next, current_sa_index);
          goto trace;
        }
 
@@ -751,8 +761,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          if (!next_hdr_ptr)
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
-             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                                 drop_next);
+             esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                         n_noop, noop_nexts, drop_next,
+                                         current_sa_index);
              goto trace;
            }
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -873,8 +884,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
-             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                                 drop_next);
+             esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                         n_noop, noop_nexts, drop_next,
+                                         current_sa_index);
              goto trace;
            }
 
@@ -886,8 +898,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          if (!next_hdr_ptr)
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
-             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
-                                 drop_next);
+             esp_encrypt_set_next_index (b[0], node, thread_index, err,
+                                         n_noop, noop_nexts, drop_next,
+                                         current_sa_index);
              goto trace;
            }
 
@@ -1076,7 +1089,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
            {
              n_noop += esp_async_recycle_failed_submit (
                vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
-               n_noop, noop_bi, noop_nexts, drop_next);
+               IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
+               noop_nexts, drop_next);
              vnet_crypto_async_reset_frame (*async_frame);
              vnet_crypto_async_free_frame (vm, *async_frame);
            }
src/vnet/ipsec/ipsec.h
index 69aa661..5b51529 100644 (file)
@@ -347,6 +347,23 @@ ipsec_spinlock_unlock (i32 *lock)
   clib_atomic_release (lock);
 }
 
+/* Special case to drop or hand off packets for sync/async modes.
+ *
+ * Different than sync mode, async mode only enqueue drop or hand-off packets
+ * to next nodes.
+ */
+always_inline void
+ipsec_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node,
+                     u32 thread_index, u32 err, u32 ipsec_sa_err, u16 index,
+                     u16 *nexts, u16 drop_next, u32 sa_index)
+{
+  nexts[index] = drop_next;
+  b->error = node->errors[err];
+  if (PREDICT_TRUE (ipsec_sa_err != ~0))
+    vlib_increment_simple_counter (&ipsec_sa_err_counters[ipsec_sa_err],
+                                  thread_index, sa_index, 1);
+}
+
 u32 ipsec_register_ah_backend (vlib_main_t * vm, ipsec_main_t * im,
                               const char *name,
                               const char *ah4_encrypt_node_name,
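
One detail worth noting: the per-protocol *_err_to_sa_err mappers return ~0 for node errors that have no per-SA equivalent, and ipsec_set_next_index only bumps the SA counter behind the PREDICT_TRUE (ipsec_sa_err != ~0) guard. A minimal sketch of the two paths (variables are assumed to come from the node's dispatch loop; some_unmapped_err is hypothetical):

/* mapped error: node counter, next index and per-SA counter all updated */
esp_decrypt_set_next_index (b, node, thread_index, ESP_DECRYPT_ERROR_REPLAY,
			    0, nexts, ESP_DECRYPT_NEXT_DROP, sa_index);

/* unmapped error: the mapper yields ~0, so only the node counter and the
 * next index are set; no SA is charged */
ipsec_set_next_index (b, node, thread_index, some_unmapped_err, ~0, 0, nexts,
		      ESP_DECRYPT_NEXT_DROP, sa_index);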
src/vnet/ipsec/ipsec_cli.c
index 8b436b6..35fee29 100644 (file)
@@ -769,7 +769,8 @@ clear_ipsec_counters_command_fn (vlib_main_t * vm,
 {
   vlib_clear_combined_counters (&ipsec_spd_policy_counters);
   vlib_clear_combined_counters (&ipsec_sa_counters);
-  vlib_clear_simple_counters (&ipsec_sa_lost_counters);
+  for (int i = 0; i < IPSEC_SA_N_ERRORS; i++)
+    vlib_clear_simple_counters (&ipsec_sa_err_counters[i]);
 
   return (NULL);
 }
src/vnet/ipsec/ipsec_format.c
index 86ec368..d1511ac 100644 (file)
@@ -444,7 +444,7 @@ format_ipsec_sa (u8 * s, va_list * args)
   u32 sai = va_arg (*args, u32);
   ipsec_format_flags_t flags = va_arg (*args, ipsec_format_flags_t);
   vlib_counter_t counts;
-  counter_t lost;
+  counter_t errors;
   ipsec_sa_t *sa;
 
   if (pool_is_free_index (ipsec_sa_pool, sai))
@@ -485,12 +485,17 @@ format_ipsec_sa (u8 * s, va_list * args)
              clib_host_to_net_u16 (sa->udp_hdr.dst_port));
 
   vlib_get_combined_counter (&ipsec_sa_counters, sai, &counts);
-  lost = vlib_get_simple_counter (&ipsec_sa_lost_counters, sai);
-  s = format (s, "\n   tx/rx:[packets:%Ld bytes:%Ld], lost:[packets:%Ld]",
-             counts.packets, counts.bytes, lost);
+  s = format (s, "\n   tx/rx:[packets:%Ld bytes:%Ld]", counts.packets,
+             counts.bytes);
+  s = format (s, "\n   SA errors:");
+#define _(index, val, err, desc)                                              \
+  errors = vlib_get_simple_counter (&ipsec_sa_err_counters[index], sai);      \
+  s = format (s, "\n   " #desc ":[packets:%Ld]", errors);
+  foreach_ipsec_sa_err
+#undef _
 
-  if (ipsec_sa_is_set_IS_TUNNEL (sa))
-    s = format (s, "\n%U", format_tunnel, &sa->tunnel, 3);
+    if (ipsec_sa_is_set_IS_TUNNEL (sa)) s =
+      format (s, "\n%U", format_tunnel, &sa->tunnel, 3);
 
 done:
   return (s);
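
For reference, one iteration of the formatting macro above expands to the following (note that #desc stringizes the already-quoted description, so the quotes survive into the show output):

/* _ (0, LOST, lost, "packets lost") expands to roughly: */
errors = vlib_get_simple_counter (&ipsec_sa_err_counters[0], sai);
s = format (s, "\n   \"packets lost\":[packets:%Ld]", errors);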
src/vnet/ipsec/ipsec_sa.c
index 12f8ece..eed71a4 100644 (file)
@@ -19,6 +19,7 @@
 #include <vnet/fib/fib_table.h>
 #include <vnet/fib/fib_entry_track.h>
 #include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
 
 /**
  * @brief
@@ -28,10 +29,8 @@ vlib_combined_counter_main_t ipsec_sa_counters = {
   .name = "SA",
   .stat_segment_name = "/net/ipsec/sa",
 };
-vlib_simple_counter_main_t ipsec_sa_lost_counters = {
-  .name = "SA-lost",
-  .stat_segment_name = "/net/ipsec/sa/lost",
-};
+/* Per-SA error counters */
+vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];
 
 ipsec_sa_t *ipsec_sa_pool;
 
@@ -329,8 +328,11 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
 
   vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
   vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
-  vlib_validate_simple_counter (&ipsec_sa_lost_counters, sa_index);
-  vlib_zero_simple_counter (&ipsec_sa_lost_counters, sa_index);
+  for (int i = 0; i < IPSEC_SA_N_ERRORS; i++)
+    {
+      vlib_validate_simple_counter (&ipsec_sa_err_counters[i], sa_index);
+      vlib_zero_simple_counter (&ipsec_sa_err_counters[i], sa_index);
+    }
 
   tunnel_copy (tun, &sa->tunnel);
   sa->id = id;
@@ -567,7 +569,8 @@ void
 ipsec_sa_clear (index_t sai)
 {
   vlib_zero_combined_counter (&ipsec_sa_counters, sai);
-  vlib_zero_simple_counter (&ipsec_sa_lost_counters, sai);
+  for (int i = 0; i < IPSEC_SA_N_ERRORS; i++)
+    vlib_zero_simple_counter (&ipsec_sa_err_counters[i], sai);
 }
 
 void
@@ -640,16 +643,24 @@ const static fib_node_vft_t ipsec_sa_vft = {
   .fnv_back_walk = ipsec_sa_back_walk,
 };
 
-/* force inclusion from application's main.c */
+/* Init per-SA error counters and node type */
 clib_error_t *
-ipsec_sa_interface_init (vlib_main_t * vm)
+ipsec_sa_init (vlib_main_t *vm)
 {
   fib_node_register_type (FIB_NODE_TYPE_IPSEC_SA, &ipsec_sa_vft);
 
-  return 0;
+#define _(index, val, err, desc)                                              \
+  ipsec_sa_err_counters[index].name =                                         \
+    (char *) format (0, "SA-" #err "%c", 0);                                  \
+  ipsec_sa_err_counters[index].stat_segment_name =                            \
+    (char *) format (0, "/net/ipsec/sa/err/" #err "%c", 0);                   \
+  ipsec_sa_err_counters[index].counters = 0;
+  foreach_ipsec_sa_err
+#undef _
+    return 0;
 }
 
-VLIB_INIT_FUNCTION (ipsec_sa_interface_init);
+VLIB_INIT_FUNCTION (ipsec_sa_init);
 
 /*
  * fd.io coding-style-patch-verification: ON
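
Expanding the init macro above for the first foreach_ipsec_sa_err entry shows where the new counters land in the stats segment; the other fifteen entries follow the same shape, and these /net/ipsec/sa/err/<name> paths are what the updated Python tests read:

/* _ (0, LOST, lost, "packets lost") expands to roughly: */
ipsec_sa_err_counters[0].name = (char *) format (0, "SA-lost%c", 0);
ipsec_sa_err_counters[0].stat_segment_name =
  (char *) format (0, "/net/ipsec/sa/err/lost%c", 0);
ipsec_sa_err_counters[0].counters = 0;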
src/vnet/ipsec/ipsec_sa.h
index 88d5c42..4ef8f87 100644 (file)
@@ -118,6 +118,35 @@ typedef enum ipsec_sad_flags_t_
 
 STATIC_ASSERT (sizeof (ipsec_sa_flags_t) == 2, "IPSEC SA flags != 2 byte");
 
+#define foreach_ipsec_sa_err                                                  \
+  _ (0, LOST, lost, "packets lost")                                           \
+  _ (1, HANDOFF, handoff, "hand-off")                                         \
+  _ (2, INTEG_ERROR, integ_error, "Integrity check failed")                   \
+  _ (3, DECRYPTION_FAILED, decryption_failed, "Decryption failed")            \
+  _ (4, CRYPTO_ENGINE_ERROR, crypto_engine_error,                             \
+     "crypto engine error (dropped)")                                         \
+  _ (5, REPLAY, replay, "SA replayed packet")                                 \
+  _ (6, RUNT, runt, "undersized packet")                                      \
+  _ (7, NO_BUFFERS, no_buffers, "no buffers (dropped)")                       \
+  _ (8, OVERSIZED_HEADER, oversized_header,                                   \
+     "buffer with oversized header (dropped)")                                \
+  _ (9, NO_TAIL_SPACE, no_tail_space,                                         \
+     "no enough buffer tail space (dropped)")                                 \
+  _ (10, TUN_NO_PROTO, tun_no_proto, "no tunnel protocol")                    \
+  _ (11, UNSUP_PAYLOAD, unsup_payload, "unsupported payload")                 \
+  _ (12, SEQ_CYCLED, seq_cycled, "sequence number cycled (dropped)")          \
+  _ (13, CRYPTO_QUEUE_FULL, crypto_queue_full, "crypto queue full (dropped)") \
+  _ (14, NO_ENCRYPTION, no_encryption, "no Encrypting SA (dropped)")          \
+  _ (15, DROP_FRAGMENTS, drop_fragments, "IP fragments drop")
+
+typedef enum
+{
+#define _(v, f, s, d) IPSEC_SA_ERROR_##f = v,
+  foreach_ipsec_sa_err
+#undef _
+    IPSEC_SA_N_ERRORS,
+} __clib_packed ipsec_sa_err_t;
+
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
@@ -266,7 +295,7 @@ foreach_ipsec_sa_flags
  * SA packet & bytes counters
  */
 extern vlib_combined_counter_main_t ipsec_sa_counters;
-extern vlib_simple_counter_main_t ipsec_sa_lost_counters;
+extern vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];
 
 extern void ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len);
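
For reference, expanding foreach_ipsec_sa_err through the _ (v, f, s, d) macro above yields the per-SA error enum that sizes the counter array (abridged):

typedef enum
{
  IPSEC_SA_ERROR_LOST = 0,
  IPSEC_SA_ERROR_HANDOFF = 1,
  IPSEC_SA_ERROR_INTEG_ERROR = 2,
  /* ... one entry per foreach_ipsec_sa_err line ... */
  IPSEC_SA_ERROR_DROP_FRAGMENTS = 15,
  IPSEC_SA_N_ERRORS, /* == 16 */
} __clib_packed ipsec_sa_err_t;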
 
test/template_ipsec.py
index 72784dd..ba1c246 100644 (file)
@@ -632,10 +632,17 @@ class IpsecTra4(object):
         replay_count = self.get_replay_counts(p)
         hash_failed_count = self.get_hash_failed_counts(p)
         seq_cycle_count = self.statistics.get_err_counter(seq_cycle_node_name)
+        hash_err = "integ_error"
 
         if ESP == self.encryption_type:
             undersize_node_name = "/err/%s/runt" % self.tra4_decrypt_node_name[0]
             undersize_count = self.statistics.get_err_counter(undersize_node_name)
+            # For AES-GCM an error in the hash is reported as a decryption failure
+            if p.crypt_algo == "AES-GCM":
+                hash_err = "decryption_failed"
+        # In async mode, we don't report errors in the hash.
+        if p.async_mode:
+            hash_err = ""
 
         #
         # send packets with seq numbers 1->34
@@ -661,6 +668,8 @@ class IpsecTra4(object):
         self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
         replay_count += len(pkts)
         self.assertEqual(self.get_replay_counts(p), replay_count)
+        err = p.tra_sa_in.get_err("replay")
+        self.assertEqual(err, replay_count)
 
         #
         # now send a batch of packets all with the same sequence number
@@ -677,6 +686,8 @@ class IpsecTra4(object):
         recv_pkts = self.send_and_expect(self.tra_if, pkts * 8, self.tra_if, n_rx=1)
         replay_count += 7
         self.assertEqual(self.get_replay_counts(p), replay_count)
+        err = p.tra_sa_in.get_err("replay")
+        self.assertEqual(err, replay_count)
 
         #
         # now move the window over to 257 (more than one byte) and into Case A
@@ -694,6 +705,8 @@ class IpsecTra4(object):
         self.send_and_assert_no_replies(self.tra_if, pkt * 3, timeout=0.2)
         replay_count += 3
         self.assertEqual(self.get_replay_counts(p), replay_count)
+        err = p.tra_sa_in.get_err("replay")
+        self.assertEqual(err, replay_count)
 
         # the window size is 64 packets
         # in window are still accepted
@@ -724,6 +737,9 @@ class IpsecTra4(object):
 
         hash_failed_count += 17
         self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
+        if hash_err != "":
+            err = p.tra_sa_in.get_err(hash_err)
+            self.assertEqual(err, hash_failed_count)
 
         # a malformed 'runt' packet
         #  created by a mis-constructed SA
@@ -739,6 +755,8 @@ class IpsecTra4(object):
 
             undersize_count += 17
             self.assert_error_counter_equal(undersize_node_name, undersize_count)
+            err = p.tra_sa_in.get_err("runt")
+            self.assertEqual(err, undersize_count)
 
         # which we can determine since this packet is still in the window
         pkt = Ether(
@@ -767,10 +785,15 @@ class IpsecTra4(object):
             # wrap. but since it isn't then the verify will fail.
             hash_failed_count += 17
             self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
+            if hash_err != "":
+                err = p.tra_sa_in.get_err(hash_err)
+                self.assertEqual(err, hash_failed_count)
 
         else:
             replay_count += 17
             self.assertEqual(self.get_replay_counts(p), replay_count)
+            err = p.tra_sa_in.get_err("replay")
+            self.assertEqual(err, replay_count)
 
         # valid packet moves the window over to 258
         pkt = Ether(
@@ -861,6 +884,9 @@ class IpsecTra4(object):
 
             hash_failed_count += 1
             self.assertEqual(self.get_hash_failed_counts(p), hash_failed_count)
+            if hash_err != "":
+                err = p.tra_sa_in.get_err(hash_err)
+                self.assertEqual(err, hash_failed_count)
 
             #
             # but if we move the window forward to case B, then we can wrap
@@ -894,6 +920,8 @@ class IpsecTra4(object):
             self.send_and_assert_no_replies(self.tra_if, pkts, timeout=0.2)
             seq_cycle_count += len(pkts)
             self.assert_error_counter_equal(seq_cycle_node_name, seq_cycle_count)
+            err = p.tra_sa_out.get_err("seq_cycled")
+            self.assertEqual(err, seq_cycle_count)
 
         # move the security-associations seq number on to the last we used
         self.vapi.cli("test ipsec sa %d seq 0x15f" % p.scapy_tra_sa_id)
@@ -924,7 +952,7 @@ class IpsecTra4(object):
         ]
         self.send_and_expect(self.tra_if, pkts, self.tra_if)
 
-        self.assertEqual(p.tra_sa_in.get_lost(), 0)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 0)
 
         # skip a sequence number
         pkts = [
@@ -939,7 +967,7 @@ class IpsecTra4(object):
         ]
         self.send_and_expect(self.tra_if, pkts, self.tra_if)
 
-        self.assertEqual(p.tra_sa_in.get_lost(), 0)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 0)
 
         # the lost packet are counted untill we get up past the first
         # sizeof(replay_window) packets
@@ -955,7 +983,7 @@ class IpsecTra4(object):
         ]
         self.send_and_expect(self.tra_if, pkts, self.tra_if)
 
-        self.assertEqual(p.tra_sa_in.get_lost(), 1)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 1)
 
         # lost of holes in the sequence
         pkts = [
@@ -982,7 +1010,7 @@ class IpsecTra4(object):
         ]
         self.send_and_expect(self.tra_if, pkts, self.tra_if)
 
-        self.assertEqual(p.tra_sa_in.get_lost(), 51)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 51)
 
         # a big hole in the seq number space
         pkts = [
@@ -997,7 +1025,7 @@ class IpsecTra4(object):
         ]
         self.send_and_expect(self.tra_if, pkts, self.tra_if)
 
-        self.assertEqual(p.tra_sa_in.get_lost(), 151)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 151)
 
     def verify_tra_basic4(self, count=1, payload_size=54):
         """ipsec v4 transport basic test"""
@@ -1036,8 +1064,8 @@ class IpsecTra4(object):
         self.assertEqual(
             pkts, count, "incorrect SA out counts: expected %d != %d" % (count, pkts)
         )
-        self.assertEqual(p.tra_sa_out.get_lost(), 0)
-        self.assertEqual(p.tra_sa_in.get_lost(), 0)
+        self.assertEqual(p.tra_sa_out.get_err("lost"), 0)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 0)
 
         self.assert_packet_counter_equal(self.tra4_encrypt_node_name, count)
         self.assert_packet_counter_equal(self.tra4_decrypt_node_name[0], count)
test/test_ipsec_spd_fp_input.py
index bf00c1c..d70623e 100644 (file)
@@ -293,8 +293,8 @@ class IPSec4SpdTestCaseProtect(SpdFastPathInboundProtect):
             pkt_count,
             "incorrect SA out counts: expected %d != %d" % (pkt_count, pkts),
         )
-        self.assertEqual(p.tra_sa_out.get_lost(), 0)
-        self.assertEqual(p.tra_sa_in.get_lost(), 0)
+        self.assertEqual(p.tra_sa_out.get_err("lost"), 0)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 0)
 
 
 class IPSec4SpdTestCaseAddIPRange(SpdFastPathInbound):
@@ -876,8 +876,8 @@ class IPSec6SpdTestCaseProtect(SpdFastPathIPv6InboundProtect):
             pkt_count,
             "incorrect SA out counts: expected %d != %d" % (pkt_count, pkts),
         )
-        self.assertEqual(p.tra_sa_out.get_lost(), 0)
-        self.assertEqual(p.tra_sa_in.get_lost(), 0)
+        self.assertEqual(p.tra_sa_out.get_err("lost"), 0)
+        self.assertEqual(p.tra_sa_in.get_err("lost"), 0)
 
 
 if __name__ == "__main__":
test/test_ipsec_tun_if_esp.py
index 38d0dc3..06b63ca 100644 (file)
@@ -1990,6 +1990,8 @@ class TestIpsecGreIfEspTra(TemplateIpsec, IpsecTun4Tests):
         self.send_and_assert_no_replies(self.tun_if, tx)
         node_name = "/err/%s/unsup_payload" % self.tun4_decrypt_node_name[0]
         self.assertEqual(1, self.statistics.get_err_counter(node_name))
+        err = p.tun_sa_in.get_err("unsup_payload")
+        self.assertEqual(err, 1)
 
 
 class TestIpsecGre6IfEspTra(TemplateIpsec, IpsecTun6Tests):
test/vpp_ipsec.py
index f50d491..7a5a95a 100644 (file)
@@ -358,8 +358,8 @@ class VppIpsecSA(VppObject):
             # +1 to skip main thread
             return c[worker + 1][self.stat_index]
 
-    def get_lost(self, worker=None):
-        c = self.test.statistics.get_counter("/net/ipsec/sa/lost")
+    def get_err(self, name, worker=None):
+        c = self.test.statistics.get_counter("/net/ipsec/sa/err/" + name)
         if worker is None:
             total = 0
             for t in c: