ipsec: Support async mode per-SA 75/31475/7
authorNeale Ranns <neale@graphiant.com>
Thu, 25 Feb 2021 19:09:24 +0000 (19:09 +0000)
committerDamjan Marion <dmarion@me.com>
Fri, 5 Mar 2021 10:34:55 +0000 (10:34 +0000)
Type: feature

This feature only applies to ESP, not AH, SAs.
As well as the global switch for async mode, allow individual SAs to be
async.
If global async is on, all SAs are async. If global async mode is off,
then an SA can be individually set to async. This preserves the
global switch behaviour.

The strategy in the ESP encrypt/decrypt nodes is to separate the frame
into: 1) sync buffers, 2) async buffers, and 3) no-op buffers.
Sync buffers will undergo a crypto/auth operation; no-op buffers will not —
they are dropped or handed-off.

Signed-off-by: Neale Ranns <neale@graphiant.com>
Change-Id: Ifc15b10b870b19413ad030ce7f92ed56275d6791

12 files changed:
src/vnet/devices/pipe/pipe.c
src/vnet/ipsec/esp.h
src/vnet/ipsec/esp_decrypt.c
src/vnet/ipsec/esp_encrypt.c
src/vnet/ipsec/ipsec.c
src/vnet/ipsec/ipsec_api.c
src/vnet/ipsec/ipsec_cli.c
src/vnet/ipsec/ipsec_sa.c
src/vnet/ipsec/ipsec_sa.h
src/vnet/ipsec/ipsec_types.api
src/vnet/ipsec/ipsec_types_api.c
test/test_ipsec_esp.py

index 384c2c5..ec50f63 100644 (file)
@@ -167,7 +167,7 @@ pipe_tx (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
 
-  return n_left_from;
+  return frame->n_vectors;
 }
 
 static u8 *
index 51386e6..a0643c3 100644 (file)
@@ -146,38 +146,33 @@ esp_aad_fill (u8 * data, const esp_header_t * esp, const ipsec_sa_t * sa)
  * to next nodes.
  */
 always_inline void
-esp_set_next_index (int is_async, u32 * from, u16 * nexts, u32 bi,
-                   u16 * drop_index, u16 drop_next, u16 * next)
+esp_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node, u32 err,
+                   u16 index, u16 *nexts, u16 drop_next)
 {
-  if (is_async)
-    {
-      from[*drop_index] = bi;
-      nexts[*drop_index] = drop_next;
-      *drop_index += 1;
-    }
-  else
-    next[0] = drop_next;
+  nexts[index] = drop_next;
+  b->error = node->errors[err];
 }
 
 /* when submitting a frame is failed, drop all buffers in the frame */
-always_inline void
-esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
-                                vlib_buffer_t ** b, u32 * from, u16 * nexts,
-                                u16 * n_dropped, u16 drop_next_index,
-                                vlib_error_t err)
+always_inline u32
+esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
+                                vlib_node_runtime_t *node, u32 err, u16 index,
+                                u32 *from, u16 *nexts, u16 drop_next_index)
 {
   u32 n_drop = f->n_elts;
   u32 *bi = f->buffer_indices;
-  b -= n_drop;
+
   while (n_drop--)
     {
-      b[0]->error = err;
-      esp_set_next_index (1, from, nexts, bi[0], n_dropped, drop_next_index,
-                         NULL);
+      from[index] = bi[0];
+      esp_set_next_index (vlib_get_buffer (vm, bi[0]), node, err, index, nexts,
+                         drop_next_index);
       bi++;
-      b++;
+      index++;
     }
   vnet_crypto_async_reset_frame (f);
+
+  return (f->n_elts);
 }
 
 /**
index 141b1b9..ea5a99c 100644 (file)
@@ -58,20 +58,20 @@ typedef enum
     ESP_DECRYPT_POST_N_NEXT,
 } esp_decrypt_post_next_t;
 
-#define foreach_esp_decrypt_error                               \
_(RX_PKTS, "ESP pkts received")                                \
_(RX_POST_PKTS, "ESP-POST pkts received")                      \
_(DECRYPTION_FAILED, "ESP decryption failed")                  \
_(INTEG_ERROR, "Integrity check failed")                       \
_(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
_(REPLAY, "SA replayed packet")                                \
_(RUNT, "undersized packet")                                   \
_(NO_BUFFERS, "no buffers (packet dropped)")                   \
_(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
_(NO_TAIL_SPACE, "no enough buffer tail space (dropped)")      \
_(TUN_NO_PROTO, "no tunnel protocol")                          \
_(UNSUP_PAYLOAD, "unsupported payload")                        \
-
+#define foreach_esp_decrypt_error                                             \
 _ (RX_PKTS, "ESP pkts received")                                            \
 _ (RX_POST_PKTS, "ESP-POST pkts received")                                  \
 _ (HANDOFF, "hand-off")                                                     \
 _ (DECRYPTION_FAILED, "ESP decryption failed")                              \
 _ (INTEG_ERROR, "Integrity check failed")                                   \
 _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
 _ (REPLAY, "SA replayed packet")                                            \
 _ (RUNT, "undersized packet")                                               \
 _ (NO_BUFFERS, "no buffers (packet dropped)")                               \
 _ (OVERSIZED_HEADER, "buffer with oversized header (dropped)")              \
 _ (NO_TAIL_SPACE, "no enough buffer tail space (dropped)")                  \
 _ (TUN_NO_PROTO, "no tunnel protocol")                                      \
+  _ (UNSUP_PAYLOAD, "unsupported payload")
 
 typedef enum
 {
@@ -154,7 +154,7 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
   vnet_crypto_op_t *op = ops;
   u32 n_fail, n_ops = vec_len (ops);
 
-  if (n_ops == 0)
+  if (PREDICT_TRUE (n_ops == 0))
     return;
 
   n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
@@ -1009,9 +1009,9 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
 }
 
 always_inline uword
-esp_decrypt_inline (vlib_main_t * vm,
-                   vlib_node_runtime_t * node, vlib_frame_t * from_frame,
-                   int is_ip6, int is_tun, u16 async_next)
+esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+                   vlib_frame_t *from_frame, int is_ip6, int is_tun,
+                   u16 async_next_node)
 {
   ipsec_main_t *im = &ipsec_main;
   u32 thread_index = vm->thread_index;
@@ -1020,7 +1020,12 @@ esp_decrypt_inline (vlib_main_t * vm,
   u32 *from = vlib_frame_vector_args (from_frame);
   u32 n_left = from_frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+  u32 sync_bi[VLIB_FRAME_SIZE];
+  u32 noop_bi[VLIB_FRAME_SIZE];
   esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
   esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
   esp_decrypt_packet_data_t cpd = { };
@@ -1032,8 +1037,8 @@ esp_decrypt_inline (vlib_main_t * vm,
   vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
   int is_async = im->async_mode;
   vnet_crypto_async_op_id_t async_op = ~0;
-  u16 n_async_drop = 0;
   vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+  esp_decrypt_error_t err;
 
   vlib_get_buffers (vm, from, b, n_left);
   if (!is_async)
@@ -1045,13 +1050,14 @@ esp_decrypt_inline (vlib_main_t * vm,
     }
   vec_reset_length (ptd->async_frames);
   vec_reset_length (ptd->chunks);
-  clib_memset_u16 (nexts, -1, n_left);
+  clib_memset (sync_nexts, -1, sizeof (sync_nexts));
   clib_memset (async_frames, 0, sizeof (async_frames));
 
   while (n_left > 0)
     {
       u8 *payload;
 
+      err = ESP_DECRYPT_ERROR_RX_PKTS;
       if (n_left > 2)
        {
          u8 *p;
@@ -1065,10 +1071,9 @@ esp_decrypt_inline (vlib_main_t * vm,
       u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
       if (n_bufs == 0)
        {
-         b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
-         next[0] = ESP_DECRYPT_NEXT_DROP;
+         err = ESP_DECRYPT_ERROR_NO_BUFFERS;
+         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                             ESP_DECRYPT_NEXT_DROP);
          goto next;
        }
 
@@ -1090,19 +1095,13 @@ esp_decrypt_inline (vlib_main_t * vm,
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
+         is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }
 
       if (is_async)
        {
          async_op = sa0->crypto_async_dec_op_id;
 
-         if (PREDICT_FALSE (async_op == 0))
-           {
-             esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                 &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
-             goto next;
-           }
-
          /* get a frame for this op if we don't yet have one or it's full
           */
          if (NULL == async_frames[async_op] ||
@@ -1127,9 +1126,9 @@ esp_decrypt_inline (vlib_main_t * vm,
       if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
-         next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+         err = ESP_DECRYPT_ERROR_HANDOFF;
+         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                             ESP_DECRYPT_NEXT_HANDOFF);
          goto next;
        }
 
@@ -1160,17 +1159,17 @@ esp_decrypt_inline (vlib_main_t * vm,
       /* anti-reply check */
       if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
-         b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+         err = ESP_DECRYPT_ERROR_REPLAY;
+         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                             ESP_DECRYPT_NEXT_DROP);
          goto next;
        }
 
       if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
-         b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+         err = ESP_DECRYPT_ERROR_RUNT;
+         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                             ESP_DECRYPT_NEXT_DROP);
          goto next;
        }
 
@@ -1180,30 +1179,44 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       if (is_async)
        {
-         esp_decrypt_error_t err;
 
          err = esp_decrypt_prepare_async_frame (
            vm, node, ptd, async_frames[async_op], sa0, payload, len,
-           cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], next,
-           async_next);
+           cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
+           async_next_node);
          if (ESP_DECRYPT_ERROR_RX_PKTS != err)
            {
-             b[0]->error = err;
-             esp_set_next_index (1, from, nexts, from[b - bufs],
-                                 &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 ESP_DECRYPT_NEXT_DROP);
            }
        }
       else
-       esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
-                                    op, sa0, payload, len, cpd.icv_sz,
-                                    cpd.iv_sz, pd, pd2, b[0], next,
-                                    b - bufs);
+       esp_decrypt_prepare_sync_op (
+         vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
+         cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
       /* next */
     next:
+      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
+       {
+         noop_bi[n_noop] = from[b - bufs];
+         n_noop++;
+         noop_next++;
+       }
+      else if (!is_async)
+       {
+         sync_bi[n_sync] = from[b - bufs];
+         sync_bufs[n_sync] = b[0];
+         n_sync++;
+         sync_next++;
+         pd += 1;
+         pd2 += 1;
+       }
+      else
+       {
+         n_async++;
+         async_next++;
+       }
       n_left -= 1;
-      next += 1;
-      pd += 1;
-      pd2 += 1;
       b += 1;
     }
 
@@ -1212,7 +1225,7 @@ esp_decrypt_inline (vlib_main_t * vm,
                                     current_sa_index, current_sa_pkts,
                                     current_sa_bytes);
 
-  if (is_async)
+  if (n_async)
     {
       /* submit all of the open frames */
       vnet_crypto_async_frame_t **async_frame;
@@ -1221,45 +1234,38 @@ esp_decrypt_inline (vlib_main_t * vm,
        {
          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
            {
-             esp_async_recycle_failed_submit (
-               *async_frame, b, from, nexts, &n_async_drop,
-               ESP_DECRYPT_NEXT_DROP, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+             n_noop += esp_async_recycle_failed_submit (
+               vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+               n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
              vnet_crypto_async_reset_frame (*async_frame);
              vnet_crypto_async_free_frame (vm, *async_frame);
            }
        }
-
-      /* no post process in async */
-      vlib_node_increment_counter (vm, node->node_index,
-                                  ESP_DECRYPT_ERROR_RX_PKTS,
-                                  from_frame->n_vectors);
-      if (n_async_drop)
-       vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
-
-      return n_left;
     }
-  else
+
+  if (n_sync)
     {
-      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_INTEG_ERROR);
-      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
-                              ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
+      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+                              sync_nexts, ptd->chunks,
+                              ESP_DECRYPT_ERROR_INTEG_ERROR);
 
-      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
-      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
-                              ptd->chunks,
+      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+                              sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
     }
 
   /* Post decryption ronud - adjust packet data start and length and next
      node */
 
-  n_left = from_frame->n_vectors;
-  next = nexts;
+  n_left = n_sync;
+  sync_next = sync_nexts;
   pd = pkt_data;
   pd2 = pkt_data2;
-  b = bufs;
+  b = sync_bufs;
 
   while (n_left)
     {
@@ -1283,8 +1289,8 @@ esp_decrypt_inline (vlib_main_t * vm,
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
 
-      if (next[0] >= ESP_DECRYPT_N_NEXT)
-       esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
+      if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
+       esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
                                 is_tun, 0);
 
       /* trace: */
@@ -1302,19 +1308,22 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       /* next */
       n_left -= 1;
-      next += 1;
+      sync_next += 1;
       pd += 1;
       pd2 += 1;
       b += 1;
     }
 
-  n_left = from_frame->n_vectors;
-  vlib_node_increment_counter (vm, node->node_index,
-                              ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
+                              from_frame->n_vectors);
 
-  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
+  if (n_sync)
+    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
 
-  return n_left;
+  if (n_noop)
+    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+  return (from_frame->n_vectors);
 }
 
 always_inline uword
index 1fc53a5..214cf67 100644 (file)
@@ -43,13 +43,14 @@ typedef enum
     ESP_ENCRYPT_N_NEXT,
 } esp_encrypt_next_t;
 
-#define foreach_esp_encrypt_error                               \
- _(RX_PKTS, "ESP pkts received")                                \
- _(POST_RX_PKTS, "ESP-post pkts received")                      \
- _(SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")     \
- _(NO_BUFFERS, "no buffers (packet dropped)")                   \
+#define foreach_esp_encrypt_error                                             \
+  _ (RX_PKTS, "ESP pkts received")                                            \
+  _ (POST_RX_PKTS, "ESP-post pkts received")                                  \
+  _ (HANDOFF, "Hand-off")                                                     \
+  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
+  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
+  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
+  _ (NO_BUFFERS, "no buffers (packet dropped)")
 
 typedef enum
 {
@@ -112,9 +113,8 @@ format_esp_post_encrypt_trace (u8 * s, va_list * args)
 
 /* pad packet in input buffer */
 static_always_inline u8 *
-esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
-                       u8 esp_align, u8 icv_sz,
-                       u16 * next, vlib_node_runtime_t * node,
+esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
+                       u8 icv_sz, vlib_node_runtime_t *node,
                        u16 buffer_data_size, uword total_len)
 {
   static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
@@ -379,9 +379,9 @@ always_inline void
 esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                     vnet_crypto_op_t **crypto_ops,
                     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0,
-                    u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
-                    vlib_buffer_t **bufs, vlib_buffer_t **b,
-                    vlib_buffer_t *lb, u32 hdr_len, esp_header_t *esp)
+                    u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
+                    vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
+                    esp_header_t *esp)
 {
   if (sa0->crypto_enc_op_id)
     {
@@ -392,7 +392,7 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
       op->src = op->dst = payload;
       op->key_index = sa0->crypto_key_index;
       op->len = payload_len - icv_sz;
-      op->user_data = b - bufs;
+      op->user_data = bi;
 
       if (ipsec_sa_is_set_IS_CTR (sa0))
        {
@@ -447,7 +447,7 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
       op->key_index = sa0->integ_key_index;
       op->digest_len = icv_sz;
       op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
-      op->user_data = b - bufs;
+      op->user_data = bi;
 
       if (lb != b[0])
        {
@@ -564,14 +564,13 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
 always_inline uword
 esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
-                   u16 async_next)
+                   u16 async_next_node)
 {
   ipsec_main_t *im = &ipsec_main;
   ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
   u32 *from = vlib_frame_vector_args (frame);
   u32 n_left = frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
   u32 thread_index = vm->thread_index;
   u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
   u32 current_sa_index = ~0, current_sa_packets = 0;
@@ -592,16 +591,20 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                        ESP_ENCRYPT_NEXT_HANDOFF6 :
                        (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
                                               ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
-  u16 n_async_drop = 0;
+  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+  u32 sync_bi[VLIB_FRAME_SIZE];
+  u32 noop_bi[VLIB_FRAME_SIZE];
+  esp_encrypt_error_t err;
 
   vlib_get_buffers (vm, from, b, n_left);
-  if (!is_async)
-    {
-      vec_reset_length (ptd->crypto_ops);
-      vec_reset_length (ptd->integ_ops);
-      vec_reset_length (ptd->chained_crypto_ops);
-      vec_reset_length (ptd->chained_integ_ops);
-    }
+
+  vec_reset_length (ptd->crypto_ops);
+  vec_reset_length (ptd->integ_ops);
+  vec_reset_length (ptd->chained_crypto_ops);
+  vec_reset_length (ptd->chained_integ_ops);
   vec_reset_length (ptd->async_frames);
   vec_reset_length (ptd->chunks);
   clib_memset (async_frames, 0, sizeof (async_frames));
@@ -615,6 +618,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       u16 payload_len, payload_len_total, n_bufs;
       u32 hdr_len;
 
+      err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
       if (n_left > 2)
        {
          u8 *p;
@@ -657,19 +662,13 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          esp_align = sa0->esp_block_align;
          icv_sz = sa0->integ_icv_size;
          iv_sz = sa0->crypto_iv_size;
+         is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }
 
       if (is_async)
        {
          async_op = sa0->crypto_async_enc_op_id;
 
-         if (PREDICT_FALSE (async_op == 0))
-           {
-             esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                 &n_async_drop, drop_next, next);
-             goto trace;
-           }
-
          /* get a frame for this op if we don't yet have one or it's full
           */
          if (NULL == async_frames[async_op] ||
@@ -694,8 +693,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, handoff_next, next);
+         err = ESP_ENCRYPT_ERROR_HANDOFF;
+         esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                             handoff_next);
          goto trace;
        }
 
@@ -703,9 +703,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
       if (n_bufs == 0)
        {
-         b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, drop_next, next);
+         err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+         esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
          goto trace;
        }
 
@@ -718,9 +717,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
-         b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
-         esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                             &n_async_drop, drop_next, next);
+         err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
+         esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
          goto trace;
        }
 
@@ -730,16 +728,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
-                                                next, node,
-                                                buffer_data_size,
-                                                vlib_buffer_length_in_chain
-                                                (vm, b[0]));
+         next_hdr_ptr = esp_add_footer_and_icv (
+           vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+           vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
-             b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
-             esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                 &n_async_drop, drop_next, next);
+             err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
              goto trace;
            }
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -833,11 +829,11 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          dpo = &sa0->dpo;
          if (!is_tun)
            {
-             next[0] = dpo->dpoi_next_node;
+             sync_next[0] = dpo->dpoi_next_node;
              vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
            }
          else
-           next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+           sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
          b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
        }
       else                     /* transport mode */
@@ -855,15 +851,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
 
          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
-                                                next, node,
-                                                buffer_data_size,
-                                                vlib_buffer_length_in_chain
-                                                (vm, b[0]));
+         next_hdr_ptr = esp_add_footer_and_icv (
+           vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+           vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
-             esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                 &n_async_drop, drop_next, next);
+             err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+             esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+                                 drop_next);
              goto trace;
            }
 
@@ -938,7 +933,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
              esp_fill_udp_hdr (sa0, udp, udp_len);
            }
 
-         next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+         sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }
 
       if (lb != b[0])
@@ -958,12 +953,12 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (is_async)
        esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
                                 esp, payload, payload_len, iv_sz, icv_sz,
-                                from[b - bufs], next[0], hdr_len, async_next,
-                                lb);
+                                from[b - bufs], sync_next[0], hdr_len,
+                                async_next_node, lb);
       else
-         esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
-                              payload_len, iv_sz, icv_sz, bufs, b, lb,
-                              hdr_len, esp);
+       esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
+                            payload_len, iv_sz, icv_sz, n_sync, b, lb,
+                            hdr_len, esp);
 
       vlib_buffer_advance (b[0], 0LL - hdr_len);
 
@@ -983,31 +978,48 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
        }
+
       /* next */
+      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
+       {
+         noop_bi[n_noop] = from[b - bufs];
+         n_noop++;
+         noop_next++;
+       }
+      else if (!is_async)
+       {
+         sync_bi[n_sync] = from[b - bufs];
+         sync_bufs[n_sync] = b[0];
+         n_sync++;
+         sync_next++;
+       }
+      else
+       {
+         n_async++;
+         async_next++;
+       }
       n_left -= 1;
-      next += 1;
       b += 1;
     }
 
   vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_packets,
                                   current_sa_bytes);
-  if (!is_async)
+  if (n_sync)
     {
-      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
-      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
-                              ptd->chunks, drop_next);
+      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
+                      drop_next);
+      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+                              sync_nexts, ptd->chunks, drop_next);
 
-      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
-      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
-                              ptd->chunks, drop_next);
+      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
+                      drop_next);
+      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+                              sync_nexts, ptd->chunks, drop_next);
 
-      vlib_node_increment_counter (
-       vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
-
-      vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+      vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
     }
-  else
+  if (n_async)
     {
       /* submit all of the open frames */
       vnet_crypto_async_frame_t **async_frame;
@@ -1016,20 +1028,19 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
        {
          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
            {
-             esp_async_recycle_failed_submit (
-               *async_frame, b, from, nexts, &n_async_drop, drop_next,
-               ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+             n_noop += esp_async_recycle_failed_submit (
+               vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+               n_sync, noop_bi, noop_nexts, drop_next);
              vnet_crypto_async_reset_frame (*async_frame);
              vnet_crypto_async_free_frame (vm, *async_frame);
            }
        }
-
-      vlib_node_increment_counter (vm, node->node_index,
-                                  ESP_ENCRYPT_ERROR_RX_PKTS,
-                                  frame->n_vectors);
-      if (n_async_drop)
-       vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
     }
+  if (n_noop)
+    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+  vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
+                              frame->n_vectors);
 
   return frame->n_vectors;
 }
index 45ae5ac..7471345 100644 (file)
@@ -329,20 +329,15 @@ ipsec_set_async_mode (u32 is_enabled)
   ipsec_main_t *im = &ipsec_main;
   ipsec_sa_t *sa;
 
-  /* lock all SAs before change im->async_mode */
-  pool_foreach (sa, ipsec_sa_pool)
-    {
-      fib_node_lock (&sa->node);
-    }
+  vnet_crypto_request_async_mode (is_enabled);
 
   im->async_mode = is_enabled;
 
-  /* change SA crypto op data before unlock them */
+  /* change SA crypto op data */
   pool_foreach (sa, ipsec_sa_pool)
     {
       sa->crypto_op_data =
-       is_enabled ? sa->async_op_data.data : sa->sync_op_data.data;
-      fib_node_unlock (&sa->node);
+       (is_enabled ? sa->async_op_data.data : sa->sync_op_data.data);
     }
 }
 
index 45e4e6f..5ce64d9 100644 (file)
@@ -1154,7 +1154,6 @@ vl_api_ipsec_set_async_mode_t_handler (vl_api_ipsec_set_async_mode_t * mp)
   vl_api_ipsec_set_async_mode_reply_t *rmp;
   int rv = 0;
 
-  vnet_crypto_request_async_mode (mp->async_enable);
   ipsec_set_async_mode (mp->async_enable);
 
   REPLY_MACRO (VL_API_IPSEC_SET_ASYNC_MODE_REPLY);
index bb80b32..2c7a923 100644 (file)
@@ -98,7 +98,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
   u16 udp_src, udp_dst;
   int is_add, rv;
   u32 m_args = 0;
-  tunnel_t tun;
+  tunnel_t tun = {};
 
   salt = 0;
   error = NULL;
@@ -161,6 +161,8 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
        flags |= IPSEC_SA_FLAG_USE_ESN;
       else if (unformat (line_input, "udp-encap"))
        flags |= IPSEC_SA_FLAG_UDP_ENCAP;
+      else if (unformat (line_input, "async"))
+       flags |= IPSEC_SA_FLAG_IS_ASYNC;
       else
        {
          error = clib_error_return (0, "parse error: '%U'",
@@ -198,7 +200,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
     }
 
   if (rv)
-    error = clib_error_return (0, "failed");
+    error = clib_error_return (0, "failed: %d", rv);
 
 done:
   unformat_free (line_input);
@@ -940,7 +942,6 @@ set_async_mode_command_fn (vlib_main_t * vm, unformat_input_t * input,
                                   format_unformat_error, line_input));
     }
 
-  vnet_crypto_request_async_mode (async_enable);
   ipsec_set_async_mode (async_enable);
 
   unformat_free (line_input);
index 7e2dc20..b1e3374 100644 (file)
@@ -245,7 +245,15 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
   if (im->async_mode)
     sa->crypto_op_data = sa->async_op_data.data;
   else
-    sa->crypto_op_data = sa->sync_op_data.data;
+    {
+      if (ipsec_sa_is_set_IS_ASYNC (sa))
+       {
+         vnet_crypto_request_async_mode (1);
+         sa->crypto_op_data = sa->async_op_data.data;
+       }
+      else
+       sa->crypto_op_data = sa->sync_op_data.data;
+    }
 
   err = ipsec_check_support_cb (im, sa);
   if (err)
@@ -332,6 +340,8 @@ ipsec_sa_del (ipsec_sa_t * sa)
   /* no recovery possible when deleting an SA */
   (void) ipsec_call_add_del_callbacks (im, sa, sa_index, 0);
 
+  if (ipsec_sa_is_set_IS_ASYNC (sa))
+    vnet_crypto_request_async_mode (0);
   if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
     ipsec_unregister_udp_port (clib_net_to_host_u16 (sa->udp_hdr.dst_port));
 
index 705034e..7827ef1 100644 (file)
@@ -101,7 +101,8 @@ typedef struct ipsec_key_t_
   _ (32, IS_PROTECT, "Protect")                                               \
   _ (64, IS_INBOUND, "inbound")                                               \
   _ (128, IS_AEAD, "aead")                                                    \
-  _ (256, IS_CTR, "ctr")
+  _ (256, IS_CTR, "ctr")                                                      \
+  _ (512, IS_ASYNC, "async")
 
 typedef enum ipsec_sad_flags_t_
 {
index b473559..9fa7e05 100644 (file)
@@ -74,6 +74,8 @@ enum ipsec_sad_flags
   IPSEC_API_SAD_FLAG_UDP_ENCAP = 0x10,
   /* IPsec SA is for inbound traffic */
   IPSEC_API_SAD_FLAG_IS_INBOUND = 0x40,
+  /* IPsec SA uses an Async driver */
+  IPSEC_API_SAD_FLAG_ASYNC = 0x80 [backwards_compatible],
 };
 
 enum ipsec_proto
index 44b129b..7044f1e 100644 (file)
@@ -147,6 +147,8 @@ ipsec_sa_flags_decode (vl_api_ipsec_sad_flags_t in)
     flags |= IPSEC_SA_FLAG_UDP_ENCAP;
   if (in & IPSEC_API_SAD_FLAG_IS_INBOUND)
     flags |= IPSEC_SA_FLAG_IS_INBOUND;
+  if (in & IPSEC_API_SAD_FLAG_ASYNC)
+    flags |= IPSEC_SA_FLAG_IS_ASYNC;
 
   return (flags);
 }
@@ -168,6 +170,8 @@ ipsec_sad_flags_encode (const ipsec_sa_t * sa)
     flags |= IPSEC_API_SAD_FLAG_UDP_ENCAP;
   if (ipsec_sa_is_set_IS_INBOUND (sa))
     flags |= IPSEC_API_SAD_FLAG_IS_INBOUND;
+  if (ipsec_sa_is_set_IS_ASYNC (sa))
+    flags |= IPSEC_API_SAD_FLAG_ASYNC;
 
   return clib_host_to_net_u32 (flags);
 }
index 50c6f5c..dcbbb82 100644 (file)
@@ -22,6 +22,7 @@ from vpp_papi import VppEnum
 
 NUM_PKTS = 67
 engines_supporting_chain_bufs = ["openssl"]
+engines = ["ia32", "ipsecmb", "openssl"]
 
 
 class ConfigIpsecESP(TemplateIpsec):
@@ -474,56 +475,112 @@ class TestIpsecEspAsync(TemplateIpsecEsp):
     def setUp(self):
         super(TestIpsecEspAsync, self).setUp()
 
-        self.vapi.ipsec_set_async_mode(async_enable=True)
-        self.p4 = IPsecIPv4Params()
-
-        self.p4.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
-                                     IPSEC_API_CRYPTO_ALG_AES_CBC_256)
-        self.p4.crypt_algo = 'AES-CBC'  # scapy name
-        self.p4.crypt_key = b'JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h'
-
-        self.p4.scapy_tun_sa_id += 0xf0000
-        self.p4.scapy_tun_spi += 0xf0000
-        self.p4.vpp_tun_sa_id += 0xf0000
-        self.p4.vpp_tun_spi += 0xf0000
-        self.p4.remote_tun_if_host = "2.2.2.2"
+        self.p_sync = IPsecIPv4Params()
+
+        self.p_sync.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
+                                         IPSEC_API_CRYPTO_ALG_AES_CBC_256)
+        self.p_sync.crypt_algo = 'AES-CBC'  # scapy name
+        self.p_sync.crypt_key = b'JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h'
+
+        self.p_sync.scapy_tun_sa_id += 0xf0000
+        self.p_sync.scapy_tun_spi += 0xf0000
+        self.p_sync.vpp_tun_sa_id += 0xf0000
+        self.p_sync.vpp_tun_spi += 0xf0000
+        self.p_sync.remote_tun_if_host = "2.2.2.2"
         e = VppEnum.vl_api_ipsec_spd_action_t
 
-        self.p4.sa = VppIpsecSA(
+        self.p_sync.sa = VppIpsecSA(
             self,
-            self.p4.vpp_tun_sa_id,
-            self.p4.vpp_tun_spi,
-            self.p4.auth_algo_vpp_id,
-            self.p4.auth_key,
-            self.p4.crypt_algo_vpp_id,
-            self.p4.crypt_key,
+            self.p_sync.vpp_tun_sa_id,
+            self.p_sync.vpp_tun_spi,
+            self.p_sync.auth_algo_vpp_id,
+            self.p_sync.auth_key,
+            self.p_sync.crypt_algo_vpp_id,
+            self.p_sync.crypt_key,
             self.vpp_esp_protocol,
-            self.tun_if.local_addr[self.p4.addr_type],
-            self.tun_if.remote_addr[self.p4.addr_type]).add_vpp_config()
-        self.p4.spd = VppIpsecSpdEntry(
+            self.tun_if.local_addr[self.p_sync.addr_type],
+            self.tun_if.remote_addr[self.p_sync.addr_type]).add_vpp_config()
+        self.p_sync.spd = VppIpsecSpdEntry(
             self,
             self.tun_spd,
-            self.p4.vpp_tun_sa_id,
-            self.pg1.remote_addr[self.p4.addr_type],
-            self.pg1.remote_addr[self.p4.addr_type],
-            self.p4.remote_tun_if_host,
-            self.p4.remote_tun_if_host,
+            self.p_sync.vpp_tun_sa_id,
+            self.pg1.remote_addr[self.p_sync.addr_type],
+            self.pg1.remote_addr[self.p_sync.addr_type],
+            self.p_sync.remote_tun_if_host,
+            self.p_sync.remote_tun_if_host,
             0,
             priority=1,
             policy=e.IPSEC_API_SPD_ACTION_PROTECT,
             is_outbound=1).add_vpp_config()
-        VppIpRoute(self,  self.p4.remote_tun_if_host, self.p4.addr_len,
-                   [VppRoutePath(self.tun_if.remote_addr[self.p4.addr_type],
-                                 0xffffffff)]).add_vpp_config()
-        config_tun_params(self.p4, self.encryption_type, self.tun_if)
+        VppIpRoute(self,
+                   self.p_sync.remote_tun_if_host,
+                   self.p_sync.addr_len,
+                   [VppRoutePath(
+                       self.tun_if.remote_addr[self.p_sync.addr_type],
+                       0xffffffff)]).add_vpp_config()
+        config_tun_params(self.p_sync, self.encryption_type, self.tun_if)
+
+        self.p_async = IPsecIPv4Params()
+
+        self.p_async.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
+                                          IPSEC_API_CRYPTO_ALG_AES_GCM_256)
+        self.p_async.auth_algo_vpp_id = (VppEnum.vl_api_ipsec_integ_alg_t.
+                                         IPSEC_API_INTEG_ALG_NONE)
+        self.p_async.crypt_algo = 'AES-GCM'  # scapy name
+        self.p_async.crypt_key = b'JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h'
+        self.p_async.auth_algo = 'NULL'
+
+        self.p_async.scapy_tun_sa_id += 0xe0000
+        self.p_async.scapy_tun_spi += 0xe0000
+        self.p_async.vpp_tun_sa_id += 0xe0000
+        self.p_async.vpp_tun_spi += 0xe0000
+        self.p_async.remote_tun_if_host = "2.2.2.3"
+
+        iflags = VppEnum.vl_api_ipsec_sad_flags_t
+        self.p_async.flags = (iflags.IPSEC_API_SAD_FLAG_USE_ESN |
+                              iflags.IPSEC_API_SAD_FLAG_USE_ANTI_REPLAY |
+                              iflags.IPSEC_API_SAD_FLAG_ASYNC)
+
+        self.p_async.sa = VppIpsecSA(
+            self,
+            self.p_async.vpp_tun_sa_id,
+            self.p_async.vpp_tun_spi,
+            self.p_async.auth_algo_vpp_id,
+            self.p_async.auth_key,
+            self.p_async.crypt_algo_vpp_id,
+            self.p_async.crypt_key,
+            self.vpp_esp_protocol,
+            self.tun_if.local_addr[self.p_async.addr_type],
+            self.tun_if.remote_addr[self.p_async.addr_type],
+            flags=self.p_async.flags).add_vpp_config()
+        self.p_async.spd = VppIpsecSpdEntry(
+            self,
+            self.tun_spd,
+            self.p_async.vpp_tun_sa_id,
+            self.pg1.remote_addr[self.p_async.addr_type],
+            self.pg1.remote_addr[self.p_async.addr_type],
+            self.p_async.remote_tun_if_host,
+            self.p_async.remote_tun_if_host,
+            0,
+            priority=2,
+            policy=e.IPSEC_API_SPD_ACTION_PROTECT,
+            is_outbound=1).add_vpp_config()
+        VppIpRoute(self,
+                   self.p_async.remote_tun_if_host,
+                   self.p_async.addr_len,
+                   [VppRoutePath(
+                       self.tun_if.remote_addr[self.p_async.addr_type],
+                       0xffffffff)]).add_vpp_config()
+        config_tun_params(self.p_async, self.encryption_type, self.tun_if)
 
     def test_dual_stream(self):
         """ Alternating SAs """
-        p = self.params[self.p4.addr_type]
+        p = self.params[self.p_sync.addr_type]
+        self.vapi.ipsec_set_async_mode(async_enable=True)
 
         pkts = [(Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                  IP(src=self.pg1.remote_ip4,
-                    dst=self.p4.remote_tun_if_host) /
+                    dst=self.p_sync.remote_tun_if_host) /
                  UDP(sport=4444, dport=4444) /
                  Raw(b'0x0' * 200)),
                 (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
@@ -540,14 +597,76 @@ class TestIpsecEspAsync(TemplateIpsecEsp):
         for rx in rxs:
             if rx[ESP].spi == p.scapy_tun_spi:
                 decrypted = p.vpp_tun_sa.decrypt(rx[IP])
-            elif rx[ESP].spi == self.p4.vpp_tun_spi:
-                decrypted = self.p4.scapy_tun_sa.decrypt(rx[IP])
+            elif rx[ESP].spi == self.p_sync.vpp_tun_spi:
+                decrypted = self.p_sync.scapy_tun_sa.decrypt(rx[IP])
+            else:
+                rx.show()
+                self.assertTrue(False)
+
+        self.p_sync.spd.remove_vpp_config()
+        self.p_sync.sa.remove_vpp_config()
+        self.p_async.spd.remove_vpp_config()
+        self.p_async.sa.remove_vpp_config()
+        self.vapi.ipsec_set_async_mode(async_enable=False)
+
+    def test_sync_async_noop_stream(self):
+        """ Alternating SAs sync/async/noop """
+        p = self.params[self.p_sync.addr_type]
+
+        # first pin the default/noop SA to worker 0
+        pkts = [(Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+                 IP(src=self.pg1.remote_ip4,
+                    dst=p.remote_tun_if_host) /
+                 UDP(sport=4444, dport=4444) /
+                 Raw(b'0x0' * 200))]
+        rxs = self.send_and_expect(self.pg1, pkts, self.pg0, worker=0)
+
+        self.logger.info(self.vapi.cli("sh ipsec sa"))
+        self.logger.info(self.vapi.cli("sh crypto async status"))
+
+        # then use all the other SAs on worker 1.
+        # some will handoff, others take the sync and async paths
+        pkts = [(Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+                 IP(src=self.pg1.remote_ip4,
+                    dst=self.p_sync.remote_tun_if_host) /
+                 UDP(sport=4444, dport=4444) /
+                 Raw(b'0x0' * 200)),
+                (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+                 IP(src=self.pg1.remote_ip4,
+                    dst=p.remote_tun_if_host) /
+                 UDP(sport=4444, dport=4444) /
+                 Raw(b'0x0' * 200)),
+                (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+                 IP(src=self.pg1.remote_ip4,
+                    dst=self.p_async.remote_tun_if_host) /
+                 UDP(sport=4444, dport=4444) /
+                 Raw(b'0x0' * 200))]
+        pkts *= 1023
+
+        rxs = self.send_and_expect(self.pg1, pkts, self.pg0, worker=1)
+
+        self.assertEqual(len(rxs), len(pkts))
+
+        for rx in rxs:
+            if rx[ESP].spi == p.scapy_tun_spi:
+                decrypted = p.vpp_tun_sa.decrypt(rx[IP])
+            elif rx[ESP].spi == self.p_sync.vpp_tun_spi:
+                decrypted = self.p_sync.scapy_tun_sa.decrypt(rx[IP])
+            elif rx[ESP].spi == self.p_async.vpp_tun_spi:
+                decrypted = self.p_async.scapy_tun_sa.decrypt(rx[IP])
             else:
                 rx.show()
                 self.assertTrue(False)
 
-        self.p4.spd.remove_vpp_config()
-        self.p4.sa.remove_vpp_config()
+        self.p_sync.spd.remove_vpp_config()
+        self.p_sync.sa.remove_vpp_config()
+        self.p_async.spd.remove_vpp_config()
+        self.p_async.sa.remove_vpp_config()
+
+        # async mode should have been disabled now that there are
+        # no async SAs. there's no API to query this, so we
+        # reluctantly screen-scrape the CLI output.
+        self.assertTrue("DISABLED" in self.vapi.cli("sh crypto async status"))
 
 
 class TestIpsecEspHandoff(TemplateIpsecEsp,
@@ -618,7 +737,6 @@ class TestIpsecEspUdp(TemplateIpsecEspUdp, IpsecTra4Tests):
 
 class MyParameters():
     def __init__(self):
-        self.engines = ["ia32", "ipsecmb", "openssl"]
         flag_esn = VppEnum.vl_api_ipsec_sad_flags_t.IPSEC_API_SAD_FLAG_USE_ESN
         self.flags = [0, flag_esn]
         # foreach crypto algorithm
@@ -828,6 +946,14 @@ class RunTestIpsecEspAll(ConfigIpsecESP,
                 self.verify_tun_44(self.params[socket.AF_INET],
                                    count=NUM_PKTS, payload_size=sz)
 
+        #
+        # swap the handlers while SAs are up
+        #
+        for e in engines:
+            if e != engine:
+                self.vapi.cli("set crypto handler all %s" % e)
+                self.verify_tra_basic4(count=NUM_PKTS)
+
         #
         # remove the SPDs, SAs, etc
         #