ipsec: add support for chained buffers (Gerrit change 58/23358/17)
author     Filip Tehlar <ftehlar@cisco.com>
           Tue, 4 Feb 2020 09:36:04 +0000 (09:36 +0000)
committer  Damjan Marion <dmarion@me.com>
           Tue, 11 Feb 2020 23:07:38 +0000 (23:07 +0000)
Type: feature

Change-Id: Ie072a7c2bbb1e4a77f7001754f01897efd30fc53
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
17 files changed:
src/plugins/crypto_openssl/main.c
src/plugins/unittest/crypto/aes_cbc.c
src/plugins/unittest/crypto/aes_gcm.c
src/plugins/unittest/crypto/crypto.h
src/plugins/unittest/crypto/rfc2202_hmac_md5.c
src/plugins/unittest/crypto/rfc2202_hmac_sha1.c
src/plugins/unittest/crypto/rfc4231.c
src/plugins/unittest/crypto_test.c
src/vnet/crypto/cli.c
src/vnet/crypto/crypto.c
src/vnet/crypto/crypto.h
src/vnet/crypto/format.c
src/vnet/ipsec/esp_decrypt.c
src/vnet/ipsec/esp_encrypt.c
src/vnet/ipsec/ipsec.h
test/template_ipsec.py
test/test_ipsec_esp.py
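
The heart of the change is a second data layout for crypto ops: an op flagged
with VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS describes its payload as a run of
vnet_crypto_op_chunk_t records (selected by chunk_index and n_chunks) in a
caller-owned chunk vector, and is submitted through the new
vnet_crypto_process_chained_ops() entry point. A minimal caller-side sketch
(not part of the patch; the key, IV and segment pointers are assumed to exist
and all names are illustrative):

  /* submit one AES-GCM encrypt over a two-segment buffer chain */
  static void
  chained_op_example (vlib_main_t * vm, u32 key_index, u8 * iv, u8 * tag,
                      u8 * seg0, u32 seg0_len, u8 * seg1, u32 seg1_len)
  {
    vnet_crypto_op_chunk_t *chunks = 0, ch;
    vnet_crypto_op_t op;

    vnet_crypto_op_init (&op, VNET_CRYPTO_OP_AES_128_GCM_ENC);
    op.key_index = key_index;       /* from an earlier vnet_crypto_key_add() */
    op.iv = iv;
    op.aad = 0;
    op.aad_len = 0;
    op.tag = tag;
    op.tag_len = 16;
    op.flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
    op.chunk_index = vec_len (chunks);  /* ops index into a shared chunk vec */

    ch.src = ch.dst = seg0;             /* encrypt segment 0 in place */
    ch.len = seg0_len;
    vec_add1 (chunks, ch);
    ch.src = ch.dst = seg1;             /* ...and segment 1 */
    ch.len = seg1_len;
    vec_add1 (chunks, ch);
    op.n_chunks = 2;

    vnet_crypto_process_chained_ops (vm, &op, chunks, 1);
    vec_free (chunks);
  }
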

diff --git a/src/plugins/crypto_openssl/main.c b/src/plugins/crypto_openssl/main.c
index 7362d6b..7775958 100644
@@ -58,13 +58,17 @@ static openssl_per_thread_data_t *per_thread_data = 0;
   _(SHA512, EVP_sha512)
 
 static_always_inline u32
-openssl_ops_enc_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
+openssl_ops_enc_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                    vnet_crypto_op_chunk_t * chunks, u32 n_ops,
                     const EVP_CIPHER * cipher)
 {
   openssl_per_thread_data_t *ptd = vec_elt_at_index (per_thread_data,
                                                     vm->thread_index);
   EVP_CIPHER_CTX *ctx = ptd->evp_cipher_ctx;
-  u32 i;
+  vnet_crypto_op_chunk_t *chp;
+  u32 i, j, curr_len = 0;
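+  /* scratch buffer for chained-mode output; its size bounds the total
+     chained payload this path can handle */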
+  u8 out_buf[VLIB_BUFFER_DEFAULT_DATA_SIZE * 5];
+
   for (i = 0; i < n_ops; i++)
     {
       vnet_crypto_op_t *op = ops[i];
@@ -81,22 +85,57 @@ openssl_ops_enc_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
        RAND_bytes (op->iv, iv_len);
 
       EVP_EncryptInit_ex (ctx, cipher, NULL, key->data, op->iv);
-      EVP_EncryptUpdate (ctx, op->dst, &out_len, op->src, op->len);
-      if (out_len < op->len)
-       EVP_EncryptFinal_ex (ctx, op->dst + out_len, &out_len);
+
+      if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+       EVP_CIPHER_CTX_set_padding (ctx, 0);
+
+      if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+       {
+         chp = chunks + op->chunk_index;
+         u32 offset = 0;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             EVP_EncryptUpdate (ctx, out_buf + offset, &out_len, chp->src,
+                                chp->len);
+             curr_len = chp->len;
+             offset += out_len;
+             chp += 1;
+           }
+         if (out_len < curr_len)
+           EVP_EncryptFinal_ex (ctx, out_buf + offset, &out_len);
+
+         offset = 0;
+         chp = chunks + op->chunk_index;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             clib_memcpy_fast (chp->dst, out_buf + offset, chp->len);
+             offset += chp->len;
+             chp += 1;
+           }
+       }
+      else
+       {
+         EVP_EncryptUpdate (ctx, op->dst, &out_len, op->src, op->len);
+         if (out_len < op->len)
+           EVP_EncryptFinal_ex (ctx, op->dst + out_len, &out_len);
+       }
       op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
     }
   return n_ops;
 }
 
 static_always_inline u32
-openssl_ops_dec_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
+openssl_ops_dec_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                    vnet_crypto_op_chunk_t * chunks, u32 n_ops,
                     const EVP_CIPHER * cipher)
 {
   openssl_per_thread_data_t *ptd = vec_elt_at_index (per_thread_data,
                                                     vm->thread_index);
   EVP_CIPHER_CTX *ctx = ptd->evp_cipher_ctx;
-  u32 i;
+  vnet_crypto_op_chunk_t *chp;
+  u32 i, j, curr_len = 0;
+  u8 out_buf[VLIB_BUFFER_DEFAULT_DATA_SIZE * 5];
+
   for (i = 0; i < n_ops; i++)
     {
       vnet_crypto_op_t *op = ops[i];
@@ -104,22 +143,55 @@ openssl_ops_dec_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
       int out_len;
 
       EVP_DecryptInit_ex (ctx, cipher, NULL, key->data, op->iv);
-      EVP_DecryptUpdate (ctx, op->dst, &out_len, op->src, op->len);
-      if (out_len < op->len)
-       EVP_DecryptFinal_ex (ctx, op->dst + out_len, &out_len);
+
+      if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+       EVP_CIPHER_CTX_set_padding (ctx, 0);
+
+      if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+       {
+         chp = chunks + op->chunk_index;
+         u32 offset = 0;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             EVP_DecryptUpdate (ctx, out_buf + offset, &out_len, chp->src,
+                                chp->len);
+             curr_len = chp->len;
+             offset += out_len;
+             chp += 1;
+           }
+         if (out_len < curr_len)
+           EVP_DecryptFinal_ex (ctx, out_buf + offset, &out_len);
+
+         offset = 0;
+         chp = chunks + op->chunk_index;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             clib_memcpy_fast (chp->dst, out_buf + offset, chp->len);
+             offset += chp->len;
+             chp += 1;
+           }
+       }
+      else
+       {
+         EVP_DecryptUpdate (ctx, op->dst, &out_len, op->src, op->len);
+         if (out_len < op->len)
+           EVP_DecryptFinal_ex (ctx, op->dst + out_len, &out_len);
+       }
       op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
     }
   return n_ops;
 }
 
 static_always_inline u32
-openssl_ops_enc_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
+openssl_ops_enc_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                    vnet_crypto_op_chunk_t * chunks, u32 n_ops,
                     const EVP_CIPHER * cipher)
 {
   openssl_per_thread_data_t *ptd = vec_elt_at_index (per_thread_data,
                                                     vm->thread_index);
   EVP_CIPHER_CTX *ctx = ptd->evp_cipher_ctx;
-  u32 i;
+  vnet_crypto_op_chunk_t *chp;
+  u32 i, j;
   for (i = 0; i < n_ops; i++)
     {
       vnet_crypto_op_t *op = ops[i];
@@ -134,7 +206,17 @@ openssl_ops_enc_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
       EVP_EncryptInit_ex (ctx, 0, 0, key->data, op->iv);
       if (op->aad_len)
        EVP_EncryptUpdate (ctx, NULL, &len, op->aad, op->aad_len);
-      EVP_EncryptUpdate (ctx, op->dst, &len, op->src, op->len);
+      if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+       {
+         chp = chunks + op->chunk_index;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             EVP_EncryptUpdate (ctx, chp->dst, &len, chp->src, chp->len);
+             chp += 1;
+           }
+       }
+      else
+       EVP_EncryptUpdate (ctx, op->dst, &len, op->src, op->len);
       EVP_EncryptFinal_ex (ctx, op->dst + len, &len);
       EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_GET_TAG, op->tag_len, op->tag);
       op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
@@ -143,13 +225,15 @@ openssl_ops_enc_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
 }
 
 static_always_inline u32
-openssl_ops_dec_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
+openssl_ops_dec_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                    vnet_crypto_op_chunk_t * chunks, u32 n_ops,
                     const EVP_CIPHER * cipher)
 {
   openssl_per_thread_data_t *ptd = vec_elt_at_index (per_thread_data,
                                                     vm->thread_index);
   EVP_CIPHER_CTX *ctx = ptd->evp_cipher_ctx;
-  u32 i, n_fail = 0;
+  vnet_crypto_op_chunk_t *chp;
+  u32 i, j, n_fail = 0;
   for (i = 0; i < n_ops; i++)
     {
       vnet_crypto_op_t *op = ops[i];
@@ -161,7 +245,17 @@ openssl_ops_dec_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
       EVP_DecryptInit_ex (ctx, 0, 0, key->data, op->iv);
       if (op->aad_len)
        EVP_DecryptUpdate (ctx, 0, &len, op->aad, op->aad_len);
-      EVP_DecryptUpdate (ctx, op->dst, &len, op->src, op->len);
+      if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+       {
+         chp = chunks + op->chunk_index;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             EVP_DecryptUpdate (ctx, chp->dst, &len, chp->src, chp->len);
+             chp += 1;
+           }
+       }
+      else
+       EVP_DecryptUpdate (ctx, op->dst, &len, op->src, op->len);
       EVP_CIPHER_CTX_ctrl (ctx, EVP_CTRL_GCM_SET_TAG, op->tag_len, op->tag);
 
       if (EVP_DecryptFinal_ex (ctx, op->dst + len, &len) > 0)
@@ -176,14 +270,16 @@ openssl_ops_dec_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
 }
 
 static_always_inline u32
-openssl_ops_hmac (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
+openssl_ops_hmac (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+                 vnet_crypto_op_chunk_t * chunks, u32 n_ops,
                  const EVP_MD * md)
 {
   u8 buffer[64];
   openssl_per_thread_data_t *ptd = vec_elt_at_index (per_thread_data,
                                                     vm->thread_index);
   HMAC_CTX *ctx = ptd->hmac_ctx;
-  u32 i, n_fail = 0;
+  vnet_crypto_op_chunk_t *chp;
+  u32 i, j, n_fail = 0;
   for (i = 0; i < n_ops; i++)
     {
       vnet_crypto_op_t *op = ops[i];
@@ -192,7 +288,17 @@ openssl_ops_hmac (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
       size_t sz = op->digest_len ? op->digest_len : EVP_MD_size (md);
 
       HMAC_Init_ex (ctx, key->data, vec_len (key->data), md, NULL);
-      HMAC_Update (ctx, op->src, op->len);
+      if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+       {
+         chp = chunks + op->chunk_index;
+         for (j = 0; j < op->n_chunks; j++)
+           {
+             HMAC_Update (ctx, chp->src, chp->len);
+             chp += 1;
+           }
+       }
+      else
+       HMAC_Update (ctx, op->src, op->len);
       HMAC_Final (ctx, buffer, &out_len);
 
       if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
@@ -211,14 +317,24 @@ openssl_ops_hmac (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops,
   return n_ops - n_fail;
 }
 
-#define _(m, a, b) \
-static u32 \
-openssl_ops_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return openssl_ops_enc_##m (vm, ops, n_ops, b ()); } \
-\
-u32 \
-openssl_ops_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return openssl_ops_dec_##m (vm, ops, n_ops, b ()); }
+#define _(m, a, b)                                                            \
+static u32                                                                    \
+openssl_ops_enc_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops)   \
+{ return openssl_ops_enc_##m (vm, ops, 0, n_ops, b ()); }                     \
+                                                                              \
+u32                                                                           \
+openssl_ops_dec_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops)   \
+{ return openssl_ops_dec_##m (vm, ops, 0, n_ops, b ()); }                     \
+                                                                              \
+static u32                                                                    \
+openssl_ops_enc_chained_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],      \
+    vnet_crypto_op_chunk_t *chunks, u32 n_ops)                                \
+{ return openssl_ops_enc_##m (vm, ops, chunks, n_ops, b ()); }                \
+                                                                              \
+static u32                                                                    \
+openssl_ops_dec_chained_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[],      \
+    vnet_crypto_op_chunk_t *chunks, u32 n_ops)                                \
+{ return openssl_ops_dec_##m (vm, ops, chunks, n_ops, b ()); }
 
 foreach_openssl_evp_op;
 #undef _
@@ -226,7 +342,11 @@ foreach_openssl_evp_op;
 #define _(a, b) \
 static u32 \
 openssl_ops_hmac_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
-{ return openssl_ops_hmac (vm, ops, n_ops, b ()); } \
+{ return openssl_ops_hmac (vm, ops, 0, n_ops, b ()); } \
+static u32 \
+openssl_ops_hmac_chained_##a (vlib_main_t * vm, vnet_crypto_op_t * ops[], \
+    vnet_crypto_op_chunk_t *chunks, u32 n_ops) \
+{ return openssl_ops_hmac (vm, ops, chunks, n_ops, b ()); } \
 
 foreach_openssl_hmac_op;
 #undef _
@@ -244,17 +364,20 @@ crypto_openssl_init (vlib_main_t * vm)
   u32 eidx = vnet_crypto_register_engine (vm, "openssl", 50, "OpenSSL");
 
 #define _(m, a, b) \
-  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
-                                   openssl_ops_enc_##a); \
-  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
-                                   openssl_ops_dec_##a);
+  vnet_crypto_register_ops_handlers (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
+                                   openssl_ops_enc_##a, \
+                                    openssl_ops_enc_chained_##a); \
+  vnet_crypto_register_ops_handlers (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
+                                   openssl_ops_dec_##a, \
+                                    openssl_ops_dec_chained_##a); \
 
   foreach_openssl_evp_op;
 #undef _
 
 #define _(a, b) \
-  vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
-                                   openssl_ops_hmac_##a); \
+  vnet_crypto_register_ops_handlers (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
+                                   openssl_ops_hmac_##a, \
+                                    openssl_ops_hmac_chained_##a); \
 
   foreach_openssl_hmac_op;
 #undef _
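
For orientation, assuming the op list contains an entry of the form
_ (gcm, AES_128_GCM, EVP_aes_128_gcm), the rewritten macro now emits four
wrappers per entry instead of two; the encrypt-side pair expands to:

  static u32
  openssl_ops_enc_AES_128_GCM (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                               u32 n_ops)
  { return openssl_ops_enc_gcm (vm, ops, 0, n_ops, EVP_aes_128_gcm ()); }

  static u32
  openssl_ops_enc_chained_AES_128_GCM (vlib_main_t * vm,
                                       vnet_crypto_op_t * ops[],
                                       vnet_crypto_op_chunk_t * chunks,
                                       u32 n_ops)
  { return openssl_ops_enc_gcm (vm, ops, chunks, n_ops,
                                EVP_aes_128_gcm ()); }

  /* plus the two decrypt-side counterparts; all four are wired up via
     vnet_crypto_register_ops_handlers() in crypto_openssl_init() above */
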
diff --git a/src/plugins/unittest/crypto/aes_cbc.c b/src/plugins/unittest/crypto/aes_cbc.c
index b52f728..b3e95e4 100644
@@ -122,6 +122,21 @@ UNITTEST_REGISTER_CRYPTO_TEST (nist_aes256_cbc) = {
   .ciphertext = TEST_DATA (ciphertext256),
 };
 
+UNITTEST_REGISTER_CRYPTO_TEST (nist_aes256_cbc_chained) = {
+  .name = "NIST SP 800-38A [chained]",
+  .alg = VNET_CRYPTO_ALG_AES_256_CBC,
+  .iv = TEST_DATA (iv),
+  .key = TEST_DATA (key256),
+  .is_chained = 1,
+  .pt_chunks = {
+    TEST_DATA_CHUNK (plaintext, 0, 32),
+    TEST_DATA_CHUNK (plaintext, 32, 32),
+  },
+  .ct_chunks = {
+    TEST_DATA_CHUNK (ciphertext256, 0, 32),
+    TEST_DATA_CHUNK (ciphertext256, 32, 32),
+  },
+};
 /* *INDENT-ON* */
 
 /*
diff --git a/src/plugins/unittest/crypto/aes_gcm.c b/src/plugins/unittest/crypto/aes_gcm.c
index 0a7aafc..764ca9e 100644
@@ -244,6 +244,26 @@ UNITTEST_REGISTER_CRYPTO_TEST (aes_gcm256_tc4) = {
   .aad = TEST_DATA(tc4_aad),
   .tag = TEST_DATA (tc4_tag256),
 };
+
+UNITTEST_REGISTER_CRYPTO_TEST (aes_gcm256_tc4_chain) = {
+  .name = "256-GCM Spec. TC4 [chained]",
+  .alg = VNET_CRYPTO_ALG_AES_256_GCM,
+  .iv = TEST_DATA (tc3_iv),
+  .key = TEST_DATA (tc3_key256),
+  .aad = TEST_DATA(tc4_aad),
+  .tag = TEST_DATA (tc4_tag256),
+  .is_chained = 1,
+  .pt_chunks = {
+    TEST_DATA_CHUNK (tc4_plaintext, 0, 20),
+    TEST_DATA_CHUNK (tc4_plaintext, 20, 20),
+    TEST_DATA_CHUNK (tc4_plaintext, 40, 20),
+  },
+  .ct_chunks = {
+    TEST_DATA_CHUNK (tc4_ciphertext256, 0, 20),
+    TEST_DATA_CHUNK (tc4_ciphertext256, 20, 20),
+    TEST_DATA_CHUNK (tc4_ciphertext256, 40, 20),
+  },
+};
 /* *INDENT-ON* */
 
 /*
diff --git a/src/plugins/unittest/crypto/crypto.h b/src/plugins/unittest/crypto/crypto.h
index f15e34b..d95c994 100644
@@ -17,6 +17,8 @@
 #ifndef included_unittest_crypto_crypto_h
 #define included_unittest_crypto_crypto_h
 
+#define CRYPTO_TEST_MAX_OP_CHUNKS 8
+
 typedef struct
 {
   u32 length;
@@ -29,6 +31,11 @@ typedef struct unittest_crypto_test_registration
   vnet_crypto_alg_t alg;
   unittest_crypto_test_data_t iv, key, digest, plaintext, ciphertext, aad,
     tag;
+  u8 is_chained;
+
+  /* plaintext and cipher text data used for testing chained buffers */
+  unittest_crypto_test_data_t pt_chunks[CRYPTO_TEST_MAX_OP_CHUNKS + 1];
+  unittest_crypto_test_data_t ct_chunks[CRYPTO_TEST_MAX_OP_CHUNKS + 1];
 
   /* next */
   struct unittest_crypto_test_registration *next;
@@ -52,6 +59,7 @@ typedef struct
 extern crypto_test_main_t crypto_test_main;
 
 #define TEST_DATA(n) { .data = (u8 *) n, .length = sizeof (n)}
+#define TEST_DATA_CHUNK(s,off,n) { .data = (u8 *) s + off, .length = n}
 
 #define UNITTEST_REGISTER_CRYPTO_TEST(x)                                     \
   unittest_crypto_test_registration_t __unittest_crypto_test_##x;            \
diff --git a/src/plugins/unittest/crypto/rfc2202_hmac_md5.c b/src/plugins/unittest/crypto/rfc2202_hmac_md5.c
index 76bd8a5..7a39aed 100644
@@ -191,6 +191,18 @@ UNITTEST_REGISTER_CRYPTO_TEST (rfc_2202_md5_tc7) = {
   .plaintext = TEST_DATA (md5_tc7_data),
   .digest = TEST_DATA (md5_tc7_digest),
 };
+
+UNITTEST_REGISTER_CRYPTO_TEST (rfc_2202_md5_tc7_chained) = {
+  .name = "RFC2202 HMAC-MD5 TC7 [chained]",
+  .alg = VNET_CRYPTO_ALG_HMAC_MD5,
+  .key = TEST_DATA (md5_tc6_key),
+  .digest = TEST_DATA (md5_tc7_digest),
+  .is_chained = 1,
+  .pt_chunks = {
+    TEST_DATA_CHUNK (md5_tc7_data, 0, 40),
+    TEST_DATA_CHUNK (md5_tc7_data, 40, 33)
+  },
+};
 /* *INDENT-ON* */
 
 /*
diff --git a/src/plugins/unittest/crypto/rfc2202_hmac_sha1.c b/src/plugins/unittest/crypto/rfc2202_hmac_sha1.c
index b3942aa..d009afe 100644
@@ -218,6 +218,21 @@ UNITTEST_REGISTER_CRYPTO_TEST (rfc_2202_sha1_tc7) = {
 };
 /* *INDENT-ON* */
 
+/* *INDENT-OFF* */
+UNITTEST_REGISTER_CRYPTO_TEST (rfc_2202_sha1_tc7_chained) = {
+  .name = "RFC2202 HMAC-SHA-1 TC7 [chained]",
+  .alg = VNET_CRYPTO_ALG_HMAC_SHA1,
+  .key = TEST_DATA (sha1_tc6_key),
+  .digest = TEST_DATA (sha1_tc7_digest),
+
+  .is_chained = 1,
+  .pt_chunks = {
+    TEST_DATA_CHUNK (sha1_tc7_data, 0, 40),
+    TEST_DATA_CHUNK (sha1_tc7_data, 40, 33)
+  },
+};
+/* *INDENT-ON* */
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/plugins/unittest/crypto/rfc4231.c b/src/plugins/unittest/crypto/rfc4231.c
index b247d62..127e1bf 100644
@@ -584,6 +584,20 @@ UNITTEST_REGISTER_CRYPTO_TEST (rfc4231_tc7_sha512) = {
   .plaintext = TEST_DATA (tc7_data),
   .digest = TEST_DATA (tc7_digest_sha512),
 };
+
+UNITTEST_REGISTER_CRYPTO_TEST (rfc4231_tc7_sha512_chain) = {
+  .name = "RFC4231 TC7 [chained]",
+  .alg = VNET_CRYPTO_ALG_HMAC_SHA512,
+  .key = TEST_DATA (tc7_key),
+  .digest = TEST_DATA (tc7_digest_sha512),
+  .is_chained = 1,
+  .pt_chunks = {
+    TEST_DATA_CHUNK (tc7_data, 0, 50),
+    TEST_DATA_CHUNK (tc7_data, 50, 50),
+    TEST_DATA_CHUNK (tc7_data, 100, 50),
+    TEST_DATA_CHUNK (tc7_data, 150, 2),
+  },
+};
 /* *INDENT-ON* */
 
 /*
diff --git a/src/plugins/unittest/crypto_test.c b/src/plugins/unittest/crypto_test.c
index 9030415..3bc0659 100644
@@ -30,6 +30,137 @@ sort_registrations (void *a0, void *a1)
   return (strncmp (r0[0]->name, r1[0]->name, 256));
 }
 
+static void
+print_results (vlib_main_t * vm, unittest_crypto_test_registration_t ** rv,
+              vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
+              u32 n_ops, int verbose)
+{
+  int i;
+  unittest_crypto_test_registration_t *r;
+  vnet_crypto_op_chunk_t *chp;
+  u8 *s = 0, *err = 0;
+  vnet_crypto_op_t *op;
+
+  vec_foreach (op, ops)
+  {
+    int fail = 0;
+    r = rv[op->user_data];
+    unittest_crypto_test_data_t *exp_pt = 0, *exp_ct = 0;
+    unittest_crypto_test_data_t *exp_digest = 0, *exp_tag = 0;
+    unittest_crypto_test_data_t *exp_pt_chunks = 0, *exp_ct_chunks = 0;
+
+    switch (vnet_crypto_get_op_type (op->op))
+      {
+      case VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT:
+       exp_tag = &r->tag;
+       /* fall through */
+      case VNET_CRYPTO_OP_TYPE_ENCRYPT:
+       exp_ct = &r->ciphertext;
+       exp_ct_chunks = r->ct_chunks;
+       break;
+      case VNET_CRYPTO_OP_TYPE_AEAD_DECRYPT:
+      case VNET_CRYPTO_OP_TYPE_DECRYPT:
+       exp_pt = &r->plaintext;
+       exp_pt_chunks = r->pt_chunks;
+       break;
+      case VNET_CRYPTO_OP_TYPE_HMAC:
+       exp_digest = &r->digest;
+       break;
+      default:
+       ASSERT (0);
+      }
+
+    vec_reset_length (err);
+
+    if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+      err = format (err, "%sengine error: %U", vec_len (err) ? ", " : "",
+                   format_vnet_crypto_op_status, op->status);
+
+    if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+      {
+       if (exp_ct_chunks)
+         {
+           chp = vec_elt_at_index (chunks, op->chunk_index);
+           for (i = 0; i < op->n_chunks; i++)
+             {
+               if (memcmp (chp->dst, exp_ct_chunks[i].data, chp->len))
+                 err = format (err, "%sciphertext mismatch [chunk %d]",
+                               vec_len (err) ? ", " : "", i);
+               chp += 1;
+             }
+         }
+
+       if (exp_pt_chunks)
+         {
+           chp = vec_elt_at_index (chunks, op->chunk_index);
+           for (i = 0; i < op->n_chunks; i++)
+             {
+               if (memcmp (chp->dst, exp_pt_chunks[i].data, chp->len))
+                 err = format (err, "%splaintext mismatch [chunk %d]",
+                               vec_len (err) ? ", " : "", i);
+               chp += 1;
+             }
+         }
+      }
+    else
+      {
+       if (exp_ct && memcmp (op->dst, exp_ct->data, exp_ct->length) != 0)
+         err = format (err, "%sciphertext mismatch",
+                       vec_len (err) ? ", " : "");
+
+       if (exp_pt && memcmp (op->dst, exp_pt->data, exp_pt->length) != 0)
+         err = format (err, "%splaintext mismatch",
+                       vec_len (err) ? ", " : "");
+      }
+
+    if (exp_tag && memcmp (op->tag, exp_tag->data, exp_tag->length) != 0)
+      err = format (err, "%stag mismatch", vec_len (err) ? ", " : "");
+
+    if (exp_digest &&
+       memcmp (op->digest, exp_digest->data, exp_digest->length) != 0)
+      err = format (err, "%sdigest mismatch", vec_len (err) ? ", " : "");
+
+    vec_reset_length (s);
+    s = format (s, "%s (%U)", r->name, format_vnet_crypto_op, op->op,
+               r->is_chained);
+
+    if (vec_len (err))
+      fail = 1;
+
+    vlib_cli_output (vm, "%-60v%s%v", s, vec_len (err) ? "FAIL: " : "OK",
+                    err);
+    if (verbose)
+      {
+       if (verbose == 2)
+         fail = 1;
+
+       if (exp_ct && fail)
+         vlib_cli_output (vm, "Expected ciphertext:\n%U"
+                          "\nCalculated ciphertext:\n%U",
+                          format_hexdump, exp_ct->data, exp_ct->length,
+                          format_hexdump, op->dst, exp_ct->length);
+       if (exp_pt && fail)
+         vlib_cli_output (vm, "Expected plaintext:\n%U"
+                          "\nCalculated plaintext:\n%U",
+                          format_hexdump, exp_pt->data, exp_pt->length,
+                          format_hexdump, op->dst, exp_pt->length);
+       if (r->tag.length && fail)
+         vlib_cli_output (vm, "Expected tag:\n%U"
+                          "\nCalculated tag:\n%U",
+                          format_hexdump, r->tag.data, r->tag.length,
+                          format_hexdump, op->tag, op->tag_len);
+       if (exp_digest && fail)
+         vlib_cli_output (vm, "Expected digest:\n%U"
+                          "\nCalculated Digest:\n%U",
+                          format_hexdump, exp_digest->data,
+                          exp_digest->length, format_hexdump, op->digest,
+                          op->digest_len);
+      }
+  }
+  vec_free (err);
+  vec_free (s);
+}
+
 static clib_error_t *
 test_crypto (vlib_main_t * vm, crypto_test_main_t * tm)
 {
@@ -37,11 +168,14 @@ test_crypto (vlib_main_t * vm, crypto_test_main_t * tm)
   unittest_crypto_test_registration_t *r = tm->test_registrations;
   unittest_crypto_test_registration_t **rv = 0;
   vnet_crypto_alg_data_t *ad;
-  vnet_crypto_op_t *ops = 0, *op;
+  vnet_crypto_op_t *ops = 0, *op, *chained_ops = 0;
+  vnet_crypto_op_t *current_chained_op = 0, *current_op = 0;
+  vnet_crypto_op_chunk_t *chunks = 0, ch;
   vnet_crypto_key_index_t *key_indices = 0;
-  u8 *computed_data = 0, *s = 0, *err = 0;
-  u32 computed_data_total_len = 0, n_ops = 0;
-  u32 i;
+  u8 *computed_data = 0;
+  u32 computed_data_total_len = 0, n_ops = 0, n_chained_ops = 0;
+  unittest_crypto_test_data_t *pt, *ct;
+  u32 i, j;
 
   /* construct registration vector */
   while (r)
@@ -61,17 +195,56 @@ test_crypto (vlib_main_t * vm, crypto_test_main_t * tm)
            case VNET_CRYPTO_OP_TYPE_ENCRYPT:
            case VNET_CRYPTO_OP_TYPE_DECRYPT:
            case VNET_CRYPTO_OP_TYPE_AEAD_DECRYPT:
-             computed_data_total_len += r->ciphertext.length;
-             n_ops += 1;
+             if (r->is_chained)
+               {
+                 ct = r->ct_chunks;
+                 j = 0;
+                 while (ct->data)
+                   {
+                     if (j > CRYPTO_TEST_MAX_OP_CHUNKS)
+                       return clib_error_return (0,
+                                                 "test case '%s' exceeds extra data!",
+                                                 "test case '%s' has too many chunks!",
+                     computed_data_total_len += ct->length;
+                     ct++;
+                     j++;
+                   }
+                 n_chained_ops += 1;
+               }
+             else
+               {
+                 computed_data_total_len += r->ciphertext.length;
+                 n_ops += 1;
+               }
              break;
            case VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT:
              computed_data_total_len += r->ciphertext.length;
              computed_data_total_len += r->tag.length;
-             n_ops += 1;
+             if (r->is_chained)
+               {
+                 ct = r->ct_chunks;
+                 j = 0;
+                 while (ct->data)
+                   {
+                     if (j > CRYPTO_TEST_MAX_OP_CHUNKS)
+                       return clib_error_return (0,
+                                                 "test case '%s' has too many chunks!",
+                                                 r->name);
+                     computed_data_total_len += ct->length;
+                     ct++;
+                     j++;
+                   }
+                 n_chained_ops += 1;
+               }
+             else
+               n_ops += 1;
              break;
            case VNET_CRYPTO_OP_TYPE_HMAC:
              computed_data_total_len += r->digest.length;
-             n_ops += 1;
+             if (r->is_chained)
+               n_chained_ops += 1;
+             else
+               n_ops += 1;
              break;
            default:
              break;
@@ -91,9 +264,12 @@ test_crypto (vlib_main_t * vm, crypto_test_main_t * tm)
   vec_validate_aligned (computed_data, computed_data_total_len - 1,
                        CLIB_CACHE_LINE_BYTES);
   vec_validate_aligned (ops, n_ops - 1, CLIB_CACHE_LINE_BYTES);
+  vec_validate_aligned (chained_ops, n_chained_ops - 1,
+                       CLIB_CACHE_LINE_BYTES);
   computed_data_total_len = 0;
 
-  op = ops;
+  current_op = ops;
+  current_chained_op = chained_ops;
   /* *INDENT-OFF* */
   vec_foreach_index (i, rv)
     {
@@ -107,7 +283,18 @@ test_crypto (vlib_main_t * vm, crypto_test_main_t * tm)
          if (id == 0)
            continue;
 
-         vnet_crypto_op_init (op, id);
+          if (r->is_chained)
+          {
+            op = current_chained_op;
+            current_chained_op += 1;
+          }
+          else
+          {
+            op = current_op;
+            current_op += 1;
+          }
+
+          vnet_crypto_op_init (op, id);
 
          switch (t)
            {
@@ -118,14 +305,85 @@ test_crypto (vlib_main_t * vm, crypto_test_main_t * tm)
                                                   r->key.data,
                                                   r->key.length);
              vec_add1 (key_indices, op->key_index);
-             op->len = r->plaintext.length;
-             op->src = t == VNET_CRYPTO_OP_TYPE_ENCRYPT ?
-               r->plaintext.data : r->ciphertext.data;
-             op->dst = computed_data + computed_data_total_len;
-             computed_data_total_len += r->ciphertext.length;
+
+              if (r->is_chained)
+              {
+              pt = r->pt_chunks;
+              ct = r->ct_chunks;
+              op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+              op->chunk_index = vec_len (chunks);
+              while (pt->data)
+                {
+                  ch.src = t == VNET_CRYPTO_OP_TYPE_ENCRYPT ?
+                    pt->data : ct->data;
+                  ch.len = pt->length;
+                  ch.dst = computed_data + computed_data_total_len;
+                  computed_data_total_len += pt->length;
+                  vec_add1 (chunks, ch);
+                  op->n_chunks++;
+                  pt++;
+                  ct++;
+                }
+              }
+              else
+              {
+              op->len = r->plaintext.length;
+              op->src = t == VNET_CRYPTO_OP_TYPE_ENCRYPT ?
+                r->plaintext.data : r->ciphertext.data;
+              op->dst = computed_data + computed_data_total_len;
+              computed_data_total_len += r->ciphertext.length;
+              }
              break;
            case VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT:
            case VNET_CRYPTO_OP_TYPE_AEAD_DECRYPT:
+              if (r->is_chained)
+              {
+             op->iv = r->iv.data;
+             op->key_index = vnet_crypto_key_add (vm, r->alg,
+                                                  r->key.data,
+                                                  r->key.length);
+             vec_add1 (key_indices, op->key_index);
+             op->aad = r->aad.data;
+             op->aad_len = r->aad.length;
+             if (t == VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT)
+               {
+                  pt = r->pt_chunks;
+                  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+                  op->chunk_index = vec_len (chunks);
+                  while (pt->data)
+                    {
+                      ch.src = pt->data;
+                      ch.len = pt->length;
+                      ch.dst = computed_data + computed_data_total_len;
+                      computed_data_total_len += pt->length;
+                      vec_add1 (chunks, ch);
+                      op->n_chunks++;
+                      pt++;
+                    }
+                  op->tag = computed_data + computed_data_total_len;
+                  computed_data_total_len += r->tag.length;
+                }
+              else
+                {
+                  ct = r->ct_chunks;
+                  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+                  op->chunk_index = vec_len (chunks);
+                  while (ct->data)
+                    {
+                      ch.src = ct->data;
+                      ch.len = ct->length;
+                      ch.dst = computed_data + computed_data_total_len;
+                      computed_data_total_len += ct->length;
+                      vec_add1 (chunks, ch);
+                      op->n_chunks++;
+                      ct++;
+                    }
+                  op->tag = r->tag.data;
+                }
+             op->tag_len = r->tag.length;
+              }
+              else
+              {
              op->iv = r->iv.data;
              op->key_index = vnet_crypto_key_add (vm, r->alg,
                                                   r->key.data,
@@ -136,135 +394,80 @@ test_crypto (vlib_main_t * vm, crypto_test_main_t * tm)
              op->len = r->plaintext.length;
              op->dst = computed_data + computed_data_total_len;
              computed_data_total_len += r->ciphertext.length;
+
              if (t == VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT)
                {
-                 op->src = r->plaintext.data;
+                  op->src = r->plaintext.data;
                  op->tag = computed_data + computed_data_total_len;
                  computed_data_total_len += r->tag.length;
                }
              else
                {
-                 op->src = r->ciphertext.data;
-                 op->tag = r->tag.data;
+                  op->tag = r->tag.data;
+                  op->src = r->ciphertext.data;
                }
              op->tag_len = r->tag.length;
+              }
              break;
            case VNET_CRYPTO_OP_TYPE_HMAC:
+              if (r->is_chained)
+              {
              op->key_index = vnet_crypto_key_add (vm, r->alg,
                                                   r->key.data,
                                                   r->key.length);
              vec_add1 (key_indices, op->key_index);
-             op->src = r->plaintext.data;
-             op->len = r->plaintext.length;
-             op->digest_len = r->digest.length;
-             op->digest = computed_data + computed_data_total_len;
-             computed_data_total_len += r->digest.length;
+              op->digest_len = r->digest.length;
+              op->digest = computed_data + computed_data_total_len;
+              computed_data_total_len += r->digest.length;
+              pt = r->pt_chunks;
+              op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+              op->chunk_index = vec_len (chunks);
+              while (pt->data)
+                {
+                  ch.src = pt->data;
+                  ch.len = pt->length;
+                  vec_add1 (chunks, ch);
+                  op->n_chunks++;
+                  pt++;
+                }
+              }
+              else
+              {
+             op->key_index = vnet_crypto_key_add (vm, r->alg,
+                                                  r->key.data,
+                                                  r->key.length);
+             vec_add1 (key_indices, op->key_index);
+              op->digest_len = r->digest.length;
+              op->digest = computed_data + computed_data_total_len;
+              computed_data_total_len += r->digest.length;
+              op->src = r->plaintext.data;
+              op->len = r->plaintext.length;
+              }
              break;
            default:
              break;
            };
 
          op->user_data = i;
-         op++;
        }
     }
   /* *INDENT-ON* */
 
   vnet_crypto_process_ops (vm, ops, vec_len (ops));
+  vnet_crypto_process_chained_ops (vm, chained_ops, chunks,
+                                  vec_len (chained_ops));
 
-  /* *INDENT-OFF* */
-  vec_foreach (op, ops)
-    {
-      int fail = 0;
-      r = rv[op->user_data];
-      unittest_crypto_test_data_t *exp_pt = 0, *exp_ct = 0;
-      unittest_crypto_test_data_t *exp_digest = 0, *exp_tag = 0;
+  print_results (vm, rv, ops, chunks, vec_len (ops), tm->verbose);
+  print_results (vm, rv, chained_ops, chunks, vec_len (chained_ops),
+                tm->verbose);
 
-      switch (vnet_crypto_get_op_type (op->op))
-       {
-       case VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT:
-         exp_tag = &r->tag;
-          /* fall through */
-       case VNET_CRYPTO_OP_TYPE_ENCRYPT:
-         exp_ct = &r->ciphertext;
-         break;
-       case VNET_CRYPTO_OP_TYPE_AEAD_DECRYPT:
-       case VNET_CRYPTO_OP_TYPE_DECRYPT:
-         exp_pt = &r->plaintext;
-         break;
-       case VNET_CRYPTO_OP_TYPE_HMAC:
-         exp_digest = &r->digest;
-         break;
-       default:
-         break;
-       }
-
-      vec_reset_length (err);
-
-      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
-       err = format (err, "%sengine error: %U", vec_len (err) ? ", " : "",
-                     format_vnet_crypto_op_status, op->status);
-
-      if (exp_ct && memcmp (op->dst, exp_ct->data, exp_ct->length) != 0)
-       err = format (err, "%sciphertext mismatch",
-                     vec_len (err) ? ", " : "");
-
-      if (exp_pt && memcmp (op->dst, exp_pt->data, exp_pt->length) != 0)
-       err = format (err, "%splaintext mismatch", vec_len (err) ? ", " : "");
-
-      if (exp_tag && memcmp (op->tag, exp_tag->data, exp_tag->length) != 0)
-       err = format (err, "%stag mismatch", vec_len (err) ? ", " : "");
-
-      if (exp_digest &&
-         memcmp (op->digest, exp_digest->data, exp_digest->length) != 0)
-       err = format (err, "%sdigest mismatch", vec_len (err) ? ", " : "");
-
-      vec_reset_length (s);
-      s = format (s, "%s (%U)", r->name, format_vnet_crypto_op, op->op);
-
-      if (vec_len (err))
-       fail = 1;
-
-      vlib_cli_output (vm, "%-60v%s%v", s, vec_len (err) ? "FAIL: " : "OK",
-                      err);
-      if (tm->verbose)
-       {
-         if (tm->verbose == 2)
-           fail = 1;
-
-         if (exp_ct && fail)
-           vlib_cli_output (vm, "Expected ciphertext:\n%U"
-                            "\nCalculated ciphertext:\n%U",
-                            format_hexdump, exp_ct->data, exp_ct->length,
-                            format_hexdump, op->dst, exp_ct->length);
-         if (exp_pt && fail)
-           vlib_cli_output (vm, "Expected plaintext:\n%U"
-                            "\nCalculated plaintext:\n%U",
-                            format_hexdump, exp_pt->data, exp_pt->length,
-                            format_hexdump, op->dst, exp_pt->length);
-         if (r->tag.length && fail)
-           vlib_cli_output (vm, "Expected tag:\n%U"
-                            "\nCalculated tag:\n%U",
-                            format_hexdump, r->tag.data, r->tag.length,
-                            format_hexdump, op->tag, op->tag_len);
-         if (exp_digest && fail)
-           vlib_cli_output (vm, "Expected digest:\n%U"
-                            "\nCalculated Digest:\n%U",
-                            format_hexdump, exp_digest->data,
-                            exp_digest->length, format_hexdump, op->digest,
-                            op->digest_len);
-       }
-    }
-
-  vec_foreach_index (i, key_indices)
-    vnet_crypto_key_del (vm, key_indices[i]);
-  /* *INDENT-ON* */
+  vec_foreach_index (i, key_indices) vnet_crypto_key_del (vm, key_indices[i]);
 
   vec_free (computed_data);
   vec_free (ops);
-  vec_free (err);
+  vec_free (chained_ops);
+  vec_free (chunks);
   vec_free (rv);
-  vec_free (s);
   return 0;
 }
 
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c
index 8d523ae..ef6371a 100644
@@ -74,14 +74,24 @@ format_vnet_crypto_handlers (u8 * s, va_list * args)
       od = cm->opt_data + id;
       if (first == 0)
         s = format (s, "\n%U", format_white_space, indent);
-      s = format (s, "%-20U%-20U", format_vnet_crypto_op_type, od->type,
-                 format_vnet_crypto_engine, od->active_engine_index,s);
+      s = format (s, "%-22U%-20U", format_vnet_crypto_op_type, od->type, 0,
+                 format_vnet_crypto_engine, od->active_engine_index_simple,s);
 
       vec_foreach (e, cm->engines)
        {
          if (e->ops_handlers[id] != 0)
            s = format (s, "%U ", format_vnet_crypto_engine, e - cm->engines);
        }
+
+      s = format (s, "\n%U", format_white_space, indent);
+      s = format (s, "%-22U%-20U", format_vnet_crypto_op_type, od->type, 1,
+                  format_vnet_crypto_engine,
+                  od->active_engine_index_chained);
+      vec_foreach (e, cm->engines)
+       {
+         if (e->chained_ops_handlers[id] != 0)
+           s = format (s, "%U ", format_vnet_crypto_engine, e - cm->engines);
+       }
       first = 0;
     }
   return s;
@@ -98,7 +108,7 @@ show_crypto_handlers_command_fn (vlib_main_t * vm,
   if (unformat_user (input, unformat_line_input, line_input))
     unformat_free (line_input);
 
-  vlib_cli_output (vm, "%-20s%-20s%-20s%s", "Algo", "Type", "Active",
+  vlib_cli_output (vm, "%-20s%-22s%-20s%s", "Algo", "Type", "Active",
                   "Candidates");
 
   for (i = 0; i < VNET_CRYPTO_N_ALGS; i++)
@@ -128,6 +138,7 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
   char **args = 0, *s, **arg, *engine = 0;
   int all = 0;
   clib_error_t *error = 0;
+  crypto_op_class_type_t oct = CRYPTO_OP_BOTH;
 
   if (!unformat_user (input, unformat_line_input, line_input))
     return 0;
@@ -136,6 +147,12 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
     {
       if (unformat (line_input, "all"))
        all = 1;
+      else if (unformat (line_input, "simple"))
+       oct = CRYPTO_OP_SIMPLE;
+      else if (unformat (line_input, "chained"))
+       oct = CRYPTO_OP_CHAINED;
+      else if (unformat (line_input, "both"))
+       oct = CRYPTO_OP_BOTH;
       else if (unformat (line_input, "%s", &s))
        vec_add1 (args, s);
       else
@@ -163,7 +180,7 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
       hash_foreach_mem (key, value, cm->alg_index_by_name,
       ({
         (void) value;
-        rc += vnet_crypto_set_handler (key, engine);
+        rc += vnet_crypto_set_handler2 (key, engine, oct);
       }));
       /* *INDENT-ON* */
 
@@ -174,7 +191,7 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
     {
       vec_foreach (arg, args)
       {
-       rc = vnet_crypto_set_handler (arg[0], engine);
+       rc = vnet_crypto_set_handler2 (arg[0], engine, oct);
        if (rc)
          {
            vlib_cli_output (vm, "failed to set engine %s for %s!",
@@ -195,7 +212,8 @@ done:
 VLIB_CLI_COMMAND (set_crypto_handler_command, static) =
 {
   .path = "set crypto handler",
-  .short_help = "set crypto handler cipher [cipher2 cipher3 ...] engine",
+  .short_help = "set crypto handler cipher [cipher2 cipher3 ...] engine"
+    " [simple|chained]",
   .function = set_crypto_handler_command_fn,
 };
 /* *INDENT-ON* */
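
A usage illustration (not part of the patch; the algorithm and engine names
are examples) of the extended command, which can now pin the simple and
chained handler classes independently:

  vpp# set crypto handler aes-128-gcm openssl chained
  vpp# set crypto handler all openssl simple
  vpp# show crypto handlers
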
diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c
index 6cd1210..4458bfc 100644
 
 vnet_crypto_main_t crypto_main;
 
+static_always_inline void
+crypto_set_op_status (vnet_crypto_op_t * ops[], u32 n_ops, int status)
+{
+  while (n_ops--)
+    {
+      ops[0]->status = status;
+      ops++;
+    }
+}
+
 static_always_inline u32
 vnet_crypto_process_ops_call_handler (vlib_main_t * vm,
                                      vnet_crypto_main_t * cm,
                                      vnet_crypto_op_id_t opt,
-                                     vnet_crypto_op_t * ops[], u32 n_ops)
+                                     vnet_crypto_op_t * ops[],
+                                     vnet_crypto_op_chunk_t * chunks,
+                                     u32 n_ops)
 {
+  u32 rv = 0;
   if (n_ops == 0)
     return 0;
 
-  if (cm->ops_handlers[opt] == 0)
+  if (chunks)
     {
-      while (n_ops--)
-       {
-         ops[0]->status = VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER;
-         ops++;
-       }
-      return 0;
-    }
 
-  return (cm->ops_handlers[opt]) (vm, ops, n_ops);
+      if (cm->chained_ops_handlers[opt] == 0)
+       crypto_set_op_status (ops, n_ops,
+                             VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER);
+      else
+       rv = (cm->chained_ops_handlers[opt]) (vm, ops, chunks, n_ops);
+    }
+  else
+    {
+      if (cm->ops_handlers[opt] == 0)
+       crypto_set_op_status (ops, n_ops,
+                             VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER);
+      else
+       rv = (cm->ops_handlers[opt]) (vm, ops, n_ops);
+    }
+  return rv;
 }
 
 
-u32
-vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops)
+static_always_inline u32
+vnet_crypto_process_ops_inline (vlib_main_t * vm, vnet_crypto_op_t ops[],
+                               vnet_crypto_op_chunk_t * chunks, u32 n_ops)
 {
   vnet_crypto_main_t *cm = &crypto_main;
   const int op_q_size = VLIB_FRAME_SIZE;
@@ -61,7 +82,8 @@ vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops)
       if (current_op_type != opt || n_op_queue >= op_q_size)
        {
          rv += vnet_crypto_process_ops_call_handler (vm, cm, current_op_type,
-                                                     op_queue, n_op_queue);
+                                                     op_queue, chunks,
+                                                     n_op_queue);
          n_op_queue = 0;
          current_op_type = opt;
        }
@@ -70,10 +92,23 @@ vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops)
     }
 
   rv += vnet_crypto_process_ops_call_handler (vm, cm, current_op_type,
-                                             op_queue, n_op_queue);
+                                             op_queue, chunks, n_op_queue);
   return rv;
 }
 
+u32
+vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops)
+{
+  return vnet_crypto_process_ops_inline (vm, ops, 0, n_ops);
+}
+
+u32
+vnet_crypto_process_chained_ops (vlib_main_t * vm, vnet_crypto_op_t ops[],
+                                vnet_crypto_op_chunk_t * chunks, u32 n_ops)
+{
+  return vnet_crypto_process_ops_inline (vm, ops, chunks, n_ops);
+}
+
 u32
 vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
                             char *desc)
@@ -91,13 +126,40 @@ vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
   return p - cm->engines;
 }
 
+static_always_inline void
+crypto_set_active_engine (vnet_crypto_op_data_t * od,
+                         vnet_crypto_op_id_t id, u32 ei,
+                         crypto_op_class_type_t oct)
+{
+  vnet_crypto_main_t *cm = &crypto_main;
+  vnet_crypto_engine_t *ce = vec_elt_at_index (cm->engines, ei);
+
+  if (oct == CRYPTO_OP_BOTH || oct == CRYPTO_OP_CHAINED)
+    {
+      if (ce->chained_ops_handlers[id])
+       {
+         od->active_engine_index_chained = ei;
+         cm->chained_ops_handlers[id] = ce->chained_ops_handlers[id];
+       }
+    }
+
+  if (oct == CRYPTO_OP_BOTH || oct == CRYPTO_OP_SIMPLE)
+    {
+      if (ce->ops_handlers[id])
+       {
+         od->active_engine_index_simple = ei;
+         cm->ops_handlers[id] = ce->ops_handlers[id];
+       }
+    }
+}
+
 int
-vnet_crypto_set_handler (char *alg_name, char *engine)
+vnet_crypto_set_handler2 (char *alg_name, char *engine,
+                         crypto_op_class_type_t oct)
 {
   uword *p;
   vnet_crypto_main_t *cm = &crypto_main;
   vnet_crypto_alg_data_t *ad;
-  vnet_crypto_engine_t *ce;
   int i;
 
   p = hash_get_mem (cm->alg_index_by_name, alg_name);
@@ -110,20 +172,15 @@ vnet_crypto_set_handler (char *alg_name, char *engine)
   if (!p)
     return -1;
 
-  ce = vec_elt_at_index (cm->engines, p[0]);
-
-  for (i = 0; i < VNET_CRYPTO_OP_N_TYPES; i++)
+  for (i = 0; i < VNET_CRYPTO_OP_N_TYPES; i += 2)
     {
       vnet_crypto_op_data_t *od;
       vnet_crypto_op_id_t id = ad->op_by_type[i];
       if (id == 0)
        continue;
+
       od = cm->opt_data + id;
-      if (ce->ops_handlers[id])
-       {
-         od->active_engine_index = p[0];
-         cm->ops_handlers[id] = ce->ops_handlers[id];
-       }
+      crypto_set_active_engine (od, id, p[0], oct);
     }
 
   return 0;
@@ -138,33 +195,77 @@ vnet_crypto_is_set_handler (vnet_crypto_alg_t alg)
 }
 
 void
-vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index,
-                                 vnet_crypto_op_id_t opt,
-                                 vnet_crypto_ops_handler_t * fn)
+vnet_crypto_register_ops_handler_inline (vlib_main_t * vm, u32 engine_index,
+                                        vnet_crypto_op_id_t opt,
+                                        vnet_crypto_ops_handler_t * fn,
+                                        vnet_crypto_chained_ops_handler_t *
+                                        cfn)
 {
   vnet_crypto_main_t *cm = &crypto_main;
   vnet_crypto_engine_t *ae, *e = vec_elt_at_index (cm->engines, engine_index);
   vnet_crypto_op_data_t *otd = cm->opt_data + opt;
   vec_validate_aligned (cm->ops_handlers, VNET_CRYPTO_N_OP_IDS - 1,
                        CLIB_CACHE_LINE_BYTES);
-  e->ops_handlers[opt] = fn;
+  vec_validate_aligned (cm->chained_ops_handlers, VNET_CRYPTO_N_OP_IDS - 1,
+                       CLIB_CACHE_LINE_BYTES);
 
-  if (otd->active_engine_index == ~0)
+  if (fn)
     {
-      otd->active_engine_index = engine_index;
-      cm->ops_handlers[opt] = fn;
-      return;
+      e->ops_handlers[opt] = fn;
+      if (otd->active_engine_index_simple == ~0)
+       {
+         otd->active_engine_index_simple = engine_index;
+         cm->ops_handlers[opt] = fn;
+       }
+
+      ae = vec_elt_at_index (cm->engines, otd->active_engine_index_simple);
+      if (ae->priority < e->priority)
+       crypto_set_active_engine (otd, opt, engine_index, CRYPTO_OP_SIMPLE);
     }
-  ae = vec_elt_at_index (cm->engines, otd->active_engine_index);
-  if (ae->priority < e->priority)
+
+  if (cfn)
     {
-      otd->active_engine_index = engine_index;
-      cm->ops_handlers[opt] = fn;
+      e->chained_ops_handlers[opt] = cfn;
+      if (otd->active_engine_index_chained == ~0)
+       {
+         otd->active_engine_index_chained = engine_index;
+         cm->chained_ops_handlers[opt] = cfn;
+       }
+
+      ae = vec_elt_at_index (cm->engines, otd->active_engine_index_chained);
+      if (ae->priority < e->priority)
+       crypto_set_active_engine (otd, opt, engine_index, CRYPTO_OP_CHAINED);
     }
 
   return;
 }
 
+void
+vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index,
+                                 vnet_crypto_op_id_t opt,
+                                 vnet_crypto_ops_handler_t * fn)
+{
+  vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, fn, 0);
+}
+
+void
+vnet_crypto_register_chained_ops_handler (vlib_main_t * vm, u32 engine_index,
+                                         vnet_crypto_op_id_t opt,
+                                         vnet_crypto_chained_ops_handler_t *
+                                         fn)
+{
+  vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, 0, fn);
+}
+
+void
+vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index,
+                                  vnet_crypto_op_id_t opt,
+                                  vnet_crypto_ops_handler_t * fn,
+                                  vnet_crypto_chained_ops_handler_t * cfn)
+{
+  vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, fn, cfn);
+}
+
 void
 vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
                                  vnet_crypto_key_handler_t * key_handler)
@@ -253,10 +354,13 @@ vnet_crypto_init_cipher_data (vnet_crypto_alg_t alg, vnet_crypto_op_id_t eid,
 {
   vnet_crypto_op_type_t eopt, dopt;
   vnet_crypto_main_t *cm = &crypto_main;
+
   cm->algs[alg].name = name;
   cm->opt_data[eid].alg = cm->opt_data[did].alg = alg;
-  cm->opt_data[eid].active_engine_index = ~0;
-  cm->opt_data[did].active_engine_index = ~0;
+  cm->opt_data[eid].active_engine_index_simple = ~0;
+  cm->opt_data[did].active_engine_index_simple = ~0;
+  cm->opt_data[eid].active_engine_index_chained = ~0;
+  cm->opt_data[did].active_engine_index_chained = ~0;
   if (is_aead)
     {
       eopt = VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT;
@@ -282,7 +386,8 @@ vnet_crypto_init_hmac_data (vnet_crypto_alg_t alg,
   cm->algs[alg].name = name;
   cm->algs[alg].op_by_type[VNET_CRYPTO_OP_TYPE_HMAC] = id;
   cm->opt_data[id].alg = alg;
-  cm->opt_data[id].active_engine_index = ~0;
+  cm->opt_data[id].active_engine_index_simple = ~0;
+  cm->opt_data[id].active_engine_index_chained = ~0;
   cm->opt_data[id].type = VNET_CRYPTO_OP_TYPE_HMAC;
   hash_set_mem (cm->alg_index_by_name, name, alg);
 }
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 626e71d..f89ecf9 100644
@@ -116,38 +116,82 @@ typedef enum
 } vnet_crypto_op_id_t;
 /* *INDENT-ON* */
 
+typedef enum
+{
+  CRYPTO_OP_SIMPLE,
+  CRYPTO_OP_CHAINED,
+  CRYPTO_OP_BOTH,
+} crypto_op_class_type_t;
+
 typedef struct
 {
   char *name;
   vnet_crypto_op_id_t op_by_type[VNET_CRYPTO_OP_N_TYPES];
 } vnet_crypto_alg_data_t;
 
+typedef struct
+{
+  u8 *src;
+  u8 *dst;
+  u32 len;
+} vnet_crypto_op_chunk_t;
+
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+  uword user_data;
   vnet_crypto_op_id_t op:16;
   vnet_crypto_op_status_t status:8;
   u8 flags;
 #define VNET_CRYPTO_OP_FLAG_INIT_IV (1 << 0)
 #define VNET_CRYPTO_OP_FLAG_HMAC_CHECK (1 << 1)
-  u32 key_index;
-  u32 len;
+#define VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS (1 << 2)
+
+  union
+  {
+    u8 digest_len;
+    u8 tag_len;
+  };
   u16 aad_len;
-  u8 digest_len, tag_len;
+
+  union
+  {
+    struct
+    {
+      u8 *src;
+      u8 *dst;
+    };
+
+    /* valid if VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS is set */
+    u16 n_chunks;
+  };
+
+  union
+  {
+    u32 len;
+    /* valid if VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS is set */
+    u32 chunk_index;
+  };
+
+  u32 key_index;
   u8 *iv;
-  u8 *src;
-  u8 *dst;
   u8 *aad;
-  u8 *tag;
-  u8 *digest;
-  uword user_data;
+
+  union
+  {
+    u8 *tag;
+    u8 *digest;
+  };
 } vnet_crypto_op_t;
 
+STATIC_ASSERT_SIZEOF (vnet_crypto_op_t, CLIB_CACHE_LINE_BYTES);
+
 typedef struct
 {
   vnet_crypto_op_type_t type;
   vnet_crypto_alg_t alg;
-  u32 active_engine_index;
+  u32 active_engine_index_simple;
+  u32 active_engine_index_chained;
 } vnet_crypto_op_data_t;
 
 typedef struct
@@ -158,6 +202,11 @@ typedef struct
 
 typedef u32 vnet_crypto_key_index_t;
 
+typedef u32 (vnet_crypto_chained_ops_handler_t) (vlib_main_t * vm,
+                                                vnet_crypto_op_t * ops[],
+                                                vnet_crypto_op_chunk_t *
+                                                chunks, u32 n_ops);
+
 typedef u32 (vnet_crypto_ops_handler_t) (vlib_main_t * vm,
                                         vnet_crypto_op_t * ops[], u32 n_ops);
 
@@ -171,6 +220,18 @@ u32 vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
 void vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index,
                                       vnet_crypto_op_id_t opt,
                                       vnet_crypto_ops_handler_t * oph);
+
+void vnet_crypto_register_chained_ops_handler (vlib_main_t * vm,
+                                              u32 engine_index,
+                                              vnet_crypto_op_id_t opt,
+                                              vnet_crypto_chained_ops_handler_t
+                                              * oph);
+void vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index,
+                                       vnet_crypto_op_id_t opt,
+                                       vnet_crypto_ops_handler_t * fn,
+                                       vnet_crypto_chained_ops_handler_t *
+                                       cfn);
+
 void vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
                                       vnet_crypto_key_handler_t * keyh);
 
@@ -181,6 +242,8 @@ typedef struct
   int priority;
   vnet_crypto_key_handler_t *key_op_handler;
   vnet_crypto_ops_handler_t *ops_handlers[VNET_CRYPTO_N_OP_IDS];
+    vnet_crypto_chained_ops_handler_t
+    * chained_ops_handlers[VNET_CRYPTO_N_OP_IDS];
 } vnet_crypto_engine_t;
 
 typedef struct
@@ -188,6 +251,7 @@ typedef struct
   vnet_crypto_alg_data_t *algs;
   vnet_crypto_thread_t *threads;
   vnet_crypto_ops_handler_t **ops_handlers;
+  vnet_crypto_chained_ops_handler_t **chained_ops_handlers;
   vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
   vnet_crypto_engine_t *engines;
   vnet_crypto_key_t *keys;
@@ -200,10 +264,14 @@ extern vnet_crypto_main_t crypto_main;
 u32 vnet_crypto_submit_ops (vlib_main_t * vm, vnet_crypto_op_t ** jobs,
                            u32 n_jobs);
 
+u32 vnet_crypto_process_chained_ops (vlib_main_t * vm, vnet_crypto_op_t ops[],
+                                    vnet_crypto_op_chunk_t * chunks,
+                                    u32 n_ops);
 u32 vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[],
                             u32 n_ops);
 
-int vnet_crypto_set_handler (char *ops_handler_name, char *engine);
+int vnet_crypto_set_handler2 (char *ops_handler_name, char *engine,
+                             crypto_op_class_type_t oct);
 int vnet_crypto_is_set_handler (vnet_crypto_alg_t alg);
 
 u32 vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg,
@@ -225,6 +293,7 @@ vnet_crypto_op_init (vnet_crypto_op_t * op, vnet_crypto_op_id_t type)
   op->op = type;
   op->flags = 0;
   op->key_index = ~0;
+  op->n_chunks = 0;
 }
 
 static_always_inline vnet_crypto_op_type_t
@@ -243,6 +312,12 @@ vnet_crypto_get_key (vnet_crypto_key_index_t index)
   return vec_elt_at_index (cm->keys, index);
 }
 
+static_always_inline int
+vnet_crypto_set_handler (char *alg_name, char *engine)
+{
+  return vnet_crypto_set_handler2 (alg_name, engine, CRYPTO_OP_BOTH);
+}
+
 #endif /* included_vnet_crypto_crypto_h */
 
 /*
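
To make the overlaid unions concrete, a sketch of how a handler tells the
two layouts apart (mirroring the loops added in the OpenSSL engine;
consume() is an illustrative stand-in):

  static void consume (u8 * src, u8 * dst, u32 len);

  static void
  walk_op_data (vnet_crypto_op_t * op, vnet_crypto_op_chunk_t * chunks)
  {
    if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
      {
        /* chunk_index/n_chunks overlay len and src/dst in this mode */
        vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
        u16 j;
        for (j = 0; j < op->n_chunks; j++, chp++)
          consume (chp->src, chp->dst, chp->len);
      }
    else
      consume (op->src, op->dst, op->len);  /* flat op: legacy layout */
  }
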
diff --git a/src/vnet/crypto/format.c b/src/vnet/crypto/format.c
index 715941e..3210ab9 100644
@@ -54,7 +54,7 @@ format_vnet_crypto_op (u8 * s, va_list * args)
   vnet_crypto_op_id_t op = va_arg (*args, int);        // vnet_crypto_op_id_t);
   vnet_crypto_op_data_t *otd = cm->opt_data + op;
 
-  return format (s, "%U-%U", format_vnet_crypto_op_type, otd->type,
+  return format (s, "%U-%U", format_vnet_crypto_op_type, otd->type, 0,
                 format_vnet_crypto_alg, otd->alg);
 }
 
@@ -62,6 +62,7 @@ u8 *
 format_vnet_crypto_op_type (u8 * s, va_list * args)
 {
   vnet_crypto_op_type_t opt = va_arg (*args, vnet_crypto_op_type_t);
+  int is_chained = va_arg (*args, int);
   char *strings[] = {
 #define _(n, s) [VNET_CRYPTO_OP_TYPE_##n] = s,
     foreach_crypto_op_type
@@ -71,7 +72,7 @@ format_vnet_crypto_op_type (u8 * s, va_list * args)
   if (opt >= VNET_CRYPTO_OP_N_TYPES)
     return format (s, "unknown");
 
-  return format (s, "%s", strings[opt]);
+  return format (s, "%s%s", strings[opt], is_chained ? "-chained" : "");
 }
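Since format_vnet_crypto_op_type now consumes an extra va_arg, every %U caller must pass the is_chained flag explicitly, as the caller above now does with 0; a chained call site would look like:

    s = format (s, "%U", format_vnet_crypto_op_type, otd->type,
                1 /* is_chained */);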
 
 u8 *
index 56724c0..f29dacb 100644 (file)
@@ -50,7 +50,7 @@ typedef enum
  _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
  _(REPLAY, "SA replayed packet")                                \
  _(RUNT, "undersized packet")                                   \
- _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
+ _(NO_BUFFERS, "no buffers (packet dropped)")                   \
  _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
 _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")     \
  _(TUN_NO_PROTO, "no tunnel protocol")                          \
@@ -114,12 +114,130 @@ typedef struct
   i16 current_data;
   i16 current_length;
   u16 hdr_sz;
+  vlib_buffer_t *lb;
+  u32 free_buffer_index;
+  u8 icv_removed;
 } esp_decrypt_packet_data_t;
 
-STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
+STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 5 * sizeof (u64));
 
 #define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
 
+static_always_inline void
+esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+                vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
+                int e)
+{
+  vnet_crypto_op_t *op = ops;
+  u32 n_fail, n_ops = vec_len (ops);
+
+  if (n_ops == 0)
+    return;
+
+  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
+
+  while (n_fail)
+    {
+      ASSERT (op - ops < n_ops);
+      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+       {
+         u32 err, bi = op->user_data;
+         if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
+           err = e;
+         else
+           err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+         b[bi]->error = node->errors[err];
+         nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+         n_fail--;
+       }
+      op++;
+    }
+}
+
+static_always_inline void
+esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+                        vnet_crypto_op_t * ops, vlib_buffer_t * b[],
+                        u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
+{
+
+  vnet_crypto_op_t *op = ops;
+  u32 n_fail, n_ops = vec_len (ops);
+
+  if (n_ops == 0)
+    return;
+
+  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
+
+  while (n_fail)
+    {
+      ASSERT (op - ops < n_ops);
+      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+       {
+         u32 err, bi = op->user_data;
+         if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
+           err = e;
+         else
+           err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+         b[bi]->error = node->errors[err];
+         nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+         n_fail--;
+       }
+      op++;
+    }
+}
+
+always_inline void
+esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
+                u16 tail)
+{
+  vlib_buffer_t *before_last = b;
+
+  if (last->current_length > tail)
+    {
+      last->current_length -= tail;
+      return;
+    }
+  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);
+
+  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
+    {
+      before_last = b;
+      b = vlib_get_buffer (vm, b->next_buffer);
+    }
+  before_last->current_length -= tail - last->current_length;
+  vlib_buffer_free_one (vm, before_last->next_buffer);
+  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+}
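A worked example: with tail = 18 and a last buffer holding only 10 bytes, the fast path does not apply, so the loop walks to the second-to-last buffer, trims the remaining 18 - 10 = 8 bytes from its length, frees the last buffer and clears the NEXT_PRESENT flag to unlink it.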
+
+/* The ICV is split across the last two buffers, so move it to the last
+   buffer and return a pointer to it */
+static_always_inline u8 *
+esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
+             esp_decrypt_packet_data_t * pd, u16 icv_sz)
+{
+  vlib_buffer_t *before_last, *bp;
+  u16 last_sz = pd->lb->current_length;
+  u16 first_sz = icv_sz - last_sz;
+
+  bp = before_last = first;
+  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
+    {
+      before_last = bp;
+      bp = vlib_get_buffer (vm, bp->next_buffer);
+    }
+
+  u8 *lb_curr = vlib_buffer_get_current (pd->lb);
+  memmove (lb_curr + first_sz, lb_curr, last_sz);
+  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
+                   first_sz);
+  before_last->current_length -= first_sz;
+  pd->lb = before_last;
+  pd->icv_removed = 1;
+  pd->free_buffer_index = before_last->next_buffer;
+  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+  return lb_curr;
+}
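A worked example: with icv_sz = 16 and last_sz = 6, the missing first_sz = 10 ICV bytes sit at the tail of the second-to-last buffer. The 6 bytes already in the last buffer are shifted up by 10, the 10 tail bytes are copied in front of them, and the second-to-last buffer is shortened by 10 and becomes the new pd->lb. The detached buffer, now holding the complete 16-byte ICV, stays alive through pd->free_buffer_index (its pre-data area may later be reused for the ESN) and is freed in the post-decrypt round.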
+
 always_inline uword
 esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
@@ -131,7 +249,7 @@ esp_decrypt_inline (vlib_main_t * vm,
   u16 len;
   ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
   u32 *from = vlib_frame_vector_args (from_frame);
-  u32 n, n_left = from_frame->n_vectors;
+  u32 n_left = from_frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
   u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
   esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
@@ -139,10 +257,16 @@ esp_decrypt_inline (vlib_main_t * vm,
   u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
   const u8 esp_sz = sizeof (esp_header_t);
   ipsec_sa_t *sa0 = 0;
+  vnet_crypto_op_chunk_t *ch;
+  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
+  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
 
   vlib_get_buffers (vm, from, b, n_left);
   vec_reset_length (ptd->crypto_ops);
   vec_reset_length (ptd->integ_ops);
+  vec_reset_length (ptd->chained_crypto_ops);
+  vec_reset_length (ptd->chained_integ_ops);
+  vec_reset_length (ptd->chunks);
   clib_memset_u16 (nexts, -1, n_left);
 
   while (n_left > 0)
@@ -159,9 +283,10 @@ esp_decrypt_inline (vlib_main_t * vm,
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
        }
 
-      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
+      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
+      if (n_bufs == 0)
        {
-         b[0]->error = node->errors[ESP_DECRYPT_ERROR_CHAINED_BUFFER];
+         b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
@@ -205,10 +330,26 @@ esp_decrypt_inline (vlib_main_t * vm,
       pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
       payload = b[0]->data + pd->current_data;
       pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
+      pd->free_buffer_index = 0;
+      pd->icv_removed = 0;
+
+      pd->lb = b[0];
+      if (n_bufs > 1)
+       {
+         /* find last buffer in the chain */
+         while (pd->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+           pd->lb = vlib_get_buffer (vm, pd->lb->next_buffer);
+
+         crypto_ops = &ptd->chained_crypto_ops;
+         integ_ops = &ptd->chained_integ_ops;
+       }
+      pd->current_length = b[0]->current_length;
 
       /* we need 4 extra bytes for HMAC calculation when ESN are used */
-      if (ipsec_sa_is_set_USE_ESN (sa0) && pd->icv_sz &&
-         (pd->current_data + pd->current_length + 4 > buffer_data_size))
+      /* for chained buffers the ESN can be processed as a separate chunk */
+      if (pd->lb == b[0] && ipsec_sa_is_set_USE_ESN (sa0) && cpd.icv_sz &&
+         (pd->lb->current_data + pd->lb->current_length + 4
+          > buffer_data_size))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE];
          next[0] = ESP_DECRYPT_NEXT_DROP;
@@ -232,12 +373,12 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       len = pd->current_length - cpd.icv_sz;
       current_sa_pkts += 1;
-      current_sa_bytes += pd->current_length;
+      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);
 
       if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        {
          vnet_crypto_op_t *op;
-         vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
+         vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
 
          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->key_index = sa0->integ_key_index;
@@ -247,7 +388,100 @@ esp_decrypt_inline (vlib_main_t * vm,
          op->digest = payload + len;
          op->digest_len = cpd.icv_sz;
          op->len = len;
-         if (ipsec_sa_is_set_USE_ESN (sa0))
+
+         if (pd->lb != b[0])
+           {
+             /* buffer is chained */
+             vlib_buffer_t *cb = b[0];
+             op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+             op->chunk_index = vec_len (ptd->chunks);
+
+             if (pd->lb->current_length < cpd.icv_sz)
+               op->digest = esp_move_icv (vm, b[0], pd, cpd.icv_sz);
+             else
+               op->digest = vlib_buffer_get_tail (pd->lb) - cpd.icv_sz;
+
+             vec_add2 (ptd->chunks, ch, 1);
+             ch->len = pd->current_length;
+             ch->src = payload;
+             cb = vlib_get_buffer (vm, cb->next_buffer);
+             op->n_chunks = 1;
+             while (1)
+               {
+                 vec_add2 (ptd->chunks, ch, 1);
+                 op->n_chunks += 1;
+                 ch->src = vlib_buffer_get_current (cb);
+                 if (pd->lb == cb)
+                   {
+                     if (pd->icv_removed)
+                       ch->len = cb->current_length;
+                     else
+                       ch->len = cb->current_length - cpd.icv_sz;
+                     if (ipsec_sa_is_set_USE_ESN (sa0))
+                       {
+                         u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
+                         u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
+                         u8 *esn;
+                         vlib_buffer_t *tmp_b;
+                         u16 space_left = vlib_buffer_space_left_at_end
+                           (vm, pd->lb);
+                         if (space_left < sz)
+                           {
+                             if (pd->icv_removed)
+                               {
+                                 /* use pre-data area from the last buffer
+                                    that was removed from the chain */
+                                 tmp_b =
+                                   vlib_get_buffer (vm,
+                                                    pd->free_buffer_index);
+                                 esn = tmp_b->data - sz;
+                               }
+                             else
+                               {
+                                 /* no space, need to allocate new buffer */
+                                 u32 tmp_bi = 0;
+                                 vlib_buffer_alloc (vm, &tmp_bi, 1);
+                                 tmp_b = vlib_get_buffer (vm, tmp_bi);
+                                 esn = tmp_b->data;
+                                 pd->free_buffer_index = tmp_bi;
+                               }
+                             clib_memcpy_fast (esn, &seq_hi, sz);
+
+                             vec_add2 (ptd->chunks, ch, 1);
+                             op->n_chunks += 1;
+                             ch->src = esn;
+                             ch->len = sz;
+                           }
+                         else
+                           {
+                             if (pd->icv_removed)
+                               {
+                                 clib_memcpy_fast (vlib_buffer_get_tail
+                                                   (pd->lb), &seq_hi, sz);
+                               }
+                             else
+                               {
+                                 clib_memcpy_fast (tmp, op->digest,
+                                                   ESP_MAX_ICV_SIZE);
+                                 clib_memcpy_fast (op->digest, &seq_hi, sz);
+                                 clib_memcpy_fast (op->digest + sz, tmp,
+                                                   ESP_MAX_ICV_SIZE);
+                                 op->digest += sz;
+                               }
+                             ch->len += sz;
+                           }
+                       }
+                   }
+                 else
+                   ch->len = cb->current_length;
+
+                 if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+                   break;
+
+                 cb = vlib_get_buffer (vm, cb->next_buffer);
+               }
+           }
+         else if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              /* shift ICV by 4 bytes to insert ESN */
              u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
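The chained ESN handling above covers three cases for feeding the 4-byte seq_hi into the integrity check: (1) the last buffer has tail space, so seq_hi is appended there and the last chunk grows by 4 bytes (shifting the ICV aside first if it is still in place); (2) there is no tail space but the ICV was already moved to a detached buffer, whose pre-data area is borrowed for seq_hi as one extra chunk; (3) neither holds, so a scratch buffer is allocated just for those 4 bytes and released later via pd->free_buffer_index.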
@@ -263,10 +497,10 @@ esp_decrypt_inline (vlib_main_t * vm,
       payload += esp_sz;
       len -= esp_sz;
 
-      if (sa0->crypto_enc_op_id != VNET_CRYPTO_OP_NONE)
+      if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
        {
          vnet_crypto_op_t *op;
-         vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
+         vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
          op->key_index = sa0->crypto_key_index;
          op->iv = payload;
@@ -303,6 +537,61 @@ esp_decrypt_inline (vlib_main_t * vm,
          op->src = op->dst = payload += cpd.iv_sz;
          op->len = len - cpd.iv_sz;
          op->user_data = b - bufs;
+
+         if (pd->lb != b[0])
+           {
+             /* buffer is chained */
+             vlib_buffer_t *cb = b[0];
+             op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+             op->chunk_index = vec_len (ptd->chunks);
+             vec_add2 (ptd->chunks, ch, 1);
+             ch->len = len - cpd.iv_sz + cpd.icv_sz;
+             ch->src = ch->dst = payload;
+             cb = vlib_get_buffer (vm, cb->next_buffer);
+             op->n_chunks = 1;
+
+             while (1)
+               {
+                 vec_add2 (ptd->chunks, ch, 1);
+                 op->n_chunks += 1;
+                 ch->src = ch->dst = vlib_buffer_get_current (cb);
+                 if (pd->lb == cb)
+                   {
+                     if (ipsec_sa_is_set_IS_AEAD (sa0))
+                       {
+                         if (pd->lb->current_length < cpd.icv_sz)
+                           {
+                             op->tag =
+                               esp_move_icv (vm, b[0], pd, cpd.icv_sz);
+
+                             /* this chunk does not contain crypto data */
+                             op->n_chunks -= 1;
+
+                             /* and fix the previous chunk's length, as
+                                esp_move_icv may have changed it */
+                             ASSERT (op->n_chunks > 0);
+                             ch[-1].len = pd->lb->current_length;
+                             break;
+                           }
+                         else
+                           op->tag =
+                             vlib_buffer_get_tail (pd->lb) - cpd.icv_sz;
+                       }
+
+                     if (pd->icv_removed)
+                       ch->len = cb->current_length;
+                     else
+                       ch->len = cb->current_length - cpd.icv_sz;
+                   }
+                 else
+                   ch->len = cb->current_length;
+
+                 if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+                   break;
+
+                 cb = vlib_get_buffer (vm, cb->next_buffer);
+               }
+           }
        }
 
       /* next */
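In the chained crypto path above, AEAD modes keep the authentication tag out of the chunk list: op->tag points at the last icv_sz bytes, with esp_move_icv invoked when they straddle two buffers. When the last buffer held nothing but the tag, its chunk is dropped (op->n_chunks -= 1) and the previous chunk's length is re-read from the new pd->lb, since esp_move_icv shortens that buffer.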
@@ -318,52 +607,15 @@ esp_decrypt_inline (vlib_main_t * vm,
                                     current_sa_index, current_sa_pkts,
                                     current_sa_bytes);
 
-  if ((n = vec_len (ptd->integ_ops)))
-    {
-      vnet_crypto_op_t *op = ptd->integ_ops;
-      n -= vnet_crypto_process_ops (vm, op, n);
-      while (n)
-       {
-         ASSERT (op - ptd->integ_ops < vec_len (ptd->integ_ops));
-         if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
-           {
-             u32 err, bi = op->user_data;
-             if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
-               err = ESP_DECRYPT_ERROR_INTEG_ERROR;
-             else
-               err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-             bufs[bi]->error = node->errors[err];
-             nexts[bi] = ESP_DECRYPT_NEXT_DROP;
-             n--;
-           }
-         op++;
-       }
-    }
-  if ((n = vec_len (ptd->crypto_ops)))
-    {
-      vnet_crypto_op_t *op = ptd->crypto_ops;
-      n -= vnet_crypto_process_ops (vm, op, n);
-      while (n)
-       {
-         ASSERT (op - ptd->crypto_ops < vec_len (ptd->crypto_ops));
-         if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
-           {
-             u32 err, bi;
-
-             bi = op->user_data;
-
-             if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
-               err = ESP_DECRYPT_ERROR_DECRYPTION_FAILED;
-             else
-               err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+                  ESP_DECRYPT_ERROR_INTEG_ERROR);
+  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
+                          ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
 
-             bufs[bi]->error = node->errors[err];
-             nexts[bi] = ESP_DECRYPT_NEXT_DROP;
-             n--;
-           }
-         op++;
-       }
-    }
+  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+                  ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
+  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
+                          ptd->chunks, ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
 
  /* Post decryption round - adjust packet data start and length and next
     node */
@@ -430,11 +682,51 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       ipsec_sa_anti_replay_advance (sa0, pd->seq);
 
-      esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data +
-                                         pd->current_length - sizeof (*f) -
-                                         pd->icv_sz);
+      u8 pad_length = 0, next_header = 0;
+      u16 icv_sz = pd->icv_removed ? 0 : pd->icv_sz;
+
+      if (pd->free_buffer_index)
+       vlib_buffer_free_one (vm, pd->free_buffer_index);
+
+      if (pd->lb->current_length < sizeof (esp_footer_t) + icv_sz)
+       {
+         /* the esp footer is either split across the last two buffers or
+          * contained entirely in the second-to-last buffer */
+
+         vlib_buffer_t *before_last = b[0], *bp = b[0];
+         while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
+           {
+             before_last = bp;
+             bp = vlib_get_buffer (vm, bp->next_buffer);
+           }
+         u8 *bt = vlib_buffer_get_tail (before_last);
+
+         if (pd->lb->current_length == icv_sz)
+           {
+             esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
+             pad_length = f->pad_length;
+             next_header = f->next_header;
+           }
+         else
+           {
+             pad_length = (bt - 1)[0];
+             next_header = ((u8 *) vlib_buffer_get_current (pd->lb))[0];
+           }
+       }
+      else
+       {
+         esp_footer_t *f =
+           (esp_footer_t *) (pd->lb->data + pd->lb->current_data +
+                             pd->lb->current_length - sizeof (esp_footer_t) -
+                             icv_sz);
+         pad_length = f->pad_length;
+         next_header = f->next_header;
+       }
+
       u16 adv = pd->iv_sz + esp_sz;
-      u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;
+      u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
+      u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
+      b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
 
       if ((pd->flags & tun_flags) == 0 && !is_tun)     /* transport mode */
        {
@@ -450,15 +742,16 @@ esp_decrypt_inline (vlib_main_t * vm,
            clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);
 
          b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
-         b[0]->current_length = pd->current_length + ip_hdr_sz - tail - adv;
+         b[0]->current_length = pd->current_length + ip_hdr_sz - adv;
+         esp_remove_tail (vm, b[0], pd->lb, tail);
 
          if (is_ip6)
            {
              ip6_header_t *ip6 = (ip6_header_t *) ip;
              u16 len = clib_net_to_host_u16 (ip6->payload_length);
-             len -= adv + tail;
+             len -= adv + tail_orig;
              ip6->payload_length = clib_host_to_net_u16 (len);
-             ip6->protocol = f->next_header;
+             ip6->protocol = next_header;
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
            }
          else
@@ -466,34 +759,36 @@ esp_decrypt_inline (vlib_main_t * vm,
              ip4_header_t *ip4 = (ip4_header_t *) ip;
              ip_csum_t sum = ip4->checksum;
              u16 len = clib_net_to_host_u16 (ip4->length);
-             len = clib_host_to_net_u16 (len - adv - tail - udp_sz);
-             sum = ip_csum_update (sum, ip4->protocol, f->next_header,
+             len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
+             sum = ip_csum_update (sum, ip4->protocol, next_header,
                                    ip4_header_t, protocol);
              sum = ip_csum_update (sum, ip4->length, len,
                                    ip4_header_t, length);
              ip4->checksum = ip_csum_fold (sum);
-             ip4->protocol = f->next_header;
+             ip4->protocol = next_header;
              ip4->length = len;
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
            }
        }
       else
        {
-         if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
+         if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
            {
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              b[0]->current_data = pd->current_data + adv;
-             b[0]->current_length = pd->current_length - adv - tail;
+             b[0]->current_length = pd->current_length - adv;
+             esp_remove_tail (vm, b[0], pd->lb, tail);
            }
-         else if (f->next_header == IP_PROTOCOL_IPV6)
+         else if (next_header == IP_PROTOCOL_IPV6)
            {
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              b[0]->current_data = pd->current_data + adv;
-             b[0]->current_length = pd->current_length - adv - tail;
+             b[0]->current_length = pd->current_length - adv;
+             esp_remove_tail (vm, b[0], pd->lb, tail);
            }
          else
            {
-             if (is_tun && f->next_header == IP_PROTOCOL_GRE)
+             if (is_tun && next_header == IP_PROTOCOL_GRE)
                {
                  gre_header_t *gre;
 
@@ -555,7 +850,7 @@ esp_decrypt_inline (vlib_main_t * vm,
                  itp = ipsec_tun_protect_get
                    (vnet_buffer (b[0])->ipsec.protect_index);
 
-                 if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
+                 if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
                    {
                      const ip4_header_t *ip4;
 
@@ -571,7 +866,7 @@ esp_decrypt_inline (vlib_main_t * vm,
                            node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                        }
                    }
-                 else if (f->next_header == IP_PROTOCOL_IPV6)
+                 else if (next_header == IP_PROTOCOL_IPV6)
                    {
                      const ip6_header_t *ip6;
 
@@ -618,7 +913,6 @@ esp_decrypt_inline (vlib_main_t * vm,
 
   vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
 
-  b = bufs;
   return n_left;
 }
 
index 3c2fdf4..2c4da5d 100644 (file)
@@ -42,7 +42,7 @@ typedef enum
  _(RX_PKTS, "ESP pkts received")                                \
  _(SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
  _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
+ _(NO_BUFFERS, "no buffers (packet dropped)")                   \
  _(NO_TRAILER_SPACE, "no trailer space (packet dropped)")
 
 typedef enum
@@ -92,21 +92,23 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
 static_always_inline u8 *
 esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz,
                        u16 * next, vlib_node_runtime_t * node,
-                       u16 buffer_data_size)
+                       u16 buffer_data_size, uword total_len)
 {
   static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
     0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
     0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00,
   };
 
-  u16 min_length = b->current_length + sizeof (esp_footer_t);
+  u16 min_length = total_len + sizeof (esp_footer_t);
   u16 new_length = round_pow2 (min_length, block_size);
   u8 pad_bytes = new_length - min_length;
   esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) +
-                                     new_length - sizeof (esp_footer_t));
+                                     b->current_length + pad_bytes);
+  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
 
-  if (b->current_data + new_length + icv_sz > buffer_data_size)
+  if (b->current_data + b->current_length + tail_sz > buffer_data_size)
     {
+      // TODO: allocate a new buffer for the trailer instead of dropping
       b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
       next[0] = ESP_ENCRYPT_NEXT_DROP;
       return 0;
@@ -120,7 +122,7 @@ esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz,
     }
 
   f->pad_length = pad_bytes;
-  b->current_length = new_length + icv_sz;
+  b->current_length += tail_sz;
   return &f->next_header;
 }
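A worked example of the new tail sizing, with illustrative values:

    uword total_len = 1501;                         /* whole chain length */
    u16 min_length = total_len + sizeof (esp_footer_t);        /* 1503 */
    u16 new_length = round_pow2 (min_length, 16 /* block */);  /* 1504 */
    u8 pad_bytes = new_length - min_length;                    /* 1    */
    u16 tail_sz = sizeof (esp_footer_t) + pad_bytes
                  + 16 /* icv_sz */;                           /* 19   */

Only tail_sz bytes are appended to the last buffer, so the space check is against the footer, padding and ICV alone rather than the full padded payload length.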
 
@@ -203,6 +205,34 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
   return len;
 }
 
+static_always_inline void
+esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+                        vnet_crypto_op_t * ops, vlib_buffer_t * b[],
+                        u16 * nexts, vnet_crypto_op_chunk_t * chunks)
+{
+  u32 n_fail, n_ops = vec_len (ops);
+  vnet_crypto_op_t *op = ops;
+
+  if (n_ops == 0)
+    return;
+
+  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
+
+  while (n_fail)
+    {
+      ASSERT (op - ops < n_ops);
+
+      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+       {
+         u32 bi = op->user_data;
+         b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
+         nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
+         n_fail--;
+       }
+      op++;
+    }
+}
+
 static_always_inline void
 esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
@@ -255,10 +285,17 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 current_sa_bytes = 0, spi = 0;
   u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
   ipsec_sa_t *sa0 = 0;
+  vnet_crypto_op_chunk_t *ch;
+  vlib_buffer_t *lb;
+  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
+  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
 
   vlib_get_buffers (vm, from, b, n_left);
   vec_reset_length (ptd->crypto_ops);
   vec_reset_length (ptd->integ_ops);
+  vec_reset_length (ptd->chained_crypto_ops);
+  vec_reset_length (ptd->chained_integ_ops);
+  vec_reset_length (ptd->chunks);
 
   while (n_left > 0)
     {
@@ -266,7 +303,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       dpo_id_t *dpo;
       esp_header_t *esp;
       u8 *payload, *next_hdr_ptr;
-      u16 payload_len;
+      u16 payload_len, payload_len_total, n_bufs;
       u32 hdr_len, config_index;
 
       if (n_left > 2)
@@ -329,13 +366,30 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          goto trace;
        }
 
-      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
+      lb = b[0];
+      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
+      if (n_bufs == 0)
        {
-         b[0]->error = node->errors[ESP_ENCRYPT_ERROR_CHAINED_BUFFER];
+         b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
          next[0] = ESP_ENCRYPT_NEXT_DROP;
          goto trace;
        }
 
+      if (n_bufs > 1)
+       {
+         crypto_ops = &ptd->chained_crypto_ops;
+         integ_ops = &ptd->chained_integ_ops;
+
+         /* find last buffer in the chain */
+         while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+           lb = vlib_get_buffer (vm, lb->next_buffer);
+       }
+      else
+       {
+         crypto_ops = &ptd->crypto_ops;
+         integ_ops = &ptd->integ_ops;
+       }
+
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
@@ -349,12 +403,16 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
+         next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz,
                                                 next, node,
-                                                buffer_data_size);
+                                                buffer_data_size,
+                                                vlib_buffer_length_in_chain
+                                                (vm, b[0]));
          if (!next_hdr_ptr)
            goto trace;
+         b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
+         payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
 
          /* ESP header */
          hdr_len += sizeof (*esp);
@@ -365,7 +423,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
            {
              hdr_len += sizeof (udp_header_t);
              esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
-                               payload_len + hdr_len);
+                               payload_len_total + hdr_len);
            }
 
          /* IP header */
@@ -378,7 +436,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
              *next_hdr_ptr = (is_ip6 ?
                               IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
-             len = payload_len + hdr_len - len;
+             len = payload_len_total + hdr_len - len;
              ip6->payload_length = clib_net_to_host_u16 (len);
            }
          else
@@ -390,7 +448,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
              *next_hdr_ptr = (is_ip6 ?
                               IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
-             len = payload_len + hdr_len;
+             len = payload_len_total + hdr_len;
              esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
            }
 
@@ -414,12 +472,17 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
-         next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
+         next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz,
                                                 next, node,
-                                                buffer_data_size);
+                                                buffer_data_size,
+                                                vlib_buffer_length_in_chain
+                                                (vm, b[0]));
          if (!next_hdr_ptr)
            goto trace;
+
+         b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
+         payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
 
          /* ESP header */
          hdr_len += sizeof (*esp);
@@ -463,7 +526,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
                }
              ip6->payload_length =
-               clib_host_to_net_u16 (payload_len + hdr_len - l2_len -
+               clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
                                      sizeof (ip6_header_t));
            }
          else
@@ -471,7 +534,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              u16 len;
              ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
              *next_hdr_ptr = ip4->protocol;
-             len = payload_len + hdr_len - l2_len;
+             len = payload_len_total + hdr_len - l2_len;
              if (udp)
                {
                  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
@@ -493,8 +556,9 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (sa0->crypto_enc_op_id)
        {
          vnet_crypto_op_t *op;
-         vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
+         vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+
          op->src = op->dst = payload;
          op->key_index = sa0->crypto_key_index;
          op->len = payload_len - icv_sz;
@@ -524,12 +588,42 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              op->iv = payload - iv_sz;
              op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
            }
+
+         if (lb != b[0])
+           {
+             /* is chained */
+             vlib_buffer_t *cb = b[0];
+             op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+             op->chunk_index = vec_len (ptd->chunks);
+             op->tag = vlib_buffer_get_tail (lb) - icv_sz;
+             vec_add2 (ptd->chunks, ch, 1);
+             ch->len = payload_len;
+             ch->src = ch->dst = payload;
+             cb = vlib_get_buffer (vm, cb->next_buffer);
+             op->n_chunks = 1;
+
+             while (1)
+               {
+                 vec_add2 (ptd->chunks, ch, 1);
+                 op->n_chunks += 1;
+                 if (lb == cb)
+                   ch->len = cb->current_length - icv_sz;
+                 else
+                   ch->len = cb->current_length;
+                 ch->src = ch->dst = vlib_buffer_get_current (cb);
+
+                 if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+                   break;
+
+                 cb = vlib_get_buffer (vm, cb->next_buffer);
+               }
+           }
        }
 
       if (sa0->integ_op_id)
        {
          vnet_crypto_op_t *op;
-         vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
+         vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->src = payload - iv_sz - sizeof (esp_header_t);
          op->digest = payload + payload_len - icv_sz;
@@ -537,7 +631,46 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          op->digest_len = icv_sz;
          op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
          op->user_data = b - bufs;
-         if (ipsec_sa_is_set_USE_ESN (sa0))
+
+         if (lb != b[0])
+           {
+             /* is chained */
+             op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+             vlib_buffer_t *cb = b[0];
+             op->chunk_index = vec_len (ptd->chunks);
+             op->digest = vlib_buffer_get_tail (lb) - icv_sz;
+             vec_add2 (ptd->chunks, ch, 1);
+             ch->len = payload_len + iv_sz + sizeof (esp_header_t);
+             ch->src = payload - iv_sz - sizeof (esp_header_t);
+             cb = vlib_get_buffer (vm, cb->next_buffer);
+             op->n_chunks = 1;
+
+             while (1)
+               {
+                 vec_add2 (ptd->chunks, ch, 1);
+                 op->n_chunks += 1;
+                 if (lb == cb)
+                   {
+                     ch->len = cb->current_length - icv_sz;
+                     if (ipsec_sa_is_set_USE_ESN (sa0))
+                       {
+                         u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
+                         clib_memcpy_fast (op->digest, &seq_hi,
+                                           sizeof (seq_hi));
+                         ch->len += sizeof (seq_hi);
+                       }
+                   }
+                 else
+                   ch->len = cb->current_length;
+                 ch->src = vlib_buffer_get_current (cb);
+
+                 if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+                   break;
+
+                 cb = vlib_get_buffer (vm, cb->next_buffer);
+               }
+           }
+         else if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
              clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
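In the chained integ path above, the digest always lands at the tail of the last buffer; for ESN, seq_hi is written into the ICV slot so that it is hashed as part of the last chunk (ch->len grows by sizeof (seq_hi)) and then overwritten by the computed digest. Unlike the decrypt side, no extra chunk or scratch buffer is needed, because esp_add_footer_and_icv already reserved icv_sz bytes of tail space.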
@@ -548,7 +681,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       vlib_buffer_advance (b[0], 0LL - hdr_len);
 
       current_sa_packets += 1;
-      current_sa_bytes += payload_len;
+      current_sa_bytes += payload_len_total;
 
     trace:
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
@@ -572,8 +705,14 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_packets,
                                   current_sa_bytes);
+
   esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts);
+  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
+                          ptd->chunks);
+
   esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts);
+  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
+                          ptd->chunks);
 
   vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
index 0c3e577..f1b7daf 100644 (file)
@@ -90,8 +90,12 @@ typedef struct
 
 typedef struct
 {
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   vnet_crypto_op_t *crypto_ops;
   vnet_crypto_op_t *integ_ops;
+  vnet_crypto_op_t *chained_crypto_ops;
+  vnet_crypto_op_t *chained_integ_ops;
+  vnet_crypto_op_chunk_t *chunks;
 } ipsec_per_thread_data_t;
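These per-thread vectors are reset with vec_reset_length at the top of every encrypt/decrypt frame and reused, so the chunk_index values stored in ops are valid only within a single frame; the cache-line alignment marker keeps each worker's vectors from sharing a cache line with its neighbor's.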
 
 typedef struct
index 2eeb63c..56f4b45 100644 (file)
@@ -556,7 +556,7 @@ class IpsecTra4(object):
         p.scapy_tra_sa.seq_num = 351
         p.vpp_tra_sa.seq_num = 351
 
-    def verify_tra_basic4(self, count=1):
+    def verify_tra_basic4(self, count=1, payload_size=54):
         """ ipsec v4 transport basic test """
         self.vapi.cli("clear errors")
         self.vapi.cli("clear ipsec sa")
@@ -565,7 +565,8 @@ class IpsecTra4(object):
             send_pkts = self.gen_encrypt_pkts(p.scapy_tra_sa, self.tra_if,
                                               src=self.tra_if.remote_ip4,
                                               dst=self.tra_if.local_ip4,
-                                              count=count)
+                                              count=count,
+                                              payload_size=payload_size)
             recv_pkts = self.send_and_expect(self.tra_if, send_pkts,
                                              self.tra_if)
             for rx in recv_pkts:
@@ -611,14 +612,16 @@ class IpsecTra4Tests(IpsecTra4):
 
 class IpsecTra6(object):
     """ verify methods for Transport v6 """
-    def verify_tra_basic6(self, count=1):
+    def verify_tra_basic6(self, count=1, payload_size=54):
         self.vapi.cli("clear errors")
+        self.vapi.cli("clear ipsec sa")
         try:
             p = self.params[socket.AF_INET6]
             send_pkts = self.gen_encrypt_pkts6(p.scapy_tra_sa, self.tra_if,
                                                src=self.tra_if.remote_ip6,
                                                dst=self.tra_if.local_ip6,
-                                               count=count)
+                                               count=count,
+                                               payload_size=payload_size)
             recv_pkts = self.send_and_expect(self.tra_if, send_pkts,
                                              self.tra_if)
             for rx in recv_pkts:
@@ -834,7 +837,8 @@ class IpsecTun4(object):
             send_pkts = self.gen_encrypt_pkts(p.scapy_tun_sa, self.tun_if,
                                               src=p.remote_tun_if_host,
                                               dst=self.pg1.remote_ip4,
-                                              count=count)
+                                              count=count,
+                                              payload_size=payload_size)
             recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
             self.verify_decrypted(p, recv_pkts)
 
@@ -857,41 +861,6 @@ class IpsecTun4(object):
         self.logger.info(self.vapi.ppcli("show ipsec sa 4"))
         self.verify_counters4(p, count, n_rx)
 
-    """ verify methods for Transport v4 """
-    def verify_tun_44_bad_packet_sizes(self, p):
-        # with a buffer size of 2048, 1989 bytes of payload
-        # means there isn't space to insert the ESP header
-        N_PKTS = 63
-        for p_siz in [1989, 8500]:
-            send_pkts = self.gen_encrypt_pkts(p.scapy_tun_sa, self.tun_if,
-                                              src=p.remote_tun_if_host,
-                                              dst=self.pg1.remote_ip4,
-                                              count=N_PKTS,
-                                              payload_size=p_siz)
-            self.send_and_assert_no_replies(self.tun_if, send_pkts)
-            send_pkts = self.gen_pkts(self.pg1, src=self.pg1.remote_ip4,
-                                      dst=p.remote_tun_if_host, count=N_PKTS,
-                                      payload_size=p_siz)
-            self.send_and_assert_no_replies(self.pg1, send_pkts,
-                                            self.tun_if)
-
-        # both large packets on decrpyt count against chained buffers
-        # the 9000 bytes one does on encrypt
-        self.assertEqual(2 * N_PKTS,
-                         self.statistics.get_err_counter(
-                             '/err/%s/chained buffers (packet dropped)' %
-                             self.tun4_decrypt_node_name))
-        self.assertEqual(N_PKTS,
-                         self.statistics.get_err_counter(
-                             '/err/%s/chained buffers (packet dropped)' %
-                             self.tun4_encrypt_node_name))
-
-        # on encrypt the 1989 size is no trailer space
-        self.assertEqual(N_PKTS,
-                         self.statistics.get_err_counter(
-                             '/err/%s/no trailer space (packet dropped)' %
-                             self.tun4_encrypt_node_name))
-
     def verify_tun_reass_44(self, p):
         self.vapi.cli("clear errors")
         self.vapi.ip_reassembly_enable_disable(
@@ -996,12 +965,6 @@ class IpsecTun4Tests(IpsecTun4):
         self.verify_tun_44(self.params[socket.AF_INET], count=127)
 
 
-class IpsecTunEsp4Tests(IpsecTun4):
-    def test_tun_bad_packet_sizes(self):
-        """ ipsec v4 tunnel bad packet size """
-        self.verify_tun_44_bad_packet_sizes(self.params[socket.AF_INET])
-
-
 class IpsecTun6(object):
     """ verify methods for Tunnel v6 """
     def verify_counters6(self, p_in, p_out, count, worker=None):
@@ -1064,7 +1027,8 @@ class IpsecTun6(object):
             send_pkts = self.gen_encrypt_pkts6(p_in.scapy_tun_sa, self.tun_if,
                                                src=p_in.remote_tun_if_host,
                                                dst=self.pg1.remote_ip6,
-                                               count=count)
+                                               count=count,
+                                               payload_size=payload_size)
             recv_pkts = self.send_and_expect(self.tun_if, send_pkts, self.pg1)
             self.verify_decrypted6(p_in, recv_pkts)
 
index 60e5c93..5b057e7 100644 (file)
@@ -10,7 +10,7 @@ from template_ipsec import IpsecTra46Tests, IpsecTun46Tests, TemplateIpsec, \
     config_tun_params, IPsecIPv4Params, IPsecIPv6Params, \
     IpsecTra4, IpsecTun4, IpsecTra6, IpsecTun6, \
     IpsecTun6HandoffTests, IpsecTun4HandoffTests, \
-    IpsecTra6ExtTests, IpsecTunEsp4Tests
+    IpsecTra6ExtTests
 from vpp_ipsec import VppIpsecSpd, VppIpsecSpdEntry, VppIpsecSA,\
     VppIpsecSpdItfBinding
 from vpp_ip_route import VppIpRoute, VppRoutePath
@@ -18,6 +18,7 @@ from vpp_ip import DpoProto
 from vpp_papi import VppEnum
 
 NUM_PKTS = 67
+engines_supporting_chain_bufs = ["openssl"]
 
 
 class ConfigIpsecESP(TemplateIpsec):
@@ -288,8 +289,7 @@ class TemplateIpsecEsp(ConfigIpsecESP):
 
 
 class TestIpsecEsp1(TemplateIpsecEsp, IpsecTra46Tests,
-                    IpsecTun46Tests, IpsecTunEsp4Tests,
-                    IpsecTra6ExtTests):
+                    IpsecTun46Tests, IpsecTra6ExtTests):
     """ Ipsec ESP - TUN & TRA tests """
     pass
 
@@ -469,7 +469,7 @@ class RunTestIpsecEspAll(ConfigIpsecESP,
     def run_test(self):
         self.run_a_test(self.engine, self.flag, self.algo)
 
-    def run_a_test(self, engine, flag, algo):
+    def run_a_test(self, engine, flag, algo, payload_size=None):
         self.vapi.cli("set crypto handler all %s" % engine)
 
         self.ipv4_params = IPsecIPv4Params()
@@ -508,6 +508,21 @@ class RunTestIpsecEspAll(ConfigIpsecESP,
         self.verify_tun_44(self.params[socket.AF_INET],
                            count=NUM_PKTS)
 
+        LARGE_PKT_SZ = [
+            4010,  # ICV ends up split across 2 buffers in esp_decrypt
+                   # for transport4; transport6 takes the normal path
+
+            4020,  # same as above but tra4 and tra6 are switched
+        ]
+        if self.engine in engines_supporting_chain_bufs:
+            for sz in LARGE_PKT_SZ:
+                self.verify_tra_basic4(count=NUM_PKTS, payload_size=sz)
+                self.verify_tra_basic6(count=NUM_PKTS, payload_size=sz)
+                self.verify_tun_66(self.params[socket.AF_INET6],
+                                   count=NUM_PKTS, payload_size=sz)
+                self.verify_tun_44(self.params[socket.AF_INET],
+                                   count=NUM_PKTS, payload_size=sz)
+
         #
         # remove the SPDs, SAs, etc
         #