2 * ipsecmb.c - Intel IPSec Multi-buffer library Crypto Engine
4 * Copyright (c) 2019 Cisco Systems
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <intel-ipsec-mb.h>
20 #include <vnet/vnet.h>
21 #include <vnet/plugin/plugin.h>
22 #include <vpp/app/version.h>
23 #include <vnet/crypto/crypto.h>
24 #include <vppinfra/cpu.h>
/* Per-thread engine state; each worker thread owns its own MB_MGR
 * (multi-buffer manager) so no locking is needed on the job rings.
 * NOTE(review): the struct's opening lines/fields are elided in this
 * listing. */
29 } ipsecmb_per_thread_data_t;
31 typedef struct ipsecmb_main_t_
  /* one entry per vlib main/worker thread, indexed by vm->thread_index */
33 ipsecmb_per_thread_data_t *per_thread_data;
/* Single global instance of the engine's state. */
36 static ipsecmb_main_t ipsecmb_main;
/* X-macro list of supported HMAC algorithms:
 * _(VNET alg name, mb-library SHA enum, mb-library fn-name suffix).
 * NOTE(review): earlier entries of this list appear elided in this
 * listing. */
38 #define foreach_ipsecmb_hmac_op \
40 _(SHA256, SHA_256, sha256) \
41 _(SHA384, SHA_384, sha384) \
42 _(SHA512, SHA_512, sha512)
/* X-macro list of supported cipher algorithms (body elided in this
 * listing). */
44 #define foreach_ipsecmb_cipher_op \
/*
 * Pre-compute the HMAC inner/outer pad blocks for a key: the key bytes
 * are XORed into buffers filled with the standard HMAC constants
 * 0x36 (ipad) and 0x5c (opad), then run through the one-block hash
 * function `fn`.  Keys longer than the hash block size are presumably
 * hashed down first — the reduction body is elided in this listing,
 * TODO confirm.
 * NOTE(review): several statements of this function are elided here;
 * comments describe only the visible lines.
 */
50 hash_expand_keys (const MB_MGR * mgr,
54 u8 ipad[256], u8 opad[256], hash_one_block_t fn)
59 if (length > block_size)
  /* inner pad: fill with 0x36, then XOR the key bytes in */
64 memset (buf, 0x36, sizeof (buf));
65 for (i = 0; i < length; i++)
  /* outer pad: fill with 0x5c, then XOR the key bytes in */
71 memset (buf, 0x5c, sizeof (buf));
73 for (i = 0; i < length; i++)
/*
 * Complete one finished HMAC job: translate the mb-library job status
 * into the vnet_crypto op status, bumping *n_fail on any failure.
 * In HMAC_CHECK mode the computed tag is compared against the
 * caller-supplied digest; otherwise the tag is copied out to the op.
 */
81 ipsecmb_retire_hmac_job (JOB_AES_HMAC * job, u32 * n_fail)
83 vnet_crypto_op_t *op = job->user_data;
  /* library reported the job as not successfully completed */
85 if (STS_COMPLETED != job->status)
87 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
88 *n_fail = *n_fail + 1;
91 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
  /* verify mode: constant tag was computed into auth_tag_output;
   * compare it against the digest the caller provided */
93 if (op->flags & VNET_CRYPTO_OP_FLAG_HMAC_CHECK)
95 if ((memcmp (op->digest, job->auth_tag_output, op->digest_len)))
97 *n_fail = *n_fail + 1;
98 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
  /* generate mode: hand the computed tag back to the caller */
102 clib_memcpy_fast (op->digest, job->auth_tag_output, op->digest_len);
/*
 * Shared HMAC work-horse for all generated per-algorithm handlers:
 * queue one multi-buffer job per op, retiring any job the library
 * hands back on submit, then flush the manager to complete whatever
 * is still in flight.  Returns the number of successfully processed
 * ops (n_ops - failures).
 * NOTE(review): many statements are elided in this listing; comments
 * cover only the visible lines.
 */
105 static_always_inline u32
106 ipsecmb_ops_hmac_inline (vlib_main_t * vm,
107 const ipsecmb_per_thread_data_t * ptd,
108 vnet_crypto_op_t * ops[],
111 hash_one_block_t fn, JOB_HASH_ALG alg)
  /* per-op scratch area for the computed auth tags (VLA, 64 B each) */
115 u8 scratch[n_ops][64];
118 * queue all the jobs first ...
120 for (i = 0; i < n_ops; i++)
122 vnet_crypto_op_t *op = ops[i];
  /* NOTE(review): ipad/opad are stack arrays of this loop iteration,
   * but a submitted job may only be processed on a later submit or at
   * the flush below while still pointing at them — verify the mb
   * library consumes these pads at submit time, TODO confirm. */
123 u8 ipad[256], opad[256];
125 hash_expand_keys (ptd->mgr, op->key, op->key_len,
126 block_size, ipad, opad, fn);
128 job = IMB_GET_NEXT_JOB (ptd->mgr);
131 job->hash_start_src_offset_in_bytes = 0;
132 job->msg_len_to_hash_in_bytes = op->len;
134 job->auth_tag_output_len_in_bytes = op->digest_len;
135 job->auth_tag_output = scratch[i];
  /* hash-only job: no cipher pass */
137 job->cipher_mode = NULL_CIPHER;
138 job->cipher_direction = DECRYPT;
139 job->chain_order = HASH_CIPHER;
141 job->aes_key_len_in_bytes = op->key_len;
143 job->u.HMAC._hashed_auth_key_xor_ipad = ipad;
144 job->u.HMAC._hashed_auth_key_xor_opad = opad;
  /* SUBMIT may return an older completed job (or NULL) */
147 job = IMB_SUBMIT_JOB (ptd->mgr);
150 ipsecmb_retire_hmac_job (job, &n_fail);
154 * .. then flush (i.e. complete) them
155 * We will have queued enough to satisfy the 'multi' buffer
157 while ((job = IMB_FLUSH_JOB (ptd->mgr)))
159 ipsecmb_retire_hmac_job (job, &n_fail);
162 return n_ops - n_fail;
/* Generator body for the per-algorithm HMAC handlers (the #define
 * header line is elided in this listing).  Each generated
 * ipsecmb_ops_hmac_<ALG> looks up this thread's per-thread data and
 * dispatches to ipsecmb_ops_hmac_inline with the matching one-block
 * hash function.  The trailing foreach_ instantiates one handler per
 * algorithm in the X-macro list. */
166 static_always_inline u32 \
167 ipsecmb_ops_hmac_##a (vlib_main_t * vm, \
168 vnet_crypto_op_t * ops[], \
171 ipsecmb_per_thread_data_t *ptd; \
172 ipsecmb_main_t *imbm; \
174 imbm = &ipsecmb_main; \
175 ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \
177 return ipsecmb_ops_hmac_inline (vm, ptd, ops, n_ops, \
179 ptd->mgr->c##_one_block, \
182 foreach_ipsecmb_hmac_op;
/* 15 round keys x 16 bytes — large enough for an AES-256 key schedule */
185 #define EXPANDED_KEY_N_BYTES (16 * 15)
/*
 * Complete one finished cipher job: map the mb-library job status to
 * the vnet_crypto op status and count failures in *n_fail.
 * NOTE(review): the failure path reuses FAIL_BAD_HMAC even though this
 * is a cipher job — a cipher-specific status may be more appropriate;
 * confirm against the vnet_crypto status enum.
 */
188 ipsecmb_retire_cipher_job (JOB_AES_HMAC * job, u32 * n_fail)
190 vnet_crypto_op_t *op = job->user_data;
192 if (STS_COMPLETED != job->status)
194 op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
195 *n_fail = *n_fail + 1;
198 op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
/*
 * Shared AES-CBC work-horse for all generated per-algorithm handlers:
 * expand the key, queue one multi-buffer job per op, retire any job
 * returned on submit, then flush the manager to complete the rest.
 * `direction` selects ENCRYPT/DECRYPT; returns n_ops - failures.
 * NOTE(review): many statements are elided in this listing; comments
 * cover only the visible lines.
 */
201 static_always_inline u32
202 ipsecmb_ops_cipher_inline (vlib_main_t * vm,
203 const ipsecmb_per_thread_data_t * ptd,
204 vnet_crypto_op_t * ops[],
206 keyexp_t fn, JOB_CIPHER_DIRECTION direction)
212 * queue all the jobs first ...
214 for (i = 0; i < n_ops; i++)
  /* NOTE(review): the expanded key schedules are stack arrays of this
   * loop iteration, but a submitted job may only be processed on a
   * later submit or at the flush below while still pointing at them —
   * verify the mb library consumes them at submit time, TODO confirm. */
216 u8 aes_enc_key_expanded[EXPANDED_KEY_N_BYTES];
217 u8 aes_dec_key_expanded[EXPANDED_KEY_N_BYTES];
218 vnet_crypto_op_t *op = ops[i];
  /* run the library's key-expansion for this key size */
220 fn (op->key, aes_enc_key_expanded, aes_dec_key_expanded);
222 job = IMB_GET_NEXT_JOB (ptd->mgr);
226 job->msg_len_to_cipher_in_bytes = op->len;
227 job->cipher_start_src_offset_in_bytes = 0;
  /* cipher-only job: no hash pass */
229 job->hash_alg = NULL_HASH;
230 job->cipher_mode = CBC;
231 job->cipher_direction = direction;
232 job->chain_order = (direction == ENCRYPT ? CIPHER_HASH : HASH_CIPHER);
234 job->aes_key_len_in_bytes = op->key_len;
235 job->aes_enc_key_expanded = aes_enc_key_expanded;
236 job->aes_dec_key_expanded = aes_dec_key_expanded;
238 job->iv_len_in_bytes = op->iv_len;
  /* SUBMIT may return an older completed job (or NULL) */
242 job = IMB_SUBMIT_JOB (ptd->mgr);
245 ipsecmb_retire_cipher_job (job, &n_fail);
249 * .. then flush (i.e. complete) them
250 * We will have queued enough to satisfy the 'multi' buffer
252 while ((job = IMB_FLUSH_JOB (ptd->mgr)))
254 ipsecmb_retire_cipher_job (job, &n_fail);
257 return n_ops - n_fail;
/* Generator body for the per-algorithm encrypt handlers (the #define
 * header line is elided in this listing).  Each generated
 * ipsecmb_ops_cipher_enc_<ALG> looks up this thread's per-thread data
 * and dispatches to ipsecmb_ops_cipher_inline with the matching
 * key-expansion routine.  The trailing foreach_ instantiates one
 * handler per cipher in the X-macro list. */
261 static_always_inline u32 \
262 ipsecmb_ops_cipher_enc_##a (vlib_main_t * vm, \
263 vnet_crypto_op_t * ops[], \
266 ipsecmb_per_thread_data_t *ptd; \
267 ipsecmb_main_t *imbm; \
269 imbm = &ipsecmb_main; \
270 ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \
272 return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops, \
273 ptd->mgr->keyexp_##b, \
276 foreach_ipsecmb_cipher_op;
/* Generator body for the per-algorithm decrypt handlers — mirror image
 * of the encrypt generator above (the #define header line is elided in
 * this listing).  The trailing foreach_ instantiates one
 * ipsecmb_ops_cipher_dec_<ALG> per cipher in the X-macro list. */
280 static_always_inline u32 \
281 ipsecmb_ops_cipher_dec_##a (vlib_main_t * vm, \
282 vnet_crypto_op_t * ops[], \
285 ipsecmb_per_thread_data_t *ptd; \
286 ipsecmb_main_t *imbm; \
288 imbm = &ipsecmb_main; \
289 ptd = vec_elt_at_index (imbm->per_thread_data, vm->thread_index); \
291 return ipsecmb_ops_cipher_inline (vm, ptd, ops, n_ops, \
292 ptd->mgr->keyexp_##b, \
295 foreach_ipsecmb_cipher_op;
/*
 * Plugin init: register the "ipsecmb" crypto engine, allocate and
 * initialize one MB_MGR per vlib thread (picking the best SIMD code
 * path the CPU supports), and register the generated per-algorithm
 * op handlers.
 */
298 static clib_error_t *
299 crypto_ipsecmb_init (vlib_main_t * vm)
301 ipsecmb_main_t *imbm = &ipsecmb_main;
302 ipsecmb_per_thread_data_t *ptd;
303 vlib_thread_main_t *tm = vlib_get_thread_main ();
  /* make sure the core crypto subsystem is initialized first */
307 if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
311 * A priority that is better than OpenSSL but worse than VPP native
313 eidx = vnet_crypto_register_engine (vm, "ipsecmb", 80,
314 "Intel IPSEC multi-buffer");
  /* one per-thread slot per vlib main/worker */
316 vec_validate (imbm->per_thread_data, tm->n_vlib_mains - 1);
  /* pick the widest SIMD implementation available: AVX-512 > AVX2 > SSE.
   * NOTE(review): alloc_mb_mgr's return value is not checked for NULL
   * before init — confirm allocation failure handling. */
318 if (clib_cpu_supports_avx512f ())
320 vec_foreach (ptd, imbm->per_thread_data)
322 ptd->mgr = alloc_mb_mgr (0);
323 init_mb_mgr_avx512 (ptd->mgr);
326 else if (clib_cpu_supports_avx2 ())
328 vec_foreach (ptd, imbm->per_thread_data)
330 ptd->mgr = alloc_mb_mgr (0);
331 init_mb_mgr_avx2 (ptd->mgr);
336 vec_foreach (ptd, imbm->per_thread_data)
338 ptd->mgr = alloc_mb_mgr (0);
339 init_mb_mgr_sse (ptd->mgr);
  /* register HMAC, then cipher-encrypt, then cipher-decrypt handlers
   * for every algorithm in the X-macro lists */
344 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_HMAC, \
345 ipsecmb_ops_hmac_##a); \
347 foreach_ipsecmb_hmac_op;
350 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_ENC, \
351 ipsecmb_ops_cipher_enc_##a); \
353 foreach_ipsecmb_cipher_op;
356 vnet_crypto_register_ops_handler (vm, eidx, VNET_CRYPTO_OP_##a##_DEC, \
357 ipsecmb_ops_cipher_dec_##a); \
359 foreach_ipsecmb_cipher_op;
/* Run crypto_ipsecmb_init during VPP start-up. */
365 VLIB_INIT_FUNCTION (crypto_ipsecmb_init);
/* Plugin descriptor so VPP's plugin loader picks this engine up. */
368 VLIB_PLUGIN_REGISTER () =
370 .version = VPP_BUILD_VER,
371 .description = "Intel IPSEC multi-buffer",
376 * fd.io coding-style-patch-verification: ON
379 * eval: (c-set-style "gnu")