/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_NB_SESSION    10240
#define CRYPTODEV_DEF_DRIVE     crypto_aesni_mb

#define CRYPTODEV_IV_OFFSET  (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)

/**
 * crypto (alg, cryptodev_alg), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_128_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_192_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_256_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_128_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_192_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_256_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_128_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_192_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_256_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_128_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_192_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)

#define foreach_vnet_crypto_status_conversion \
  _(SUCCESS, COMPLETED)                       \
  _(NOT_PROCESSED, WORK_IN_PROGRESS)          \
  _(AUTH_FAILED, FAIL_BAD_HMAC)               \
  _(INVALID_SESSION, FAIL_ENGINE_ERR)         \
  _(INVALID_ARGS, FAIL_ENGINE_ERR)            \
  _(ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};

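/* a cryptodev op embeds the DPDK crypto op and symmetric op, followed by the
 * IV and AAD bytes that CRYPTODEV_IV_OFFSET/CRYPTODEV_AAD_OFFSET address
 * relative to the op, plus back-pointers used at dequeue time */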
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[16];
  u8 aad[16];
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;

typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;

typedef struct
{
  /* per-numa, per-op-type session pointers: keys[numa_node][op_type] */
  struct rte_cryptodev_sym_session ***keys;
} cryptodev_key_t;

typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_mempool *cop_pool;
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u16 cryptodev_id;
  u16 cryptodev_q;
  u32 inflight;
  cryptodev_op_t **cops;
  struct rte_ring *ring;
} cryptodev_engine_thread_t;

typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;

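/* translate a vnet AES-GCM key into a DPDK AEAD transform; only AES-GCM
 * keys are accepted here */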
static_always_inline int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type, const vnet_crypto_key_t *key,
		    u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
  xform->next = 0;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}

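/* build the two-element cipher+auth transform chain for a linked key;
 * cipher runs first on encrypt, auth runs first on decrypt */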
static_always_inline int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d) \
  case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:\
    cipher_algo = RTE_CRYPTO_CIPHER_##b; \
    auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
    digest_len = d; \
    break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}

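/* clear the session state on every cryptodev, then free the session */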
static_always_inline void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}

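/* linked keys are accepted as-is (validated later by prepare_linked_xform);
 * other keys must be one of the AEAD algorithms listed above */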
static_always_inline int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f) \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}

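/* create encrypt and decrypt sessions for a key, initialize them on every
 * cryptodev instance, then publish them in ckey->keys[numa_node][op_type] */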
static_always_inline int
cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			  u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  cryptodev_inst_t *dev_inst;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = vec_elt_at_index (cmt->keys, idx);
  struct rte_crypto_sym_xform xforms_enc[2] = { { 0 } };
  struct rte_crypto_sym_xform xforms_dec[2] = { { 0 } };
  struct rte_cryptodev_sym_session *sessions[CRYPTODEV_N_OP_TYPES] = { 0 };
  u32 numa_node = vm->numa_node;
  int ret;

  numa_data = vec_elt_at_index (cmt->per_numa_data, numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_ENCRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_DECRYPT] =
    rte_cryptodev_sym_session_create (sess_pool);
  if (!sessions[CRYPTODEV_OP_TYPE_DECRYPT])
    {
      ret = -1;
      goto clear_key;
    }

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret =
      prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key, aad_len);
  if (ret)
    goto clear_key;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
    {
      u32 dev_id = dev_inst->dev_id;
      struct rte_cryptodev *cdev = rte_cryptodev_pmd_get_dev (dev_id);

      /* if the session is already configured for the driver type, avoid
	 configuring it again to increase the session data's refcnt */
      if (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]
	    ->sess_data[cdev->driver_id].data &&
	  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->sess_data[cdev->driver_id].data)
	continue;

      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_ENCRYPT], xforms_enc,
	sess_priv_pool);
      ret = rte_cryptodev_sym_session_init (
	dev_id, sessions[CRYPTODEV_OP_TYPE_DECRYPT], xforms_dec,
	sess_priv_pool);
      if (ret < 0)
	goto clear_key;
    }

  sessions[CRYPTODEV_OP_TYPE_ENCRYPT]->opaque_data = aad_len;
  sessions[CRYPTODEV_OP_TYPE_DECRYPT]->opaque_data = aad_len;

  CLIB_MEMORY_STORE_BARRIER ();
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_ENCRYPT] =
    sessions[CRYPTODEV_OP_TYPE_ENCRYPT];
  ckey->keys[numa_node][CRYPTODEV_OP_TYPE_DECRYPT] =
    sessions[CRYPTODEV_OP_TYPE_DECRYPT];
  return 0;

clear_key:
  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_ENCRYPT]);
  cryptodev_session_del (sessions[CRYPTODEV_OP_TYPE_DECRYPT]);
  return ret;
}

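/* vnet key add/del/modify callback: tear down sessions on delete/modify,
 * otherwise just size the per-numa session table; the sessions themselves
 * are created lazily at enqueue time */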
static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  cryptodev_key_t *ckey = 0;
  u32 i;

  vec_validate (cmt->keys, idx);
  ckey = vec_elt_at_index (cmt->keys, idx);

  if (kop == VNET_CRYPTO_KEY_OP_DEL || kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      vec_foreach_index (i, cmt->per_numa_data)
	{
	  if (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT])
	    {
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT]);
	      cryptodev_session_del (ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT]);

	      CLIB_MEMORY_STORE_BARRIER ();
	      ckey->keys[i][CRYPTODEV_OP_TYPE_ENCRYPT] = 0;
	      ckey->keys[i][CRYPTODEV_OP_TYPE_DECRYPT] = 0;
	    }
	}
      return;
    }

  /* do not create session for unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  vec_validate (ckey->keys, vec_len (cmt->per_numa_data) - 1);
  vec_foreach_index (i, ckey->keys)
    vec_validate (ckey->keys[i], CRYPTODEV_N_OP_TYPES - 1);
}

static void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}

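/* flag every element of the frame with the given error status */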
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
}

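/* in VA mode the IOVA is the virtual address itself; in PA mode it is
 * derived from the physmem page lookup table */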
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
		    void *data)
{
  u64 index;
  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}

static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
  /* when the input node is not dpdk, mbuf data len is not initialized; for a
   * single buffer this is not a problem since the data length is written
   * into the cryptodev operation. For a chained buffer a reference data
   * length has to be computed through the vlib_buffer.
   *
   * even when the input node is dpdk, it is possible that chained
   * vlib_buffers were updated (a buffer added or removed) but the mbuf
   * fields were not, so we have to re-link every mbuf in the chain.
   */
  u16 data_len = b->current_length + (b->data + b->current_data -
				      rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}

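/* enqueue a frame of cipher+HMAC (linked) operations onto this thread's
 * cryptodev queue pair; on any resource shortage the whole frame is marked
 * failed */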
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_cryptodev_sym_session *sess = 0;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;

	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (
		    cryptodev_session_create (vm, last_key_index, 0) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  return -1;
		}
	    }
	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;

      /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
       * so we have to manually adjust mbuf data_off here so cryptodev can
       * correctly compute the data pointer. The prepend here will be later
       * rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  sop->m_src->data_off += fe->integ_start_offset;
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}
      sop->session = sess;
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
						       fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* for input nodes that are not dpdk-input, it is possible the mbuf
	 * was updated before as one of the chained mbufs. Setting nb_segs
	 * to 1 here prevents the cryptodev PMD from accessing potentially
	 * invalid m_src->next pointers.
	 */
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}

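/* enqueue a frame of AES-GCM operations; sessions are (re)created when the
 * key is new or its AAD length (stored in opaque_data) has changed */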
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t *vm,
			     vnet_crypto_async_frame_t *frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_cryptodev_sym_session *sess = 0;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  last_key_index = fe->key_index;
	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  return -1;
		}
	    }
	  else if (PREDICT_FALSE (
		     key->keys[vm->numa_node][op_type]->opaque_data !=
		     aad_len))
	    {
	      /* the AAD length changed: drop the session and recreate it */
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
				      fe->key_index, aad_len);
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  return -1;
		}
	    }

	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);

      /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
       * so we have to manually adjust mbuf data_off here so cryptodev can
       * correctly compute the data pointer. The prepend here will be later
       * rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
	  crypto_offset = 0;
	}

      sop->session = sess;
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
						       fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* for input nodes that are not dpdk-input, it is possible the mbuf
	 * was updated before as one of the chained mbufs. Setting nb_segs
	 * to 1 here prevents the cryptodev PMD from accessing potentially
	 * invalid m_src->next pointers.
	 */
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}

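/* peek the oldest op in the ring without dequeuing it; the ring's object
 * table starts right behind the rte_ring header */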
static_always_inline cryptodev_op_t *
cryptodev_get_ring_head (struct rte_ring *ring)
{
  cryptodev_op_t **r = (void *) &ring[1];
  return r[ring->cons.head & ring->mask];
}

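/* drain completed ops from the cryptodev queue pair into the software ring,
 * then hand back a frame only once all of its ops have completed */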
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t *cop0, **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */

  if (cet->inflight)
    {
      n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
			 VNET_CRYPTO_FRAME_SIZE);
      n_elts = rte_cryptodev_dequeue_burst
	(cet->cryptodev_id, cet->cryptodev_q,
	 (struct rte_crypto_op **) cet->cops, n_elts);
      cet->inflight -= n_elts;
      n_completed_ops += n_elts;

      rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops, n_elts, NULL);
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  cop0 = cryptodev_get_ring_head (cet->ring);
  /* not a single frame is finished */
  if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring)))
    return 0;

  frame = cop0->frame;
  n_elts = cop0->n_elts;
  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
				     n_elts, 0);
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
    VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);
  *nb_elts_processed = frame->n_elts;
  *enqueue_thread_idx = frame->enqueue_thread_index;
  return frame;
}

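/* thin wrappers binding op type and AAD length; these are the handlers
 * registered with the vnet crypto engine below */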
static_always_inline int
cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t *vm,
				 vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t *vm,
				 vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}

typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;

/**
 * assign a cryptodev resource to a worker.
 * @param cet: the worker thread data
 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 * @param op: the assignment method.
 * @return: 0 if successful, negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
	{
	  cinst = cmt->cryptodev_inst + idx;
	  if (cinst->dev_id == cet->cryptodev_id &&
	      cinst->q_id == cet->cryptodev_q)
	    break;
	}
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
	return -EINVAL;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }

  return 0;
}

static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
    {
      cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
      if (vlib_num_workers () > 0 && thread_index == 0)
	continue;

      if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
	{
	  s = format (s, "%u (%v)\n", thread_index,
		      vlib_worker_threads[thread_index].name);
	  break;
	}
    }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}

static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}

VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};

static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			     vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error = clib_error_return (0,
				 "cannot assign crypto resource to the "
				 "master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error = clib_error_return (0, "cryptodev_assign_resource returned %i",
				 ret);
      return error;
    }

  return 0;
}

VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
	"resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};

static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b; \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_##b##_##c; \
  else \
    { \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f) \
	return -RTE_CRYPTO_##b##_##c; \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d) \
	return -RTE_CRYPTO_##b##_##c; \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER; \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_CIPHER_##b; \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH; \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC; \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx); \
  if (!cap) \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _

  return 0;
}

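/* count usable queue pairs over all symmetric-crypto capable devices */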
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 q_count = 0;
  u32 i;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);

      /* only devices that support symmetric crypto are used */
      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
	continue;
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}

static int
cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* do not configure a device that does not support symmetric crypto */
  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return 0;

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  /** If the device is already started, we reuse it, otherwise configure
   *  both the device and queue pairs.
   **/
  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = info.device->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  qp_cfg.mp_session = 0;
	  qp_cfg.mp_session_private = 0;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						info.device->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;
      /* start the device */
      rte_cryptodev_start (cryptodev_id);
    }

  for (i = 0; i < cdev->data->nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}

static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;

  return 0;
}

static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  /* If there are not enough queues, exit */
  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	return ret;
    }

  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  return 0;
}

static i32
cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers)
{
  u32 sess_data_sz = 0, i;

  if (rte_cryptodev_count () == 0)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}

static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);
  if (numa_data->cop_pool)
    rte_mempool_free (numa_data->cop_pool);
}

static void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)),
		void *_obj, unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}

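/* engine init: probe devices, create per-thread rings and per-numa
 * session/op pools, then register the async handlers and key handler */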
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  i32 sess_sz;
  u64 n_cop_elts;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;
  struct rte_crypto_op_pool_private *priv;

  cmt->iova_mode = rte_eal_iova_mode ();

  sess_sz = cryptodev_get_session_sz (vm, n_workers);
  if (sess_sz < 0)
    {
      error = clib_error_return (0, "Not enough cryptodevs");
      return error;
    }

  /* size the op pool to the enqueue capacity: n_workers *
   * CRYPTODEV_NB_CRYPTO_OPS, rounded up to a power of two */
  n_cop_elts = max_pow2 ((u64) n_workers * CRYPTODEV_NB_CRYPTO_OPS);

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
      name = format (0, "frames_ring_%u%c", i, 0);
      ptd->ring = rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
				   vm->numa_node,
				   RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (!ptd->ring)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  vec_free (name);
	  goto err_handling;
	}
      vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1);
      vec_free (name);

      numa = vlib_mains[i]->numa_node;

      vec_validate (cmt->per_numa_data, numa);
      numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

      if (numa_data->sess_pool)
	continue;

      /* create session pool for the numa node */
      name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
      mp = rte_cryptodev_sym_session_pool_create (
	(char *) name, CRYPTODEV_NB_SESSION, 0, 0, 0, numa);
      if (!mp)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  goto err_handling;
	}
      vec_free (name);

      numa_data->sess_pool = mp;

      /* create session private pool for the numa node */
      name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
      mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
			       0, NULL, NULL, NULL, NULL, numa, 0);
      if (!mp)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  vec_free (name);
	  goto err_handling;
	}

      vec_free (name);

      numa_data->sess_priv_pool = mp;

      /* create cryptodev op pool */
      name = format (0, "cryptodev_op_pool_%u%c", numa, 0);

      mp = rte_mempool_create ((char *) name, n_cop_elts,
			       sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
			       sizeof (struct rte_crypto_op_pool_private),
			       NULL, NULL, crypto_op_init, NULL, numa, 0);
      if (!mp)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  vec_free (name);
	  goto err_handling;
	}

      priv = rte_mempool_get_priv (mp);
      priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
      priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
      numa_data->cop_pool = mp;
      vec_free (name);
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 100,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC, \
     cryptodev_enqueue_gcm_aad_##f##_enc, \
     cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC, \
     cryptodev_enqueue_gcm_aad_##f##_dec, \
     cryptodev_frame_dequeue);

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d) \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC, \
     cryptodev_enqueue_linked_alg_enc, \
     cryptodev_frame_dequeue); \
  vnet_crypto_register_async_handler \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC, \
     cryptodev_enqueue_linked_alg_dec, \
     cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  /* this engine is only enabled when cryptodev device(s) are present in
   * startup.conf. Assume it is meant to be used, so turn on async mode here.
   */
  vnet_crypto_request_async_mode (1);
  ipsec_set_async_mode (1);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */