/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 - 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_ring_peek_zc.h>
#include <rte_config.h>

#include "cryptodev.h"
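
/* clib.h defines always_inline as a macro; it is #undef'd before the DPDK
 * includes above to avoid clashing with DPDK's own use of the token, and
 * re-defined below: a plain inline in debug builds, a forced inline
 * otherwise. */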
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

#define foreach_vnet_crypto_status_conversion                                 \
  _ (SUCCESS, COMPLETED)                                                      \
  _ (NOT_PROCESSED, WORK_IN_PROGRESS)                                         \
  _ (AUTH_FAILED, FAIL_BAD_HMAC)                                              \
  _ (INVALID_SESSION, FAIL_ENGINE_ERR)                                        \
  _ (INVALID_ARGS, FAIL_ENGINE_ERR)                                           \
  _ (ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};
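
/* Resolve the IOVA of a physmem pointer: in VA mode the virtual address
 * is used directly, otherwise the pmalloc page lookup table supplies the
 * VA-to-IOVA offset of the page the data lives in. */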
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
		    void *data)
{
  u64 index;

  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}
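
/* Sync a single-segment mbuf with its vlib_buffer before it is handed to
 * the cryptodev PMD. */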
static_always_inline void
cryptodev_validate_mbuf (struct rte_mbuf *mb, vlib_buffer_t *b)
{
  /* on the vnet side vlib_buffer current_length is updated by cipher
   * padding and icv_sh; the mbuf needs to be kept in sync with these
   * changes */
  u16 data_len = b->current_length +
		 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  /* for input nodes that are not dpdk-input, it is possible the mbuf
   * was updated before as one of the chained mbufs. Set nb_segs to 1
   * here to prevent the cryptodev PMD from dereferencing a potentially
   * invalid m_src->next pointer.
   */
  mb->nb_segs = 1;
  mb->pkt_len = mb->data_len = data_len;
}
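
/* Sync a chained mbuf with its vlib_buffer chain: re-link every segment,
 * refresh the per-segment lengths and recompute the total packet length. */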
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb;
  /* when the input node is not dpdk, mbuf data_len is not initialized.
   * For a single buffer this is not a problem since the data length is
   * written into the cryptodev operation, but for a chained buffer a
   * reference data length has to be computed through the vlib_buffer.
   *
   * even when the input node is dpdk, it is possible the chained
   * vlib_buffers were updated (a buffer added or removed) but not the
   * mbuf fields, so we have to re-link every mbuf in the chain.
   */
  u16 data_len = b->current_length +
		 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
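
/* rte_mempool object constructor: pre-set the crypto-op fields that never
 * change so the data path does not have to initialize them per packet. */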
static_always_inline void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)), void *_obj,
		unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
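
/* Enqueue a frame of linked cipher + HMAC operations: fill one crypto op
 * per frame element, resolving the session per key/numa/op-type, then
 * enqueue the whole frame to the cryptodev queue pair in one burst. */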
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
				       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return -1;
    }

  if (PREDICT_FALSE (
	rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
				       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
	  CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
	  clib_prefetch_load (&fe[1]);
	  clib_prefetch_load (&fe[2]);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;

	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (
		    cryptodev_session_create (vm, last_key_index, 0) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
		    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
		  return -1;
		}
	    }
	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
      sop->m_dst = 0;
      /* mbuf prepend happens in the tx, but vlib_buffer happens in the
       * nodes, so we have to manually adjust mbuf data_off here so
       * cryptodev can correctly compute the data pointer. The prepend
       * here will be later rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  sop->m_src->data_off += fe->integ_start_offset;
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}

      sop->session = sess;
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr =
	cryptodev_get_iova (pm, cmt->iova_mode, fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
					   (struct rte_crypto_op **) cet->cops,
					   frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
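
/* Enqueue a frame of AEAD (e.g. AES-GCM) operations. Same shape as the
 * linked-alg path, but additionally copies the per-element AAD into the
 * op and re-creates the session when its recorded AAD length does not
 * match the requested one. */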
static_always_inline int
cryptodev_frame_aead_enqueue (vlib_main_t *vm,
			      vnet_crypto_async_frame_t *frame,
			      cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
				       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return -1;
    }

  if (PREDICT_FALSE (
	rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
				       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
	  CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
	  clib_prefetch_load (&fe[1]);
	  clib_prefetch_load (&fe[2]);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  last_key_index = fe->key_index;
	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
		    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
		  return -1;
		}
	    }
	  else if (PREDICT_FALSE (
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
		     rte_cryptodev_sym_session_opaque_data_get (
		       key->keys[vm->numa_node][op_type]) != (u64) aad_len
#else
		     key->keys[vm->numa_node][op_type]->opaque_data != aad_len
#endif
		     ))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
				      fe->key_index, aad_len);
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
		    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
		  return -1;
		}
	    }

	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
      sop->m_dst = 0;
      /* mbuf prepend happens in the tx, but vlib_buffer happens in the
       * nodes, so we have to manually adjust mbuf data_off here so
       * cryptodev can correctly compute the data pointer. The prepend
       * here will be later rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
	  crypto_offset = 0;
	}

      sop->session = sess;
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr =
	cryptodev_get_iova (pm, cmt->iova_mode, fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
					   (struct rte_crypto_op **) cet->cops,
					   frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
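
/* Peek at the op at the head of the completion ring to learn its frame
 * size, and dequeue only when every op of that frame is already in the
 * ring, so a frame is always handed back in one piece. */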
static_always_inline u16
cryptodev_ring_deq (struct rte_ring *r, cryptodev_op_t **cops)
{
  u16 n;
  u16 n_elts = 0;

  n = rte_ring_dequeue_bulk_start (r, (void **) cops, 1, 0);
  rte_ring_dequeue_finish (r, 0);
  if (n == 0)
    return 0;

  n = cops[0]->n_elts;
  if (rte_ring_count (r) < n)
    return 0;

  n_elts = rte_ring_sc_dequeue_bulk (r, (void **) cops, n, 0);
  ASSERT (n_elts == n);

  return n_elts;
}
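
/* Dequeue handler: drain completed ops from the cryptodev queue pair into
 * the per-thread completion ring, then, once a whole frame is available,
 * fold the per-op statuses into the frame elements and return the frame. */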
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */

  if (cet->inflight)
    {
      n_elts = rte_cryptodev_dequeue_burst (
	cet->cryptodev_id, cet->cryptodev_q,
	(struct rte_crypto_op **) cet->cops, VNET_CRYPTO_FRAME_SIZE);
      if (n_elts)
	{
	  cet->inflight -= n_elts;
	  n_completed_ops += n_elts;
	  rte_ring_sp_enqueue_burst (cet->ring, (void **) cet->cops, n_elts,
				     NULL);
	}
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  n_elts = cryptodev_ring_deq (cet->ring, cop);
  if (!n_elts)
    return 0;

  frame = cop[0]->frame;
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];
      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
		   VNET_CRYPTO_FRAME_STATE_SUCCESS :
		   VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (cet->cop_pool, (void **) cet->cops, frame->n_elts);
  *nb_elts_processed = frame->n_elts;
  *enqueue_thread_idx = frame->enqueue_thread_index;
  return frame;
}
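
/* Thin wrappers binding each supported AAD length (0, 8 and 12 bytes) and
 * direction to the AEAD enqueue path, so each registered handler is a
 * fixed-arity function pointer. */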
static_always_inline int
cryptodev_enqueue_aead_aad_0_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       0);
}

static_always_inline int
cryptodev_enqueue_aead_aad_8_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_enc (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       12);
}

static_always_inline int
cryptodev_enqueue_aead_aad_0_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       0);
}

static_always_inline int
cryptodev_enqueue_aead_aad_8_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_dec (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
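
/* Engine registration: create the per-thread crypto-op pool, completion
 * ring and op vector, then register an enqueue handler for every AEAD and
 * cipher+HMAC combination the underlying device supports, plus the shared
 * dequeue handler. */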
clib_error_t *
cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u8 *name;
  clib_error_t *error = 0;

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_index = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_index)->numa_node;
      name = format (0, "vpp_cop_pool_%u_%u", numa, thread_index);
      cet->cop_pool = rte_mempool_create (
	(char *) name, CRYPTODEV_NB_CRYPTO_OPS, sizeof (cryptodev_op_t), 0,
	sizeof (struct rte_crypto_op_pool_private), NULL, NULL, crypto_op_init,
	NULL, vm->numa_node, 0);
      if (!cet->cop_pool)
	{
	  error = clib_error_return (
	    0, "Failed to create cryptodev op pool %s", name);
	  goto error_exit;
	}
      vec_free (name);

      name = format (0, "frames_ring_%u_%u", numa, thread_index);
      cet->ring =
	rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS, vm->numa_node,
			 RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (!cet->ring)
	{
	  error = clib_error_return (
	    0, "Failed to create cryptodev frame ring %s", name);
	  goto error_exit;
	}
      vec_free (name);

      vec_validate (cet->cops, VNET_CRYPTO_FRAME_SIZE - 1);
    }

#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
	cryptodev_enqueue_aead_aad_##f##_enc);                                \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
	cryptodev_enqueue_aead_aad_##f##_dec);                                \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
	cryptodev_enqueue_linked_alg_enc);                                    \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
	cryptodev_enqueue_linked_alg_dec);                                    \
    }
  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_frame_dequeue);

  return 0;

error_exit:
  vec_foreach (cet, cmt->per_thread_data)
    {
      if (cet->ring)
	rte_ring_free (cet->ring);

      if (cet->cop_pool)
	rte_mempool_free (cet->cop_pool);
    }

  return error;
}