/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 - 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_ring_peek_zc.h>
#include <rte_config.h>

#include "cryptodev.h"
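
/* the dpdk headers above drop VPP's always_inline; redefine it here. The
 * debug variant below omits the always-inline attribute so these helpers
 * remain steppable when CLIB_DEBUG is enabled. */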
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

#define foreach_vnet_crypto_status_conversion                                 \
  _ (SUCCESS, COMPLETED)                                                      \
  _ (NOT_PROCESSED, WORK_IN_PROGRESS)                                         \
  _ (AUTH_FAILED, FAIL_BAD_HMAC)                                              \
  _ (INVALID_SESSION, FAIL_ENGINE_ERR)                                        \
  _ (INVALID_ARGS, FAIL_ENGINE_ERR)                                           \
  _ (ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};
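
/* Resolve the IOVA the cryptodev PMD should use for a VA pointer: in
 * RTE_IOVA_VA mode the virtual address is the IOVA; otherwise the physmem
 * page lookup table supplies the per-page VA-to-PA offset. */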
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
		    void *data)
{
  u64 index;

  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}
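
/* Rebuild an mbuf chain so it mirrors the vlib_buffer chain it shadows:
 * data lengths, data offsets, segment count and next pointers are all
 * recomputed from the vlib side. */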
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
  /* when the input node is not dpdk, mbuf data len is not initialized; for a
   * single buffer this is not a problem, since the data length is written
   * into the cryptodev operation. For a chained buffer a reference data
   * length has to be computed through the vlib_buffer chain.
   *
   * even when the input node is dpdk, it is possible that the chained
   * vlib_buffers were updated (a buffer added or removed) but not the mbuf
   * fields, so we have to re-link every mbuf in the chain.
   */
  u16 data_len = b->current_length +
		 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
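
/* rte_mempool object constructor: runs once per op when the pool is
 * populated, pre-filling the fields that never change so the data path
 * does not have to rewrite them per packet. */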
static_always_inline void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)), void *_obj,
		unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
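
/* Enqueue a frame of cipher-then-HMAC (linked algorithm) operations. The
 * whole frame either makes it to the cryptodev queue or is marked failed:
 * if the inflight budget or the op pool cannot cover frame->n_elts, every
 * element is failed with ENGINE_ERR and the caller falls back. */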
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_cryptodev_sym_session *sess = 0;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (
	rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
	  CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
	  clib_prefetch_load (&fe[1]);
	  clib_prefetch_load (&fe[2]);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;

	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (
		    cryptodev_session_create (vm, last_key_index, 0) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  return -1;
		}
	    }
	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
      sop->m_dst = 0;
      /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
       * so we have to manually adjust mbuf data_off here so cryptodev can
       * correctly compute the data pointer. The prepend here will be later
       * rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  sop->m_src->data_off += fe->integ_start_offset;
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}
      sop->session = sess;
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr =
	cryptodev_get_iova (pm, cmt->iova_mode, fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* for input nodes that are not dpdk-input, it is possible the mbuf
	 * was updated before as one of the chained mbufs. Setting nb_segs
	 * to 1 here prevents the cryptodev PMD from accessing potentially
	 * invalid m_src->next pointers.
	 */
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
					   (struct rte_crypto_op **) cet->cops,
					   frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
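
/* AEAD variant of the frame enqueue. aad_len is bound by the wrappers
 * further below; the AAD bytes are copied into the op itself so their IOVA
 * can be derived from the op's phys_addr, and a session whose cached AAD
 * length (stashed in opaque_data) no longer matches is deleted and
 * recreated. */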
static_always_inline int
cryptodev_frame_aead_enqueue (vlib_main_t *vm,
			      vnet_crypto_async_frame_t *frame,
			      cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  struct rte_cryptodev_sym_session *sess = 0;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  u32 last_key_index = ~0;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_MAX_INFLIGHT - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (
	rte_mempool_get_bulk (cet->cop_pool, (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
	  CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
	  clib_prefetch_load (&fe[1]);
	  clib_prefetch_load (&fe[2]);
	}
      if (last_key_index != fe->key_index)
	{
	  cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

	  last_key_index = fe->key_index;
	  if (key->keys[vm->numa_node][op_type] == 0)
	    {
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  return -1;
		}
	    }
	  else if (PREDICT_FALSE (
		     key->keys[vm->numa_node][op_type]->opaque_data !=
		     aad_len))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
				      fe->key_index, aad_len);
	      if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
							   aad_len) < 0))
		{
		  cryptodev_mark_frame_err_status (
		    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
		  return -1;
		}
	    }
	  sess = key->keys[vm->numa_node][op_type];
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
      sop->m_dst = 0;
      /* mbuf prepend happens in the tx, but vlib_buffer happens in the nodes,
       * so we have to manually adjust mbuf data_off here so cryptodev can
       * correctly compute the data pointer. The prepend here will be later
       * rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
	  crypto_offset = 0;
	}

      sop->session = sess;
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr =
	cryptodev_get_iova (pm, cmt->iova_mode, fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* for input nodes that are not dpdk-input, it is possible the mbuf
	 * was updated before as one of the chained mbufs. Setting nb_segs
	 * to 1 here prevents the cryptodev PMD from accessing potentially
	 * invalid m_src->next pointers.
	 */
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
					   (struct rte_crypto_op **) cet->cops,
					   frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
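
/* Dequeue one frame's worth of ops from the completion ring. The first op
 * is only peeked (dequeue started, then finished with zero objects) to read
 * the frame size; the ring is drained only once the entire frame is
 * present, so a frame is always handed back whole. */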
static_always_inline u16
cryptodev_ring_deq (struct rte_ring *r, cryptodev_op_t **cops)
{
  u16 n, n_elts = 0;

  n = rte_ring_dequeue_bulk_start (r, (void **) cops, 1, 0);
  rte_ring_dequeue_finish (r, 0);
  if (!n)
    return 0;

  n = cops[0]->n_elts;
  if (rte_ring_count (r) < n)
    return 0;

  n_elts = rte_ring_sc_dequeue_bulk (r, (void **) cops, n, 0);
  ASSERT (n_elts == n);

  return n_elts;
}
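
/* Async dequeue handler: first drain whatever the PMD has completed into
 * the per-thread ring, then, if a full frame is available, convert per-op
 * statuses, recycle the ops and return the frame to the crypto framework. */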
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */

  if (cet->inflight)
    {
      n_elts = rte_cryptodev_dequeue_burst (
	cet->cryptodev_id, cet->cryptodev_q,
	(struct rte_crypto_op **) cet->cops, VNET_CRYPTO_FRAME_SIZE);

      if (n_elts)
	{
	  cet->inflight -= n_elts;
	  n_completed_ops += n_elts;

	  rte_ring_sp_enqueue_burst (cet->ring, (void **) cet->cops, n_elts,
				     NULL);
	}
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  n_elts = cryptodev_ring_deq (cet->ring, cop);
  if (!n_elts)
    return 0;

  frame = cop[0]->frame;
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
		   VNET_CRYPTO_FRAME_STATE_SUCCESS :
		   VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (cet->cop_pool, (void **) cet->cops, frame->n_elts);
  *nb_elts_processed = frame->n_elts;
  *enqueue_thread_idx = frame->enqueue_thread_index;

  return frame;
}
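
/* Thin wrappers that bind the op type and AAD length at compile time, so
 * each vnet crypto op id gets a specialized enqueue handler. */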
static_always_inline int
cryptodev_enqueue_aead_aad_8_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_enc (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
				       12);
}

static_always_inline int
cryptodev_enqueue_aead_aad_8_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       8);
}

static_always_inline int
cryptodev_enqueue_aead_aad_12_dec (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
				       12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
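
/* Create per-thread op pools and completion rings, then walk the supported
 * algorithm lists and register an enqueue handler for every AEAD and
 * cipher+HMAC combination the attached cryptodevs actually support. */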
clib_error_t *
cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u8 *name;
  clib_error_t *error = 0;

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_index = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_index)->numa_node;
      name = format (0, "vpp_cop_pool_%u_%u", numa, thread_index);
      cet->cop_pool = rte_mempool_create (
	(char *) name, CRYPTODEV_NB_CRYPTO_OPS, sizeof (cryptodev_op_t), 0,
	sizeof (struct rte_crypto_op_pool_private), NULL, NULL, crypto_op_init,
	NULL, vm->numa_node, 0);
      if (!cet->cop_pool)
	{
	  error = clib_error_return (
	    0, "Failed to create cryptodev op pool %s", name);
	  goto error_exit;
	}
      vec_free (name);

      name = format (0, "frames_ring_%u_%u", numa, thread_index);
      cet->ring =
	rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS, vm->numa_node,
			 RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (!cet->ring)
	{
	  error = clib_error_return (
	    0, "Failed to create cryptodev frame ring %s", name);
	  goto error_exit;
	}
      vec_free (name);

      vec_validate (cet->cops, VNET_CRYPTO_FRAME_SIZE - 1);
    }

  /* register AEAD handlers for each supported tag/AAD size combination */
#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
	cryptodev_enqueue_aead_aad_##f##_enc);                                \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
	cryptodev_enqueue_aead_aad_##f##_dec);                                \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

  /* register linked cipher + HMAC handlers when both halves are supported */
#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
	cryptodev_enqueue_linked_alg_enc);                                    \
      vnet_crypto_register_enqueue_handler (                                  \
	vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
	cryptodev_enqueue_linked_alg_dec);                                    \
    }
  foreach_cryptodev_link_async_alg
#undef _

  vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_frame_dequeue);

  return 0;

error_exit:
  vec_foreach (cet, cmt->per_thread_data)
    {
      if (cet->ring)
	rte_ring_free (cet->ring);
      if (cet->cop_pool)
	rte_mempool_free (cet->cop_pool);
    }

  return error;
}