/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 - 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#ifndef included_cryptodev_h
#define included_cryptodev_h

#include <vnet/crypto/crypto.h>

#include <rte_cryptodev.h>

#define CRYPTODEV_NB_CRYPTO_OPS	   1024
#define CRYPTODEV_CACHE_QUEUE_SIZE VNET_CRYPTO_FRAME_POOL_SIZE
#define CRYPTODEV_CACHE_QUEUE_MASK (VNET_CRYPTO_FRAME_POOL_SIZE - 1)
#define CRYPTODEV_MAX_INFLIGHT	   (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK	   (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODE_ENQ_MAX	   64
#define CRYPTODE_DEQ_MAX	   64
#define CRYPTODEV_NB_SESSION	   4096
#define CRYPTODEV_MAX_IV_SIZE	   16
#define CRYPTODEV_MAX_AAD_SIZE	   16
#define CRYPTODEV_MAX_N_SGL	   8 /**< maximum number of segments */
#define CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE 8

#define CRYPTODEV_IV_OFFSET  (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

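/*
 * The two offsets above locate the per-op iv[] and aad[] copies inside
 * cryptodev_op_t (defined below). DPDK expects IV and AAD locations as
 * offsets from the start of the rte_crypto_op, which is laid out first in
 * cryptodev_op_t, so these values can be handed to the device directly.
 */
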
/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN, KEY_LEN
 */
#define foreach_vnet_aead_crypto_conversion                                   \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8, 16)                               \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 12, 16)                              \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 8, 24)                               \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 12, 24)                              \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 8, 32)                               \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 12, 32)                              \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 0, 32)               \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 8, 32)               \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 12, 32)

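/*
 * A minimal sketch of how an X-macro list like the one above is typically
 * instantiated (the linked-algorithm list below is consumed the same way).
 * The struct and table names here are hypothetical, for illustration only.
 */
#if 0
typedef struct
{
  u32 iv_len, tag_len, aad_len, key_len;
} hypothetical_aead_variant_t;

static const hypothetical_aead_variant_t hypothetical_aead_variants[] = {
#define _(n, t, a, iv, tag, aad, key) { iv, tag, aad, key },
  foreach_vnet_aead_crypto_conversion
#undef _
};
#endif
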
/**
 * crypto (alg, cryptodev_alg, key_size), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg                                      \
  _ (AES_128_CBC, AES_CBC, 16, MD5, 12)                                       \
  _ (AES_192_CBC, AES_CBC, 24, MD5, 12)                                       \
  _ (AES_256_CBC, AES_CBC, 32, MD5, 12)                                       \
  _ (AES_128_CBC, AES_CBC, 16, SHA1, 12)                                      \
  _ (AES_192_CBC, AES_CBC, 24, SHA1, 12)                                      \
  _ (AES_256_CBC, AES_CBC, 32, SHA1, 12)                                      \
  _ (AES_128_CBC, AES_CBC, 16, SHA224, 14)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA224, 14)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA224, 14)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA256, 16)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA256, 16)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA256, 16)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA384, 24)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA384, 24)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA384, 24)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA512, 32)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA512, 32)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA512, 32)                                    \
  _ (AES_128_CTR, AES_CTR, 16, SHA1, 12)                                      \
  _ (AES_192_CTR, AES_CTR, 24, SHA1, 12)                                      \
  _ (AES_256_CTR, AES_CTR, 32, SHA1, 12)

typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;

#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
typedef void cryptodev_session_t;
#else
typedef struct rte_cryptodev_sym_session cryptodev_session_t;
#endif

/* Cryptodev session data, one data per direction per numa */
typedef struct
{
  cryptodev_session_t ***keys;
} cryptodev_key_t;

/* Mirror of the DPDK rte_cryptodev_sym_capability structure, with the
 * key/digest/aad size ranges expanded into vpp vectors */
typedef struct
{
  enum rte_crypto_sym_xform_type xform_type;
  union
  {
    struct
    {
      enum rte_crypto_auth_algorithm algo; /* auth algo */
      u32 *digest_sizes;		   /* vector of auth digest sizes */
    } auth;
    struct
    {
      enum rte_crypto_cipher_algorithm algo; /* cipher algo */
      u32 *key_sizes;			     /* vector of cipher key sizes */
    } cipher;
    struct
    {
      enum rte_crypto_aead_algorithm algo; /* aead algo */
      u32 *key_sizes;			   /* vector of aead key sizes */
      u32 *aad_sizes;			   /* vector of aad sizes */
      u32 *digest_sizes;		   /* vector of aead digest sizes */
    } aead;
  };
} cryptodev_capability_t;

/* Cryptodev instance data */
typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;

typedef struct
{
  struct rte_mempool *sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
#endif
} cryptodev_session_pool_t;

typedef struct
{
  cryptodev_session_pool_t *sess_pools;
} cryptodev_numa_data_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[CRYPTODEV_MAX_IV_SIZE];
  u8 aad[CRYPTODEV_MAX_AAD_SIZE];
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;

typedef struct
{
  vnet_crypto_async_frame_t *f;

  union
  {
    struct
    {
      /* index of the frame elt where enqueue to
       * the crypto engine is happening */
      u8 enq_elts_head;
      /* index of the frame elt where dequeue
       * from the crypto engine is happening */
      u8 deq_elts_tail;

      u8 elts_inflight;

      u8 op_type;
      u8 aad_len;
      u8 n_elts;
      u16 reserved;
    };
    u64 raw;
  };

  u64 frame_elts_errs_mask;
} cryptodev_cache_ring_elt_t;

typedef struct
{
  cryptodev_cache_ring_elt_t frames[VNET_CRYPTO_FRAME_POOL_SIZE];

  union
  {
    struct
    {
      /* head of the cache ring */
      u16 head;
      /* tail of the cache ring */
      u16 tail;
      /* index of the frame where enqueue
       * to the crypto engine is happening */
      u16 enq_head;
      /* index of the frame where dequeue
       * from the crypto engine is happening */
      u16 deq_tail;
    };
    u64 raw;
  };
} cryptodev_cache_ring_t;

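/*
 * How the four indices relate (inferred from the helpers below, noted here
 * for orientation): a frame is pushed at head, its elements are submitted to
 * the device as enq_head catches up, collected back as deq_tail catches up,
 * and the completed frame is finally popped at tail:
 *
 *   tail <= deq_tail <= enq_head <= head   (modulo ring wrap)
 */
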
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE];
  union
  {
    struct rte_mempool *cop_pool;
    struct
    {
      struct rte_crypto_raw_dp_ctx *ctx;
      u16 aad_index;
      u8 *aad_buf;
      u64 aad_phy_addr;
      cryptodev_session_t *reset_sess;
    };
  };

  cryptodev_cache_ring_t cache_ring;
  u16 cryptodev_id;
  u16 cryptodev_q;
  u16 inflight;
} cryptodev_engine_thread_t;

typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
  cryptodev_capability_t *supported_caps;
  u32 sess_sz;
  u32 drivers_cnt;
  u8 is_raw_api;
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  u8 driver_id;
#endif
} cryptodev_main_t;

extern cryptodev_main_t cryptodev_main;

#define CRYPTODEV_CACHE_RING_GET_FRAME(r, i)                                  \
  ((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].f)

#define CRYPTODEV_CACHE_RING_GET_ERR_MASK(r, i)                               \
  ((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].frame_elts_errs_mask)

#define CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT(r, i)                    \
  (((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].enq_elts_head) -            \
   ((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].deq_elts_tail))

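/* once every element of the frame at r->enq_head has been submitted to the
 * device, step r->enq_head past it; the frame's final state is assigned
 * later, by cryptodev_cache_ring_pop () */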
static_always_inline void
cryptodev_cache_ring_update_enq_head (cryptodev_cache_ring_t *r,
				      vnet_crypto_async_frame_t *f)
{
  if (r->frames[r->enq_head].enq_elts_head == f->n_elts)
    {
      r->enq_head++;
      r->enq_head &= CRYPTODEV_CACHE_QUEUE_MASK;
      f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
    }
}

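/* step *deq past the frame at that slot once all of its elements have been
 * dequeued from the device; returns 1 if the index advanced */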
static_always_inline bool
cryptodev_cache_ring_update_deq_tail (cryptodev_cache_ring_t *r, u16 *deq)
{
  if (r->frames[*deq].deq_elts_tail == r->frames[*deq].n_elts)
    {
      *deq += 1;
      *deq &= CRYPTODEV_CACHE_QUEUE_MASK;
      return 1;
    }

  return 0;
}

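/* set elements [index, index + n) of frame f to status op_s and fold their
 * positions into the frame's element-error bitmask */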
static_always_inline u64
cryptodev_mark_frame_fill_err (vnet_crypto_async_frame_t *f, u64 current_err,
			       u16 index, u16 n, vnet_crypto_op_status_t op_s)
{
  u64 err = current_err;
  u16 i;

  ERROR_ASSERT (index + n <= VNET_CRYPTO_FRAME_SIZE);
  ERROR_ASSERT (op_s != VNET_CRYPTO_OP_STATUS_COMPLETED);

  for (i = index; i < (index + n); i++)
    f->elts[i].status = op_s;
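
  /* set bits [index, index + n) of the error mask: ~(~0ull << n) yields the
   * n lowest bits, shifted up to the elements' position in the frame */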
  err |= (~(~(0ull) << n) << index);

  return err;
}

static_always_inline cryptodev_cache_ring_elt_t *
cryptodev_cache_ring_push (cryptodev_cache_ring_t *r,
			   vnet_crypto_async_frame_t *f)
{
  u16 head = r->head;
  u16 tail = r->tail;
  cryptodev_cache_ring_elt_t *ring_elt = &r->frames[head];

  /**
   * in debug mode we do the ring sanity test when a frame is enqueued to
   * the ring
   **/
#if CLIB_DEBUG > 0
  u16 n_cached = (head >= tail) ? (head - tail) :
				  (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);
  ERROR_ASSERT (n_cached < CRYPTODEV_CACHE_QUEUE_SIZE);
  ERROR_ASSERT (r->raw == 0 && r->frames[head].raw == 0 &&
		r->frames[head].f == 0);
#endif
  /* the ring capacity is CRYPTODEV_CACHE_QUEUE_SIZE - 1 */
  if (PREDICT_FALSE (((head + 1) & CRYPTODEV_CACHE_QUEUE_MASK) == tail))
    return 0;

  ring_elt->f = f;
  ring_elt->n_elts = f->n_elts;
  r->head++;
  r->head &= CRYPTODEV_CACHE_QUEUE_MASK;
  return ring_elt;
}

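/* remove and return the frame at r->tail; only legal once every element of
 * that frame has been both enqueued and dequeued */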
static_always_inline vnet_crypto_async_frame_t *
cryptodev_cache_ring_pop (cryptodev_cache_ring_t *r)
{
  vnet_crypto_async_frame_t *f;
  u16 tail = r->tail;
  cryptodev_cache_ring_elt_t *ring_elt = &r->frames[tail];

  ERROR_ASSERT (r->frames[r->head].raw == 0 ? r->head != tail : 1);
  ERROR_ASSERT (r->frames[tail].raw != 0);
  ERROR_ASSERT (ring_elt->deq_elts_tail == ring_elt->enq_elts_head &&
		ring_elt->deq_elts_tail == ring_elt->n_elts);

  f = CRYPTODEV_CACHE_RING_GET_FRAME (r, tail);
  f->state = CRYPTODEV_CACHE_RING_GET_ERR_MASK (r, tail) == 0 ?
	       VNET_CRYPTO_FRAME_STATE_SUCCESS :
	       VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  clib_memset (ring_elt, 0, sizeof (*ring_elt));
  r->tail++;
  r->tail &= CRYPTODEV_CACHE_QUEUE_MASK;

  return f;
}

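/*
 * A minimal usage sketch of the cache ring helpers above, assuming a caller
 * that submits one frame and later collects it. The function name and the
 * elided device-facing steps are hypothetical, for illustration only.
 */
#if 0
static vnet_crypto_async_frame_t *
hypothetical_submit_and_collect (cryptodev_engine_thread_t *cet,
				 vnet_crypto_async_frame_t *frame)
{
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  cryptodev_cache_ring_elt_t *elt;

  /* reserve the slot at ring->head; 0 means the ring is full */
  elt = cryptodev_cache_ring_push (ring, frame);
  if (elt == 0)
    return 0;

  /* ... submit frame->elts to the device, bumping elt->enq_elts_head ... */
  cryptodev_cache_ring_update_enq_head (ring, frame);

  /* ... poll the device, bumping deq_elts_tail of the frame at
   * ring->deq_tail, then step past it once it is fully dequeued ... */
  cryptodev_cache_ring_update_deq_tail (ring, &ring->deq_tail);

  /* pop is only legal once every element of the tail frame is back */
  elt = &ring->frames[ring->tail];
  if (elt->f != 0 && elt->deq_elts_tail == elt->n_elts)
    return cryptodev_cache_ring_pop (ring);

  return 0;
}
#endif
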
int cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			      u32 aad_len);

void cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			     vnet_crypto_key_index_t idx, u32 aad_len);

int cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
				 u32 key_size, u32 digest_size, u32 aad_size);

clib_error_t *cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx);

clib_error_t *__clib_weak cryptodev_register_raw_hdl (vlib_main_t *vm,
						      u32 eidx);

clib_error_t *__clib_weak dpdk_cryptodev_init (vlib_main_t *vm);

#endif