dpdk-cryptodev: fix coverity issues
[vpp.git] / src/plugins/dpdk/cryptodev/cryptodev.h
/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 - 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#ifndef included_cryptodev_h
#define included_cryptodev_h

#include <vnet/crypto/crypto.h>
#undef always_inline
#include <rte_cryptodev.h>

#define CRYPTODEV_NB_CRYPTO_OPS    1024
#define CRYPTODEV_CACHE_QUEUE_SIZE VNET_CRYPTO_FRAME_POOL_SIZE
#define CRYPTODEV_CACHE_QUEUE_MASK (VNET_CRYPTO_FRAME_POOL_SIZE - 1)
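/* ring indices are wrapped by ANDing with the mask above, so the cache
 * queue size must be a power of two */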
#define CRYPTODEV_MAX_INFLIGHT     (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK         (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODE_ENQ_MAX           64
#define CRYPTODE_DEQ_MAX           64
#define CRYPTODEV_NB_SESSION       4096
#define CRYPTODEV_MAX_IV_SIZE      16
#define CRYPTODEV_MAX_AAD_SIZE     16
#define CRYPTODEV_MAX_N_SGL        8 /**< maximum number of segments */

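/* offsets of the per-op IV and AAD scratch areas within cryptodev_op_t
 * (defined below) */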
#define CRYPTODEV_IV_OFFSET  (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN, KEY_LEN
 */
#define foreach_vnet_aead_crypto_conversion                                   \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8, 16)                               \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 12, 16)                              \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 8, 24)                               \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 12, 24)                              \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 8, 32)                               \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 12, 32)                              \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 0, 32)               \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 8, 32)               \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 12, 32)

/**
 * crypto (alg, cryptodev_alg, key_size), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg                                      \
  _ (AES_128_CBC, AES_CBC, 16, MD5, 12)                                       \
  _ (AES_192_CBC, AES_CBC, 24, MD5, 12)                                       \
  _ (AES_256_CBC, AES_CBC, 32, MD5, 12)                                       \
  _ (AES_128_CBC, AES_CBC, 16, SHA1, 12)                                      \
  _ (AES_192_CBC, AES_CBC, 24, SHA1, 12)                                      \
  _ (AES_256_CBC, AES_CBC, 32, SHA1, 12)                                      \
  _ (AES_128_CBC, AES_CBC, 16, SHA224, 14)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA224, 14)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA224, 14)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA256, 16)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA256, 16)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA256, 16)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA384, 24)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA384, 24)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA384, 24)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA512, 32)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA512, 32)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA512, 32)                                    \
  _ (AES_128_CTR, AES_CTR, 16, SHA1, 12)                                      \
  _ (AES_192_CTR, AES_CTR, 24, SHA1, 12)                                      \
  _ (AES_256_CTR, AES_CTR, 32, SHA1, 12)

typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;

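/* DPDK 22.11 made the symmetric session structure opaque; sessions are
 * handled through void pointers from then on */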
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
typedef void cryptodev_session_t;
#else
typedef struct rte_cryptodev_sym_session cryptodev_session_t;
#endif

/* Cryptodev session data, one session per direction per numa node */
typedef struct
{
  cryptodev_session_t ***keys;
} cryptodev_key_t;

/* Replicates the DPDK rte_cryptodev_sym_capability structure, with the
 * supported size ranges expanded into vpp vectors */
typedef struct
{
  enum rte_crypto_sym_xform_type xform_type;
  union
  {
    struct
    {
      enum rte_crypto_auth_algorithm algo; /* auth algo */
      u32 *digest_sizes;                   /* vector of auth digest sizes */
    } auth;
    struct
    {
      enum rte_crypto_cipher_algorithm algo; /* cipher algo */
      u32 *key_sizes;                        /* vector of cipher key sizes */
    } cipher;
    struct
    {
      enum rte_crypto_aead_algorithm algo; /* aead algo */
      u32 *key_sizes;                      /* vector of aead key sizes */
      u32 *aad_sizes;                      /* vector of aad sizes */
      u32 *digest_sizes;                   /* vector of aead digest sizes */
    } aead;
  };
} cryptodev_capability_t;

/* Cryptodev instance data */
typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;

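/* DPDK session mempools; before DPDK 22.11 a separate pool holds the
 * session private data */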
typedef struct
{
  struct rte_mempool *sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
#endif
} cryptodev_session_pool_t;

typedef struct
{
  cryptodev_session_pool_t *sess_pools;
} cryptodev_numa_data_t;

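/* Crypto op as submitted to the device: the DPDK op and symmetric op are
 * followed by per-op IV and AAD scratch space, a back-pointer to the owning
 * VPP async frame and that frame's element count */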
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[CRYPTODEV_MAX_IV_SIZE];
  u8 aad[CRYPTODEV_MAX_AAD_SIZE];
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;

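/* Per-frame bookkeeping in the frame cache ring: enqueue/dequeue progress
 * within the frame, op type, AAD length and a per-element error bitmask */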
typedef struct
{
  vnet_crypto_async_frame_t *f;
  union
  {
    struct
    {
      /* index of frame elt where enqueue to
       * the crypto engine is happening */
      u8 enq_elts_head;
      /* index of the frame elt where dequeue
       * from the crypto engine is happening */
      u8 deq_elts_tail;
      u8 elts_inflight;

      u8 op_type;
      u8 aad_len;
      u8 n_elts;
      u16 reserved;
    };
    u64 raw;
  };

  u64 frame_elts_errs_mask;
} cryptodev_cache_ring_elt_t;

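/* Per-thread ring of cached frames. Frames are pushed at head, handed to
 * the crypto device at enq_head, collected back at deq_tail and popped at
 * tail once fully processed, so in ring order
 * tail <= deq_tail <= enq_head <= head */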
typedef struct
{
  cryptodev_cache_ring_elt_t frames[VNET_CRYPTO_FRAME_POOL_SIZE];

  union
  {
    struct
    {
      /* head of the cache ring */
      u16 head;
      /* tail of the cache ring */
      u16 tail;
      /* index of the frame where enqueue
       * to the crypto engine is happening */
      u16 enq_head;
      /* index of the frame where dequeue
       * from the crypto engine is happening */
      u16 deq_tail;
    };
    u64 raw;
  };
} cryptodev_cache_ring_t;

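/* Per-thread engine context: buffer scratch array, either a crypto-op
 * mempool (op data path) or a raw data path context, the frame cache ring
 * and the cryptodev queue pair owned by this thread */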
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE];
  union
  {
    struct rte_mempool *cop_pool;
    struct
    {
      struct rte_crypto_raw_dp_ctx *ctx;
      u16 aad_index;
      u8 *aad_buf;
      u64 aad_phy_addr;
      cryptodev_session_t *reset_sess;
    };
  };

  cryptodev_cache_ring_t cache_ring;
  u16 cryptodev_id;
  u16 cryptodev_q;
  u16 inflight;
} cryptodev_engine_thread_t;

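/* Global cryptodev engine state */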
typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
  cryptodev_capability_t *supported_caps;
  u32 sess_sz;
  u32 drivers_cnt;
  u8 is_raw_api;
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  u8 driver_id;
#endif
} cryptodev_main_t;

extern cryptodev_main_t cryptodev_main;

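/* accessors for a cached frame's ring element; the index is wrapped with
 * the cache queue mask */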
#define CRYPTODEV_CACHE_RING_GET_FRAME(r, i)                                  \
  ((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].f)

#define CRYPTODEV_CACHE_RING_GET_ERR_MASK(r, i)                               \
  ((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].frame_elts_errs_mask)

#define CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT(r, i)                    \
  (((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].enq_elts_head) -            \
   ((r)->frames[(i) & CRYPTODEV_CACHE_QUEUE_MASK].deq_elts_tail))

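/* move the ring's enq_head past frame f once all of its elements have been
 * enqueued to the crypto device */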
static_always_inline void
cryptodev_cache_ring_update_enq_head (cryptodev_cache_ring_t *r,
                                      vnet_crypto_async_frame_t *f)
{
  if (r->frames[r->enq_head].enq_elts_head == f->n_elts)
    {
      r->enq_head++;
      r->enq_head &= CRYPTODEV_CACHE_QUEUE_MASK;
      f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
    }
}

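/* move *deq past the current frame once all of its elements have been
 * dequeued from the crypto device; returns 1 when the index was advanced */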
static_always_inline bool
cryptodev_cache_ring_update_deq_tail (cryptodev_cache_ring_t *r,
                                      u16 *const deq)
{
  if (r->frames[*deq].deq_elts_tail == r->frames[*deq].n_elts)
    {
      *deq += 1;
      *deq &= CRYPTODEV_CACHE_QUEUE_MASK;
      return 1;
    }

  return 0;
}
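
/* mark n consecutive elements of frame f, starting at index, with error
 * status op_s and fold them into the returned error bitmask */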
static_always_inline u64
cryptodev_mark_frame_fill_err (vnet_crypto_async_frame_t *f, u64 current_err,
                               u16 index, u16 n, vnet_crypto_op_status_t op_s)
{
  u64 err = current_err;
  u16 i;

  ERROR_ASSERT (index + n <= VNET_CRYPTO_FRAME_SIZE);
  ERROR_ASSERT (op_s != VNET_CRYPTO_OP_STATUS_COMPLETED);

  for (i = index; i < (index + n); i++)
    f->elts[i].status = op_s;

  err |= (~(~(0ull) << n) << index);

  return err;
}

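/* cache a new frame at the ring head and advance the head index */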
static_always_inline cryptodev_cache_ring_elt_t *
cryptodev_cache_ring_push (cryptodev_cache_ring_t *r,
                           vnet_crypto_async_frame_t *f)
{
  u16 head = r->head;
  cryptodev_cache_ring_elt_t *ring_elt = &r->frames[head];
  /**
   * in debug mode we do the ring sanity test when a frame is enqueued to
   * the ring.
   **/
#if CLIB_DEBUG > 0
  u16 tail = r->tail;
  u16 n_cached = (head >= tail) ? (head - tail) :
                                  (CRYPTODEV_CACHE_QUEUE_MASK - tail + head);
  ERROR_ASSERT (n_cached < VNET_CRYPTO_FRAME_POOL_SIZE);
  ERROR_ASSERT (r->raw == 0 && r->frames[head].raw == 0 &&
                r->frames[head].f == 0);
#endif
  ring_elt->f = f;
  ring_elt->n_elts = f->n_elts;
  /* update head */
  r->head++;
  r->head &= CRYPTODEV_CACHE_QUEUE_MASK;
  return ring_elt;
}

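/* pop the fully processed frame at the ring tail: derive its state from the
 * accumulated per-element error mask, clear the ring element and advance
 * the tail */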
static_always_inline vnet_crypto_async_frame_t *
cryptodev_cache_ring_pop (cryptodev_cache_ring_t *r)
{
  vnet_crypto_async_frame_t *f;
  u16 tail = r->tail;
  cryptodev_cache_ring_elt_t *ring_elt = &r->frames[tail];

  ERROR_ASSERT (r->frames[r->head].raw == 0 ? r->head != tail : 1);
  ERROR_ASSERT (r->frames[tail].raw != 0);
  ERROR_ASSERT (ring_elt->deq_elts_tail == ring_elt->enq_elts_head &&
                ring_elt->deq_elts_tail == ring_elt->n_elts);

  f = CRYPTODEV_CACHE_RING_GET_FRAME (r, tail);
  f->state = CRYPTODEV_CACHE_RING_GET_ERR_MASK (r, r->tail) == 0 ?
               VNET_CRYPTO_FRAME_STATE_SUCCESS :
               VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  clib_memset (ring_elt, 0, sizeof (*ring_elt));
  r->tail++;
  r->tail &= CRYPTODEV_CACHE_QUEUE_MASK;

  return f;
}

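/*
 * Illustrative sketch only, not part of the engine API: the intended life
 * cycle of one frame through the cache ring, assuming it is the only frame
 * cached by the thread. The function name is hypothetical; in the real
 * engine the enqueue and dequeue halves run in separate node handlers and
 * advance the per-element counters one DPDK burst at a time.
 */
static_always_inline void
cryptodev_cache_ring_example_flow (cryptodev_engine_thread_t *cet,
                                   vnet_crypto_async_frame_t *f)
{
  cryptodev_cache_ring_t *ring = &cet->cache_ring;
  cryptodev_cache_ring_elt_t *ring_elt;

  /* 1. a newly received frame is cached at the ring head */
  ring_elt = cryptodev_cache_ring_push (ring, f);

  /* 2. its elements are handed to the device; enq_elts_head tracks how far
   * the enqueue has progressed */
  while (ring_elt->enq_elts_head < ring_elt->n_elts)
    ring_elt->enq_elts_head++; /* one crypto op enqueued per element */

  /* 3. once every element is enqueued, the frame-level enq_head advances */
  cryptodev_cache_ring_update_enq_head (ring, f);

  /* 4. completions are collected; deq_elts_tail tracks dequeue progress */
  while (ring_elt->deq_elts_tail < ring_elt->n_elts)
    ring_elt->deq_elts_tail++; /* one crypto op dequeued per element */

  /* 5. the frame-level deq_tail advances once the frame is fully dequeued,
   * and the completed frame can be popped from the ring tail */
  if (cryptodev_cache_ring_update_deq_tail (ring, &ring->deq_tail))
    (void) cryptodev_cache_ring_pop (ring);
}
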
int cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
                              u32 aad_len);

void cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
                             vnet_crypto_key_index_t idx, u32 aad_len);

int cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
                                 u32 key_size, u32 digest_size, u32 aad_size);

clib_error_t *cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx);

clib_error_t *__clib_weak cryptodev_register_raw_hdl (vlib_main_t *vm,
                                                      u32 eidx);

clib_error_t *__clib_weak dpdk_cryptodev_init (vlib_main_t *vm);

#endif