dpdk-cryptodev: introduce sw_ring to the crypto op data path
[vpp.git] / src / plugins / dpdk / cryptodev / cryptodev_op_data_path.c

/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 - 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_ring_peek_zc.h>
#include <rte_config.h>

#include "cryptodev.h"

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif

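/* Byte offset of the per-op AAD scratch area inside cryptodev_op_t; added to
 * the op's IOVA to give the device a physical address for the copied AAD. */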
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))

#define foreach_vnet_crypto_status_conversion                                 \
  _ (SUCCESS, COMPLETED)                                                      \
  _ (NOT_PROCESSED, WORK_IN_PROGRESS)                                         \
  _ (AUTH_FAILED, FAIL_BAD_HMAC)                                              \
  _ (INVALID_SESSION, FAIL_ENGINE_ERR)                                        \
  _ (INVALID_ARGS, FAIL_ENGINE_ERR)                                           \
  _ (ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};

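/* Translate a virtual address into an IO address for the device. In VA IOVA
 * mode the pointer is used as-is; otherwise the physmem page lookup table
 * supplies the per-page translation offset. */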
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
                    void *data)
{
  u64 index;
  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}

static_always_inline void
cryptodev_validate_mbuf (struct rte_mbuf *mb, vlib_buffer_t *b)
{
  /* On the vnet side, vlib_buffer current_length is updated by the cipher
   * padding and the ICV. The mbuf needs to be kept in sync with these
   * changes. */
  u16 data_len = b->current_length +
                 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  /* For input nodes other than dpdk-input, it is possible the mbuf was
   * previously updated as one of the chained mbufs. Set nb_segs to 1 here
   * to prevent the cryptodev PMD from accessing potentially invalid
   * m_src->next pointers.
   */
  mb->nb_segs = 1;
  mb->pkt_len = mb->data_len = data_len;
}

static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
                               vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
  /* When the input node is not dpdk-input, the mbuf data length is not
   * initialized. For a single buffer this is not a problem since the data
   * length is written into the cryptodev operation. For a chained buffer a
   * reference data length has to be computed from the vlib_buffer.
   *
   * Even when the input node is dpdk-input, it is possible that chained
   * vlib_buffers were updated (a buffer added or removed) but the mbuf
   * fields were not. We have to re-link every mbuf in the chain.
   */
  u16 data_len = b->current_length +
                 (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
        rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
        mb->pool =
          dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}

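/* Mempool object constructor: pre-set the fields of every rte_crypto_op that
 * never change (session type, op type, IOVA, owning mempool) once at pool
 * creation time, so the data path does not have to. */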
static_always_inline void
crypto_op_init (struct rte_mempool *mempool,
                void *_arg __attribute__ ((unused)), void *_obj,
                unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}

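/* Enqueue handler for linked (cipher + auth) algorithms. The frame is only
 * recorded on the per-thread software ring here; the actual crypto ops are
 * built and pushed to the device later, from the dequeue handler, as device
 * queue capacity allows (see cryptodev_enqueue_frame ()). */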
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
                                     vnet_crypto_async_frame_t *frame,
                                     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];

  cet->frames_on_ring++;
  ring_elt->f = frame;
  ring_elt->n_elts = frame->n_elts;
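  /* aad_len == 1 is used as a marker for linked-algorithm frames; AEAD
   * frames store their real AAD length here instead. */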
  ring_elt->aad_len = 1;
  ring_elt->op_type = (u8) op_type;
  ring->head++;
  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  return 0;
}

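/* Build and enqueue up to CRYPTODE_ENQ_MAX crypto ops for the frame sitting
 * at the ring's enq slot. Per-frame enqueued and inflight counters on the
 * ring allow a large frame to be pushed to the device across several calls. */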
static_always_inline void
cryptodev_frame_linked_algs_enqueue_internal (vlib_main_t *vm,
                                              vnet_crypto_async_frame_t *frame,
                                              cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t *cops[CRYPTODE_ENQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  u32 *bi = 0;
  u32 n_enqueue, n_elts;
  u32 last_key_index = ~0;
  u32 max_to_enq;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return;

  max_to_enq = clib_min (CRYPTODE_ENQ_MAX,
                         frame->n_elts - ring->frames[ring->enq].enqueued);

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return;

  n_elts = max_to_enq;

  if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                                       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return;
    }

  fe = frame->elts + ring->frames[ring->enq].enqueued;
  bi = frame->buffer_indices + ring->frames[ring->enq].enqueued;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
        {
          CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
          CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
          clib_prefetch_load (&fe[1]);
          clib_prefetch_load (&fe[2]);
        }
      if (last_key_index != fe->key_index)
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);
          last_key_index = fe->key_index;

          if (key->keys[vm->numa_node][op_type] == 0)
            {
              if (PREDICT_FALSE (
                    cryptodev_session_create (vm, last_key_index, 0) < 0))
                {
                  cryptodev_mark_frame_err_status (
                    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
                  goto error_exit;
                }
            }
          sess = key->keys[vm->numa_node][op_type];
        }

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
      sop->m_dst = 0;
      /* The mbuf prepend happens in tx, but the vlib_buffer adjustment
       * happens in the graph nodes, so we have to manually adjust the mbuf
       * data_off here so the cryptodev can correctly compute the data
       * pointer. The prepend here will later be rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
        {
          sop->m_src->data_off += fe->integ_start_offset;
          integ_offset = 0;
          crypto_offset = offset_diff;
        }
      sop->session = sess;
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr =
        cryptodev_get_iova (pm, cmt->iova_mode, fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
        cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      ring->frames[ring->enq].enqueued++;
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

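  /* All prepared ops are expected to be accepted by the device; the earlier
   * inflight check keeps the queue below CRYPTODEV_MAX_INFLIGHT. */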
  n_enqueue =
    rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_enq);
  ASSERT (n_enqueue == max_to_enq);
  cet->inflight += max_to_enq;
  ring->frames[ring->enq].frame_inflight += max_to_enq;
  if (ring->frames[ring->enq].enqueued == frame->n_elts)
    {
      cet->frame_ring.enq++;
      cet->frame_ring.enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
    }

  return;

error_exit:
  ring->enq++;
  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);
}

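/* Enqueue handler for AEAD algorithms: like the linked-algorithm variant it
 * only records the frame, its AAD length and the operation type on the
 * software ring; the device enqueue itself is deferred. */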
static_always_inline int
cryptodev_frame_aead_enqueue (vlib_main_t *vm,
                              vnet_crypto_async_frame_t *frame,
                              cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->head];
  cet->frames_on_ring++;
  ring_elt->f = frame;
  ring_elt->n_elts = frame->n_elts;
  ring_elt->aad_len = aad_len;
  ring_elt->op_type = (u8) op_type;
  ring->head++;
  ring->head &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);

  return 0;
}

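/* Build and enqueue up to CRYPTODE_ENQ_MAX AEAD crypto ops for the frame at
 * the ring's enq slot, creating (or re-creating) cryptodev sessions on demand
 * when the key or the AAD length changes. */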
static_always_inline int
cryptodev_aead_enqueue_internal (vlib_main_t *vm,
                                 vnet_crypto_async_frame_t *frame,
                                 cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_session_t *sess = 0;
  cryptodev_op_t *cops[CRYPTODE_ENQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  u32 *bi = 0;
  u32 n_enqueue = 0, n_elts;
  u32 last_key_index = ~0;
  u16 left_to_enq = frame->n_elts - ring->frames[ring->enq].enqueued;
  const u16 max_to_enq = clib_min (CRYPTODE_ENQ_MAX, left_to_enq);

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;

  if (cet->inflight + max_to_enq > CRYPTODEV_MAX_INFLIGHT)
    return -1;

  n_elts = max_to_enq;

  if (PREDICT_FALSE (
        rte_mempool_get_bulk (cet->cop_pool, (void **) cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
                                       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                                       VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
      return -1;
    }

  fe = frame->elts + ring->frames[ring->enq].enqueued;
  bi = frame->buffer_indices + ring->frames[ring->enq].enqueued;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
        {
          CLIB_PREFETCH (cop[1], sizeof (*cop[1]), STORE);
          CLIB_PREFETCH (cop[2], sizeof (*cop[2]), STORE);
          clib_prefetch_load (&fe[1]);
          clib_prefetch_load (&fe[2]);
        }
      if (last_key_index != fe->key_index)
        {
          cryptodev_key_t *key = vec_elt_at_index (cmt->keys, fe->key_index);

          last_key_index = fe->key_index;
          if (key->keys[vm->numa_node][op_type] == 0)
            {
              if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
                                                           aad_len) < 0))
                {
                  cryptodev_mark_frame_err_status (
                    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
                  goto error_exit;
                }
            }
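          /* A session already exists but was created for a different AAD
           * length (stored in the session opaque data); drop it and create
           * a new one matching this frame's AAD length. */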
          else if (PREDICT_FALSE (
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
                     rte_cryptodev_sym_session_opaque_data_get (
                       key->keys[vm->numa_node][op_type]) != (u64) aad_len
#else
                     key->keys[vm->numa_node][op_type]->opaque_data != aad_len
#endif
                     ))
            {
              cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_DEL,
                                      fe->key_index, aad_len);
              if (PREDICT_FALSE (cryptodev_session_create (vm, last_key_index,
                                                           aad_len) < 0))
                {
                  cryptodev_mark_frame_err_status (
                    frame, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR,
                    VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED);
                  goto error_exit;
                }
            }

          sess = key->keys[vm->numa_node][op_type];
        }

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;
      sop->m_dst = 0;
      /* The mbuf prepend happens in tx, but the vlib_buffer adjustment
       * happens in the graph nodes, so we have to manually adjust the mbuf
       * data_off here so the cryptodev can correctly compute the data
       * pointer. The prepend here will later be rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
        {
          rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
          crypto_offset = 0;
        }

      sop->session = sess;
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr =
        cryptodev_get_iova (pm, cmt->iova_mode, fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
        cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
        cryptodev_validate_mbuf (sop->m_src, b);

      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);

      ring->frames[ring->enq].enqueued++;
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue =
    rte_cryptodev_enqueue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_enq);
  ASSERT (n_enqueue == max_to_enq);
  cet->inflight += max_to_enq;
  ring->frames[ring->enq].frame_inflight += max_to_enq;
  if (ring->frames[ring->enq].enqueued == frame->n_elts)
    {
      ring->enq++;
      ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
      cet->enqueued_not_dequeueq++;
    }

  return 0;

error_exit:
  ring->enq++;
  ring->enq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, max_to_enq);

  return -1;
}

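/* Dequeue up to CRYPTODE_DEQ_MAX completed ops belonging to the frame at the
 * ring's deq slot, fold their statuses into the frame, and mark the frame
 * SUCCESS or ELT_ERROR once all of its elements have come back. Returns
 * non-zero when the caller should attempt another dequeue. */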
static_always_inline u8
cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
                                  u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *frame = NULL;
  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  u16 n_deq, idx, left_to_deq, i;
  u16 max_to_deq = 0;
  u16 inflight = cet->inflight;
  u8 dequeue_more = 0;
  cryptodev_op_t *cops[CRYPTODE_DEQ_MAX] = {};
  cryptodev_op_t **cop = cops;
  vnet_crypto_async_frame_elt_t *fe;
  u32 n_elts;
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */

  idx = ring->deq;

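  /* Find the first ring slot that still has ops inflight on the device;
   * that is the frame whose completions are dequeued below. */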
  for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++)
    {
      if (PREDICT_TRUE (ring->frames[idx].frame_inflight > 0))
        break;
      idx++;
      idx &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
    }

  ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE);
  ring->deq = idx;

  left_to_deq =
    ring->frames[ring->deq].f->n_elts - ring->frames[ring->deq].dequeued;
  max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX);

  /* The deq field tracks the frame currently being dequeued; it determines
   * how many of that frame's elements to request from the device. */

  n_deq =
    rte_cryptodev_dequeue_burst (cet->cryptodev_id, cet->cryptodev_q,
                                 (struct rte_crypto_op **) cops, max_to_deq);

  if (n_deq == 0)
    return dequeue_more;

  ss0 = ring->frames[ring->deq].deq_state;
  ss1 = ring->frames[ring->deq].deq_state;
  ss2 = ring->frames[ring->deq].deq_state;
  ss3 = ring->frames[ring->deq].deq_state;

  frame = ring->frames[ring->deq].f;
  fe = frame->elts + ring->frames[ring->deq].dequeued;

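  /* Fold every dequeued op's completion status into four accumulators
   * (4-way unrolled); the combined result decides whether the whole frame
   * completed successfully. */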
  n_elts = n_deq;
  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  ring->frames[ring->deq].deq_state |= (u8) (ss0 | ss1 | ss2 | ss3);

  rte_mempool_put_bulk (cet->cop_pool, (void **) cops, n_deq);

  inflight -= n_deq;
  ring->frames[ring->deq].dequeued += n_deq;
  ring->frames[ring->deq].frame_inflight -= n_deq;
  if (ring->frames[ring->deq].dequeued == ring->frames[ring->deq].n_elts)
    {
      frame->state =
        (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
                VNET_CRYPTO_FRAME_STATE_SUCCESS :
                VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

      *nb_elts_processed = frame->n_elts;
      *enqueue_thread_idx = frame->enqueue_thread_index;
      cet->deqeued_not_returned++;
      cet->enqueued_not_dequeueq--;
      ring->deq++;
      ring->deq &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
      dequeue_more = (max_to_deq < CRYPTODE_DEQ_MAX);
    }

  cet->inflight = inflight;
  return dequeue_more;
}

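/* Dispatch a ring element to the matching internal enqueue routine:
 * aad_len == 1 marks a linked-algorithm frame, any other value is the real
 * AAD length of an AEAD frame. */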
static_always_inline void
cryptodev_enqueue_frame (vlib_main_t *vm, cryptodev_async_ring_elt *ring_elt)
{
  cryptodev_op_type_t op_type = (cryptodev_op_type_t) ring_elt->op_type;
  u8 linked_or_aad_len = ring_elt->aad_len;

  if (linked_or_aad_len == 1)
    cryptodev_frame_linked_algs_enqueue_internal (vm, ring_elt->f, op_type);
  else
    cryptodev_aead_enqueue_internal (vm, ring_elt->f, op_type,
                                     linked_or_aad_len);
}

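/* Dequeue handler registered with the async crypto framework. It drains
 * completed ops from the device, pushes any pending enqueue work from the
 * software ring, and returns the frame at the ring tail once all of its
 * elements have been enqueued and dequeued. */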
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                         u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vnet_crypto_main_t *cm = &crypto_main;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;

  cryptodev_async_frame_sw_ring *ring = &cet->frame_ring;
  cryptodev_async_ring_elt *ring_elt = &ring->frames[ring->tail];
  vnet_crypto_async_frame_t *ret_frame = 0;
  u8 dequeue_more = 1;

  while (cet->inflight > 0 && dequeue_more)
    {
      dequeue_more = cryptodev_frame_dequeue_internal (vm, nb_elts_processed,
                                                       enqueue_thread_idx);
    }

  if (PREDICT_TRUE (ring->frames[ring->enq].f != 0))
    cryptodev_enqueue_frame (vm, &ring->frames[ring->enq]);

  if (PREDICT_TRUE (ring_elt->f != 0))
    {
      if ((ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_SUCCESS ||
           ring_elt->f->state == VNET_CRYPTO_FRAME_STATE_ELT_ERROR) &&
          ring_elt->enqueued == ring_elt->dequeued)
        {
          vlib_node_set_interrupt_pending (
            vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
          ret_frame = ring_elt->f;
          memset (ring_elt, 0, sizeof (*ring_elt));
          ring->tail += 1;
          ring->tail &= (VNET_CRYPTO_FRAME_POOL_SIZE - 1);
          cet->frames_on_ring--;
          cet->deqeued_not_returned--;
          return ret_frame;
        }
    }

  return ret_frame;
}
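
/* Thin wrappers binding each (operation type, AAD length) combination to the
 * generic ring enqueue; these are the functions registered as enqueue
 * handlers below. */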
static_always_inline int
cryptodev_enqueue_aead_aad_0_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                       0);
}
static_always_inline int
cryptodev_enqueue_aead_aad_8_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                       8);
}
static_always_inline int
cryptodev_enqueue_aead_aad_12_enc (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_ENCRYPT,
                                       12);
}

static_always_inline int
cryptodev_enqueue_aead_aad_0_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                       0);
}
static_always_inline int
cryptodev_enqueue_aead_aad_8_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                       8);
}
static_always_inline int
cryptodev_enqueue_aead_aad_12_dec (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_aead_enqueue (vm, frame, CRYPTODEV_OP_TYPE_DECRYPT,
                                       12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
                                  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
                                              CRYPTODEV_OP_TYPE_DECRYPT);
}

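/* Engine registration: create one rte_crypto_op mempool per worker thread,
 * then register enqueue handlers for every AEAD and linked cipher + auth
 * combination the device supports, plus the common dequeue handler. */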
clib_error_t *
cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  struct rte_cryptodev_sym_capability_idx cap_auth_idx;
  struct rte_cryptodev_sym_capability_idx cap_cipher_idx;
  struct rte_cryptodev_sym_capability_idx cap_aead_idx;
  u8 *name;
  clib_error_t *error = 0;
  u32 ref_cnt = 0;

  vec_foreach (cet, cmt->per_thread_data)
    {
      u32 thread_index = cet - cmt->per_thread_data;
      u32 numa = vlib_get_main_by_index (thread_index)->numa_node;
      name = format (0, "vpp_cop_pool_%u_%u", numa, thread_index);
      cet->cop_pool = rte_mempool_create (
        (char *) name, CRYPTODEV_NB_CRYPTO_OPS, sizeof (cryptodev_op_t), 0,
        sizeof (struct rte_crypto_op_pool_private), NULL, NULL, crypto_op_init,
        NULL, vm->numa_node, 0);
      if (!cet->cop_pool)
        {
          error = clib_error_return (
            0, "Failed to create cryptodev op pool %s", name);
          vec_free (name);
          goto error_exit;
        }
      vec_free (name);
    }

#define _(a, b, c, d, e, f, g)                                                \
  cap_aead_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;                              \
  cap_aead_idx.algo.aead = RTE_CRYPTO_##b##_##c;                              \
  if (cryptodev_check_cap_support (&cap_aead_idx, g, e, f))                   \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,                 \
        cryptodev_enqueue_aead_aad_##f##_enc);                                \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,                 \
        cryptodev_enqueue_aead_aad_##f##_dec);                                \
      ref_cnt++;                                                              \
    }
  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d, e)                                                      \
  cap_auth_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                              \
  cap_auth_idx.algo.auth = RTE_CRYPTO_AUTH_##d##_HMAC;                        \
  cap_cipher_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                          \
  cap_cipher_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                         \
  if (cryptodev_check_cap_support (&cap_cipher_idx, c, -1, -1) &&             \
      cryptodev_check_cap_support (&cap_auth_idx, -1, e, -1))                 \
    {                                                                         \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_ENC,                    \
        cryptodev_enqueue_linked_alg_enc);                                    \
      vnet_crypto_register_enqueue_handler (                                  \
        vm, eidx, VNET_CRYPTO_OP_##a##_##d##_TAG##e##_DEC,                    \
        cryptodev_enqueue_linked_alg_dec);                                    \
      ref_cnt++;                                                              \
    }
    foreach_cryptodev_link_async_alg
#undef _

    if (ref_cnt)
      vnet_crypto_register_dequeue_handler (vm, eidx, cryptodev_frame_dequeue);

    return 0;

error_exit:
  vec_foreach (cet, cmt->per_thread_data)
    {
      if (cet->cop_pool)
        rte_mempool_free (cet->cop_pool);
    }

  return error;
}