crypto: Add async crypto APIs
[vpp.git] / src/plugins/crypto_sw_scheduler/main.c
/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

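/* Enable or disable self-crypto processing on a worker thread. A worker
 * with self-crypto enabled polls and processes frames itself; at least one
 * worker must stay enabled, so disabling the last one is rejected. The scan
 * over per-thread data skips the main thread when workers are present
 * (i starts at 1). */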
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
                          (worker_idx)].self_crypto_enabled = enabled;
    }
  else                          /* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}

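/* Key add/del notifications from the vnet crypto layer. Only linked
 * (crypto + integ) keys are mirrored locally, so the dequeue path can look
 * up the two underlying key indices without going back to the main key
 * store; on delete the indices are invalidated. */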
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                                 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
        {
          cm->keys[idx].index_crypto = UINT32_MAX;
          cm->keys[idx].index_integ = UINT32_MAX;
        }
      else
        {
          cm->keys[idx] = *key;
        }
    }
}

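/* Producer side of the per-thread, per-op ring: place the frame in the head
 * slot, publish it with a store barrier, then advance head. If the head
 * slot is still occupied the ring is full and the frame is failed back to
 * the caller. */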
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
                                   vnet_crypto_async_frame_t * frame)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd
    = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
  u64 head = q->head;

  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
        frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      frame->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
      return -1;
    }
  frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
  q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  q->head = head;
  return 0;
}

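/* Consumer-side scan between tail and head: the first frame whose state can
 * be atomically moved from PENDING to WORK_IN_PROGRESS is claimed, so each
 * frame is processed by exactly one thread even when several threads poll
 * the same queue. */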
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f;
  u32 i;
  u32 tail = q->tail;
  u32 head = q->head;

  for (i = tail; i < head; i++)
    {
      f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      if (!f)
        continue;
      if (clib_atomic_bool_cmp_and_swap
          (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
           VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
        {
          return f;
        }
    }
  return NULL;
}

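/* Completions are delivered strictly in enqueue order: only the frame at
 * tail is returned, and only once its state has reached SUCCESS or
 * ELT_ERROR; its slot is then released for the producer to reuse. */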
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f = 0;
  if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
      && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
      >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      u32 tail = q->tail;
      CLIB_MEMORY_STORE_BARRIER ();
      q->tail++;
      f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
    }
  return f;
}

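/* Build a scatter-gather chunk list for a chained buffer: walk the buffer
 * chain starting at 'offset' and cover 'len' bytes, appending one in-place
 * (src == dst) chunk per buffer segment to the per-thread chunk vector. */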
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t * vm,
                            crypto_sw_scheduler_per_thread_data_t * ptd,
                            vlib_buffer_t * b, vnet_crypto_op_t * op,
                            i32 offset, i32 len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *nb = b;
  u32 n_chunks = 0;
  u32 chunk_index = vec_len (ptd->chunks);

  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

  while (len)
    {
      if (nb->current_data + nb->current_length > offset)
        {
          vec_add2 (ptd->chunks, ch, 1);
          ch->src = ch->dst = nb->data + offset;
          ch->len
            = clib_min (nb->current_data + nb->current_length - offset, len);
          len -= ch->len;
          offset = 0;
          n_chunks++;
          if (!len)
            break;
        }
      if (offset)
        offset -= nb->current_data + nb->current_length;
      if (nb->flags & VLIB_BUFFER_NEXT_PRESENT)
        nb = vlib_get_buffer (vm, nb->next_buffer);
      else
        break;
    }

  ASSERT (offset == 0 && len == 0);
  op->chunk_index = chunk_index;
  op->n_chunks = n_chunks;
}

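/* Translate one async AEAD frame element into a synchronous
 * vnet_crypto_op_t, using the chained-op vector (plus an SGL) when the
 * element spans chained buffers. The element index is stashed in user_data
 * so failures can be reported back to the right frame element. */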
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
                                  crypto_sw_scheduler_per_thread_data_t * ptd,
                                  vnet_crypto_async_frame_elt_t * fe,
                                  u32 index, u32 bi,
                                  vnet_crypto_op_id_t op_id, u16 aad_len,
                                  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
                                  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

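/* Translate one async linked (cipher + HMAC) frame element into a pair of
 * synchronous ops sharing the same buffer. Encrypt requests get a fresh IV
 * (VNET_CRYPTO_OP_FLAG_INIT_IV); decrypt requests verify the digest
 * (VNET_CRYPTO_OP_FLAG_HMAC_CHECK). */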
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
                                         crypto_sw_scheduler_per_thread_data_t
                                         * ptd, vnet_crypto_key_t * key,
                                         vnet_crypto_async_frame_elt_t * fe,
                                         u32 index, u32 bi,
                                         vnet_crypto_op_id_t crypto_op_id,
                                         vnet_crypto_op_id_t integ_op_id,
                                         u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
                                  fe->crypto_start_offset,
                                  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
                                  fe->integ_start_offset,
                                  fe->crypto_total_length +
                                  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  if (is_enc)
    crypto_op->flags |= VNET_CRYPTO_OP_FLAG_INIT_IV;
  else
    integ_op->flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  crypto_op->user_data = integ_op->user_data = index;
}

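/* Run the converted ops through the registered synchronous engines. Any op
 * that did not complete has its status copied back to the matching frame
 * element (via user_data) and the frame state is downgraded to ELT_ERROR. */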
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
             vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
                     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
                     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

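/* Dequeue handler for an AEAD algorithm. If this thread has self-crypto
 * enabled it scans every thread's queue for a pending frame (simple work
 * stealing), converts it and processes it inline with the synchronous
 * engines, then returns the next completed frame from its own queue, if
 * any. */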
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_op_id, u8 tag_len,
                                  u8 aad_len, u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chunks);

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
                                            sync_op_id, aad_len, tag_len);
          bi++;
          fe++;
        }

      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

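/* Dequeue handler for a linked cipher + HMAC algorithm. Same work-stealing
 * scheme as the AEAD path, but two op vectors are processed and the order
 * matters: cipher before HMAC on the encrypt side, HMAC check before the
 * cipher on the decrypt side. */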
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_crypto_op_id,
                                  vnet_crypto_op_id_t sync_integ_op_id,
                                  u16 digest_len, u8 is_enc,
                                  u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
      vec_reset_length (ptd->chunks);

      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_link_crypto (vm, ptd,
                                                   cm->keys + fe->key_index,
                                                   fe, fe - f->elts, bi[0],
                                                   sync_crypto_op_id,
                                                   sync_integ_op_id,
                                                   digest_len, is_enc);
          bi++;
          fe++;
        }

      if (is_enc)
        {
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
        }
      else
        {
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
        }

      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
                                vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index;
  u8 crypto_enable;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
        {
          if (unformat (line_input, "crypto"))
            {
              if (unformat (line_input, "on"))
                crypto_enable = 1;
              else if (unformat (line_input, "off"))
                crypto_enable = 0;
              else
                return (clib_error_return (0, "unknown input '%U'",
                                           format_unformat_error,
                                           line_input));
            }
          else
            return (clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input));
        }
      else
        return (clib_error_return (0, "unknown input '%U'",
                                   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a worker does crypto processing.
 *
 * @cliexpar
 * Example of how to turn crypto processing off for worker 0:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
                           vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = vlib_num_workers () > 0; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
                       (vlib_worker_threads + i)->name,
                       cm->per_thread_data[i].self_crypto_enabled ?
                       "on" : "off");
    }

  return 0;
}

/*?
 * This command displays sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

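/* For every async algorithm the vnet crypto layer defines, generate a thin
 * dequeue wrapper that binds the async op id to the matching synchronous op
 * ids, tag/AAD/digest sizes and direction, and forwards to the generic AEAD
 * or linked-algorithm dequeue routine above. */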
/* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                       \
        VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx);       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                       \
        VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx);       \
  }
foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,                          \
        VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1,            \
        nb_elts_processed, thread_idx);                                       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,                          \
        VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0,            \
        nb_elts_processed, thread_idx);                                       \
  }
    foreach_crypto_link_async_alg
#undef _
        /* *INDENT-ON* */

crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
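/* Plugin init: allocate one job ring per thread and per async op id (each
 * ring is a crypto_sw_scheduler_queue_t header followed by
 * CRYPTO_SW_SCHEDULER_QUEUE_SIZE job pointers), register the engine with
 * the crypto subsystem, and hook up the enqueue/dequeue handlers generated
 * above for every supported algorithm. */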
clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
    + sizeof (crypto_sw_scheduler_queue_t);

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    ptd->self_crypto_enabled = 1;
    u32 i;
    for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
      {
        crypto_sw_scheduler_queue_t *q
          = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);
        ASSERT (q != 0);
        ptd->queues[i] = q;
        clib_memset_u8 (q, 0, queue_size);
      }
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
                                 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
                                    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc);       \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc);            \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
      foreach_crypto_link_async_alg
#undef _
      /* *INDENT-ON* */

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */