/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

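/*
 * Enable or disable self-crypto processing on a given worker thread. The
 * request is rejected if it would disable the last worker that still has
 * crypto enabled.
 */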
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
                          (worker_idx)].self_crypto_enabled = enabled;
    }
  else                          /* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}

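/*
 * Key add/delete notification from the crypto infrastructure. Only linked
 * (crypto + integrity) keys are cached here; their index_crypto and
 * index_integ are looked up later when converting linked async ops.
 */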
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                                 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
        {
          cm->keys[idx].index_crypto = UINT32_MAX;
          cm->keys[idx].index_integ = UINT32_MAX;
        }
      else
        {
          cm->keys[idx] = *key;
        }
    }
}

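/*
 * Async enqueue handler: place the frame into this thread's ring for the
 * frame's op type. If the slot at the current head is still occupied the
 * ring is full and every element is failed back to the caller.
 */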
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
                                   vnet_crypto_async_frame_t * frame)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd
    = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
  u64 head = q->head;

  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
        frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }
  q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  q->head = head;
  return 0;
}

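/*
 * Scan the ring from tail to head and atomically claim the first frame
 * still in PENDING state by moving it to WORK_IN_PROGRESS. Several threads
 * may poll the same ring, so the compare-and-swap decides ownership.
 */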
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f;
  u32 i;
  u32 tail = q->tail;
  u32 head = q->head;

  for (i = tail; i < head; i++)
    {
      f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      if (!f)
        continue;
      if (clib_atomic_bool_cmp_and_swap
          (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
           VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
        {
          return f;
        }
    }
  return NULL;
}

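/*
 * Pop the frame at the tail once its processing has finished (state is
 * SUCCESS or ELT_ERROR). Frames therefore complete in enqueue order.
 */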
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f = 0;
  if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
      && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
      >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      u32 tail = q->tail;
      CLIB_MEMORY_STORE_BARRIER ();
      q->tail++;
      f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
    }
  return f;
}

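/*
 * Build a scatter-gather chunk list for a chained buffer: walk the chain,
 * skip data before 'offset', and emit one chunk per buffer segment until
 * 'len' bytes are covered.
 */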
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t * vm,
                            crypto_sw_scheduler_per_thread_data_t * ptd,
                            vlib_buffer_t * b, vnet_crypto_op_t * op,
                            i32 offset, i32 len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *nb = b;
  u32 n_chunks = 0;
  u32 chunk_index = vec_len (ptd->chunks);

  while (len)
    {
      if (nb->current_data + nb->current_length > offset)
        {
          vec_add2 (ptd->chunks, ch, 1);
          ch->src = ch->dst = nb->data + offset;
          ch->len
            = clib_min (nb->current_data + nb->current_length - offset, len);
          len -= ch->len;
          offset = 0;
          n_chunks++;
          if (!len)
            break;
        }
      if (offset)
        offset -= nb->current_data + nb->current_length;
      if (nb->flags & VLIB_BUFFER_NEXT_PRESENT)
        nb = vlib_get_buffer (vm, nb->next_buffer);
      else
        break;
    }

  ASSERT (offset == 0 && len == 0);
  op->chunk_index = chunk_index;
  op->n_chunks = n_chunks;
}

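/*
 * Convert one async frame element into a synchronous AEAD op, taking the
 * chained-ops path for chained buffers. The element index is stored in
 * user_data so a failure status can be copied back to the right element.
 */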
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
                                  crypto_sw_scheduler_per_thread_data_t * ptd,
                                  vnet_crypto_async_frame_elt_t * fe,
                                  u32 index, u32 bi,
                                  vnet_crypto_op_id_t op_id, u16 aad_len,
                                  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
                                  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

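/*
 * Convert one async frame element into a cipher op plus an integrity
 * (HMAC) op for linked algorithms, resolving the separate crypto and
 * integ key indices from the cached linked key.
 */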
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
                                         crypto_sw_scheduler_per_thread_data_t
                                         * ptd, vnet_crypto_key_t * key,
                                         vnet_crypto_async_frame_elt_t * fe,
                                         u32 index, u32 bi,
                                         vnet_crypto_op_id_t crypto_op_id,
                                         vnet_crypto_op_id_t integ_op_id,
                                         u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
                                  fe->crypto_start_offset,
                                  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
                                  fe->integ_start_offset,
                                  fe->crypto_total_length +
                                  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
  crypto_op->user_data = integ_op->user_data = index;
}

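/*
 * Run the converted sync ops and, for every op that did not complete,
 * copy its status into the corresponding frame element and downgrade the
 * frame state to ELT_ERROR. The chained variant below does the same for
 * chained-buffer ops.
 */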
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
             vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
                     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
                     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

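/*
 * Async dequeue handler for AEAD ops: if this thread participates in
 * crypto, claim one pending frame from any thread's ring and process it
 * inline, then hand back the next completed frame from this thread's own
 * ring.
 */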
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_op_id, u8 tag_len,
                                  u8 aad_len, u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chunks);

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
                                            sync_op_id, aad_len, tag_len);
          bi++;
          fe++;
        }

      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

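/*
 * Async dequeue handler for linked cipher + HMAC ops: same scheme as the
 * AEAD path, but cipher ops run before integrity ops on encrypt and after
 * them on decrypt.
 */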
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_crypto_op_id,
                                  vnet_crypto_op_id_t sync_integ_op_id,
                                  u16 digest_len, u8 is_enc,
                                  u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
      vec_reset_length (ptd->chunks);

      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_link_crypto (vm, ptd,
                                                   cm->keys + fe->key_index,
                                                   fe, fe - f->elts, bi[0],
                                                   sync_crypto_op_id,
                                                   sync_integ_op_id,
                                                   digest_len, is_enc);
          bi++;
          fe++;
        }

      if (is_enc)
        {
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
        }
      else
        {
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
        }

      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

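/*
 * CLI handler for "set sw_scheduler worker <idx> crypto <on|off>".
 */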
static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
                                vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index;
  u8 crypto_enable;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
        {
          if (unformat (line_input, "crypto"))
            {
              if (unformat (line_input, "on"))
                crypto_enable = 1;
              else if (unformat (line_input, "off"))
                crypto_enable = 0;
              else
                return (clib_error_return (0, "unknown input '%U'",
                                           format_unformat_error,
                                           line_input));
            }
          else
            return (clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input));
        }
      else
        return (clib_error_return (0, "unknown input '%U'",
                                   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a worker will do crypto processing.
 *
 * @cliexpar
 * Example of how to set worker crypto processing off:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
                           vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
                       (vlib_worker_threads + i)->name,
                       cm->
                       per_thread_data[i].self_crypto_enabled ? "on" : "off");
    }

  return 0;
}

/*?
 * This command displays sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

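/*
 * Generate one dequeue wrapper per AEAD algorithm / tag length / AAD
 * length combination and one per linked cipher + HMAC combination, so
 * every async op id gets its own handler to register below.
 */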
/* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  static vnet_crypto_async_frame_t                                           \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc (     \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)         \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                       \
        VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx);       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                           \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec (     \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)         \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                       \
        VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx);       \
  }
foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  static vnet_crypto_async_frame_t                                           \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc (          \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)         \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,                          \
        VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1,            \
        nb_elts_processed, thread_idx);                                       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                           \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec (          \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)         \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,                          \
        VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0,            \
        nb_elts_processed, thread_idx);                                       \
  }
    foreach_crypto_link_async_alg
#undef _
        /* *INDENT-ON* */

crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
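/*
 * Plugin init: allocate one frame ring per async op id for every thread,
 * then register the engine, its key handler and the per-algorithm
 * enqueue/dequeue handlers generated above.
 *
 * Each ring is a single allocation sized for the header plus
 * CRYPTO_SW_SCHEDULER_QUEUE_SIZE job pointers. A minimal sketch of the
 * assumed layout follows; field order and exact types are assumptions,
 * see crypto_sw_scheduler.h for the authoritative definition:
 *
 *   typedef struct
 *   {
 *     u32 head;
 *     u32 tail;
 *     vnet_crypto_async_frame_t *jobs[0];
 *   } crypto_sw_scheduler_queue_t;
 */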
clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
    + sizeof (crypto_sw_scheduler_queue_t);

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    ptd->self_crypto_enabled = 1;
    u32 i;
    for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
      {
        crypto_sw_scheduler_queue_t *q
          = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);
        ASSERT (q != 0);
        ptd->queues[i] = q;
        clib_memset_u8 (q, 0, queue_size);
      }
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
                                 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
                                    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc);       \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc);            \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
      foreach_crypto_link_async_alg
#undef _
      /* *INDENT-ON* */

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */