crypto: fix chained buffer integrity support
[vpp.git] / src / plugins / crypto_sw_scheduler / main.c
/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

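/* Enable or disable self-crypto processing on one worker thread. The loop
 * counts how many workers currently have crypto enabled; a worker may only
 * be disabled if at least one other crypto-enabled worker remains. */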
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
                          (worker_idx)].self_crypto_enabled = enabled;
    }
  else                          /* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}

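/* Key handler: only linked keys (a crypto key paired with an integrity key)
 * are cached locally so the dequeue path can resolve the component key
 * indices; deleting a key resets the cached indices. */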
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                                 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
        {
          cm->keys[idx].index_crypto = UINT32_MAX;
          cm->keys[idx].index_integ = UINT32_MAX;
        }
      else
        {
          cm->keys[idx] = *key;
        }
    }
}

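/* Enqueue an async frame on this thread's ring for the frame's op id. If the
 * slot at head is still occupied the ring is full: mark every element as
 * failed and report the error to the caller. The store barrier publishes the
 * job pointer before the head index moves. */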
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
                                   vnet_crypto_async_frame_t * frame)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd
    = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
  u64 head = q->head;

  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
        frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }
  q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  q->head = head;
  return 0;
}

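/* Scan the ring between tail and head for a frame still in PENDING state and
 * claim it with an atomic compare-and-swap (PENDING -> WORK_IN_PROGRESS) so
 * that exactly one thread ends up processing it. */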
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f;
  u32 i;
  u32 tail = q->tail;
  u32 head = q->head;

  for (i = tail; i < head; i++)
    {
      f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      if (!f)
        continue;
      if (clib_atomic_bool_cmp_and_swap
          (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
           VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
        {
          return f;
        }
    }
  return NULL;
}

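/* If the frame at the tail of the ring has finished processing (state is
 * SUCCESS or ELT_ERROR), pop it and return it; otherwise return 0 so frames
 * complete in the order they were enqueued. */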
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f = 0;
  if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
      && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
      >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      u32 tail = q->tail;
      CLIB_MEMORY_STORE_BARRIER ();
      q->tail++;
      f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
    }
  return f;
}

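/* Walk a chained vlib buffer and build a scatter-gather chunk vector for one
 * op, starting at 'offset' into the chain and covering 'len' bytes. Each
 * chunk points directly into a buffer segment (in-place, src == dst). */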
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t * vm,
                            crypto_sw_scheduler_per_thread_data_t * ptd,
                            vlib_buffer_t * b, vnet_crypto_op_t * op,
                            i32 offset, i32 len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *nb = b;
  u32 n_chunks = 0;
  u32 chunk_index = vec_len (ptd->chunks);

  while (len)
    {
      if (nb->current_data + nb->current_length > offset)
        {
          vec_add2 (ptd->chunks, ch, 1);
          ch->src = ch->dst = nb->data + offset;
          ch->len
            = clib_min (nb->current_data + nb->current_length - offset, len);
          len -= ch->len;
          offset = 0;
          n_chunks++;
          if (!len)
            break;
        }
      if (offset)
        offset -= nb->current_data + nb->current_length;
      if (nb->flags & VLIB_BUFFER_NEXT_PRESENT)
        nb = vlib_get_buffer (vm, nb->next_buffer);
      else
        break;
    }

  ASSERT (offset == 0);
  if (n_chunks && len)
    {
      /* Some async crypto users can use buffers in creative ways, let's allow
       * some flexibility here...
       * Current example is ESP decrypt with ESN in async mode: it will stash
       * ESN at the end of the last buffer (if it can) because it must be part
       * of the integrity check but it will not update the buffer length.
       * Fixup the last operation chunk length if we have room.
       */
      ASSERT (vlib_buffer_space_left_at_end (vm, nb) >= len);
      if (vlib_buffer_space_left_at_end (vm, nb) >= len)
        ch->len += len;
    }

  op->chunk_index = chunk_index;
  op->n_chunks = n_chunks;
}

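/* Translate one AEAD frame element into a synchronous vnet_crypto op:
 * chained buffers go to the chained-op vector with a scatter-gather list,
 * single-segment buffers to the flat op vector. */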
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
                                  crypto_sw_scheduler_per_thread_data_t * ptd,
                                  vnet_crypto_async_frame_elt_t * fe,
                                  u32 index, u32 bi,
                                  vnet_crypto_op_id_t op_id, u16 aad_len,
                                  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
                                  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

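/* Translate one linked (cipher + HMAC) frame element into a pair of
 * synchronous ops, building separate scatter-gather lists for the crypto and
 * integrity ranges when the buffer is chained. The element index is stored
 * in user_data so failures can be mapped back to the right frame element. */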
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
                                         crypto_sw_scheduler_per_thread_data_t
                                         * ptd, vnet_crypto_key_t * key,
                                         vnet_crypto_async_frame_elt_t * fe,
                                         u32 index, u32 bi,
                                         vnet_crypto_op_id_t crypto_op_id,
                                         vnet_crypto_op_id_t integ_op_id,
                                         u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
                                  fe->crypto_start_offset,
                                  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
                                  fe->integ_start_offset,
                                  fe->crypto_total_length +
                                  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
  crypto_op->user_data = integ_op->user_data = index;
}

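/* Run a vector of flat (single-segment) ops through the synchronous crypto
 * engines and copy any per-op failure status back to the matching frame
 * element, downgrading the frame state to ELT_ERROR. */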
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
             vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

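/* Same as process_ops, but for chained ops whose data is described by the
 * shared chunk vector. */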
static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
                     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
                     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

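/* Dequeue path for AEAD algorithms: if this worker does self-crypto, claim a
 * pending frame from any thread's queue, convert its elements to synchronous
 * ops and process them, then hand back the next completed frame (if any)
 * from this thread's own queue so completion order is preserved. */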
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_op_id, u8 tag_len,
                                  u8 aad_len, u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chunks);

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
                                            sync_op_id, aad_len, tag_len);
          bi++;
          fe++;
        }

      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

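/* Dequeue path for linked cipher + HMAC algorithms. Processing order matters:
 * for encryption the cipher runs before the integrity ops; for decryption the
 * integrity ops run first because the ops work in place and the digest must
 * be computed over the ciphertext before it is overwritten. */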
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_crypto_op_id,
                                  vnet_crypto_op_id_t sync_integ_op_id,
                                  u16 digest_len, u8 is_enc,
                                  u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
      vec_reset_length (ptd->chunks);

      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_link_crypto (vm, ptd,
                                                   cm->keys + fe->key_index,
                                                   fe, fe - f->elts, bi[0],
                                                   sync_crypto_op_id,
                                                   sync_integ_op_id,
                                                   digest_len, is_enc);
          bi++;
          fe++;
        }

      if (is_enc)
        {
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
        }
      else
        {
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
        }

      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
                                vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index = ~0;
  u8 crypto_enable = 0;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
        {
          if (unformat (line_input, "crypto"))
            {
              if (unformat (line_input, "on"))
                crypto_enable = 1;
              else if (unformat (line_input, "off"))
                crypto_enable = 0;
              else
                return (clib_error_return (0, "unknown input '%U'",
                                           format_unformat_error,
                                           line_input));
            }
          else
            return (clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input));
        }
      else
        return (clib_error_return (0, "unknown input '%U'",
                                   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a worker does crypto processing.
 *
 * @cliexpar
 * Example of how to set worker crypto processing off:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
                           vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
                       (vlib_worker_threads + i)->name,
                       cm->per_thread_data[i].self_crypto_enabled ? "on" :
                       "off");
    }

  return 0;
}

/*?
 * This command displays sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

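/* Expand per-algorithm dequeue wrappers: for every AEAD and every linked
 * cipher+HMAC algorithm an _enc and a _dec dequeue function is generated
 * that calls the generic dequeue with the matching async/sync op ids and
 * tag, AAD and digest lengths. */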
/* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                       \
        VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx);       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                       \
        VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx);       \
  }
foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,                          \
        VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1,            \
        nb_elts_processed, thread_idx);                                       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,                          \
        VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0,            \
        nb_elts_processed, thread_idx);                                       \
  }
    foreach_crypto_link_async_alg
#undef _
        /* *INDENT-ON* */

crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
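
/* Plugin init: allocate one fixed-size job ring per async op id on every
 * thread, register the engine with the crypto subsystem, then register the
 * key handler plus the generated enqueue/dequeue handlers for each supported
 * AEAD and linked algorithm. */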
clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
    + sizeof (crypto_sw_scheduler_queue_t);

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    ptd->self_crypto_enabled = 1;
    u32 i;
    for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
      {
        crypto_sw_scheduler_queue_t *q
          = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);
        ASSERT (q != 0);
        ptd->queues[i] = q;
        clib_memset_u8 (q, 0, queue_size);
      }
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
                                 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
                                    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc);       \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc);            \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
      foreach_crypto_link_async_alg
#undef _
      /* *INDENT-ON* */

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */