src/plugins/crypto_sw_scheduler/main.c
/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

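/* Enable or disable crypto self-processing on a worker thread.  At least one
 * worker must keep crypto enabled, so disabling is refused when it would
 * leave no enabled crypto workers. */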
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
                          (worker_idx)].self_crypto_enabled = enabled;
    }
  else                          /* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}

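/* Key add/delete notification from the crypto infra.  Only linked
 * (crypto + integrity) keys are cached locally; they are looked up by key
 * index when linked-algorithm frames are processed. */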
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                                 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
        {
          cm->keys[idx].index_crypto = UINT32_MAX;
          cm->keys[idx].index_integ = UINT32_MAX;
        }
      else
        {
          cm->keys[idx] = *key;
        }
    }
}

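/* Async enqueue handler: place the frame on this thread's queue for the
 * frame's op id.  If the slot at the current head is still occupied the
 * queue is full, so the whole frame is failed and -1 is returned.  The store
 * barrier publishes the job before the head pointer is advanced. */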
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
                                   vnet_crypto_async_frame_t * frame)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd
    = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
  u64 head = q->head;

  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
        frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }
  q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  q->head = head;
  return 0;
}

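/* Scan the queue between tail and head and atomically claim the first frame
 * still in PENDING state, moving it to WORK_IN_PROGRESS.  The CAS allows any
 * crypto-enabled thread to pull work from any thread's queue. */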
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f;
  u32 i;
  u32 tail = q->tail;
  u32 head = q->head;

  for (i = tail; i < head; i++)
    {
      f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      if (!f)
        continue;
      if (clib_atomic_bool_cmp_and_swap
          (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
           VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
        {
          return f;
        }
    }
  return NULL;
}

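/* Pop the frame at the queue tail if it has finished processing (state is
 * SUCCESS or ELT_ERROR).  Completion is reported strictly in enqueue order:
 * a finished frame behind an unfinished one stays in the queue. */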
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f = 0;
  if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
      && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
      >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      u32 tail = q->tail;
      CLIB_MEMORY_STORE_BARRIER ();
      q->tail++;
      f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
    }
  return f;
}

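/* Build a scatter-gather chunk list for a (possibly chained) vlib buffer so
 * the synchronous crypto op can operate on the chain in place (src == dst). */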
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
                            crypto_sw_scheduler_per_thread_data_t *ptd,
                            vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
                            u32 len)
{
  vnet_crypto_op_chunk_t *ch;
  u32 n_chunks;

  /*
   * offset is relative to b->data (can be negative if we stay in pre_data
   * area). Make sure it does not go beyond the 1st buffer.
   */
  ASSERT (b->current_data + b->current_length > offset);
  offset = clib_min (b->current_data + b->current_length, offset);

  op->chunk_index = vec_len (ptd->chunks);

  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = b->data + offset;
  ch->len = clib_min (b->current_data + b->current_length - offset, len);
  len -= ch->len;
  n_chunks = 1;

  while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = clib_min (b->current_length, len);
      len -= ch->len;
      n_chunks++;
    }

  if (len)
    {
      /* Some async crypto users can use buffers in creative ways, so allow
       * some flexibility here...
       * The current example is ESP decrypt with ESN in async mode: it will
       * stash the ESN at the end of the last buffer (if it can) because it
       * must be part of the integrity check, but it will not update the
       * buffer length. Fix up the last operation chunk length if we have
       * room.
       */
      ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
      if (vlib_buffer_space_left_at_end (vm, b) >= len)
        ch->len += len;
    }

  op->n_chunks = n_chunks;
}

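/* Convert one async frame element into a synchronous AEAD op (in place),
 * using the chained-op path when the buffer is chained.  user_data records
 * the element index so per-element status can be written back on failure. */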
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
                                  crypto_sw_scheduler_per_thread_data_t * ptd,
                                  vnet_crypto_async_frame_elt_t * fe,
                                  u32 index, u32 bi,
                                  vnet_crypto_op_id_t op_id, u16 aad_len,
                                  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
                                  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

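/* Convert one async frame element of a linked (cipher + HMAC) algorithm into
 * a pair of synchronous ops, one for the cipher and one for the integrity
 * check, using the cached linked key to resolve the two key indices. */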
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
                                         crypto_sw_scheduler_per_thread_data_t
                                         * ptd, vnet_crypto_key_t * key,
                                         vnet_crypto_async_frame_elt_t * fe,
                                         u32 index, u32 bi,
                                         vnet_crypto_op_id_t crypto_op_id,
                                         vnet_crypto_op_id_t integ_op_id,
                                         u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
                                  fe->crypto_start_offset,
                                  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
                                  fe->integ_start_offset,
                                  fe->crypto_total_length +
                                  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
  crypto_op->user_data = integ_op->user_data = index;
}

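/* Run a vector of single-buffer ops through the synchronous crypto engines,
 * copy any per-op failure status back into the owning frame element and mark
 * the frame state as ELT_ERROR if any op failed. */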
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
             vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

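/* Same as process_ops() but for chained (scatter-gather) ops. */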
static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
                     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
                     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

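/* Dequeue handler for AEAD algorithms.  If crypto self-processing is enabled
 * on this thread, claim one pending frame from any thread's queue, convert
 * its elements to synchronous ops and process them.  In all cases, return
 * the next completed frame (if any) from this thread's own queue. */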
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_op_id, u8 tag_len,
                                  u8 aad_len, u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chunks);

      while (n_elts--)
        {
          if (n_elts > 1)
            clib_prefetch_load (fe + 1);

          crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
                                            sync_op_id, aad_len, tag_len);
          bi++;
          fe++;
        }

      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

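/* Dequeue handler for linked cipher + HMAC algorithms.  Works like the AEAD
 * variant, but generates a crypto op and an integrity op per element and
 * orders them by direction: encrypt then MAC for encryption, MAC check then
 * decrypt for decryption. */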
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_crypto_op_id,
                                  vnet_crypto_op_id_t sync_integ_op_id,
                                  u16 digest_len, u8 is_enc,
                                  u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
      vec_reset_length (ptd->chunks);

      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      while (n_elts--)
        {
          if (n_elts > 1)
            clib_prefetch_load (fe + 1);

          crypto_sw_scheduler_convert_link_crypto (vm, ptd,
                                                   cm->keys + fe->key_index,
                                                   fe, fe - f->elts, bi[0],
                                                   sync_crypto_op_id,
                                                   sync_integ_op_id,
                                                   digest_len, is_enc);
          bi++;
          fe++;
        }

      if (is_enc)
        {
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
        }
      else
        {
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
        }

      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

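/* CLI handler for "set sw_scheduler worker <idx> crypto <on|off>". */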
static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
                                vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index = ~0;
  u8 crypto_enable = 0;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
        {
          if (unformat (line_input, "crypto"))
            {
              if (unformat (line_input, "on"))
                crypto_enable = 1;
              else if (unformat (line_input, "off"))
                crypto_enable = 0;
              else
                return (clib_error_return (0, "unknown input '%U'",
                                           format_unformat_error,
                                           line_input));
            }
          else
            return (clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input));
        }
      else
        return (clib_error_return (0, "unknown input '%U'",
                                   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a worker does crypto processing.
 *
 * @cliexpar
 * Example of how to disable crypto processing on worker 0:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
                           vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
                       (vlib_worker_threads + i)->name,
                       cm->per_thread_data[i].self_crypto_enabled ?
                       "on" : "off");
    }

  return 0;
}

/*?
 * This command displays sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

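/* Generate per-algorithm dequeue wrappers: one encrypt and one decrypt
 * handler for every AEAD algorithm and for every linked cipher + HMAC
 * algorithm, each calling the generic dequeue routine with the matching
 * async/sync op ids, tag/AAD/digest lengths and direction. */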
/* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                       \
        VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx);       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                       \
        VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx);       \
  }
foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,                          \
        VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1,            \
        nb_elts_processed, thread_idx);                                       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,                          \
        VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0,            \
        nb_elts_processed, thread_idx);                                       \
  }
foreach_crypto_link_async_alg
#undef _
/* *INDENT-ON* */

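/* Plugin init: allocate one job ring per async op id on every thread, then
 * register the engine, its key handler, its API and the per-algorithm
 * enqueue/dequeue handlers with the crypto infrastructure. */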
crypto_sw_scheduler_main_t crypto_sw_scheduler_main;

clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
    + sizeof (crypto_sw_scheduler_queue_t);

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    ptd->self_crypto_enabled = 1;
    u32 i;
    for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
      {
        crypto_sw_scheduler_queue_t *q
          = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);
        ASSERT (q != 0);
        ptd->queues[i] = q;
        clib_memset_u8 (q, 0, queue_size);
      }
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
                                 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
                                    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc);       \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc);            \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
  foreach_crypto_link_async_alg
#undef _
  /* *INDENT-ON* */

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */