/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

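/* Enable or disable self-crypto processing on a given worker. Refuses to
 * disable the last worker that still has crypto processing enabled. */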
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  /* skip the main thread (index 0) when worker threads are present */
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
                          (worker_idx)].self_crypto_enabled = enabled;
    }
  else                          /* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}

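/* Key handler: keep a local copy of link-type keys (paired crypto and
 * integrity key indices); on delete, invalidate the stored indices. */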
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                                 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
        {
          cm->keys[idx].index_crypto = UINT32_MAX;
          cm->keys[idx].index_integ = UINT32_MAX;
        }
      else
        {
          cm->keys[idx] = *key;
        }
    }
}

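/* Enqueue an async frame on the calling thread's ring for this op type.
 * If the head slot is still occupied the ring is full: mark all elements
 * failed and reject the frame. */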
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
                                   vnet_crypto_async_frame_t * frame)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd
    = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
  u32 head = q->head;

  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      /* ring is full */
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
        frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }
  q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  q->head = head;
  return 0;
}

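/* Scan the ring from tail to head and atomically claim the first frame
 * still in PENDING state, transitioning it to WORK_IN_PROGRESS. */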
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f;
  u32 i;
  u32 tail = q->tail;
  u32 head = q->head;

  for (i = tail; i < head; i++)
    {
      f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      if (!f)
        continue;
      if (clib_atomic_bool_cmp_and_swap
          (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
           VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
        {
          return f;
        }
    }
  return NULL;
}

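/* Pop the frame at the ring tail once it has reached a terminal state
 * (SUCCESS or ELT_ERROR); frames are completed in ring order. */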
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f = 0;
  if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
      && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
      >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      u32 tail = q->tail;
      CLIB_MEMORY_STORE_BARRIER ();
      q->tail++;
      f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
    }
  return f;
}

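/* Build a scatter-gather chunk vector for a chained buffer: walk the
 * buffer chain and emit one in-place chunk per segment that overlaps
 * [offset, offset + len). */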
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t * vm,
                            crypto_sw_scheduler_per_thread_data_t * ptd,
                            vlib_buffer_t * b, vnet_crypto_op_t * op,
                            i32 offset, i32 len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *nb = b;
  u32 n_chunks = 0;
  u32 chunk_index = vec_len (ptd->chunks);

  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

  while (len)
    {
      if (nb->current_data + nb->current_length > offset)
        {
          vec_add2 (ptd->chunks, ch, 1);
          ch->src = ch->dst = nb->data + offset;
          ch->len
            = clib_min (nb->current_data + nb->current_length - offset, len);
          len -= ch->len;
          offset = 0;
          n_chunks++;
          if (!len)
            break;
        }
      if (offset)
        offset -= nb->current_data + nb->current_length;
      if (nb->flags & VLIB_BUFFER_NEXT_PRESENT)
        nb = vlib_get_buffer (vm, nb->next_buffer);
      else
        break;
    }

  ASSERT (offset == 0 && len == 0);
  op->chunk_index = chunk_index;
  op->n_chunks = n_chunks;
}

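/* Convert one async frame element into a synchronous AEAD op, taking the
 * chained-op path when the buffer is chained. */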
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
                                  crypto_sw_scheduler_per_thread_data_t * ptd,
                                  vnet_crypto_async_frame_elt_t * fe,
                                  u32 index, u32 bi,
                                  vnet_crypto_op_id_t op_id, u16 aad_len,
                                  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
                                  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

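/* Convert one async frame element into a cipher op plus an integrity
 * (HMAC) op for linked crypto algorithms. */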
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
                                         crypto_sw_scheduler_per_thread_data_t
                                         * ptd, vnet_crypto_key_t * key,
                                         vnet_crypto_async_frame_elt_t * fe,
                                         u32 index, u32 bi,
                                         vnet_crypto_op_id_t crypto_op_id,
                                         vnet_crypto_op_id_t integ_op_id,
                                         u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
                                  fe->crypto_start_offset,
                                  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
                                  fe->integ_start_offset,
                                  fe->crypto_total_length +
                                  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  if (is_enc)
    crypto_op->flags |= VNET_CRYPTO_OP_FLAG_INIT_IV;
  else
    integ_op->flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  crypto_op->user_data = integ_op->user_data = index;
}

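/* Run a vector of synchronous ops and, for each op that failed, copy its
 * status back to the matching frame element and flag the frame state. */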
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
             vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

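/* Same as process_ops, but for chained (scatter-gather) ops. */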
static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
                     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
                     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          f->elts[op->user_data].status = op->status;
          *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
          n_fail--;
        }
      op++;
    }
}

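/* AEAD dequeue handler: if self-crypto is enabled on this thread, claim a
 * pending AEAD frame from any thread's queue and process it inline, then
 * hand back the next completed frame from this thread's own queue. */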
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_op_id, u8 tag_len,
                                  u8 aad_len, u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chunks);

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
                                            sync_op_id, aad_len, tag_len);
          bi++;
          fe++;
        }

      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

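/* Linked-algorithm dequeue handler: like the AEAD path, but each element
 * yields a cipher op and an integrity op; encrypt-then-MAC on encrypt,
 * verify-then-decrypt on decrypt. */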
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
                                  vnet_crypto_async_op_id_t async_op_id,
                                  vnet_crypto_op_id_t sync_crypto_op_id,
                                  vnet_crypto_op_id_t sync_integ_op_id,
                                  u16 digest_len, u8 is_enc,
                                  u32 * nb_elts_processed,
                                  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      /* *INDENT-OFF* */
      vec_foreach_index (i, cm->per_thread_data)
      {
        ptd = cm->per_thread_data + i;
        q = ptd->queues[async_op_id];
        f = crypto_sw_scheduler_get_pending_frame (q);
        if (f)
          break;
      }
      /* *INDENT-ON* */
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
      vec_reset_length (ptd->chunks);

      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      while (n_elts--)
        {
          if (n_elts > 1)
            CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);

          crypto_sw_scheduler_convert_link_crypto (vm, ptd,
                                                   cm->keys + fe->key_index,
                                                   fe, fe - f->elts, bi[0],
                                                   sync_crypto_op_id,
                                                   sync_integ_op_id,
                                                   digest_len, is_enc);
          bi++;
          fe++;
        }

      if (is_enc)
        {
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
        }
      else
        {
          process_ops (vm, f, ptd->integ_ops, &state);
          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
                               &state);
          process_ops (vm, f, ptd->crypto_ops, &state);
          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                               &state);
        }

      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
                                vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  /* initialized so an empty input line cannot pass garbage values below */
  u32 worker_index = ~0;
  u8 crypto_enable = 0;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
        {
          if (unformat (line_input, "crypto"))
            {
              if (unformat (line_input, "on"))
                crypto_enable = 1;
              else if (unformat (line_input, "off"))
                crypto_enable = 0;
              else
                return (clib_error_return (0, "unknown input '%U'",
                                           format_unformat_error,
                                           line_input));
            }
          else
            return (clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input));
        }
      else
        return (clib_error_return (0, "unknown input '%U'",
                                   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a worker does crypto processing.
 *
 * @cliexpar
 * Example of how to disable crypto processing on worker 0:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
                           vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
                       (vlib_worker_threads + i)->name,
                       cm->per_thread_data[i].self_crypto_enabled ?
                       "on" : "off");
    }

  return 0;
}

/*?
 * This command displays the sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show the workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

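/* Generate per-algorithm dequeue wrappers for every AEAD and every linked
 * cipher+HMAC async op; they are registered in crypto_sw_scheduler_init
 * below. */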
/* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                       \
        VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx);       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec (      \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
        vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                       \
        VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx);       \
  }
foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,                          \
        VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1,            \
        nb_elts_processed, thread_idx);                                       \
  }                                                                           \
  static vnet_crypto_async_frame_t                                            \
      *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec (           \
          vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)          \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
        vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,                          \
        VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0,            \
        nb_elts_processed, thread_idx);                                       \
  }
foreach_crypto_link_async_alg
#undef _
/* *INDENT-ON* */


crypto_sw_scheduler_main_t crypto_sw_scheduler_main;

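/* Plugin init: allocate a per-thread frame queue for every async op id,
 * register the engine with the crypto subsystem, and hook up the key
 * handler plus the generated enqueue/dequeue handlers. */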
clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
    + sizeof (crypto_sw_scheduler_queue_t);

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    ptd->self_crypto_enabled = 1;
    u32 i;
    for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
      {
        crypto_sw_scheduler_queue_t *q
          = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);
        ASSERT (q != 0);
        ptd->queues[i] = q;
        clib_memset_u8 (q, 0, queue_size);
      }
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
                                 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
                                    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc);       \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index,                                            \
      VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                             \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc);            \
  vnet_crypto_register_async_handler (                                        \
      vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,   \
      crypto_sw_scheduler_frame_enqueue,                                      \
      crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
  foreach_crypto_link_async_alg
#undef _
  /* *INDENT-ON* */

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}


/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */