crypto: make crypto-dispatch node work in adaptive mode
[vpp.git] src/plugins/crypto_sw_scheduler/main.c
1 /*
2  * Copyright (c) 2020 Intel and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/plugin/plugin.h>
18 #include <vpp/app/version.h>
19
20 #include "crypto_sw_scheduler.h"
21
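/* Enable or disable self-crypto processing on the given worker. At least one
 * worker must keep crypto enabled, so the last enabled worker cannot be
 * turned off. */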
22 int
23 crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
24 {
25   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
26   vlib_thread_main_t *tm = vlib_get_thread_main ();
27   crypto_sw_scheduler_per_thread_data_t *ptd = 0;
28   u32 count = 0, i;
29
30   if (worker_idx >= vlib_num_workers ())
31     {
32       return VNET_API_ERROR_INVALID_VALUE;
33     }
34
35   for (i = 0; i < tm->n_vlib_mains; i++)
36     {
37       ptd = cm->per_thread_data + i;
38       count += ptd->self_crypto_enabled;
39     }
40
41   if (enabled || count > 1)
42     {
43       cm->per_thread_data[vlib_get_worker_thread_index
44                           (worker_idx)].self_crypto_enabled = enabled;
45     }
46   else                          /* cannot disable all crypto workers */
47     {
48       return VNET_API_ERROR_INVALID_VALUE_2;
49     }
50   return 0;
51 }
52
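/* Key add/del notifications from the crypto framework. Only linked
 * (crypto + integ) keys are cached locally, since the link-crypto path below
 * needs the two underlying key indices; on delete they are marked invalid. */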
53 static void
54 crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
55                                  vnet_crypto_key_index_t idx)
56 {
57   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
58   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
59
60   vec_validate (cm->keys, idx);
61
62   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
63     {
64       if (kop == VNET_CRYPTO_KEY_OP_DEL)
65         {
66           cm->keys[idx].index_crypto = UINT32_MAX;
67           cm->keys[idx].index_integ = UINT32_MAX;
68         }
69       else
70         {
71           cm->keys[idx] = *key;
72         }
73     }
74 }
75
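/* Enqueue an async frame on this thread's encrypt or decrypt ring. The slot
 * at head must be free; otherwise the ring is full and every element of the
 * frame is failed back to the caller. The store barrier publishes the job
 * pointer before head is advanced, so a consumer that observes the new head
 * also sees the job pointer. */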
76 static int
77 crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
78                                    vnet_crypto_async_frame_t *frame, u8 is_enc)
79 {
80   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
81   crypto_sw_scheduler_per_thread_data_t *ptd =
82     vec_elt_at_index (cm->per_thread_data, vm->thread_index);
83   crypto_sw_scheduler_queue_t *current_queue =
84     is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
85              &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
86   u64 head = current_queue->head;
87
88   if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
89     {
90       u32 n_elts = frame->n_elts, i;
91       for (i = 0; i < n_elts; i++)
92         frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
93       return -1;
94     }
95
96   current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
97   head += 1;
98   CLIB_MEMORY_STORE_BARRIER ();
99   current_queue->head = head;
100   return 0;
101 }
102
103 static int
104 crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
105                                            vnet_crypto_async_frame_t *frame)
106 {
107   return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
108 }
109
110 static int
111 crypto_sw_scheduler_frame_enqueue_encrypt (vlib_main_t *vm,
112                                            vnet_crypto_async_frame_t *frame)
113 {
114   return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
115 }
116
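/* Build a scatter-gather list in ptd->chunks for a (possibly chained) buffer,
 * starting at 'offset' into the first buffer and covering 'len' bytes across
 * the chain; op->chunk_index and op->n_chunks are set accordingly. */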
117 static_always_inline void
118 cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
119                             crypto_sw_scheduler_per_thread_data_t *ptd,
120                             vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
121                             u32 len)
122 {
123   vnet_crypto_op_chunk_t *ch;
124   u32 n_chunks;
125
126   /*
127    * offset is relative to b->data (can be negative if we stay in pre_data
128    * area). Make sure it does not go beyond the 1st buffer.
129    */
130   ASSERT (b->current_data + b->current_length > offset);
131   offset = clib_min (b->current_data + b->current_length, offset);
132
133   op->chunk_index = vec_len (ptd->chunks);
134
135   vec_add2 (ptd->chunks, ch, 1);
136   ch->src = ch->dst = b->data + offset;
137   ch->len = clib_min (b->current_data + b->current_length - offset, len);
138   len -= ch->len;
139   n_chunks = 1;
140
141   while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
142     {
143       b = vlib_get_buffer (vm, b->next_buffer);
144       vec_add2 (ptd->chunks, ch, 1);
145       ch->src = ch->dst = vlib_buffer_get_current (b);
146       ch->len = clib_min (b->current_length, len);
147       len -= ch->len;
148       n_chunks++;
149     }
150
151   if (len)
152     {
153       /* Some async crypto users can use buffers in creative ways, let's allow
154        * some flexibility here...
155        * Current example is ESP decrypt with ESN in async mode: it will stash
156        * ESN at the end of the last buffer (if it can) because it must be part
157        * of the integrity check but it will not update the buffer length.
158        * Fixup the last operation chunk length if we have room.
159        */
160       ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
161       if (vlib_buffer_space_left_at_end (vm, b) >= len)
162         ch->len += len;
163     }
164
165   op->n_chunks = n_chunks;
166 }
167
168 static_always_inline void
169 crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
170                                   crypto_sw_scheduler_per_thread_data_t * ptd,
171                                   vnet_crypto_async_frame_elt_t * fe,
172                                   u32 index, u32 bi,
173                                   vnet_crypto_op_id_t op_id, u16 aad_len,
174                                   u8 tag_len)
175 {
176   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
177   vnet_crypto_op_t *op = 0;
178
179   if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
180     {
181       vec_add2 (ptd->chained_crypto_ops, op, 1);
182       cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
183                                   fe->crypto_total_length);
184     }
185   else
186     {
187       vec_add2 (ptd->crypto_ops, op, 1);
188       op->src = op->dst = b->data + fe->crypto_start_offset;
189       op->len = fe->crypto_total_length;
190     }
191
192   op->op = op_id;
193   op->tag = fe->tag;
194   op->flags = fe->flags;
195   op->key_index = fe->key_index;
196   op->iv = fe->iv;
197   op->aad = fe->aad;
198   op->aad_len = aad_len;
199   op->tag_len = tag_len;
200   op->user_data = index;
201 }
202
203 static_always_inline void
204 crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
205                                          crypto_sw_scheduler_per_thread_data_t
206                                          * ptd, vnet_crypto_key_t * key,
207                                          vnet_crypto_async_frame_elt_t * fe,
208                                          u32 index, u32 bi,
209                                          vnet_crypto_op_id_t crypto_op_id,
210                                          vnet_crypto_op_id_t integ_op_id,
211                                          u32 digest_len, u8 is_enc)
212 {
213   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
214   vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;
215
216   if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
217     {
218       vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
219       vec_add2 (ptd->chained_integ_ops, integ_op, 1);
220       cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
221                                   fe->crypto_start_offset,
222                                   fe->crypto_total_length);
223       cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
224                                   fe->integ_start_offset,
225                                   fe->crypto_total_length +
226                                   fe->integ_length_adj);
227     }
228   else
229     {
230       vec_add2 (ptd->crypto_ops, crypto_op, 1);
231       vec_add2 (ptd->integ_ops, integ_op, 1);
232       crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
233       crypto_op->len = fe->crypto_total_length;
234       integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
235       integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
236     }
237
238   crypto_op->op = crypto_op_id;
239   crypto_op->iv = fe->iv;
240   crypto_op->key_index = key->index_crypto;
241   crypto_op->user_data = 0;
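  /* The HMAC-verify flag belongs to the integrity op only; the cipher op
   * must not attempt a digest check. */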
242   crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
243   integ_op->op = integ_op_id;
244   integ_op->digest = fe->digest;
245   integ_op->digest_len = digest_len;
246   integ_op->key_index = key->index_integ;
247   integ_op->flags = fe->flags;
248   crypto_op->user_data = integ_op->user_data = index;
249 }
250
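/* Run a vector of synchronous ops. On any failure, copy each op's status back
 * to its frame element (op->user_data holds the element index) and mark the
 * frame as errored. */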
251 static_always_inline void
252 process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
253              vnet_crypto_op_t * ops, u8 * state)
254 {
255   u32 n_fail, n_ops = vec_len (ops);
256   vnet_crypto_op_t *op = ops;
257
258   if (n_ops == 0)
259     return;
260
261   n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
262
263   /*
264    * If we had a failure in the ops then we need to walk all the ops
265    * and set the status in the corresponding frame. This status is
266    * not set in the case with no failures, as in that case the overall
267    * frame status is success.
268    */
269   if (n_fail)
270     {
271       for (int i = 0; i < n_ops; i++)
272         {
273           ASSERT (op - ops < n_ops);
274
275           f->elts[op->user_data].status = op->status;
276           op++;
277         }
278       *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
279     }
280 }
281
282 static_always_inline void
283 process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
284                      vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
285                      u8 * state)
286 {
287   u32 n_fail, n_ops = vec_len (ops);
288   vnet_crypto_op_t *op = ops;
289
290   if (n_ops == 0)
291     return;
292
293   n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
294
295   /*
296    * If we had a failure in the ops then we need to walk all the ops
297    * and set the status in the corresponding frame. This status is
298    * not set in the case with no failures, as in that case the overall
299    * frame status is success.
300    */
301   if (n_fail)
302     {
303       for (int i = 0; i < n_ops; i++)
304         {
305           ASSERT (op - ops < n_ops);
306
307           f->elts[op->user_data].status = op->status;
308           op++;
309         }
310       *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
311     }
312 }
313
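/* Convert every element of an AEAD frame into synchronous ops, run them on
 * this thread and set the frame state (success unless any element failed). */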
314 static_always_inline void
315 crypto_sw_scheduler_process_aead (vlib_main_t *vm,
316                                   crypto_sw_scheduler_per_thread_data_t *ptd,
317                                   vnet_crypto_async_frame_t *f, u32 aead_op,
318                                   u32 aad_len, u32 digest_len)
319 {
320   vnet_crypto_async_frame_elt_t *fe;
321   u32 *bi;
322   u32 n_elts = f->n_elts;
323   u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
324
325   vec_reset_length (ptd->crypto_ops);
326   vec_reset_length (ptd->integ_ops);
327   vec_reset_length (ptd->chained_crypto_ops);
328   vec_reset_length (ptd->chained_integ_ops);
329   vec_reset_length (ptd->chunks);
330
331   fe = f->elts;
332   bi = f->buffer_indices;
333
334   while (n_elts--)
335     {
336       if (n_elts > 1)
337         clib_prefetch_load (fe + 1);
338
339       crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
340                                         aead_op, aad_len, digest_len);
341       bi++;
342       fe++;
343     }
344
345   process_ops (vm, f, ptd->crypto_ops, &state);
346   process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
347                        &state);
348   f->state = state;
349 }
350
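/* Same as above for linked cipher + HMAC frames. For encryption the cipher
 * ops run before the integrity ops (encrypt-then-MAC); for decryption the
 * integrity ops are verified first. */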
351 static_always_inline void
352 crypto_sw_scheduler_process_link (
353   vlib_main_t *vm, crypto_sw_scheduler_main_t *cm,
354   crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f,
355   u32 crypto_op, u32 auth_op, u16 digest_len, u8 is_enc)
356 {
357   vnet_crypto_async_frame_elt_t *fe;
358   u32 *bi;
359   u32 n_elts = f->n_elts;
360   u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
361
362   vec_reset_length (ptd->crypto_ops);
363   vec_reset_length (ptd->integ_ops);
364   vec_reset_length (ptd->chained_crypto_ops);
365   vec_reset_length (ptd->chained_integ_ops);
366   vec_reset_length (ptd->chunks);
367   fe = f->elts;
368   bi = f->buffer_indices;
369
370   while (n_elts--)
371     {
372       if (n_elts > 1)
373         clib_prefetch_load (fe + 1);
374
375       crypto_sw_scheduler_convert_link_crypto (
376         vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0],
377         crypto_op, auth_op, digest_len, is_enc);
378       bi++;
379       fe++;
380     }
381
382   if (is_enc)
383     {
384       process_ops (vm, f, ptd->crypto_ops, &state);
385       process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
386                            &state);
387       process_ops (vm, f, ptd->integ_ops, &state);
388       process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
389                            &state);
390     }
391   else
392     {
393       process_ops (vm, f, ptd->integ_ops, &state);
394       process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
395                            &state);
396       process_ops (vm, f, ptd->crypto_ops, &state);
397       process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
398                            &state);
399     }
400
401   f->state = state;
402 }
403
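/* Map an async op id to its synchronous op ids. Returns 1 for AEAD ops
 * (auth_op_or_aad_len carries the AAD length), 0 for linked cipher + HMAC
 * ops (it carries the HMAC op id), and -1 for unknown ids. */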
404 static_always_inline int
405 convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
406                          u32 *crypto_op, u32 *auth_op_or_aad_len,
407                          u16 *digest_len, u8 *is_enc)
408 {
409   switch (async_op_id)
410     {
411 #define _(n, s, k, t, a)                                                      \
412   case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
413     *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
414     *auth_op_or_aad_len = a;                                                  \
415     *digest_len = t;                                                          \
416     *is_enc = 1;                                                              \
417     return 1;                                                                 \
418   case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC:                            \
419     *crypto_op = VNET_CRYPTO_OP_##n##_DEC;                                    \
420     *auth_op_or_aad_len = a;                                                  \
421     *digest_len = t;                                                          \
422     *is_enc = 0;                                                              \
423     return 1;
424       foreach_crypto_aead_async_alg
425 #undef _
426
427 #define _(c, h, s, k, d)                                                      \
428   case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC:                               \
429     *crypto_op = VNET_CRYPTO_OP_##c##_ENC;                                    \
430     *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
431     *digest_len = d;                                                          \
432     *is_enc = 1;                                                              \
433     return 0;                                                                 \
434   case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC:                               \
435     *crypto_op = VNET_CRYPTO_OP_##c##_DEC;                                    \
436     *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
437     *digest_len = d;                                                          \
438     *is_enc = 0;                                                              \
439     return 0;
440       foreach_crypto_link_async_alg
441 #undef _
442
443     default: return -1;
444     }
445
446   return -1;
447 }
448
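/* Called by the crypto dispatch node. If self-crypto is enabled, round-robin
 * over all per-thread queues starting after last_serve_lcore_id, alternating
 * between encrypt and decrypt queues on each call, claim one pending frame
 * with a compare-and-swap and process it inline. Then check this thread's own
 * queues (alternating via last_return_queue) and, if the frame at the tail is
 * complete, pop it and return it to the dispatch node. */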
449 static_always_inline vnet_crypto_async_frame_t *
450 crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
451                              u32 *enqueue_thread_idx)
452 {
453   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
454   crypto_sw_scheduler_per_thread_data_t *ptd =
455     cm->per_thread_data + vm->thread_index;
456   vnet_crypto_async_frame_t *f = 0;
457   crypto_sw_scheduler_queue_t *current_queue = 0;
458   u32 tail, head;
459   u8 found = 0;
460
461   /* get a pending frame to process */
462   if (ptd->self_crypto_enabled)
463     {
464       u32 i = ptd->last_serve_lcore_id + 1;
465
466       while (1)
467         {
468           crypto_sw_scheduler_per_thread_data_t *st;
469           u32 j;
470
471           if (i >= vec_len (cm->per_thread_data))
472             i = 0;
473
474           st = cm->per_thread_data + i;
475
476           if (ptd->last_serve_encrypt)
477             current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
478           else
479             current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
480
481           tail = current_queue->tail;
482           head = current_queue->head;
483
484           /* Skip this queue unless tail < head or head has overflowed
485            * and tail has not. At the point where tail overflows (== 0),
486            * the largest possible value of head is (queue size - 1).
487            * Prior to that, the largest possible value of head is
488            * (queue size - 2).
489            */
490           if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
491             goto skip_queue;
492
493           for (j = tail; j != head; j++)
494             {
495               f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
496
497               if (!f)
498                 continue;
499
500               if (clib_atomic_bool_cmp_and_swap (
501                     &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
502                     VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
503                 {
504                   found = 1;
505                   break;
506                 }
507             }
508
509         skip_queue:
510           if (found || i == ptd->last_serve_lcore_id)
511             {
512               CLIB_MEMORY_STORE_BARRIER ();
513               ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
514               break;
515             }
516
517           i++;
518         }
519
520       ptd->last_serve_lcore_id = i;
521     }
522
523   if (found)
524     {
525       u32 crypto_op, auth_op_or_aad_len;
526       u16 digest_len;
527       u8 is_enc;
528       int ret;
529
530       ret = convert_async_crypto_id (
531         f->op, &crypto_op, &auth_op_or_aad_len, &digest_len, &is_enc);
532
533       if (ret == 1)
534         crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
535                                           auth_op_or_aad_len, digest_len);
536       else if (ret == 0)
537         crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
538                                           auth_op_or_aad_len, digest_len,
539                                           is_enc);
540
541       *enqueue_thread_idx = f->enqueue_thread_index;
542       *nb_elts_processed = f->n_elts;
543     }
544
545   if (ptd->last_return_queue)
546     {
547       current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
548       ptd->last_return_queue = 0;
549     }
550   else
551     {
552       current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
553       ptd->last_return_queue = 1;
554     }
555
556   tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;
557
558   if (current_queue->jobs[tail] &&
559       current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
560     {
561       CLIB_MEMORY_STORE_BARRIER ();
562       current_queue->tail++;
563       f = current_queue->jobs[tail];
564       current_queue->jobs[tail] = 0;
565
566       return f;
567     }
568
569   return 0;
570 }
573
574 static clib_error_t *
575 sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
576                                 vlib_cli_command_t * cmd)
577 {
578   unformat_input_t _line_input, *line_input = &_line_input;
579   u32 worker_index = ~0;
580   u8 crypto_enable = 0;
581   int rv;
582
583   /* Get a line of input. */
584   if (!unformat_user (input, unformat_line_input, line_input))
585     return 0;
586
587   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
588     {
589       if (unformat (line_input, "worker %u", &worker_index))
590         {
591           if (unformat (line_input, "crypto"))
592             {
593               if (unformat (line_input, "on"))
594                 crypto_enable = 1;
595               else if (unformat (line_input, "off"))
596                 crypto_enable = 0;
597               else
598                 return (clib_error_return (0, "unknown input '%U'",
599                                            format_unformat_error,
600                                            line_input));
601             }
602           else
603             return (clib_error_return (0, "unknown input '%U'",
604                                        format_unformat_error, line_input));
605         }
606       else
607         return (clib_error_return (0, "unknown input '%U'",
608                                    format_unformat_error, line_input));
609     }
610
611   rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
612   if (rv == VNET_API_ERROR_INVALID_VALUE)
613     {
614       return (clib_error_return (0, "invalid worker idx: %d", worker_index));
615     }
616   else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
617     {
618       return (clib_error_return (0, "cannot disable all crypto workers"));
619     }
620   return 0;
621 }
622
623 /*?
624  * This command sets whether a worker will do crypto processing.
625  *
626  * @cliexpar
627  * Example of how to set worker crypto processing off:
628  * @cliexstart{set sw_scheduler worker 0 crypto off}
629  * @cliexend
630  ?*/
631 /* *INDENT-OFF* */
632 VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
633   .path = "set sw_scheduler",
634   .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
635   .function = sw_scheduler_set_worker_crypto,
636   .is_mp_safe = 1,
637 };
638 /* *INDENT-ON* */
639
640 static clib_error_t *
641 sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
642                            vlib_cli_command_t * cmd)
643 {
644   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
645   u32 i;
646
647   vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
648   for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
649     {
650       vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
651                        (vlib_worker_threads + i)->name,
652                        cm->per_thread_data[i].self_crypto_enabled ?
653                          "on" : "off");
654     }
655
656   return 0;
657 }
658
659 /*?
660  * This command displays sw_scheduler workers.
661  *
662  * @cliexpar
663  * Example of how to show workers:
664  * @cliexstart{show sw_scheduler workers}
665  * @cliexend
666  ?*/
667 /* *INDENT-OFF* */
668 VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
669   .path = "show sw_scheduler workers",
670   .short_help = "show sw_scheduler workers",
671   .function = sw_scheduler_show_workers,
672   .is_mp_safe = 1,
673 };
674 /* *INDENT-ON* */
675
676 clib_error_t *
677 sw_scheduler_cli_init (vlib_main_t * vm)
678 {
679   return 0;
680 }
681
682 VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
683
684 crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
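/* Plugin init: allocate per-thread encrypt/decrypt rings, register the engine
 * and its key handler, and hook the per-op enqueue handlers plus the single
 * dequeue handler registered below. */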
685 clib_error_t *
686 crypto_sw_scheduler_init (vlib_main_t * vm)
687 {
688   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
689   vlib_thread_main_t *tm = vlib_get_thread_main ();
690   clib_error_t *error = 0;
691   crypto_sw_scheduler_per_thread_data_t *ptd;
692
693   vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
694                         CLIB_CACHE_LINE_BYTES);
695
696   vec_foreach (ptd, cm->per_thread_data)
697   {
698     ptd->self_crypto_enabled = 1;
699
700     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
701     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;
702
703     vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
704                           CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
705                           CLIB_CACHE_LINE_BYTES);
706
707     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
708     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;
709
710     ptd->last_serve_encrypt = 0;
711     ptd->last_return_queue = 0;
712
713     vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
714                           CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
715                           CLIB_CACHE_LINE_BYTES);
716   }
717
718   cm->crypto_engine_index =
719     vnet_crypto_register_engine (vm, "sw_scheduler", 100,
720                                  "SW Scheduler Async Engine");
721
722   vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
723                                     crypto_sw_scheduler_key_handler);
724
725   crypto_sw_scheduler_api_init (vm);
726
727   /* *INDENT-OFF* */
728 #define _(n, s, k, t, a)                                                      \
729   vnet_crypto_register_enqueue_handler (                                      \
730     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,  \
731     crypto_sw_scheduler_frame_enqueue_encrypt);                               \
732   vnet_crypto_register_enqueue_handler (                                      \
733     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,  \
734     crypto_sw_scheduler_frame_enqueue_decrypt);
735   foreach_crypto_aead_async_alg
736 #undef _
737
738 #define _(c, h, s, k, d)                                                      \
739   vnet_crypto_register_enqueue_handler (                                      \
740     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
741     crypto_sw_scheduler_frame_enqueue_encrypt);                               \
742   vnet_crypto_register_enqueue_handler (                                      \
743     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
744     crypto_sw_scheduler_frame_enqueue_decrypt);
745   foreach_crypto_link_async_alg
746 #undef _
747   /* *INDENT-ON* */
748
749   vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
750                                         crypto_sw_scheduler_dequeue);
751
752   if (error)
753     vec_free (cm->per_thread_data);
754
755   return error;
756 }
757
758 /* *INDENT-OFF* */
759 VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
760   .runs_after = VLIB_INITS ("vnet_crypto_init"),
761 };
762
763 VLIB_PLUGIN_REGISTER () = {
764   .version = VPP_BUILD_VER,
765   .description = "SW Scheduler Crypto Async Engine plugin",
766 };
767 /* *INDENT-ON* */
768
769 /*
770  * fd.io coding-style-patch-verification: ON
771  *
772  * Local Variables:
773  * eval: (c-set-style "gnu")
774  * End:
775  */