/* src/plugins/crypto_sw_scheduler/main.c */
/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

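/* Enable or disable self-crypto processing on a given worker thread.
 * At least one worker must keep crypto enabled; attempting to disable the
 * last one returns VNET_API_ERROR_INVALID_VALUE_2. */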
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  u32 count = 0, i;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (i = 0; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
                          (worker_idx)].self_crypto_enabled = enabled;
    }
  else                          /* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}

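/* Key handler: mirror link-type keys into the scheduler's local key vector
 * so the linked crypto/integ key indices are available at dequeue time. */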
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
                                 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
        {
          cm->keys[idx].index_crypto = UINT32_MAX;
          cm->keys[idx].index_integ = UINT32_MAX;
        }
      else
        {
          cm->keys[idx] = *key;
        }
    }
}

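/* Enqueue an async frame on this thread's encrypt or decrypt ring.
 * The slot at the ring head must be free; if it is still occupied the
 * frame is rejected and all of its elements are marked as failed. */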
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
                                   vnet_crypto_async_frame_t *frame, u8 is_enc)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *current_queue =
    is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
             &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
  u64 head = current_queue->head;

  if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
        frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }

  current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  current_queue->head = head;
  return 0;
}

static int
crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
                                           vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
}

static int
crypto_sw_scheduler_frame_enqueue_encrypt (vlib_main_t *vm,
                                           vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
}

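/* Build a scatter-gather chunk list for a chained buffer: walk the buffer
 * chain starting at 'offset' into the first buffer and emit one
 * vnet_crypto_op_chunk_t per buffer until 'len' bytes are covered. */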
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
                            crypto_sw_scheduler_per_thread_data_t *ptd,
                            vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
                            u32 len)
{
  vnet_crypto_op_chunk_t *ch;
  u32 n_chunks;

  /*
   * offset is relative to b->data (can be negative if we stay in pre_data
   * area). Make sure it does not go beyond the 1st buffer.
   */
  ASSERT (b->current_data + b->current_length > offset);
  offset = clib_min (b->current_data + b->current_length, offset);

  op->chunk_index = vec_len (ptd->chunks);

  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = b->data + offset;
  ch->len = clib_min (b->current_data + b->current_length - offset, len);
  len -= ch->len;
  n_chunks = 1;

  while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = clib_min (b->current_length, len);
      len -= ch->len;
      n_chunks++;
    }

  if (len)
    {
      /* Some async crypto users can use buffers in creative ways, let's allow
       * some flexibility here...
       * Current example is ESP decrypt with ESN in async mode: it will stash
       * ESN at the end of the last buffer (if it can) because it must be part
       * of the integrity check but it will not update the buffer length.
       * Fixup the last operation chunk length if we have room.
       */
      ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
      if (vlib_buffer_space_left_at_end (vm, b) >= len)
        ch->len += len;
    }

  op->n_chunks = n_chunks;
}

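/* Convert one async frame element into a synchronous AEAD op, taking the
 * chained-op path when the element spans multiple buffers. */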
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
                                  crypto_sw_scheduler_per_thread_data_t * ptd,
                                  vnet_crypto_async_frame_elt_t * fe,
                                  u32 index, u32 bi,
                                  vnet_crypto_op_id_t op_id, u16 aad_len,
                                  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
                                  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

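/* Convert one async frame element of a linked (cipher + HMAC) algorithm
 * into a pair of synchronous crypto and integrity ops, keyed by the link
 * key's crypto and integ key indices. */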
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
                                         crypto_sw_scheduler_per_thread_data_t
                                         * ptd, vnet_crypto_key_t * key,
                                         vnet_crypto_async_frame_elt_t * fe,
                                         u32 index, u32 bi,
                                         vnet_crypto_op_id_t crypto_op_id,
                                         vnet_crypto_op_id_t integ_op_id,
                                         u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
                                  fe->crypto_start_offset,
                                  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
                                  fe->integ_start_offset,
                                  fe->crypto_total_length +
                                  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags;
  crypto_op->user_data = integ_op->user_data = index;
}

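/* Run a vector of synchronous ops; on any failure, copy each op's status
 * back into its frame element and flag the frame as errored.
 * process_chained_ops below does the same for chained (multi-buffer) ops. */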
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
             vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  /*
   * If we had a failure in the ops then we need to walk all the ops
   * and set the status in the corresponding frame. This status is
   * not set in the case with no failures, as in that case the overall
   * frame status is success.
   */
  if (n_fail)
    {
      for (int i = 0; i < n_ops; i++)
        {
          ASSERT (op - ops < n_ops);

          f->elts[op->user_data].status = op->status;
          op++;
        }
      *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
    }
}

static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
                     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
                     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  /*
   * If we had a failure in the ops then we need to walk all the ops
   * and set the status in the corresponding frame. This status is
   * not set in the case with no failures, as in that case the overall
   * frame status is success.
   */
  if (n_fail)
    {
      for (int i = 0; i < n_ops; i++)
        {
          ASSERT (op - ops < n_ops);

          f->elts[op->user_data].status = op->status;
          op++;
        }
      *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
    }
}

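/* Process a dequeued AEAD frame: convert every element to a synchronous op,
 * run the plain and chained op vectors, and record the resulting state. */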
static_always_inline void
crypto_sw_scheduler_process_aead (vlib_main_t *vm,
                                  crypto_sw_scheduler_per_thread_data_t *ptd,
                                  vnet_crypto_async_frame_t *f, u32 aead_op,
                                  u32 aad_len, u32 digest_len)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);

  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
        clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
                                        aead_op, aad_len, digest_len);
      bi++;
      fe++;
    }

  process_ops (vm, f, ptd->crypto_ops, &state);
  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks, &state);
  f->state = state;
}

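/* Process a dequeued linked-algorithm frame: run the cipher before the
 * integrity ops on encrypt, and the integrity ops before the cipher on
 * decrypt. */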
static_always_inline void
crypto_sw_scheduler_process_link (vlib_main_t *vm,
                                  crypto_sw_scheduler_main_t *cm,
                                  crypto_sw_scheduler_per_thread_data_t *ptd,
                                  vnet_crypto_async_frame_t *f, u32 crypto_op,
                                  u32 auth_op, u16 digest_len, u8 is_enc)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);
  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
        clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_link_crypto (
        vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0], crypto_op,
        auth_op, digest_len, is_enc);
      bi++;
      fe++;
    }

  if (is_enc)
    {
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks, &state);
    }
  else
    {
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks, &state);
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
                           &state);
    }

  f->state = state;
}

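/* Map an async op id to its synchronous counterpart(s).
 * Returns 1 for AEAD ops (auth_op_or_aad_len carries the AAD length),
 * 0 for linked ops (auth_op_or_aad_len carries the HMAC op id),
 * and -1 for unknown ids. */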
static_always_inline int
convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id, u32 *crypto_op,
                         u32 *auth_op_or_aad_len, u16 *digest_len, u8 *is_enc)
{
  switch (async_op_id)
    {
#define _(n, s, k, t, a)                                                      \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 1;                                                              \
    return 1;                                                                 \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_DEC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 0;                                                              \
    return 1;
      foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_ENC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 1;                                                              \
    return 0;                                                                 \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_DEC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 0;                                                              \
    return 0;
      foreach_crypto_link_async_alg
#undef _

    default:
      return -1;
    }

  return -1;
}

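/* Dequeue handler, called from the crypto dispatch node.
 * When self-crypto is enabled, poll the per-thread encrypt/decrypt queues
 * round-robin, claim one pending frame with a compare-and-swap and process
 * it inline.  Then return, from this thread's own queues, the frame at the
 * tail if it has already completed. */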
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
                             u32 *enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    cm->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *f = 0;
  crypto_sw_scheduler_queue_t *current_queue = 0;
  u32 tail, head;
  u8 found = 0;

  /* get a pending frame to process */
  if (ptd->self_crypto_enabled)
    {
      u32 i = ptd->last_serve_lcore_id + 1;

      while (1)
        {
          crypto_sw_scheduler_per_thread_data_t *st;
          u32 j;

          if (i >= vec_len (cm->per_thread_data))
            i = 0;

          st = cm->per_thread_data + i;

          if (ptd->last_serve_encrypt)
            current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
          else
            current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];

          tail = current_queue->tail;
          head = current_queue->head;

          /* Skip this queue unless tail < head or head has overflowed
           * and tail has not. At the point where tail overflows (== 0),
           * the largest possible value of head is (queue size - 1).
           * Prior to that, the largest possible value of head is
           * (queue size - 2).
           */
          if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
            goto skip_queue;

          for (j = tail; j != head; j++)
            {
              f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];

              if (!f)
                continue;

              if (clib_atomic_bool_cmp_and_swap (
                    &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
                    VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
                {
                  found = 1;
                  break;
                }
            }

        skip_queue:
          if (found || i == ptd->last_serve_lcore_id)
            {
              CLIB_MEMORY_STORE_BARRIER ();
              ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
              break;
            }

          i++;
        }

      ptd->last_serve_lcore_id = i;
    }

  if (found)
    {
      u32 crypto_op, auth_op_or_aad_len;
      u16 digest_len;
      u8 is_enc;
      int ret;

      ret = convert_async_crypto_id (f->op, &crypto_op, &auth_op_or_aad_len,
                                     &digest_len, &is_enc);

      if (ret == 1)
        crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
                                          auth_op_or_aad_len, digest_len);
      else if (ret == 0)
        crypto_sw_scheduler_process_link (
          vm, cm, ptd, f, crypto_op, auth_op_or_aad_len, digest_len, is_enc);

      *enqueue_thread_idx = f->enqueue_thread_index;
      *nb_elts_processed = f->n_elts;
    }

  if (ptd->last_return_queue)
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
      ptd->last_return_queue = 0;
    }
  else
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
      ptd->last_return_queue = 1;
    }

  tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;

  if (current_queue->jobs[tail] &&
      current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      current_queue->tail++;
      f = current_queue->jobs[tail];
      current_queue->jobs[tail] = 0;

      return f;
    }

  return 0;
}

static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
                                vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index = ~0;
  u8 crypto_enable = 0;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
        {
          if (unformat (line_input, "crypto"))
            {
              if (unformat (line_input, "on"))
                crypto_enable = 1;
              else if (unformat (line_input, "off"))
                crypto_enable = 0;
              else
                return (clib_error_return (0, "unknown input '%U'",
                                           format_unformat_error,
                                           line_input));
            }
          else
            return (clib_error_return (0, "unknown input '%U'",
                                       format_unformat_error, line_input));
        }
      else
        return (clib_error_return (0, "unknown input '%U'",
                                   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a worker does crypto processing.
 *
 * @cliexpar
 * Example of how to set worker crypto processing off:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
                           vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
                       (vlib_worker_threads + i)->name,
                       cm->per_thread_data[i].self_crypto_enabled ?
                         "on" : "off");
    }

  return 0;
}

/*?
 * This command displays sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
 ?*/
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
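
/* Plugin init: allocate per-thread encrypt/decrypt queues, register the
 * engine with the crypto infrastructure, and install the per-algorithm
 * enqueue handlers and the common dequeue handler. */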
clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;
  u32 i;

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);

  for (i = 0; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      ptd->self_crypto_enabled = i > 0 || vlib_num_workers () < 1;

      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;

      vec_validate_aligned (
        ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
        CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1, CLIB_CACHE_LINE_BYTES);

      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;

      ptd->last_serve_encrypt = 0;
      ptd->last_return_queue = 0;

      vec_validate_aligned (
        ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
        CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1, CLIB_CACHE_LINE_BYTES);
    }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
                                 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
                                    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,  \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,  \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_link_async_alg
#undef _

  vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
                                        crypto_sw_scheduler_dequeue);

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */