crypto-sw-scheduler: fix queue iterator
[vpp.git] / src / plugins / crypto_sw_scheduler / main.c
1 /*
2  * Copyright (c) 2020 Intel and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  *     http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15
16 #include <vlib/vlib.h>
17 #include <vnet/plugin/plugin.h>
18 #include <vpp/app/version.h>
19
20 #include "crypto_sw_scheduler.h"
21
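/*
 * Enable or disable self-crypto processing on a given worker thread.
 * worker_idx is a worker index (the main thread is excluded); the loop
 * below starts at 1 when worker threads exist so the main thread's
 * per-thread data is skipped while counting enabled workers. Disabling
 * is refused if it would leave no crypto-enabled worker at all.
 */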
22 int
23 crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
24 {
25   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
26   vlib_thread_main_t *tm = vlib_get_thread_main ();
27   crypto_sw_scheduler_per_thread_data_t *ptd = 0;
28   u32 count = 0, i = vlib_num_workers () > 0;
29
30   if (worker_idx >= vlib_num_workers ())
31     {
32       return VNET_API_ERROR_INVALID_VALUE;
33     }
34
35   for (; i < tm->n_vlib_mains; i++)
36     {
37       ptd = cm->per_thread_data + i;
38       count += ptd->self_crypto_enabled;
39     }
40
41   if (enabled || count > 1)
42     {
43       cm->per_thread_data[vlib_get_worker_thread_index
44                           (worker_idx)].self_crypto_enabled = enabled;
45     }
46   else                          /* cannot disable all crypto workers */
47     {
48       return VNET_API_ERROR_INVALID_VALUE_2;
49     }
50   return 0;
51 }
52
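/*
 * Key add/delete notification from the crypto infra. Only keys of type
 * VNET_CRYPTO_KEY_TYPE_LINK are mirrored into cm->keys, since the link
 * path below needs the paired index_crypto / index_integ values; on
 * delete both indices are invalidated with UINT32_MAX.
 */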
53 static void
54 crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
55                                  vnet_crypto_key_index_t idx)
56 {
57   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
58   vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
59
60   vec_validate (cm->keys, idx);
61
62   if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
63     {
64       if (kop == VNET_CRYPTO_KEY_OP_DEL)
65         {
66           cm->keys[idx].index_crypto = UINT32_MAX;
67           cm->keys[idx].index_integ = UINT32_MAX;
68         }
69       else
70         {
71           cm->keys[idx] = *key;
72         }
73     }
74 }
75
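/*
 * Enqueue an async frame on this thread's encrypt or decrypt ring.
 * Each ring slot holds one frame pointer; if the slot at the current
 * head is still occupied the ring is full, every element is marked
 * FAIL_ENGINE_ERR and -1 is returned. Otherwise the frame is stored
 * and a store barrier makes it visible before head is advanced.
 */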
76 static int
77 crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
78                                    vnet_crypto_async_frame_t *frame, u8 is_enc)
79 {
80   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
81   crypto_sw_scheduler_per_thread_data_t *ptd =
82     vec_elt_at_index (cm->per_thread_data, vm->thread_index);
83   crypto_sw_scheduler_queue_t *current_queue =
84     is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
85              &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
86   u64 head = current_queue->head;
87
88   if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
89     {
90       u32 n_elts = frame->n_elts, i;
91       for (i = 0; i < n_elts; i++)
92         frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
93       return -1;
94     }
95
96   current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
97   head += 1;
98   CLIB_MEMORY_STORE_BARRIER ();
99   current_queue->head = head;
100   return 0;
101 }
102
103 static int
104 crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
105                                            vnet_crypto_async_frame_t *frame)
106 {
107   return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
108 }
109
110 static int
111 crypto_sw_scheduler_frame_enqueue_encrypt (vlib_main_t *vm,
112                                            vnet_crypto_async_frame_t *frame)
113 {
114   return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
115 }
116
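/*
 * Build a scatter-gather chunk list for a (possibly chained) buffer,
 * starting at 'offset' into the first buffer and covering 'len' bytes.
 * Chunks are appended to ptd->chunks and the op's chunk_index and
 * n_chunks fields are set accordingly.
 */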
117 static_always_inline void
118 cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
119                             crypto_sw_scheduler_per_thread_data_t *ptd,
120                             vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
121                             u32 len)
122 {
123   vnet_crypto_op_chunk_t *ch;
124   u32 n_chunks;
125
126   /*
127    * offset is relative to b->data (can be negative if we stay in pre_data
128    * area). Make sure it does not go beyond the 1st buffer.
129    */
130   ASSERT (b->current_data + b->current_length > offset);
131   offset = clib_min (b->current_data + b->current_length, offset);
132
133   op->chunk_index = vec_len (ptd->chunks);
134
135   vec_add2 (ptd->chunks, ch, 1);
136   ch->src = ch->dst = b->data + offset;
137   ch->len = clib_min (b->current_data + b->current_length - offset, len);
138   len -= ch->len;
139   n_chunks = 1;
140
141   while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
142     {
143       b = vlib_get_buffer (vm, b->next_buffer);
144       vec_add2 (ptd->chunks, ch, 1);
145       ch->src = ch->dst = vlib_buffer_get_current (b);
146       ch->len = clib_min (b->current_length, len);
147       len -= ch->len;
148       n_chunks++;
149     }
150
151   if (len)
152     {
153       /* Some async crypto users can use buffers in creative ways, let's allow
154        * some flexibility here...
155        * Current example is ESP decrypt with ESN in async mode: it will stash
156        * ESN at the end of the last buffer (if it can) because it must be part
157        * of the integrity check but it will not update the buffer length.
158        * Fixup the last operation chunk length if we have room.
159        */
160       ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
161       if (vlib_buffer_space_left_at_end (vm, b) >= len)
162         ch->len += len;
163     }
164
165   op->n_chunks = n_chunks;
166 }
167
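/*
 * Convert one async frame element into a synchronous AEAD op, using a
 * chained-buffer (SGL) op when the element carries the CHAINED_BUFFERS
 * flag and a flat op otherwise. The element index is stored in
 * op->user_data so a failing op can be mapped back to its frame element.
 */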
168 static_always_inline void
169 crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
170                                   crypto_sw_scheduler_per_thread_data_t * ptd,
171                                   vnet_crypto_async_frame_elt_t * fe,
172                                   u32 index, u32 bi,
173                                   vnet_crypto_op_id_t op_id, u16 aad_len,
174                                   u8 tag_len)
175 {
176   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
177   vnet_crypto_op_t *op = 0;
178
179   if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
180     {
181       vec_add2 (ptd->chained_crypto_ops, op, 1);
182       cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
183                                   fe->crypto_total_length);
184     }
185   else
186     {
187       vec_add2 (ptd->crypto_ops, op, 1);
188       op->src = op->dst = b->data + fe->crypto_start_offset;
189       op->len = fe->crypto_total_length;
190     }
191
192   op->op = op_id;
193   op->tag = fe->tag;
194   op->flags = fe->flags;
195   op->key_index = fe->key_index;
196   op->iv = fe->iv;
197   op->aad = fe->aad;
198   op->aad_len = aad_len;
199   op->tag_len = tag_len;
200   op->user_data = index;
201 }
202
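/*
 * Convert one async frame element of a linked (cipher + HMAC) algorithm
 * into a cipher op and an integrity op. The HMAC-check flag is masked
 * off the cipher op and the IV-init flag is masked off the integrity op,
 * so each op only performs its own part of the work.
 */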
203 static_always_inline void
204 crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
205                                          crypto_sw_scheduler_per_thread_data_t
206                                          * ptd, vnet_crypto_key_t * key,
207                                          vnet_crypto_async_frame_elt_t * fe,
208                                          u32 index, u32 bi,
209                                          vnet_crypto_op_id_t crypto_op_id,
210                                          vnet_crypto_op_id_t integ_op_id,
211                                          u32 digest_len, u8 is_enc)
212 {
213   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
214   vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;
215
216   if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
217     {
218       vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
219       vec_add2 (ptd->chained_integ_ops, integ_op, 1);
220       cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
221                                   fe->crypto_start_offset,
222                                   fe->crypto_total_length);
223       cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
224                                   fe->integ_start_offset,
225                                   fe->crypto_total_length +
226                                   fe->integ_length_adj);
227     }
228   else
229     {
230       vec_add2 (ptd->crypto_ops, crypto_op, 1);
231       vec_add2 (ptd->integ_ops, integ_op, 1);
232       crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
233       crypto_op->len = fe->crypto_total_length;
234       integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
235       integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
236     }
237
238   crypto_op->op = crypto_op_id;
239   crypto_op->iv = fe->iv;
240   crypto_op->key_index = key->index_crypto;
241   crypto_op->user_data = 0;
242   crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
243   integ_op->op = integ_op_id;
244   integ_op->digest = fe->digest;
245   integ_op->digest_len = digest_len;
246   integ_op->key_index = key->index_integ;
247   integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
248   crypto_op->user_data = integ_op->user_data = index;
249 }
250
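/*
 * Run a vector of ops through the synchronous crypto engine and copy the
 * status of any failed op back into the corresponding frame element,
 * marking the whole frame as VNET_CRYPTO_FRAME_STATE_ELT_ERROR.
 */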
251 static_always_inline void
252 process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
253              vnet_crypto_op_t * ops, u8 * state)
254 {
255   u32 n_fail, n_ops = vec_len (ops);
256   vnet_crypto_op_t *op = ops;
257
258   if (n_ops == 0)
259     return;
260
261   n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
262
263   while (n_fail)
264     {
265       ASSERT (op - ops < n_ops);
266
267       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
268         {
269           f->elts[op->user_data].status = op->status;
270           *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
271           n_fail--;
272         }
273       op++;
274     }
275 }
276
277 static_always_inline void
278 process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
279                      vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
280                      u8 * state)
281 {
282   u32 n_fail, n_ops = vec_len (ops);
283   vnet_crypto_op_t *op = ops;
284
285   if (n_ops == 0)
286     return;
287
288   n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
289
290   while (n_fail)
291     {
292       ASSERT (op - ops < n_ops);
293
294       if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
295         {
296           f->elts[op->user_data].status = op->status;
297           *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
298           n_fail--;
299         }
300       op++;
301     }
302 }
303
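/*
 * Process a complete AEAD frame: convert every element, run the flat and
 * chained op vectors synchronously, then publish the resulting state in
 * f->state.
 */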
304 static_always_inline void
305 crypto_sw_scheduler_process_aead (vlib_main_t *vm,
306                                   crypto_sw_scheduler_per_thread_data_t *ptd,
307                                   vnet_crypto_async_frame_t *f, u32 aead_op,
308                                   u32 aad_len, u32 digest_len)
309 {
310   vnet_crypto_async_frame_elt_t *fe;
311   u32 *bi;
312   u32 n_elts = f->n_elts;
313   u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
314
315   vec_reset_length (ptd->crypto_ops);
316   vec_reset_length (ptd->integ_ops);
317   vec_reset_length (ptd->chained_crypto_ops);
318   vec_reset_length (ptd->chained_integ_ops);
319   vec_reset_length (ptd->chunks);
320
321   fe = f->elts;
322   bi = f->buffer_indices;
323
324   while (n_elts--)
325     {
326       if (n_elts > 1)
327         clib_prefetch_load (fe + 1);
328
329       crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
330                                         aead_op, aad_len, digest_len);
331       bi++;
332       fe++;
333     }
334
335   process_ops (vm, f, ptd->crypto_ops, &state);
336   process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
337                        &state);
338   f->state = state;
339 }
340
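/*
 * Process a complete linked-algorithm frame. For encryption the cipher
 * ops run before the integrity ops; for decryption the integrity (HMAC
 * check) ops run first, then the cipher ops.
 */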
341 static_always_inline void
342 crypto_sw_scheduler_process_link (
343   vlib_main_t *vm, crypto_sw_scheduler_main_t *cm,
344   crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f,
345   u32 crypto_op, u32 auth_op, u16 digest_len, u8 is_enc)
346 {
347   vnet_crypto_async_frame_elt_t *fe;
348   u32 *bi;
349   u32 n_elts = f->n_elts;
350   u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
351
352   vec_reset_length (ptd->crypto_ops);
353   vec_reset_length (ptd->integ_ops);
354   vec_reset_length (ptd->chained_crypto_ops);
355   vec_reset_length (ptd->chained_integ_ops);
356   vec_reset_length (ptd->chunks);
357   fe = f->elts;
358   bi = f->buffer_indices;
359
360   while (n_elts--)
361     {
362       if (n_elts > 1)
363         clib_prefetch_load (fe + 1);
364
365       crypto_sw_scheduler_convert_link_crypto (
366         vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0],
367         crypto_op, auth_op, digest_len, is_enc);
368       bi++;
369       fe++;
370     }
371
372   if (is_enc)
373     {
374       process_ops (vm, f, ptd->crypto_ops, &state);
375       process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
376                            &state);
377       process_ops (vm, f, ptd->integ_ops, &state);
378       process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
379                            &state);
380     }
381   else
382     {
383       process_ops (vm, f, ptd->integ_ops, &state);
384       process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
385                            &state);
386       process_ops (vm, f, ptd->crypto_ops, &state);
387       process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
388                            &state);
389     }
390
391   f->state = state;
392 }
393
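/*
 * Map an async op id to its synchronous op ids. Returns 1 for AEAD
 * algorithms (auth_op_or_aad_len holds the AAD length), 0 for linked
 * cipher + HMAC algorithms (auth_op_or_aad_len holds the HMAC op id),
 * and -1 for unsupported ids.
 */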
394 static_always_inline int
395 convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
396                          u32 *crypto_op, u32 *auth_op_or_aad_len,
397                          u16 *digest_len, u8 *is_enc)
398 {
399   switch (async_op_id)
400     {
401 #define _(n, s, k, t, a)                                                      \
402   case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
403     *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
404     *auth_op_or_aad_len = a;                                                  \
405     *digest_len = t;                                                          \
406     *is_enc = 1;                                                              \
407     return 1;                                                                 \
408   case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC:                            \
409     *crypto_op = VNET_CRYPTO_OP_##n##_DEC;                                    \
410     *auth_op_or_aad_len = a;                                                  \
411     *digest_len = t;                                                          \
412     *is_enc = 0;                                                              \
413     return 1;
414     foreach_crypto_aead_async_alg
415 #undef _
416
417 #define _(c, h, s, k, d)                                                      \
418   case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC:                               \
419     *crypto_op = VNET_CRYPTO_OP_##c##_ENC;                                    \
420     *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
421     *digest_len = d;                                                          \
422     *is_enc = 1;                                                              \
423     return 0;                                                                 \
424   case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC:                               \
425     *crypto_op = VNET_CRYPTO_OP_##c##_DEC;                                    \
426     *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
427     *digest_len = d;                                                          \
428     *is_enc = 0;                                                              \
429     return 0;
430     foreach_crypto_link_async_alg
431 #undef _
432
433     default:
434       return -1;
435     }
436   return -1;
437 }
438
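/*
 * Dequeue handler called from the crypto dispatch node. If self-crypto
 * is enabled, scan the per-thread queues round-robin (alternating
 * between encrypt and decrypt on each call) and claim at most one
 * pending frame by atomically moving it from PENDING to
 * WORK_IN_PROGRESS, then process it in place. Completed frames are
 * handed back only from this thread's own queues, in tail order,
 * alternating between the two queue types.
 */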
439 static_always_inline vnet_crypto_async_frame_t *
440 crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
441                              u32 *enqueue_thread_idx)
442 {
443   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
444   crypto_sw_scheduler_per_thread_data_t *ptd =
445     cm->per_thread_data + vm->thread_index;
446   vnet_crypto_async_frame_t *f = 0;
447   crypto_sw_scheduler_queue_t *current_queue = 0;
448   u32 tail, head;
449   u8 found = 0;
450
451   /* get a pending frame to process */
452   if (ptd->self_crypto_enabled)
453     {
454       u32 i = ptd->last_serve_lcore_id + 1;
455
456       while (1)
457         {
458           crypto_sw_scheduler_per_thread_data_t *st;
459           u32 j;
460
461           if (i >= vec_len (cm->per_thread_data))
462             i = 0;
463
464           st = cm->per_thread_data + i;
465
466           if (ptd->last_serve_encrypt)
467             current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
468           else
469             current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
470
471           tail = current_queue->tail;
472           head = current_queue->head;
473
474           /* Skip this queue unless tail < head or head has overflowed
475            * and tail has not. At the point where tail overflows (== 0),
476            * the largest possible value of head is (queue size - 1).
477            * Prior to that, the largest possible value of head is
478            * (queue size - 2).
479            */
480           if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
481             goto skip_queue;
482
483           for (j = tail; j != head; j++)
484             {
485
486               f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
487
488               if (!f)
489                 continue;
490
491               if (clib_atomic_bool_cmp_and_swap (
492                     &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
493                     VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
494                 {
495                   found = 1;
496                   break;
497                 }
498             }
499
500         skip_queue:
501           if (found || i == ptd->last_serve_lcore_id)
502             {
503               CLIB_MEMORY_STORE_BARRIER ();
504               ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
505               break;
506             }
507
508           i++;
509         }
510
511       ptd->last_serve_lcore_id = i;
512     }
513
514   if (found)
515     {
516       u32 crypto_op, auth_op_or_aad_len;
517       u16 digest_len;
518       u8 is_enc;
519       int ret;
520
521       ret = convert_async_crypto_id (
522         f->op, &crypto_op, &auth_op_or_aad_len, &digest_len, &is_enc);
523
524       if (ret == 1)
525         crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
526                                           auth_op_or_aad_len, digest_len);
527       else if (ret == 0)
528         crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
529                                           auth_op_or_aad_len, digest_len,
530                                           is_enc);
531
532       *enqueue_thread_idx = f->enqueue_thread_index;
533       *nb_elts_processed = f->n_elts;
534     }
535
536   if (ptd->last_return_queue)
537     {
538       current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
539       ptd->last_return_queue = 0;
540     }
541   else
542     {
543       current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
544       ptd->last_return_queue = 1;
545     }
546
547   tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;
548
549   if (current_queue->jobs[tail] &&
550       current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
551     {
552
553       CLIB_MEMORY_STORE_BARRIER ();
554       current_queue->tail++;
555       f = current_queue->jobs[tail];
556       current_queue->jobs[tail] = 0;
557
558       return f;
559     }
560
561   return 0;
562 }
563
564 static clib_error_t *
565 sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
566                                 vlib_cli_command_t * cmd)
567 {
568   unformat_input_t _line_input, *line_input = &_line_input;
569   u32 worker_index;
570   u8 crypto_enable;
571   int rv;
572
573   /* Get a line of input. */
574   if (!unformat_user (input, unformat_line_input, line_input))
575     return 0;
576
577   while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
578     {
579       if (unformat (line_input, "worker %u", &worker_index))
580         {
581           if (unformat (line_input, "crypto"))
582             {
583               if (unformat (line_input, "on"))
584                 crypto_enable = 1;
585               else if (unformat (line_input, "off"))
586                 crypto_enable = 0;
587               else
588                 return (clib_error_return (0, "unknown input '%U'",
589                                            format_unformat_error,
590                                            line_input));
591             }
592           else
593             return (clib_error_return (0, "unknown input '%U'",
594                                        format_unformat_error, line_input));
595         }
596       else
597         return (clib_error_return (0, "unknown input '%U'",
598                                    format_unformat_error, line_input));
599     }
600
601   rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
602   if (rv == VNET_API_ERROR_INVALID_VALUE)
603     {
604       return (clib_error_return (0, "invalid worker idx: %d", worker_index));
605     }
606   else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
607     {
608       return (clib_error_return (0, "cannot disable all crypto workers"));
609     }
610   return 0;
611 }
612
613 /*?
614  * This command sets whether a worker will do crypto processing.
615  *
616  * @cliexpar
617  * Example of how to set worker crypto processing off:
618  * @cliexstart{set sw_scheduler worker 0 crypto off}
619  * @cliexend
620  ?*/
621 /* *INDENT-OFF* */
622 VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
623   .path = "set sw_scheduler",
624   .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
625   .function = sw_scheduler_set_worker_crypto,
626   .is_mp_safe = 1,
627 };
628 /* *INDENT-ON* */
629
630 static clib_error_t *
631 sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
632                            vlib_cli_command_t * cmd)
633 {
634   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
635   u32 i;
636
637   vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
638   for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
639     {
640       vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
641                        (vlib_worker_threads + i)->name,
642                        cm->per_thread_data[i].self_crypto_enabled ? "on" :
643                        "off");
644     }
645
646   return 0;
647 }
648
649 /*?
650  * This command displays sw_scheduler workers.
651  *
652  * @cliexpar
653  * Example of how to show workers:
654  * @cliexstart{show sw_scheduler workers}
655  * @cliexend
656  ?*/
657 /* *INDENT-OFF* */
658 VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
659   .path = "show sw_scheduler workers",
660   .short_help = "show sw_scheduler workers",
661   .function = sw_scheduler_show_workers,
662   .is_mp_safe = 1,
663 };
664 /* *INDENT-ON* */
665
666 clib_error_t *
667 sw_scheduler_cli_init (vlib_main_t * vm)
668 {
669   return 0;
670 }
671
672 VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
673
674 crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
675 clib_error_t *
676 crypto_sw_scheduler_init (vlib_main_t * vm)
677 {
678   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
679   vlib_thread_main_t *tm = vlib_get_thread_main ();
680   clib_error_t *error = 0;
681   crypto_sw_scheduler_per_thread_data_t *ptd;
682
683   vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
684                         CLIB_CACHE_LINE_BYTES);
685
686   vec_foreach (ptd, cm->per_thread_data)
687   {
688     ptd->self_crypto_enabled = 1;
689
690     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
691     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;
692
693     vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
694                           CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
695                           CLIB_CACHE_LINE_BYTES);
696
697     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
698     ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;
699
700     ptd->last_serve_encrypt = 0;
701     ptd->last_return_queue = 0;
702
703     vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
704                           CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
705                           CLIB_CACHE_LINE_BYTES);
706   }
707
708   cm->crypto_engine_index =
709     vnet_crypto_register_engine (vm, "sw_scheduler", 100,
710                                  "SW Scheduler Async Engine");
711
712   vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
713                                     crypto_sw_scheduler_key_handler);
714
715   crypto_sw_scheduler_api_init (vm);
716
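  /*
   * Register an encrypt and a decrypt enqueue handler for every supported
   * AEAD and linked algorithm, plus a single dequeue handler that services
   * completions for all of them.
   */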
717   /* *INDENT-OFF* */
718 #define _(n, s, k, t, a)                                                      \
719   vnet_crypto_register_enqueue_handler (                                      \
720     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,  \
721     crypto_sw_scheduler_frame_enqueue_encrypt);                               \
722   vnet_crypto_register_enqueue_handler (                                      \
723     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,  \
724     crypto_sw_scheduler_frame_enqueue_decrypt);
725   foreach_crypto_aead_async_alg
726 #undef _
727
728 #define _(c, h, s, k, d)                                                      \
729   vnet_crypto_register_enqueue_handler (                                      \
730     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
731     crypto_sw_scheduler_frame_enqueue_encrypt);                               \
732   vnet_crypto_register_enqueue_handler (                                      \
733     vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
734     crypto_sw_scheduler_frame_enqueue_decrypt);
735   foreach_crypto_link_async_alg
736 #undef _
737   /* *INDENT-ON* */
738
739   vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
740                                         crypto_sw_scheduler_dequeue);
741
742   if (error)
743     vec_free (cm->per_thread_data);
744
745   return error;
746 }
747
748 /* *INDENT-OFF* */
749 VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
750   .runs_after = VLIB_INITS ("vnet_crypto_init"),
751 };
752
753 VLIB_PLUGIN_REGISTER () = {
754   .version = VPP_BUILD_VER,
755   .description = "SW Scheduler Crypto Async Engine plugin",
756 };
757 /* *INDENT-ON* */
758
759 /*
760  * fd.io coding-style-patch-verification: ON
761  *
762  * Local Variables:
763  * eval: (c-set-style "gnu")
764  * End:
765  */