crypto: encrypt/decrypt queues sw_scheduler
[vpp.git] src/plugins/crypto_sw_scheduler/main.c
/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"

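/*
 * Enable or disable self-crypto processing on a worker thread. At least one
 * worker must keep crypto enabled, so the last enabled worker cannot be
 * turned off.
 */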
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  /* start the scan at thread 1 (first worker) when workers exist,
   * otherwise at the main thread */
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    {
      return VNET_API_ERROR_INVALID_VALUE;
    }

  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    {
      cm->per_thread_data[vlib_get_worker_thread_index
			  (worker_idx)].self_crypto_enabled = enabled;
    }
  else				/* cannot disable all crypto workers */
    {
      return VNET_API_ERROR_INVALID_VALUE_2;
    }
  return 0;
}

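/*
 * Key add/delete notification from the crypto infra. Only linked
 * (crypto + integrity) keys are cached locally, since the scheduler needs
 * their component key indices when splitting a linked operation.
 */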
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
				 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
	{
	  cm->keys[idx].index_crypto = UINT32_MAX;
	  cm->keys[idx].index_integ = UINT32_MAX;
	}
      else
	{
	  cm->keys[idx] = *key;
	}
    }
}

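/*
 * Enqueue an async frame on this thread's encrypt or decrypt ring. The ring
 * slot is selected with head & CRYPTO_SW_SCHEDULER_QUEUE_MASK; if the slot
 * is still occupied the frame is rejected and every element is marked with
 * an engine error.
 */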
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame, u8 is_enc)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *current_queue =
    is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
	     &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
  u64 head = current_queue->head;

  if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
	frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }

  current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  current_queue->head = head;
  return 0;
}

static int
crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
					   vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
}

static int
crypto_sw_scheduler_frame_enqueue_encrypt (vlib_main_t *vm,
					   vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
}

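/*
 * Build a scatter-gather chunk list for a chained-buffer operation, walking
 * the buffer chain from the given offset until len bytes are covered.
 */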
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
			    crypto_sw_scheduler_per_thread_data_t *ptd,
			    vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
			    u32 len)
{
  vnet_crypto_op_chunk_t *ch;
  u32 n_chunks;

  /*
   * offset is relative to b->data (can be negative if we stay in pre_data
   * area). Make sure it does not go beyond the 1st buffer.
   */
  ASSERT (b->current_data + b->current_length > offset);
  offset = clib_min (b->current_data + b->current_length, offset);

  op->chunk_index = vec_len (ptd->chunks);

  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = b->data + offset;
  ch->len = clib_min (b->current_data + b->current_length - offset, len);
  len -= ch->len;
  n_chunks = 1;

  while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = clib_min (b->current_length, len);
      len -= ch->len;
      n_chunks++;
    }

  if (len)
    {
      /* Some async crypto users can use buffers in creative ways, let's allow
       * some flexibility here...
       * Current example is ESP decrypt with ESN in async mode: it will stash
       * ESN at the end of the last buffer (if it can) because it must be part
       * of the integrity check but it will not update the buffer length.
       * Fixup the last operation chunk length if we have room.
       */
      ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
      if (vlib_buffer_space_left_at_end (vm, b) >= len)
	ch->len += len;
    }

  op->n_chunks = n_chunks;
}

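/* Convert one async frame element into a synchronous AEAD op. */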
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
				  crypto_sw_scheduler_per_thread_data_t * ptd,
				  vnet_crypto_async_frame_elt_t * fe,
				  u32 index, u32 bi,
				  vnet_crypto_op_id_t op_id, u16 aad_len,
				  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
				  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}

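/*
 * Convert one async frame element of a linked (cipher + HMAC) algorithm into
 * a pair of synchronous ops, one for the cipher and one for the integrity
 * check.
 */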
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
					 crypto_sw_scheduler_per_thread_data_t
					 * ptd, vnet_crypto_key_t * key,
					 vnet_crypto_async_frame_elt_t * fe,
					 u32 index, u32 bi,
					 vnet_crypto_op_id_t crypto_op_id,
					 vnet_crypto_op_id_t integ_op_id,
					 u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
				  fe->crypto_start_offset,
				  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
				  fe->integ_start_offset,
				  fe->crypto_total_length +
				  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
  crypto_op->user_data = integ_op->user_data = index;
}

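/*
 * Run the collected synchronous ops and propagate any per-element failure
 * back into the frame, flagging the frame state as ELT_ERROR.
 */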
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
	     vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  f->elts[op->user_data].status = op->status;
	  *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
	  n_fail--;
	}
      op++;
    }
}

static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
		     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
		     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  f->elts[op->user_data].status = op->status;
	  *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
	  n_fail--;
	}
      op++;
    }
}

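/* Process a whole AEAD frame: convert every element, then run the ops. */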
static_always_inline void
crypto_sw_scheduler_process_aead (vlib_main_t *vm,
				  crypto_sw_scheduler_per_thread_data_t *ptd,
				  vnet_crypto_async_frame_t *f, u32 aead_op,
				  u32 aad_len, u32 digest_len)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);

  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
					aead_op, aad_len, digest_len);
      bi++;
      fe++;
    }

  process_ops (vm, f, ptd->crypto_ops, &state);
  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks, &state);
  f->state = state;
}

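/*
 * Process a whole linked-algorithm frame. For encrypt, the cipher ops run
 * before the integrity ops; for decrypt, the integrity check runs first.
 */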
static_always_inline void
crypto_sw_scheduler_process_link (vlib_main_t *vm,
				  crypto_sw_scheduler_main_t *cm,
				  crypto_sw_scheduler_per_thread_data_t *ptd,
				  vnet_crypto_async_frame_t *f, u32 crypto_op,
				  u32 auth_op, u16 digest_len, u8 is_enc)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);
  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_link_crypto (
	vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0], crypto_op,
	auth_op, digest_len, is_enc);
      bi++;
      fe++;
    }

  if (is_enc)
    {
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			   &state);
    }
  else
    {
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			   &state);
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
    }

  f->state = state;
}

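/*
 * Map an async op id onto its synchronous counterparts. Returns 1 for AEAD
 * algorithms, 0 for linked cipher/HMAC algorithms and -1 if the id is
 * unknown.
 */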
static_always_inline int
convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
			 u32 *crypto_op, u32 *auth_op_or_aad_len,
			 u16 *digest_len, u8 *is_enc)
{
  switch (async_op_id)
    {
#define _(n, s, k, t, a)                                                      \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 1;                                                              \
    return 1;                                                                 \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_DEC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 0;                                                              \
    return 1;
      foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_ENC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 1;                                                              \
    return 0;                                                                 \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_DEC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 0;                                                              \
    return 0;
      foreach_crypto_link_async_alg
#undef _

    default:
      return -1;
    }

  return -1;
}

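/*
 * Dequeue handler called by the crypto dispatch node. If self-crypto is
 * enabled, scan the per-thread queues round-robin for a pending frame,
 * claim it with a compare-and-swap and process it synchronously. Then return
 * a completed frame from this thread's own queues, alternating between the
 * encrypt and decrypt rings.
 */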
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			     u32 *enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    cm->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *f = 0;
  crypto_sw_scheduler_queue_t *current_queue = 0;
  u32 tail, head;
  u8 found = 0;

  /* get a pending frame to process */
  if (ptd->self_crypto_enabled)
    {
      u32 i = ptd->last_serve_lcore_id + 1;

      while (1)
	{
	  crypto_sw_scheduler_per_thread_data_t *st;
	  u32 j;

	  if (i >= vec_len (cm->per_thread_data))
	    i = 0;

	  st = cm->per_thread_data + i;

	  if (ptd->last_serve_encrypt)
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
	  else
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];

	  tail = current_queue->tail;
	  head = current_queue->head;

	  for (j = tail; j != head; j++)
	    {
	      f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];

	      if (!f)
		continue;

	      if (clib_atomic_bool_cmp_and_swap (
		    &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
		    VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
		{
		  found = 1;
		  break;
		}
	    }

	  if (found || i == ptd->last_serve_lcore_id)
	    {
	      CLIB_MEMORY_STORE_BARRIER ();
	      ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
	      break;
	    }

	  i++;
	}

      ptd->last_serve_lcore_id = i;
    }

  if (found)
    {
      u32 crypto_op, auth_op_or_aad_len;
      u16 digest_len;
      u8 is_enc;
      int ret;

      ret = convert_async_crypto_id (f->op, &crypto_op, &auth_op_or_aad_len,
				     &digest_len, &is_enc);

      if (ret == 1)
	crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
					  auth_op_or_aad_len, digest_len);
      else if (ret == 0)
	crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
					  auth_op_or_aad_len, digest_len,
					  is_enc);

      *enqueue_thread_idx = f->enqueue_thread_index;
      *nb_elts_processed = f->n_elts;
    }

  if (ptd->last_return_queue)
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
      ptd->last_return_queue = 0;
    }
  else
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
      ptd->last_return_queue = 1;
    }

  tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;

  if (current_queue->jobs[tail] &&
      current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      current_queue->tail++;
      f = current_queue->jobs[tail];
      current_queue->jobs[tail] = 0;

      return f;
    }

  return 0;
}

static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
				vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index;
  u8 crypto_enable;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
	{
	  if (unformat (line_input, "crypto"))
	    {
	      if (unformat (line_input, "on"))
		crypto_enable = 1;
	      else if (unformat (line_input, "off"))
		crypto_enable = 0;
	      else
		return (clib_error_return (0, "unknown input '%U'",
					   format_unformat_error,
					   line_input));
	    }
	  else
	    return (clib_error_return (0, "unknown input '%U'",
				       format_unformat_error, line_input));
	}
      else
	return (clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}

/*?
 * This command sets whether a worker does crypto processing.
 *
 * @cliexpar
 * Example of how to turn crypto processing off for worker 0:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
			   vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
		       (vlib_worker_threads + i)->name,
		       cm->per_thread_data[i].self_crypto_enabled ?
			 "on" : "off");
    }

  return 0;
}

/*?
 * This command displays the sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
 ?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
  .is_mp_safe = 1,
};
/* *INDENT-ON* */

clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);

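/*
 * Engine registration: crypto_sw_scheduler_init allocates the per-thread
 * encrypt/decrypt queues, registers the engine with the crypto subsystem and
 * hooks up the per-algorithm enqueue handlers and the dequeue handler.
 */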
crypto_sw_scheduler_main_t crypto_sw_scheduler_main;

clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    ptd->self_crypto_enabled = 1;

    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;

    vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
			  CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
			  CLIB_CACHE_LINE_BYTES);

    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
    ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;

    ptd->last_serve_encrypt = 0;
    ptd->last_return_queue = 0;

    vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
			  CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
			  CLIB_CACHE_LINE_BYTES);
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
				 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
				    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

  /* *INDENT-OFF* */
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,  \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,  \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_link_async_alg
#undef _
  /* *INDENT-ON* */

  vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
					crypto_sw_scheduler_dequeue);

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */