/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vlib/vlib.h>
17 #include <vnet/plugin/plugin.h>
18 #include <vpp/app/version.h>
20 #include "crypto_sw_scheduler.h"
23 crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
25 crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
26 vlib_thread_main_t *tm = vlib_get_thread_main ();
27 crypto_sw_scheduler_per_thread_data_t *ptd = 0;
28 u32 count = 0, i = vlib_num_workers () > 0;
30 if (worker_idx >= vlib_num_workers ())
32 return VNET_API_ERROR_INVALID_VALUE;
35 for (; i < tm->n_vlib_mains; i++)
37 ptd = cm->per_thread_data + i;
38 count += ptd->self_crypto_enabled;
41 if (enabled || count > 1)
43 cm->per_thread_data[vlib_get_worker_thread_index
44 (worker_idx)].self_crypto_enabled = enabled;
46 else /* cannot disable all crypto workers */
48 return VNET_API_ERROR_INVALID_VALUE_2;
54 crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
55 vnet_crypto_key_index_t idx)
57 crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
58 vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
60 vec_validate (cm->keys, idx);
62 if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
64 if (kop == VNET_CRYPTO_KEY_OP_DEL)
66 cm->keys[idx].index_crypto = UINT32_MAX;
67 cm->keys[idx].index_integ = UINT32_MAX;
77 crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
78 vnet_crypto_async_frame_t *frame, u8 is_enc)
80 crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
81 crypto_sw_scheduler_per_thread_data_t *ptd =
82 vec_elt_at_index (cm->per_thread_data, vm->thread_index);
83 crypto_sw_scheduler_queue_t *current_queue =
84 is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
85 &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
86 u64 head = current_queue->head;
88 if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
90 u32 n_elts = frame->n_elts, i;
91 for (i = 0; i < n_elts; i++)
92 frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
96 current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
98 CLIB_MEMORY_STORE_BARRIER ();
99 current_queue->head = head;
104 crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
105 vnet_crypto_async_frame_t *frame)
107 return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
110 crypto_sw_scheduler_frame_enqueue_encrypt (
111 vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
114 return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
117 static_always_inline void
118 cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
119 crypto_sw_scheduler_per_thread_data_t *ptd,
120 vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
123 vnet_crypto_op_chunk_t *ch;
127 * offset is relative to b->data (can be negative if we stay in pre_data
128 * area). Make sure it does not go beyond the 1st buffer.
130 ASSERT (b->current_data + b->current_length > offset);
131 offset = clib_min (b->current_data + b->current_length, offset);
133 op->chunk_index = vec_len (ptd->chunks);
135 vec_add2 (ptd->chunks, ch, 1);
136 ch->src = ch->dst = b->data + offset;
137 ch->len = clib_min (b->current_data + b->current_length - offset, len);
141 while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
143 b = vlib_get_buffer (vm, b->next_buffer);
144 vec_add2 (ptd->chunks, ch, 1);
145 ch->src = ch->dst = vlib_buffer_get_current (b);
146 ch->len = clib_min (b->current_length, len);
153 /* Some async crypto users can use buffers in creative ways, let's allow
154 * some flexibility here...
155 * Current example is ESP decrypt with ESN in async mode: it will stash
156 * ESN at the end of the last buffer (if it can) because it must be part
157 * of the integrity check but it will not update the buffer length.
158 * Fixup the last operation chunk length if we have room.
160 ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
161 if (vlib_buffer_space_left_at_end (vm, b) >= len)
165 op->n_chunks = n_chunks;
168 static_always_inline void
169 crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
170 crypto_sw_scheduler_per_thread_data_t * ptd,
171 vnet_crypto_async_frame_elt_t * fe,
173 vnet_crypto_op_id_t op_id, u16 aad_len,
176 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
177 vnet_crypto_op_t *op = 0;
179 if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
181 vec_add2 (ptd->chained_crypto_ops, op, 1);
182 cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
183 fe->crypto_total_length);
187 vec_add2 (ptd->crypto_ops, op, 1);
188 op->src = op->dst = b->data + fe->crypto_start_offset;
189 op->len = fe->crypto_total_length;
194 op->flags = fe->flags;
195 op->key_index = fe->key_index;
198 op->aad_len = aad_len;
199 op->tag_len = tag_len;
200 op->user_data = index;
203 static_always_inline void
204 crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
205 crypto_sw_scheduler_per_thread_data_t
206 * ptd, vnet_crypto_key_t * key,
207 vnet_crypto_async_frame_elt_t * fe,
209 vnet_crypto_op_id_t crypto_op_id,
210 vnet_crypto_op_id_t integ_op_id,
211 u32 digest_len, u8 is_enc)
213 vlib_buffer_t *b = vlib_get_buffer (vm, bi);
214 vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;
216 if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
218 vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
219 vec_add2 (ptd->chained_integ_ops, integ_op, 1);
220 cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
221 fe->crypto_start_offset,
222 fe->crypto_total_length);
223 cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
224 fe->integ_start_offset,
225 fe->crypto_total_length +
226 fe->integ_length_adj);
230 vec_add2 (ptd->crypto_ops, crypto_op, 1);
231 vec_add2 (ptd->integ_ops, integ_op, 1);
232 crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
233 crypto_op->len = fe->crypto_total_length;
234 integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
235 integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
238 crypto_op->op = crypto_op_id;
239 crypto_op->iv = fe->iv;
240 crypto_op->key_index = key->index_crypto;
241 crypto_op->user_data = 0;
242 crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
243 integ_op->op = integ_op_id;
244 integ_op->digest = fe->digest;
245 integ_op->digest_len = digest_len;
246 integ_op->key_index = key->index_integ;
247 integ_op->flags = fe->flags;
248 crypto_op->user_data = integ_op->user_data = index;
251 static_always_inline void
252 process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
253 vnet_crypto_op_t * ops, u8 * state)
255 u32 n_fail, n_ops = vec_len (ops);
256 vnet_crypto_op_t *op = ops;
261 n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
264 * If we had a failure in the ops then we need to walk all the ops
265 * and set the status in the corresponding frame. This status is
266 * not set in the case with no failures, as in that case the overall
267 * frame status is success.
271 for (int i = 0; i < n_ops; i++)
273 ASSERT (op - ops < n_ops);
275 f->elts[op->user_data].status = op->status;
278 *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
282 static_always_inline void
283 process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
284 vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
287 u32 n_fail, n_ops = vec_len (ops);
288 vnet_crypto_op_t *op = ops;
293 n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
296 * If we had a failure in the ops then we need to walk all the ops
297 * and set the status in the corresponding frame. This status is
298 * not set in the case with no failures, as in that case the overall
299 * frame status is success.
303 for (int i = 0; i < n_ops; i++)
305 ASSERT (op - ops < n_ops);
307 f->elts[op->user_data].status = op->status;
310 *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
314 static_always_inline void
315 crypto_sw_scheduler_process_aead (vlib_main_t *vm,
316 crypto_sw_scheduler_per_thread_data_t *ptd,
317 vnet_crypto_async_frame_t *f, u32 aead_op,
318 u32 aad_len, u32 digest_len)
320 vnet_crypto_async_frame_elt_t *fe;
322 u32 n_elts = f->n_elts;
323 u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
325 vec_reset_length (ptd->crypto_ops);
326 vec_reset_length (ptd->integ_ops);
327 vec_reset_length (ptd->chained_crypto_ops);
328 vec_reset_length (ptd->chained_integ_ops);
329 vec_reset_length (ptd->chunks);
332 bi = f->buffer_indices;
337 clib_prefetch_load (fe + 1);
339 crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
340 aead_op, aad_len, digest_len);
345 process_ops (vm, f, ptd->crypto_ops, &state);
346 process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
351 static_always_inline void
352 crypto_sw_scheduler_process_link (
353 vlib_main_t *vm, crypto_sw_scheduler_main_t *cm,
354 crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f,
355 u32 crypto_op, u32 auth_op, u16 digest_len, u8 is_enc)
357 vnet_crypto_async_frame_elt_t *fe;
359 u32 n_elts = f->n_elts;
360 u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
362 vec_reset_length (ptd->crypto_ops);
363 vec_reset_length (ptd->integ_ops);
364 vec_reset_length (ptd->chained_crypto_ops);
365 vec_reset_length (ptd->chained_integ_ops);
366 vec_reset_length (ptd->chunks);
368 bi = f->buffer_indices;
373 clib_prefetch_load (fe + 1);
375 crypto_sw_scheduler_convert_link_crypto (
376 vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0],
377 crypto_op, auth_op, digest_len, is_enc);
384 process_ops (vm, f, ptd->crypto_ops, &state);
385 process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
387 process_ops (vm, f, ptd->integ_ops, &state);
388 process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
393 process_ops (vm, f, ptd->integ_ops, &state);
394 process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
396 process_ops (vm, f, ptd->crypto_ops, &state);
397 process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
404 static_always_inline int
405 convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
406 u32 *crypto_op, u32 *auth_op_or_aad_len,
407 u16 *digest_len, u8 *is_enc)
411 #define _(n, s, k, t, a) \
412 case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC: \
413 *crypto_op = VNET_CRYPTO_OP_##n##_ENC; \
414 *auth_op_or_aad_len = a; \
418 case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC: \
419 *crypto_op = VNET_CRYPTO_OP_##n##_DEC; \
420 *auth_op_or_aad_len = a; \
424 foreach_crypto_aead_async_alg
427 #define _(c, h, s, k, d) \
428 case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC: \
429 *crypto_op = VNET_CRYPTO_OP_##c##_ENC; \
430 *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC; \
434 case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC: \
435 *crypto_op = VNET_CRYPTO_OP_##c##_DEC; \
436 *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC; \
440 foreach_crypto_link_async_alg
449 static_always_inline vnet_crypto_async_frame_t *
450 crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
451 u32 *enqueue_thread_idx)
453 crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
454 crypto_sw_scheduler_per_thread_data_t *ptd =
455 cm->per_thread_data + vm->thread_index;
456 vnet_crypto_async_frame_t *f = 0;
457 crypto_sw_scheduler_queue_t *current_queue = 0;
462 crypto_main.dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT;
466 /* get a pending frame to process */
467 if (ptd->self_crypto_enabled)
469 u32 i = ptd->last_serve_lcore_id + 1;
473 crypto_sw_scheduler_per_thread_data_t *st;
476 if (i >= vec_len (cm->per_thread_data))
479 st = cm->per_thread_data + i;
481 if (ptd->last_serve_encrypt)
482 current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
484 current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
486 tail = current_queue->tail;
487 head = current_queue->head;
489 /* Skip this queue unless tail < head or head has overflowed
490 * and tail has not. At the point where tail overflows (== 0),
491 * the largest possible value of head is (queue size - 1).
492 * Prior to that, the largest possible value of head is
495 if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
498 for (j = tail; j != head; j++)
501 f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
506 if (clib_atomic_bool_cmp_and_swap (
507 &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
508 VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
516 if (found || i == ptd->last_serve_lcore_id)
518 CLIB_MEMORY_STORE_BARRIER ();
519 ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
526 ptd->last_serve_lcore_id = i;
531 u32 crypto_op, auth_op_or_aad_len;
536 ret = convert_async_crypto_id (
537 f->op, &crypto_op, &auth_op_or_aad_len, &digest_len, &is_enc);
540 crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
541 auth_op_or_aad_len, digest_len);
543 crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
544 auth_op_or_aad_len, digest_len,
547 *enqueue_thread_idx = f->enqueue_thread_index;
548 *nb_elts_processed = f->n_elts;
551 if (ptd->last_return_queue)
553 current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
554 ptd->last_return_queue = 0;
558 current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
559 ptd->last_return_queue = 1;
562 tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;
564 if (current_queue->jobs[tail] &&
565 current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
568 CLIB_MEMORY_STORE_BARRIER ();
569 current_queue->tail++;
570 f = current_queue->jobs[tail];
571 current_queue->jobs[tail] = 0;
576 if (!found && recheck_queues)
579 goto run_half_queues;
584 static clib_error_t *
585 sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
586 vlib_cli_command_t * cmd)
588 unformat_input_t _line_input, *line_input = &_line_input;
593 /* Get a line of input. */
594 if (!unformat_user (input, unformat_line_input, line_input))
597 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
599 if (unformat (line_input, "worker %u", &worker_index))
601 if (unformat (line_input, "crypto"))
603 if (unformat (line_input, "on"))
605 else if (unformat (line_input, "off"))
608 return (clib_error_return (0, "unknown input '%U'",
609 format_unformat_error,
613 return (clib_error_return (0, "unknown input '%U'",
614 format_unformat_error, line_input));
617 return (clib_error_return (0, "unknown input '%U'",
618 format_unformat_error, line_input));
621 rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
622 if (rv == VNET_API_ERROR_INVALID_VALUE)
624 return (clib_error_return (0, "invalid worker idx: %d", worker_index));
626 else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
628 return (clib_error_return (0, "cannot disable all crypto workers"));
634 * This command sets if worker will do crypto processing.
637 * Example of how to set worker crypto processing off:
638 * @cliexstart{set sw_scheduler worker 0 crypto off}
642 VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
643 .path = "set sw_scheduler",
644 .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
645 .function = sw_scheduler_set_worker_crypto,
650 static clib_error_t *
651 sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
652 vlib_cli_command_t * cmd)
654 crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
657 vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
658 for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
660 vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
661 (vlib_worker_threads + i)->name,
663 per_thread_data[i].self_crypto_enabled ? "on" : "off");
670 * This command displays sw_scheduler workers.
673 * Example of how to show workers:
674 * @cliexstart{show sw_scheduler workers}
678 VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
679 .path = "show sw_scheduler workers",
680 .short_help = "show sw_scheduler workers",
681 .function = sw_scheduler_show_workers,
687 sw_scheduler_cli_init (vlib_main_t * vm)
692 VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
694 crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
696 crypto_sw_scheduler_init (vlib_main_t * vm)
698 crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
699 vlib_thread_main_t *tm = vlib_get_thread_main ();
700 clib_error_t *error = 0;
701 crypto_sw_scheduler_per_thread_data_t *ptd;
703 vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
704 CLIB_CACHE_LINE_BYTES);
706 vec_foreach (ptd, cm->per_thread_data)
708 ptd->self_crypto_enabled = 1;
710 ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
711 ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;
713 vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
714 CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
715 CLIB_CACHE_LINE_BYTES);
717 ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
718 ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;
720 ptd->last_serve_encrypt = 0;
721 ptd->last_return_queue = 0;
723 vec_validate_aligned (ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
724 CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1,
725 CLIB_CACHE_LINE_BYTES);
728 cm->crypto_engine_index =
729 vnet_crypto_register_engine (vm, "sw_scheduler", 100,
730 "SW Scheduler Async Engine");
732 vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
733 crypto_sw_scheduler_key_handler);
735 crypto_sw_scheduler_api_init (vm);
738 #define _(n, s, k, t, a) \
739 vnet_crypto_register_enqueue_handler ( \
740 vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
741 crypto_sw_scheduler_frame_enqueue_encrypt); \
742 vnet_crypto_register_enqueue_handler ( \
743 vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
744 crypto_sw_scheduler_frame_enqueue_decrypt);
745 foreach_crypto_aead_async_alg
748 #define _(c, h, s, k, d) \
749 vnet_crypto_register_enqueue_handler ( \
750 vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
751 crypto_sw_scheduler_frame_enqueue_encrypt); \
752 vnet_crypto_register_enqueue_handler ( \
753 vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
754 crypto_sw_scheduler_frame_enqueue_decrypt);
755 foreach_crypto_link_async_alg
759 vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
760 crypto_sw_scheduler_dequeue);
763 vec_free (cm->per_thread_data);
769 VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
770 .runs_after = VLIB_INITS ("vnet_crypto_init"),
773 VLIB_PLUGIN_REGISTER () = {
774 .version = VPP_BUILD_VER,
775 .description = "SW Scheduler Crypto Async Engine plugin",
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */