/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"
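/* Enable or disable self-crypto processing on a worker thread. At least one
 * worker must keep crypto enabled, otherwise no thread would be left to
 * process the queued frames. */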
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    return VNET_API_ERROR_INVALID_VALUE;

  /* count how many threads currently have self-crypto enabled */
  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    cm->per_thread_data[vlib_get_worker_thread_index
			(worker_idx)].self_crypto_enabled = enabled;
  else /* cannot disable all crypto workers */
    return VNET_API_ERROR_INVALID_VALUE_2;

  return 0;
}
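/* Key add/delete notification from the crypto infra: keep a local copy of
 * linked keys so the scheduler can later split them into separate crypto
 * and integrity operations. */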
static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
				 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
	{
	  cm->keys[idx].index_crypto = UINT32_MAX;
	  cm->keys[idx].index_integ = UINT32_MAX;
	}
      else
	cm->keys[idx] = *key;
    }
}
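/* Enqueue a frame on this thread's encrypt or decrypt ring. A slot is free
 * when its job pointer is zero; if the slot at head is still occupied the
 * ring is full and every element of the frame is failed. The store barrier
 * publishes the job pointer before the head moves, since consumer threads
 * scan the ring lock-free. */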
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t *vm,
				   vnet_crypto_async_frame_t *frame, u8 is_enc)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *current_queue =
    is_enc ? &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT] :
	     &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
  u64 head = current_queue->head;

  if (current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      /* ring is full: fail all elements of the frame */
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
	frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }

  current_queue->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  CLIB_MEMORY_STORE_BARRIER ();
  current_queue->head = head;
  return 0;
}
static int
crypto_sw_scheduler_frame_enqueue_decrypt (vlib_main_t *vm,
					   vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 0);
}

static int
crypto_sw_scheduler_frame_enqueue_encrypt (
  vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
{
  return crypto_sw_scheduler_frame_enqueue (vm, frame, 1);
}
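/* Build a scatter-gather chunk list for a chained buffer so a single crypto
 * op can cover a region that spans multiple vlib buffers. */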
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
			    crypto_sw_scheduler_per_thread_data_t *ptd,
			    vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
			    u32 len)
{
  vnet_crypto_op_chunk_t *ch;
  u32 n_chunks = 1;

  /*
   * offset is relative to b->data (can be negative if we stay in pre_data
   * area). Make sure it does not go beyond the 1st buffer.
   */
  ASSERT (b->current_data + b->current_length > offset);
  offset = clib_min (b->current_data + b->current_length, offset);

  op->chunk_index = vec_len (ptd->chunks);

  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = b->data + offset;
  ch->len = clib_min (b->current_data + b->current_length - offset, len);
  len -= ch->len;

  while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = clib_min (b->current_length, len);
      len -= ch->len;
      n_chunks++;
    }

  if (len)
    {
      /* Some async crypto users can use buffers in creative ways, let's allow
       * some flexibility here...
       * Current example is ESP decrypt with ESN in async mode: it will stash
       * ESN at the end of the last buffer (if it can) because it must be part
       * of the integrity check but it will not update the buffer length.
       * Fixup the last operation chunk length if we have room.
       */
      ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
      if (vlib_buffer_space_left_at_end (vm, b) >= len)
	ch->len += len;
    }

  op->n_chunks = n_chunks;
}
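/* Convert one async frame element into a synchronous AEAD op, chained or
 * flat depending on the buffer layout. user_data records the element index
 * so failures can be mapped back to the frame. */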
static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
				  crypto_sw_scheduler_per_thread_data_t * ptd,
				  vnet_crypto_async_frame_elt_t * fe,
				  u32 index, u32 bi,
				  vnet_crypto_op_id_t op_id, u16 aad_len,
				  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
				  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  op->user_data = index;
}
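/* Convert one async frame element of a linked (cipher + HMAC) algorithm
 * into a pair of synchronous ops sharing the same buffer. */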
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
					 crypto_sw_scheduler_per_thread_data_t
					 * ptd, vnet_crypto_key_t * key,
					 vnet_crypto_async_frame_elt_t * fe,
					 u32 index, u32 bi,
					 vnet_crypto_op_id_t crypto_op_id,
					 vnet_crypto_op_id_t integ_op_id,
					 u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
				  fe->crypto_start_offset,
				  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
				  fe->integ_start_offset,
				  fe->crypto_total_length +
				  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
  crypto_op->user_data = integ_op->user_data = index;
}
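/* Run a batch of synchronous ops and record per-element status for any
 * failures; a single failed element marks the whole frame as elt-error. */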
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
	     vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  f->elts[op->user_data].status = op->status;
	  *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
	  n_fail--;
	}
      op++;
    }
}
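/* Same as process_ops, for chained (scatter-gather) ops. */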
static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
		     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
		     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  f->elts[op->user_data].status = op->status;
	  *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
	  n_fail--;
	}
      op++;
    }
}
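/* Process a whole AEAD frame: reset the per-thread op vectors, convert
 * every element, then run the flat and chained batches. */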
static_always_inline void
crypto_sw_scheduler_process_aead (vlib_main_t *vm,
				  crypto_sw_scheduler_per_thread_data_t *ptd,
				  vnet_crypto_async_frame_t *f, u32 aead_op,
				  u32 aad_len, u32 digest_len)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);

  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
					aead_op, aad_len, digest_len);
      bi++;
      fe++;
    }

  process_ops (vm, f, ptd->crypto_ops, &state);
  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks, &state);
  f->state = state;
}
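/* Process a linked-algorithm frame. For encryption the cipher ops run
 * before the integrity ops (the digest covers the ciphertext); for
 * decryption the order is reversed. */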
static_always_inline void
crypto_sw_scheduler_process_link (
  vlib_main_t *vm, crypto_sw_scheduler_main_t *cm,
  crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f,
  u32 crypto_op, u32 auth_op, u16 digest_len, u8 is_enc)
{
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts = f->n_elts;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);

  fe = f->elts;
  bi = f->buffer_indices;

  while (n_elts--)
    {
      if (n_elts > 1)
	clib_prefetch_load (fe + 1);

      crypto_sw_scheduler_convert_link_crypto (
	vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0],
	crypto_op, auth_op, digest_len, is_enc);
      bi++;
      fe++;
    }

  if (is_enc)
    {
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			   &state);
    }
  else
    {
      process_ops (vm, f, ptd->integ_ops, &state);
      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			   &state);
      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
    }

  f->state = state;
}
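/* Map an async op id to its synchronous crypto/integ op ids. Returns 1 for
 * AEAD algorithms, 0 for linked cipher+HMAC algorithms, -1 if unknown. */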
static_always_inline int
convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
			 u32 *crypto_op, u32 *auth_op_or_aad_len,
			 u16 *digest_len, u8 *is_enc)
{
  switch (async_op_id)
    {
#define _(n, s, k, t, a)                                                      \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 1;                                                              \
    return 1;                                                                 \
  case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC:                            \
    *crypto_op = VNET_CRYPTO_OP_##n##_DEC;                                    \
    *auth_op_or_aad_len = a;                                                  \
    *digest_len = t;                                                          \
    *is_enc = 0;                                                              \
    return 1;
      foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_ENC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 1;                                                              \
    return 0;                                                                 \
  case VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC:                               \
    *crypto_op = VNET_CRYPTO_OP_##c##_DEC;                                    \
    *auth_op_or_aad_len = VNET_CRYPTO_OP_##h##_HMAC;                          \
    *digest_len = d;                                                          \
    *is_enc = 0;                                                              \
    return 0;
      foreach_crypto_link_async_alg
#undef _

    default:
      return -1;
    }
}
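/* Dequeue handler, polled by the crypto dispatch node. If self-crypto is
 * enabled, round-robin over all threads' queues (alternating encrypt and
 * decrypt) and try to claim one pending frame with an atomic
 * compare-and-swap, then process it inline. Completed frames are returned
 * from this thread's own queues, alternating between the two for fairness. */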
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			     u32 *enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd =
    cm->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_t *f = 0;
  crypto_sw_scheduler_queue_t *current_queue = 0;
  u32 tail, head;
  u8 found = 0;

  /* get a pending frame to process */
  if (ptd->self_crypto_enabled)
    {
      u32 i = ptd->last_serve_lcore_id + 1;

      while (1)
	{
	  crypto_sw_scheduler_per_thread_data_t *st;
	  u32 j;

	  if (i >= vec_len (cm->per_thread_data))
	    i = 0;

	  st = cm->per_thread_data + i;

	  if (ptd->last_serve_encrypt)
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
	  else
	    current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];

	  tail = current_queue->tail;
	  head = current_queue->head;

	  for (j = tail; j != head; j++)
	    {
	      f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];

	      if (!f)
		continue;

	      if (clib_atomic_bool_cmp_and_swap (
		    &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
		    VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
		{
		  found = 1;
		  break;
		}
	    }

	  if (found || i == ptd->last_serve_lcore_id)
	    {
	      CLIB_MEMORY_STORE_BARRIER ();
	      ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
	      break;
	    }

	  i++;
	}

      ptd->last_serve_lcore_id = i;
    }

  if (found)
    {
      u32 crypto_op, auth_op_or_aad_len;
      u16 digest_len;
      u8 is_enc;
      int ret;

      ret = convert_async_crypto_id (
	f->op, &crypto_op, &auth_op_or_aad_len, &digest_len, &is_enc);

      if (ret == 1)
	crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
					  auth_op_or_aad_len, digest_len);
      else if (ret == 0)
	crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
					  auth_op_or_aad_len, digest_len,
					  is_enc);

      *enqueue_thread_idx = f->enqueue_thread_index;
      *nb_elts_processed = f->n_elts;
    }

  /* return a completed frame from one of this thread's own queues */
  if (ptd->last_return_queue)
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
      ptd->last_return_queue = 0;
    }
  else
    {
      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
      ptd->last_return_queue = 1;
    }

  tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;

  if (current_queue->jobs[tail] &&
      current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      CLIB_MEMORY_STORE_BARRIER ();
      current_queue->tail++;
      f = current_queue->jobs[tail];
      current_queue->jobs[tail] = 0;

      return f;
    }

  return 0;
}
static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
				vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index;
  u8 crypto_enable;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
	{
	  if (unformat (line_input, "crypto"))
	    {
	      if (unformat (line_input, "on"))
		crypto_enable = 1;
	      else if (unformat (line_input, "off"))
		crypto_enable = 0;
	      else
		return (clib_error_return (0, "unknown input '%U'",
					   format_unformat_error,
					   line_input));
	    }
	  else
	    return (clib_error_return (0, "unknown input '%U'",
				       format_unformat_error, line_input));
	}
      else
	return (clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    {
      return (clib_error_return (0, "invalid worker idx: %d", worker_index));
    }
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    {
      return (clib_error_return (0, "cannot disable all crypto workers"));
    }
  return 0;
}
/*?
 * This command sets whether a worker thread performs crypto processing.
 *
 * @cliexpar
 * Example of how to set worker crypto processing off:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
?*/
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
};
static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
			   vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
		       (vlib_worker_threads + i)->name,
		       cm->per_thread_data[i].self_crypto_enabled ?
		       "on" : "off");
    }
  return 0;
}

/*?
 * This command displays the sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
?*/
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
};
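/* Nothing to do at CLI init time; the commands above register themselves. */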
static clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
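/* Plugin init: allocate per-thread queues, register the engine with the
 * crypto infra, and hook up the key handler plus enqueue/dequeue handlers
 * for every supported AEAD and linked algorithm. */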
clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
    {
      ptd->self_crypto_enabled = 1;

      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].head = 0;
      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].tail = 0;

      vec_validate_aligned (
	ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT].jobs,
	CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1, CLIB_CACHE_LINE_BYTES);

      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].head = 0;
      ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].tail = 0;

      ptd->last_serve_encrypt = 0;
      ptd->last_return_queue = 0;

      vec_validate_aligned (
	ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT].jobs,
	CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1, CLIB_CACHE_LINE_BYTES);
    }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
				 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
				    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);

#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,  \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,  \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
    crypto_sw_scheduler_frame_enqueue_encrypt);                               \
  vnet_crypto_register_enqueue_handler (                                      \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
    crypto_sw_scheduler_frame_enqueue_decrypt);
  foreach_crypto_link_async_alg
#undef _

  vnet_crypto_register_dequeue_handler (vm, cm->crypto_engine_index,
					crypto_sw_scheduler_dequeue);

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}
VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */