/*
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#include "crypto_sw_scheduler.h"
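
/*
 * The sw_scheduler engine turns synchronous software crypto into an async
 * engine: each thread owns one ring of enqueued frames per async op id, and
 * any worker with self-crypto enabled may claim and process pending frames
 * from every thread's ring, spreading crypto work across workers.
 */
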
int
crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  /* skip the main thread (index 0) when workers are present */
  u32 count = 0, i = vlib_num_workers () > 0;

  if (worker_idx >= vlib_num_workers ())
    return VNET_API_ERROR_INVALID_VALUE;

  /* count how many threads currently have self-crypto enabled */
  for (; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
    }

  if (enabled || count > 1)
    cm->per_thread_data[vlib_get_worker_thread_index
			(worker_idx)].self_crypto_enabled = enabled;
  else /* cannot disable all crypto workers */
    return VNET_API_ERROR_INVALID_VALUE_2;

  return 0;
}

static void
crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
				 vnet_crypto_key_index_t idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);

  vec_validate (cm->keys, idx);

  /* only linked (crypto + integ) keys need to be cached locally, so that
   * the two halves can later be looked up when converting frame elements */
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    {
      if (kop == VNET_CRYPTO_KEY_OP_DEL)
	{
	  cm->keys[idx].index_crypto = UINT32_MAX;
	  cm->keys[idx].index_integ = UINT32_MAX;
	}
      else
	cm->keys[idx] = *key;
    }
}
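
/*
 * Frames are enqueued on the calling thread's own ring, so each ring has a
 * single producer. Consumers on other threads never move head or tail here;
 * they claim individual frames through an atomic state transition (see
 * crypto_sw_scheduler_get_pending_frame below).
 */
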
static int
crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
				   vnet_crypto_async_frame_t * frame)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd
    = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
  crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
  u64 head = q->head;

  /* ring full: fail every element of the frame and reject it */
  if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
    {
      u32 n_elts = frame->n_elts, i;
      for (i = 0; i < n_elts; i++)
	frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
      return -1;
    }
  q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
  head += 1;
  /* make sure the frame is stored before consumers see the new head */
  CLIB_MEMORY_STORE_BARRIER ();
  q->head = head;
  return 0;
}
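
/*
 * head and tail grow monotonically and are wrapped into the job array by
 * CRYPTO_SW_SCHEDULER_QUEUE_MASK (queue size minus one, the queue size
 * being a power of two). E.g. with a queue size of 64, head = 67 lands in
 * slot 67 & 63 = 3.
 */
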
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f;
  u32 i;
  u32 tail = q->tail;
  u32 head = q->head;

  for (i = tail; i < head; i++)
    {
      f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      if (!f)
	continue;
      /* the CAS guarantees exactly one thread claims each pending frame */
      if (clib_atomic_bool_cmp_and_swap
	  (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
	   VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
	return f;
    }
  return 0;
}

static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
{
  vnet_crypto_async_frame_t *f = 0;
  /* frames are handed back strictly in enqueue order: only the frame at
   * the tail may be returned, even if later slots finished first */
  if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
      && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
      >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
    {
      u32 tail = q->tail;
      CLIB_MEMORY_STORE_BARRIER ();
      q->tail++;
      f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
      q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
    }
  return f;
}
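
/*
 * Build a scatter-gather list (a vector of vnet_crypto_op_chunk_t) for a
 * chained buffer, so one crypto op can cover data spanning several vlib
 * buffers without copying it into contiguous memory.
 */
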
static_always_inline void
cryptodev_sw_scheduler_sgl (vlib_main_t *vm,
			    crypto_sw_scheduler_per_thread_data_t *ptd,
			    vlib_buffer_t *b, vnet_crypto_op_t *op, i16 offset,
			    u32 len)
{
  vnet_crypto_op_chunk_t *ch;
  u32 n_chunks;

  /*
   * offset is relative to b->data (can be negative if we stay in pre_data
   * area). Make sure it does not go beyond the 1st buffer.
   */
  ASSERT (b->current_data + b->current_length > offset);
  offset = clib_min (b->current_data + b->current_length, offset);

  op->chunk_index = vec_len (ptd->chunks);

  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = b->data + offset;
  ch->len = clib_min (b->current_data + b->current_length - offset, len);
  len -= ch->len;
  n_chunks = 1;

  while (len && b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = clib_min (b->current_length, len);
      len -= ch->len;
      n_chunks++;
    }

  if (len)
    {
      /* Some async crypto users can use buffers in creative ways, let's
       * allow some flexibility here...
       * Current example is ESP decrypt with ESN in async mode: it will
       * stash the ESN at the end of the last buffer (if it can) because it
       * must be part of the integrity check but it will not update the
       * buffer length. Fixup the last operation chunk length if we have
       * room. */
      ASSERT (vlib_buffer_space_left_at_end (vm, b) >= len);
      if (vlib_buffer_space_left_at_end (vm, b) >= len)
	ch->len += len;
    }

  op->n_chunks = n_chunks;
}

static_always_inline void
crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
				  crypto_sw_scheduler_per_thread_data_t * ptd,
				  vnet_crypto_async_frame_elt_t * fe,
				  u32 index, u32 bi,
				  vnet_crypto_op_id_t op_id, u16 aad_len,
				  u8 tag_len)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
				  fe->crypto_total_length);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, op, 1);
      op->src = op->dst = b->data + fe->crypto_start_offset;
      op->len = fe->crypto_total_length;
    }

  op->op = op_id;
  op->tag = fe->tag;
  op->flags = fe->flags;
  op->key_index = fe->key_index;
  op->iv = fe->iv;
  op->aad = fe->aad;
  op->aad_len = aad_len;
  op->tag_len = tag_len;
  /* user_data remembers the element index so failures can be mapped back */
  op->user_data = index;
}
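
/*
 * For linked algorithms (cipher + HMAC), each frame element becomes two
 * sync ops, one crypto op and one integrity op, tied back to the same
 * element through a shared user_data index.
 */
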
static_always_inline void
crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
					 crypto_sw_scheduler_per_thread_data_t
					 * ptd, vnet_crypto_key_t * key,
					 vnet_crypto_async_frame_elt_t * fe,
					 u32 index, u32 bi,
					 vnet_crypto_op_id_t crypto_op_id,
					 vnet_crypto_op_id_t integ_op_id,
					 u32 digest_len, u8 is_enc)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
  vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;

  if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
      vec_add2 (ptd->chained_integ_ops, integ_op, 1);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
				  fe->crypto_start_offset,
				  fe->crypto_total_length);
      cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
				  fe->integ_start_offset,
				  fe->crypto_total_length +
				  fe->integ_length_adj);
    }
  else
    {
      vec_add2 (ptd->crypto_ops, crypto_op, 1);
      vec_add2 (ptd->integ_ops, integ_op, 1);
      crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
      crypto_op->len = fe->crypto_total_length;
      integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
      integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
    }

  crypto_op->op = crypto_op_id;
  crypto_op->iv = fe->iv;
  crypto_op->key_index = key->index_crypto;
  crypto_op->user_data = 0;
  crypto_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
  integ_op->op = integ_op_id;
  integ_op->digest = fe->digest;
  integ_op->digest_len = digest_len;
  integ_op->key_index = key->index_integ;
  integ_op->flags = fe->flags & ~VNET_CRYPTO_OP_FLAG_INIT_IV;
  crypto_op->user_data = integ_op->user_data = index;
}
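
/*
 * vnet_crypto_process_ops () returns the number of ops that completed
 * successfully, so any shortfall means failures; walk the ops to copy
 * per-element statuses back into the frame and flag it as errored.
 */
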
static_always_inline void
process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
	     vnet_crypto_op_t * ops, u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  f->elts[op->user_data].status = op->status;
	  *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
	  n_fail--;
	}
      op++;
    }
}

static_always_inline void
process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
		     vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
		     u8 * state)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  f->elts[op->user_data].status = op->status;
	  *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
	  n_fail--;
	}
      op++;
    }
}
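
/*
 * Dequeue is where the actual scheduling happens: a thread with self-crypto
 * enabled scans every thread's queue for a pending frame, claims one,
 * converts its elements to sync ops and processes them inline, then hands
 * back the oldest completed frame from its own queue.
 */
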
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
				  vnet_crypto_async_op_id_t async_op_id,
				  vnet_crypto_op_id_t sync_op_id, u8 tag_len,
				  u8 aad_len, u32 * nb_elts_processed,
				  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      vec_foreach_index (i, cm->per_thread_data)
      {
	ptd = cm->per_thread_data + i;
	q = ptd->queues[async_op_id];
	f = crypto_sw_scheduler_get_pending_frame (q);
	if (f)
	  break;
      }
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chunks);

      while (n_elts--)
	{
	  if (n_elts > 1)
	    clib_prefetch_load (fe + 1);
	  crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
					    sync_op_id, aad_len, tag_len);
	  bi++;
	  fe++;
	}

      process_ops (vm, f, ptd->crypto_ops, &state);
      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			   &state);
      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}
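
/*
 * Same flow for linked algorithms, with one twist: for encryption the
 * cipher ops run before the integrity ops, so the MAC is computed over
 * ciphertext; for decryption the order is reversed.
 */
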
static_always_inline vnet_crypto_async_frame_t *
crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
				  vnet_crypto_async_op_id_t async_op_id,
				  vnet_crypto_op_id_t sync_crypto_op_id,
				  vnet_crypto_op_id_t sync_integ_op_id,
				  u16 digest_len, u8 is_enc,
				  u32 * nb_elts_processed,
				  u32 * enqueue_thread_idx)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  crypto_sw_scheduler_per_thread_data_t *ptd = 0;
  crypto_sw_scheduler_queue_t *q = 0;
  vnet_crypto_async_frame_t *f = 0;
  vnet_crypto_async_frame_elt_t *fe;
  u32 *bi;
  u32 n_elts;
  int i = 0;
  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;

  if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
    {
      vec_foreach_index (i, cm->per_thread_data)
      {
	ptd = cm->per_thread_data + i;
	q = ptd->queues[async_op_id];
	f = crypto_sw_scheduler_get_pending_frame (q);
	if (f)
	  break;
      }
    }

  ptd = cm->per_thread_data + vm->thread_index;

  if (f)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
      vec_reset_length (ptd->chunks);

      *nb_elts_processed = n_elts = f->n_elts;
      fe = f->elts;
      bi = f->buffer_indices;

      while (n_elts--)
	{
	  if (n_elts > 1)
	    clib_prefetch_load (fe + 1);
	  crypto_sw_scheduler_convert_link_crypto (vm, ptd,
						   cm->keys + fe->key_index,
						   fe, fe - f->elts, bi[0],
						   sync_crypto_op_id,
						   sync_integ_op_id,
						   digest_len, is_enc);
	  bi++;
	  fe++;
	}

      if (is_enc)
	{
	  process_ops (vm, f, ptd->crypto_ops, &state);
	  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			       &state);
	  process_ops (vm, f, ptd->integ_ops, &state);
	  process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			       &state);
	}
      else
	{
	  process_ops (vm, f, ptd->integ_ops, &state);
	  process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
			       &state);
	  process_ops (vm, f, ptd->crypto_ops, &state);
	  process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
			       &state);
	}
      f->state = state;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }

  return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
}

static clib_error_t *
sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
				vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 worker_index;
  u8 crypto_enable;
  int rv;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "worker %u", &worker_index))
	{
	  if (unformat (line_input, "crypto"))
	    {
	      if (unformat (line_input, "on"))
		crypto_enable = 1;
	      else if (unformat (line_input, "off"))
		crypto_enable = 0;
	      else
		return (clib_error_return (0, "unknown input '%U'",
					   format_unformat_error,
					   line_input));
	    }
	  else
	    return (clib_error_return (0, "unknown input '%U'",
				       format_unformat_error, line_input));
	}
      else
	return (clib_error_return (0, "unknown input '%U'",
				   format_unformat_error, line_input));
    }

  rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
  if (rv == VNET_API_ERROR_INVALID_VALUE)
    return (clib_error_return (0, "invalid worker idx: %d", worker_index));
  else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
    return (clib_error_return (0, "cannot disable all crypto workers"));

  return 0;
}

/*?
 * This command sets whether a worker does crypto processing.
 *
 * @cliexpar
 * Example of how to set worker crypto processing off:
 * @cliexstart{set sw_scheduler worker 0 crypto off}
 * @cliexend
?*/
VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
  .path = "set sw_scheduler",
  .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
  .function = sw_scheduler_set_worker_crypto,
};

static clib_error_t *
sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
			   vlib_cli_command_t * cmd)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  u32 i;

  vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
  /* thread 0 is the main thread; workers start at index 1 */
  for (i = 1; i < vlib_thread_main.n_vlib_mains; i++)
    {
      vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
		       (vlib_worker_threads + i)->name,
		       cm->per_thread_data[i].self_crypto_enabled ?
		       "on" : "off");
    }
  return 0;
}

/*?
 * This command displays the sw_scheduler workers.
 *
 * @cliexpar
 * Example of how to show workers:
 * @cliexstart{show sw_scheduler workers}
 * @cliexend
?*/
VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
  .path = "show sw_scheduler workers",
  .short_help = "show sw_scheduler workers",
  .function = sw_scheduler_show_workers,
};

static clib_error_t *
sw_scheduler_cli_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
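
/*
 * The macros below stamp out one dequeue wrapper pair per algorithm from
 * foreach_crypto_aead_async_alg and foreach_crypto_link_async_alg. As a
 * rough example, an AEAD entry such as AES_128_GCM with a 16-byte tag and
 * 12-byte AAD expands to
 * crypto_sw_scheduler_frame_dequeue_AES_128_GCM_TAG_16_AAD_12_enc (),
 * which simply calls crypto_sw_scheduler_dequeue_aead () with the matching
 * VNET_CRYPTO_OP_* ids, tag length and AAD length.
 */
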
#define _(n, s, k, t, a)                                                      \
  static vnet_crypto_async_frame_t                                           \
    *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc (        \
      vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)              \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
      vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                         \
      VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx);         \
  }                                                                           \
  static vnet_crypto_async_frame_t                                           \
    *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec (        \
      vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)              \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_aead (                                 \
      vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                         \
      VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx);         \
  }
foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  static vnet_crypto_async_frame_t                                           \
    *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc (             \
      vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)              \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
      vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,                            \
      VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1,              \
      nb_elts_processed, thread_idx);                                         \
  }                                                                           \
  static vnet_crypto_async_frame_t                                           \
    *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec (             \
      vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx)              \
  {                                                                           \
    return crypto_sw_scheduler_dequeue_link (                                 \
      vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,                            \
      VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0,              \
      nb_elts_processed, thread_idx);                                         \
  }
foreach_crypto_link_async_alg
#undef _
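
/*
 * Plugin init: allocate one job ring per thread and per async op id, then
 * register the engine, its key handler and one enqueue/dequeue handler
 * pair per supported async op.
 */
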
crypto_sw_scheduler_main_t crypto_sw_scheduler_main;

clib_error_t *
crypto_sw_scheduler_init (vlib_main_t * vm)
{
  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  clib_error_t *error = 0;
  crypto_sw_scheduler_per_thread_data_t *ptd;

  u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
    + sizeof (crypto_sw_scheduler_queue_t);

  vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  vec_foreach (ptd, cm->per_thread_data)
  {
    ptd->self_crypto_enabled = 1;

    u32 i;
    for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
      {
	crypto_sw_scheduler_queue_t *q
	  = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);

	ptd->queues[i] = q;
	clib_memset_u8 (q, 0, queue_size);
      }
  }

  cm->crypto_engine_index =
    vnet_crypto_register_engine (vm, "sw_scheduler", 100,
				 "SW Scheduler Async Engine");

  vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
				    crypto_sw_scheduler_key_handler);

  crypto_sw_scheduler_api_init (vm);
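
  /*
   * Register the generated dequeue wrappers; every async op id shares the
   * same enqueue function, since enqueue only stashes the frame on the
   * calling thread's ring.
   */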
#define _(n, s, k, t, a)                                                      \
  vnet_crypto_register_async_handler (                                        \
    vm, cm->crypto_engine_index,                                              \
    VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC,                               \
    crypto_sw_scheduler_frame_enqueue,                                        \
    crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc);         \
  vnet_crypto_register_async_handler (                                        \
    vm, cm->crypto_engine_index,                                              \
    VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,                               \
    crypto_sw_scheduler_frame_enqueue,                                        \
    crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
  foreach_crypto_aead_async_alg
#undef _

#define _(c, h, s, k, d)                                                      \
  vnet_crypto_register_async_handler (                                        \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC,     \
    crypto_sw_scheduler_frame_enqueue,                                        \
    crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc);              \
  vnet_crypto_register_async_handler (                                        \
    vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,     \
    crypto_sw_scheduler_frame_enqueue,                                        \
    crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
  foreach_crypto_link_async_alg
#undef _

  if (error)
    vec_free (cm->per_thread_data);

  return error;
}

VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
  .runs_after = VLIB_INITS ("vnet_crypto_init"),
};

VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "SW Scheduler Crypto Async Engine plugin",
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */