/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"
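
/*
 * Sizing of the per-socket session mempools created in
 * cperf_initialize_cryptodev(): number of elements and per-lcore cache.
 */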
#define NUM_SESSIONS 2048
#define SESS_MEMPOOL_CACHE_SIZE 64

const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};
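
/* Each test type provides a constructor, a runner and a destructor. */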
const struct cperf_test cperf_testmap[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = {
		cperf_throughput_test_constructor,
		cperf_throughput_test_runner,
		cperf_throughput_test_destructor
	},
	[CPERF_TEST_TYPE_LATENCY] = {
		cperf_latency_test_constructor,
		cperf_latency_test_runner,
		cperf_latency_test_destructor
	},
	[CPERF_TEST_TYPE_VERIFY] = {
		cperf_verify_test_constructor,
		cperf_verify_test_runner,
		cperf_verify_test_destructor
	},
	[CPERF_TEST_TYPE_PMDCC] = {
		cperf_pmd_cyclecount_test_constructor,
		cperf_pmd_cyclecount_test_runner,
		cperf_pmd_cyclecount_test_destructor
	}
};
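
/*
 * Probe and configure the crypto devices of the requested type: use at most
 * one device per worker lcore, create per-socket session pools, set up the
 * queue pairs and start each device. Returns the number of devices in use.
 */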
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}
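
	/*
	 * One lcore is the application master core, which only launches the
	 * test runners and waits for them (see main()), so it is not counted
	 * as a crypto worker.
	 */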
	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores needs to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use fewer devices than available
	 * if there are more devices than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}
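
	/*
	 * max_sess_size is the largest private session size across all PMDs;
	 * the per-socket session pools created below use it as the element
	 * size so that any device's sessions fit in the same pool.
	 */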

	/*
	 * Calculate the number of queue pairs needed per device, based on
	 * the number of available logical cores and crypto devices. For
	 * instance, with 4 cores and 2 crypto devices, 2 queue pairs are
	 * set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
			(nb_lcores / enabled_cdev_count) + 1 :
			nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}

		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};
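
		/*
		 * Create one session mempool per NUMA socket, lazily on first
		 * use; every device on that socket shares it.
		 */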
		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);

			sess_mp = rte_mempool_create(mp_name,
					NUM_SESSIONS, max_sess_size,
					SESS_MEMPOOL_CACHE_SIZE, 0,
					NULL, NULL, NULL, NULL,
					socket_id, 0);

			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id,
				session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
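
/*
 * Check that every enabled device supports the requested cipher, auth or
 * AEAD algorithm with the key, IV, digest and AAD sizes from the options.
 */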
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
		uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
	struct rte_cryptodev_sym_capability_idx cap_idx;
	const struct rte_cryptodev_symmetric_capability *capability;

	uint8_t i, cdev_id;
	int ret;

	for (i = 0; i < nb_cryptodevs; i++) {

		cdev_id = enabled_cdevs[i];

		if (opts->op_type == CPERF_AUTH_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
			cap_idx.algo.auth = opts->auth_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_auth(
					capability,
					opts->auth_key_sz,
					opts->digest_sz,
					opts->auth_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_CIPHER_ONLY ||
				opts->op_type == CPERF_CIPHER_THEN_AUTH ||
				opts->op_type == CPERF_AUTH_THEN_CIPHER) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
			cap_idx.algo.cipher = opts->cipher_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_cipher(
					capability,
					opts->cipher_key_sz,
					opts->cipher_iv_sz);
			if (ret != 0)
				return ret;
		}

		if (opts->op_type == CPERF_AEAD) {

			cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
			cap_idx.algo.aead = opts->aead_algo;

			capability = rte_cryptodev_sym_capability_get(cdev_id,
					&cap_idx);
			if (capability == NULL)
				return -1;

			ret = rte_cryptodev_sym_capability_check_aead(
					capability,
					opts->aead_key_sz,
					opts->digest_sz,
					opts->aead_aad_sz,
					opts->aead_iv_sz);
			if (ret != 0)
				return ret;
		}
	}

	return 0;
}
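
/*
 * Verify that the test vector (parsed from a file or generated as a dummy)
 * provides every field the selected operation type needs, with lengths that
 * match the requested sizes.
 */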
static int
cperf_check_test_vector(struct cperf_options *opts,
		struct cperf_test_vector *test_vec)
{
	if (opts->op_type == CPERF_CIPHER_ONLY) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AUTH_ONLY) {
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
			opts->op_type == CPERF_AUTH_THEN_CIPHER) {
		if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
		} else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
			if (test_vec->plaintext.data == NULL)
				return -1;
			if (test_vec->plaintext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->ciphertext.data == NULL)
				return -1;
			if (test_vec->ciphertext.length < opts->max_buffer_size)
				return -1;
			if (test_vec->cipher_iv.data == NULL)
				return -1;
			if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
				return -1;
			if (test_vec->cipher_key.data == NULL)
				return -1;
			if (test_vec->cipher_key.length != opts->cipher_key_sz)
				return -1;
		}
		if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
			if (test_vec->auth_key.data == NULL)
				return -1;
			if (test_vec->auth_key.length != opts->auth_key_sz)
				return -1;
			if (test_vec->auth_iv.length != opts->auth_iv_sz)
				return -1;
			/* Auth IV is only required for some algorithms */
			if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
				return -1;
			if (test_vec->digest.data == NULL)
				return -1;
			if (test_vec->digest.length < opts->digest_sz)
				return -1;
		}
	} else if (opts->op_type == CPERF_AEAD) {
		if (test_vec->plaintext.data == NULL)
			return -1;
		if (test_vec->plaintext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->ciphertext.data == NULL)
			return -1;
		if (test_vec->ciphertext.length < opts->max_buffer_size)
			return -1;
		if (test_vec->aead_iv.data == NULL)
			return -1;
		if (test_vec->aead_iv.length != opts->aead_iv_sz)
			return -1;
		if (test_vec->aad.data == NULL)
			return -1;
		if (test_vec->aad.length != opts->aead_aad_sz)
			return -1;
		if (test_vec->digest.data == NULL)
			return -1;
		if (test_vec->digest.length < opts->digest_sz)
			return -1;
	}

	return 0;
}

int
main(int argc, char **argv)
{
	struct cperf_options opts = {0};
	struct cperf_test_vector *t_vec = NULL;
	struct cperf_op_fns op_fns;

	void *ctx[RTE_MAX_LCORE] = { };
	struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

	int nb_cryptodevs = 0;
	uint16_t total_nb_qps = 0;
	uint8_t cdev_id, i;
	uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

	uint8_t buffer_size_idx = 0;

	int ret;
	uint32_t lcore_id;

	/* Initialise DPDK EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
	argc -= ret;
	argv += ret;

	cperf_options_default(&opts);

	ret = cperf_options_parse(&opts, argc, argv);
	if (ret) {
		RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
		goto err;
	}

	ret = cperf_options_check(&opts);
	if (ret) {
		RTE_LOG(ERR, USER1,
				"Checking one or more user options failed\n");
		goto err;
	}

	nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
			session_pool_socket);

	if (!opts.silent)
		cperf_options_dump(&opts);

	if (nb_cryptodevs < 1) {
		RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
				"device type\n");
		nb_cryptodevs = 0;
		goto err;
	}

	ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
			nb_cryptodevs);
	if (ret) {
		RTE_LOG(ERR, USER1, "Crypto device type does not support "
				"capabilities requested\n");
		goto err;
	}

	if (opts.test_file != NULL) {
		t_vec = cperf_test_vector_get_from_file(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified file\n");
			goto err;
		}

		if (cperf_check_test_vector(&opts, t_vec)) {
			RTE_LOG(ERR, USER1,
					"Incomplete necessary test vectors\n");
			goto err;
		}
	} else {
		t_vec = cperf_test_vector_get_dummy(&opts);
		if (t_vec == NULL) {
			RTE_LOG(ERR, USER1,
					"Failed to create test vector for"
					" specified algorithms\n");
			goto err;
		}
	}

	ret = cperf_get_op_functions(&opts, &op_fns);
	if (ret) {
		RTE_LOG(ERR, USER1, "Failed to find function ops set for "
				"specified algorithms combination\n");
		goto err;
	}

	if (!opts.silent)
		show_test_vector(t_vec);

	total_nb_qps = nb_cryptodevs * opts.nb_qps;
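
	/*
	 * Build one test context per (device, queue pair) and hand them to
	 * the slave lcores in order: all queue pairs of one device first,
	 * then the next enabled device.
	 */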
	i = 0;
	uint8_t qp_id = 0, cdev_index = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[cdev_index];

		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		ctx[i] = cperf_testmap[opts.test].constructor(
				session_pool_socket[socket_id], cdev_id, qp_id,
				&opts, t_vec, &op_fns);
		if (ctx[i] == NULL) {
			RTE_LOG(ERR, USER1, "Test run constructor failed\n");
			goto err;
		}

		qp_id = (qp_id + 1) % opts.nb_qps;
		if (qp_id == 0)
			cdev_index++;
		i++;
	}

	/* Get first size from range or list */
	if (opts.inc_buffer_size != 0)
		opts.test_buffer_size = opts.min_buffer_size;
	else
		opts.test_buffer_size = opts.buffer_size_list[0];
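
	/*
	 * Run the test once per buffer size: launch the runner on every
	 * worker lcore, then wait for all of them to finish before moving
	 * to the next size.
	 */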
	while (opts.test_buffer_size <= opts.max_buffer_size) {
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;

			rte_eal_remote_launch(cperf_testmap[opts.test].runner,
				ctx[i], lcore_id);
			i++;
		}
		i = 0;
		RTE_LCORE_FOREACH_SLAVE(lcore_id) {

			if (i == total_nb_qps)
				break;
			rte_eal_wait_lcore(lcore_id);
			i++;
		}

		/* Get next size from range or list */
		if (opts.inc_buffer_size != 0)
			opts.test_buffer_size += opts.inc_buffer_size;
		else {
			if (++buffer_size_idx == opts.buffer_size_count)
				break;
			opts.test_buffer_size =
				opts.buffer_size_list[buffer_size_idx];
		}
	}

	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {

		if (i == total_nb_qps)
			break;

		cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_SUCCESS;

err:
	i = 0;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (i == total_nb_qps)
			break;

		cdev_id = enabled_cdevs[i];

		if (ctx[i] && cperf_testmap[opts.test].destructor)
			cperf_testmap[opts.test].destructor(ctx[i]);
		i++;
	}

	for (i = 0; i < nb_cryptodevs &&
			i < RTE_CRYPTO_MAX_DEVS; i++)
		rte_cryptodev_stop(enabled_cdevs[i]);

	free_test_vector(t_vec, &opts);

	printf("\n");
	return EXIT_FAILURE;
}