13e01218814b8d3e34aac5017c957aa24303cd68
[deb_dpdk.git] / app / test-crypto-perf / main.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <stdio.h>
34 #include <unistd.h>
35
36 #include <rte_eal.h>
37 #include <rte_cryptodev.h>
38 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
39 #include <rte_cryptodev_scheduler.h>
40 #endif
41
42 #include "cperf.h"
43 #include "cperf_options.h"
44 #include "cperf_test_vector_parsing.h"
45 #include "cperf_test_throughput.h"
46 #include "cperf_test_latency.h"
47 #include "cperf_test_verify.h"
48 #include "cperf_test_pmd_cyclecount.h"
49
50 #define NUM_SESSIONS 2048
51 #define SESS_MEMPOOL_CACHE_SIZE 64
52
/* Printable name of each test type, indexed by enum cperf_test_type;
 * used when parsing the --ptest option and when dumping options. */
const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};
59
/* Printable name of each crypto operation chain type, indexed by
 * enum cperf_op_type; used for option parsing and dumps. */
const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};
67
/* Dispatch table mapping each test type to its constructor / runner /
 * destructor triple (positional order matches struct cperf_test).
 * main() indexes this with opts.test to drive the selected test. */
const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};
90
91 static int
92 cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
93                         struct rte_mempool *session_pool_socket[])
94 {
95         uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
96         unsigned int i, j;
97         int ret;
98
99         enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
100                         enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
101         if (enabled_cdev_count == 0) {
102                 printf("No crypto devices type %s available\n",
103                                 opts->device_type);
104                 return -EINVAL;
105         }
106
107         nb_lcores = rte_lcore_count() - 1;
108
109         if (nb_lcores < 1) {
110                 RTE_LOG(ERR, USER1,
111                         "Number of enabled cores need to be higher than 1\n");
112                 return -EINVAL;
113         }
114
115         /*
116          * Use less number of devices,
117          * if there are more available than cores.
118          */
119         if (enabled_cdev_count > nb_lcores)
120                 enabled_cdev_count = nb_lcores;
121
122         /* Create a mempool shared by all the devices */
123         uint32_t max_sess_size = 0, sess_size;
124
125         for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
126                 sess_size = rte_cryptodev_get_private_session_size(cdev_id);
127                 if (sess_size > max_sess_size)
128                         max_sess_size = sess_size;
129         }
130
131         /*
132          * Calculate number of needed queue pairs, based on the amount
133          * of available number of logical cores and crypto devices.
134          * For instance, if there are 4 cores and 2 crypto devices,
135          * 2 queue pairs will be set up per device.
136          */
137         opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
138                                 (nb_lcores / enabled_cdev_count) + 1 :
139                                 nb_lcores / enabled_cdev_count;
140
141         for (i = 0; i < enabled_cdev_count &&
142                         i < RTE_CRYPTO_MAX_DEVS; i++) {
143                 cdev_id = enabled_cdevs[i];
144 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
145                 /*
146                  * If multi-core scheduler is used, limit the number
147                  * of queue pairs to 1, as there is no way to know
148                  * how many cores are being used by the PMD, and
149                  * how many will be available for the application.
150                  */
151                 if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
152                                 rte_cryptodev_scheduler_mode_get(cdev_id) ==
153                                 CDEV_SCHED_MODE_MULTICORE)
154                         opts->nb_qps = 1;
155 #endif
156
157                 struct rte_cryptodev_info cdev_info;
158                 uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
159
160                 rte_cryptodev_info_get(cdev_id, &cdev_info);
161                 if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
162                         printf("Number of needed queue pairs is higher "
163                                 "than the maximum number of queue pairs "
164                                 "per device.\n");
165                         printf("Lower the number of cores or increase "
166                                 "the number of crypto devices\n");
167                         return -EINVAL;
168                 }
169                 struct rte_cryptodev_config conf = {
170                         .nb_queue_pairs = opts->nb_qps,
171                         .socket_id = socket_id
172                 };
173
174                 struct rte_cryptodev_qp_conf qp_conf = {
175                         .nb_descriptors = opts->nb_descriptors
176                 };
177
178                 if (session_pool_socket[socket_id] == NULL) {
179                         char mp_name[RTE_MEMPOOL_NAMESIZE];
180                         struct rte_mempool *sess_mp;
181
182                         snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
183                                 "sess_mp_%u", socket_id);
184
185                         sess_mp = rte_mempool_create(mp_name,
186                                                 NUM_SESSIONS,
187                                                 max_sess_size,
188                                                 SESS_MEMPOOL_CACHE_SIZE,
189                                                 0, NULL, NULL, NULL,
190                                                 NULL, socket_id,
191                                                 0);
192
193                         if (sess_mp == NULL) {
194                                 printf("Cannot create session pool on socket %d\n",
195                                         socket_id);
196                                 return -ENOMEM;
197                         }
198
199                         printf("Allocated session pool on socket %d\n", socket_id);
200                         session_pool_socket[socket_id] = sess_mp;
201                 }
202
203                 ret = rte_cryptodev_configure(cdev_id, &conf);
204                 if (ret < 0) {
205                         printf("Failed to configure cryptodev %u", cdev_id);
206                         return -EINVAL;
207                 }
208
209                 for (j = 0; j < opts->nb_qps; j++) {
210                         ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
211                                 &qp_conf, socket_id,
212                                 session_pool_socket[socket_id]);
213                         if (ret < 0) {
214                                 printf("Failed to setup queue pair %u on "
215                                         "cryptodev %u", j, cdev_id);
216                                 return -EINVAL;
217                         }
218                 }
219
220                 ret = rte_cryptodev_start(cdev_id);
221                 if (ret < 0) {
222                         printf("Failed to start device %u: error %d\n",
223                                         cdev_id, ret);
224                         return -EPERM;
225                 }
226         }
227
228         return enabled_cdev_count;
229 }
230
231 static int
232 cperf_verify_devices_capabilities(struct cperf_options *opts,
233                 uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
234 {
235         struct rte_cryptodev_sym_capability_idx cap_idx;
236         const struct rte_cryptodev_symmetric_capability *capability;
237
238         uint8_t i, cdev_id;
239         int ret;
240
241         for (i = 0; i < nb_cryptodevs; i++) {
242
243                 cdev_id = enabled_cdevs[i];
244
245                 if (opts->op_type == CPERF_AUTH_ONLY ||
246                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
247                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
248
249                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
250                         cap_idx.algo.auth = opts->auth_algo;
251
252                         capability = rte_cryptodev_sym_capability_get(cdev_id,
253                                         &cap_idx);
254                         if (capability == NULL)
255                                 return -1;
256
257                         ret = rte_cryptodev_sym_capability_check_auth(
258                                         capability,
259                                         opts->auth_key_sz,
260                                         opts->digest_sz,
261                                         opts->auth_iv_sz);
262                         if (ret != 0)
263                                 return ret;
264                 }
265
266                 if (opts->op_type == CPERF_CIPHER_ONLY ||
267                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
268                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
269
270                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
271                         cap_idx.algo.cipher = opts->cipher_algo;
272
273                         capability = rte_cryptodev_sym_capability_get(cdev_id,
274                                         &cap_idx);
275                         if (capability == NULL)
276                                 return -1;
277
278                         ret = rte_cryptodev_sym_capability_check_cipher(
279                                         capability,
280                                         opts->cipher_key_sz,
281                                         opts->cipher_iv_sz);
282                         if (ret != 0)
283                                 return ret;
284                 }
285
286                 if (opts->op_type == CPERF_AEAD) {
287
288                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
289                         cap_idx.algo.aead = opts->aead_algo;
290
291                         capability = rte_cryptodev_sym_capability_get(cdev_id,
292                                         &cap_idx);
293                         if (capability == NULL)
294                                 return -1;
295
296                         ret = rte_cryptodev_sym_capability_check_aead(
297                                         capability,
298                                         opts->aead_key_sz,
299                                         opts->digest_sz,
300                                         opts->aead_aad_sz,
301                                         opts->aead_iv_sz);
302                         if (ret != 0)
303                                 return ret;
304                 }
305         }
306
307         return 0;
308 }
309
310 static int
311 cperf_check_test_vector(struct cperf_options *opts,
312                 struct cperf_test_vector *test_vec)
313 {
314         if (opts->op_type == CPERF_CIPHER_ONLY) {
315                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
316                         if (test_vec->plaintext.data == NULL)
317                                 return -1;
318                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
319                         if (test_vec->plaintext.data == NULL)
320                                 return -1;
321                         if (test_vec->plaintext.length < opts->max_buffer_size)
322                                 return -1;
323                         if (test_vec->ciphertext.data == NULL)
324                                 return -1;
325                         if (test_vec->ciphertext.length < opts->max_buffer_size)
326                                 return -1;
327                         if (test_vec->cipher_iv.data == NULL)
328                                 return -1;
329                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
330                                 return -1;
331                         if (test_vec->cipher_key.data == NULL)
332                                 return -1;
333                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
334                                 return -1;
335                 }
336         } else if (opts->op_type == CPERF_AUTH_ONLY) {
337                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
338                         if (test_vec->plaintext.data == NULL)
339                                 return -1;
340                         if (test_vec->plaintext.length < opts->max_buffer_size)
341                                 return -1;
342                         if (test_vec->auth_key.data == NULL)
343                                 return -1;
344                         if (test_vec->auth_key.length != opts->auth_key_sz)
345                                 return -1;
346                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
347                                 return -1;
348                         /* Auth IV is only required for some algorithms */
349                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
350                                 return -1;
351                         if (test_vec->digest.data == NULL)
352                                 return -1;
353                         if (test_vec->digest.length < opts->digest_sz)
354                                 return -1;
355                 }
356
357         } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
358                         opts->op_type == CPERF_AUTH_THEN_CIPHER) {
359                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
360                         if (test_vec->plaintext.data == NULL)
361                                 return -1;
362                         if (test_vec->plaintext.length < opts->max_buffer_size)
363                                 return -1;
364                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
365                         if (test_vec->plaintext.data == NULL)
366                                 return -1;
367                         if (test_vec->plaintext.length < opts->max_buffer_size)
368                                 return -1;
369                         if (test_vec->ciphertext.data == NULL)
370                                 return -1;
371                         if (test_vec->ciphertext.length < opts->max_buffer_size)
372                                 return -1;
373                         if (test_vec->cipher_iv.data == NULL)
374                                 return -1;
375                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
376                                 return -1;
377                         if (test_vec->cipher_key.data == NULL)
378                                 return -1;
379                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
380                                 return -1;
381                 }
382                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
383                         if (test_vec->auth_key.data == NULL)
384                                 return -1;
385                         if (test_vec->auth_key.length != opts->auth_key_sz)
386                                 return -1;
387                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
388                                 return -1;
389                         /* Auth IV is only required for some algorithms */
390                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
391                                 return -1;
392                         if (test_vec->digest.data == NULL)
393                                 return -1;
394                         if (test_vec->digest.length < opts->digest_sz)
395                                 return -1;
396                 }
397         } else if (opts->op_type == CPERF_AEAD) {
398                 if (test_vec->plaintext.data == NULL)
399                         return -1;
400                 if (test_vec->plaintext.length < opts->max_buffer_size)
401                         return -1;
402                 if (test_vec->ciphertext.data == NULL)
403                         return -1;
404                 if (test_vec->ciphertext.length < opts->max_buffer_size)
405                         return -1;
406                 if (test_vec->aead_iv.data == NULL)
407                         return -1;
408                 if (test_vec->aead_iv.length != opts->aead_iv_sz)
409                         return -1;
410                 if (test_vec->aad.data == NULL)
411                         return -1;
412                 if (test_vec->aad.length != opts->aead_aad_sz)
413                         return -1;
414                 if (test_vec->digest.data == NULL)
415                         return -1;
416                 if (test_vec->digest.length < opts->digest_sz)
417                         return -1;
418         }
419         return 0;
420 }
421
422 int
423 main(int argc, char **argv)
424 {
425         struct cperf_options opts = {0};
426         struct cperf_test_vector *t_vec = NULL;
427         struct cperf_op_fns op_fns;
428
429         void *ctx[RTE_MAX_LCORE] = { };
430         struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
431
432         int nb_cryptodevs = 0;
433         uint16_t total_nb_qps = 0;
434         uint8_t cdev_id, i;
435         uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
436
437         uint8_t buffer_size_idx = 0;
438
439         int ret;
440         uint32_t lcore_id;
441
442         /* Initialise DPDK EAL */
443         ret = rte_eal_init(argc, argv);
444         if (ret < 0)
445                 rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
446         argc -= ret;
447         argv += ret;
448
449         cperf_options_default(&opts);
450
451         ret = cperf_options_parse(&opts, argc, argv);
452         if (ret) {
453                 RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
454                 goto err;
455         }
456
457         ret = cperf_options_check(&opts);
458         if (ret) {
459                 RTE_LOG(ERR, USER1,
460                                 "Checking on or more user options failed\n");
461                 goto err;
462         }
463
464         nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
465                         session_pool_socket);
466
467         if (!opts.silent)
468                 cperf_options_dump(&opts);
469
470         if (nb_cryptodevs < 1) {
471                 RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
472                                 "device type\n");
473                 nb_cryptodevs = 0;
474                 goto err;
475         }
476
477         ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
478                         nb_cryptodevs);
479         if (ret) {
480                 RTE_LOG(ERR, USER1, "Crypto device type does not support "
481                                 "capabilities requested\n");
482                 goto err;
483         }
484
485         if (opts.test_file != NULL) {
486                 t_vec = cperf_test_vector_get_from_file(&opts);
487                 if (t_vec == NULL) {
488                         RTE_LOG(ERR, USER1,
489                                         "Failed to create test vector for"
490                                         " specified file\n");
491                         goto err;
492                 }
493
494                 if (cperf_check_test_vector(&opts, t_vec)) {
495                         RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
496                                         "\n");
497                         goto err;
498                 }
499         } else {
500                 t_vec = cperf_test_vector_get_dummy(&opts);
501                 if (t_vec == NULL) {
502                         RTE_LOG(ERR, USER1,
503                                         "Failed to create test vector for"
504                                         " specified algorithms\n");
505                         goto err;
506                 }
507         }
508
509         ret = cperf_get_op_functions(&opts, &op_fns);
510         if (ret) {
511                 RTE_LOG(ERR, USER1, "Failed to find function ops set for "
512                                 "specified algorithms combination\n");
513                 goto err;
514         }
515
516         if (!opts.silent)
517                 show_test_vector(t_vec);
518
519         total_nb_qps = nb_cryptodevs * opts.nb_qps;
520
521         i = 0;
522         uint8_t qp_id = 0, cdev_index = 0;
523         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
524
525                 if (i == total_nb_qps)
526                         break;
527
528                 cdev_id = enabled_cdevs[cdev_index];
529
530                 uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
531
532                 ctx[i] = cperf_testmap[opts.test].constructor(
533                                 session_pool_socket[socket_id], cdev_id, qp_id,
534                                 &opts, t_vec, &op_fns);
535                 if (ctx[i] == NULL) {
536                         RTE_LOG(ERR, USER1, "Test run constructor failed\n");
537                         goto err;
538                 }
539                 qp_id = (qp_id + 1) % opts.nb_qps;
540                 if (qp_id == 0)
541                         cdev_index++;
542                 i++;
543         }
544
545         /* Get first size from range or list */
546         if (opts.inc_buffer_size != 0)
547                 opts.test_buffer_size = opts.min_buffer_size;
548         else
549                 opts.test_buffer_size = opts.buffer_size_list[0];
550
551         while (opts.test_buffer_size <= opts.max_buffer_size) {
552                 i = 0;
553                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
554
555                         if (i == total_nb_qps)
556                                 break;
557
558                         rte_eal_remote_launch(cperf_testmap[opts.test].runner,
559                                 ctx[i], lcore_id);
560                         i++;
561                 }
562                 i = 0;
563                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
564
565                         if (i == total_nb_qps)
566                                 break;
567                         rte_eal_wait_lcore(lcore_id);
568                         i++;
569                 }
570
571                 /* Get next size from range or list */
572                 if (opts.inc_buffer_size != 0)
573                         opts.test_buffer_size += opts.inc_buffer_size;
574                 else {
575                         if (++buffer_size_idx == opts.buffer_size_count)
576                                 break;
577                         opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
578                 }
579         }
580
581         i = 0;
582         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
583
584                 if (i == total_nb_qps)
585                         break;
586
587                 cperf_testmap[opts.test].destructor(ctx[i]);
588                 i++;
589         }
590
591         for (i = 0; i < nb_cryptodevs &&
592                         i < RTE_CRYPTO_MAX_DEVS; i++)
593                 rte_cryptodev_stop(enabled_cdevs[i]);
594
595         free_test_vector(t_vec, &opts);
596
597         printf("\n");
598         return EXIT_SUCCESS;
599
600 err:
601         i = 0;
602         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
603                 if (i == total_nb_qps)
604                         break;
605
606                 cdev_id = enabled_cdevs[i];
607
608                 if (ctx[i] && cperf_testmap[opts.test].destructor)
609                         cperf_testmap[opts.test].destructor(ctx[i]);
610                 i++;
611         }
612
613         for (i = 0; i < nb_cryptodevs &&
614                         i < RTE_CRYPTO_MAX_DEVS; i++)
615                 rte_cryptodev_stop(enabled_cdevs[i]);
616
617         free_test_vector(t_vec, &opts);
618
619         printf("\n");
620         return EXIT_FAILURE;
621 }