New upstream version 17.11.5
[deb_dpdk.git] / app / test-crypto-perf / main.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <stdio.h>
34 #include <unistd.h>
35
36 #include <rte_eal.h>
37 #include <rte_cryptodev.h>
38 #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
39 #include <rte_cryptodev_scheduler.h>
40 #endif
41
42 #include "cperf.h"
43 #include "cperf_options.h"
44 #include "cperf_test_vector_parsing.h"
45 #include "cperf_test_throughput.h"
46 #include "cperf_test_latency.h"
47 #include "cperf_test_verify.h"
48 #include "cperf_test_pmd_cyclecount.h"
49
/* Session mempool sizing: one pool is created per NUMA socket and shared
 * by every crypto device on that socket.
 */
#define NUM_SESSIONS 2048
#define SESS_MEMPOOL_CACHE_SIZE 64

/* Human-readable names for the --ptest test types, indexed by
 * enum cperf_perf_test_type.
 */
const char *cperf_test_type_strs[] = {
	[CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
	[CPERF_TEST_TYPE_LATENCY] = "latency",
	[CPERF_TEST_TYPE_VERIFY] = "verify",
	[CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

/* Human-readable names for the --optype operation chains, indexed by
 * enum cperf_op_type.
 */
const char *cperf_op_type_strs[] = {
	[CPERF_CIPHER_ONLY] = "cipher-only",
	[CPERF_AUTH_ONLY] = "auth-only",
	[CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
	[CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
	[CPERF_AEAD] = "aead"
};

/* Constructor/runner/destructor triplet for each test type; main() picks
 * the entry via opts.test.
 */
const struct cperf_test cperf_testmap[] = {
		[CPERF_TEST_TYPE_THROUGHPUT] = {
				cperf_throughput_test_constructor,
				cperf_throughput_test_runner,
				cperf_throughput_test_destructor
		},
		[CPERF_TEST_TYPE_LATENCY] = {
				cperf_latency_test_constructor,
				cperf_latency_test_runner,
				cperf_latency_test_destructor
		},
		[CPERF_TEST_TYPE_VERIFY] = {
				cperf_verify_test_constructor,
				cperf_verify_test_runner,
				cperf_verify_test_destructor
		},
		[CPERF_TEST_TYPE_PMDCC] = {
				cperf_pmd_cyclecount_test_constructor,
				cperf_pmd_cyclecount_test_runner,
				cperf_pmd_cyclecount_test_destructor
		}
};
90
/*
 * Probe and bring up all crypto devices of the requested --devtype.
 *
 * On success, enabled_cdevs[] holds the ids of the devices that will be
 * used, opts->nb_qps is set to the number of queue pairs configured per
 * device, session_pool_socket[] holds one (lazily created) session
 * mempool per NUMA socket, and every selected device is configured and
 * started.
 *
 * Returns the number of devices enabled (> 0), or a negative errno-style
 * value (-EINVAL, -ENOMEM, -EPERM) on failure.
 */
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
			struct rte_mempool *session_pool_socket[])
{
	uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
	unsigned int i, j;
	int ret;

	enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
			enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
	if (enabled_cdev_count == 0) {
		printf("No crypto devices type %s available\n",
				opts->device_type);
		return -EINVAL;
	}

	/* One lcore is the master; only the remaining ones run test qps. */
	nb_lcores = rte_lcore_count() - 1;

	if (nb_lcores < 1) {
		RTE_LOG(ERR, USER1,
			"Number of enabled cores need to be higher than 1\n");
		return -EINVAL;
	}

	/*
	 * Use less number of devices,
	 * if there are more available than cores.
	 */
	if (enabled_cdev_count > nb_lcores)
		enabled_cdev_count = nb_lcores;

	/* Create a mempool shared by all the devices */
	uint32_t max_sess_size = 0, sess_size;

	/* Size session elements for the largest private session any PMD
	 * present in the system needs.
	 */
	for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
		sess_size = rte_cryptodev_get_private_session_size(cdev_id);
		if (sess_size > max_sess_size)
			max_sess_size = sess_size;
	}

	/*
	 * Calculate number of needed queue pairs, based on the amount
	 * of available number of logical cores and crypto devices.
	 * For instance, if there are 4 cores and 2 crypto devices,
	 * 2 queue pairs will be set up per device.
	 */
	opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
				(nb_lcores / enabled_cdev_count) + 1 :
				nb_lcores / enabled_cdev_count;

	for (i = 0; i < enabled_cdev_count &&
			i < RTE_CRYPTO_MAX_DEVS; i++) {
		cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
		/*
		 * If multi-core scheduler is used, limit the number
		 * of queue pairs to 1, as there is no way to know
		 * how many cores are being used by the PMD, and
		 * how many will be available for the application.
		 */
		if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
				rte_cryptodev_scheduler_mode_get(cdev_id) ==
				CDEV_SCHED_MODE_MULTICORE)
			opts->nb_qps = 1;
#endif

		struct rte_cryptodev_info cdev_info;
		uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

		rte_cryptodev_info_get(cdev_id, &cdev_info);
		if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
			printf("Number of needed queue pairs is higher "
				"than the maximum number of queue pairs "
				"per device.\n");
			printf("Lower the number of cores or increase "
				"the number of crypto devices\n");
			return -EINVAL;
		}
		struct rte_cryptodev_config conf = {
			.nb_queue_pairs = opts->nb_qps,
			.socket_id = socket_id
		};

		struct rte_cryptodev_qp_conf qp_conf = {
			.nb_descriptors = opts->nb_descriptors
		};

		/* Create the per-socket session pool on first use; later
		 * devices on the same socket reuse it.
		 */
		if (session_pool_socket[socket_id] == NULL) {
			char mp_name[RTE_MEMPOOL_NAMESIZE];
			struct rte_mempool *sess_mp;

			snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
				"sess_mp_%u", socket_id);

			sess_mp = rte_mempool_create(mp_name,
						NUM_SESSIONS,
						max_sess_size,
						SESS_MEMPOOL_CACHE_SIZE,
						0, NULL, NULL, NULL,
						NULL, socket_id,
						0);

			if (sess_mp == NULL) {
				printf("Cannot create session pool on socket %d\n",
					socket_id);
				return -ENOMEM;
			}

			printf("Allocated session pool on socket %d\n", socket_id);
			session_pool_socket[socket_id] = sess_mp;
		}

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret < 0) {
			printf("Failed to configure cryptodev %u", cdev_id);
			return -EINVAL;
		}

		/* Set up every queue pair on the device's own socket. */
		for (j = 0; j < opts->nb_qps; j++) {
			ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
				&qp_conf, socket_id,
				session_pool_socket[socket_id]);
			if (ret < 0) {
				printf("Failed to setup queue pair %u on "
					"cryptodev %u", j, cdev_id);
				return -EINVAL;
			}
		}

		ret = rte_cryptodev_start(cdev_id);
		if (ret < 0) {
			printf("Failed to start device %u: error %d\n",
					cdev_id, ret);
			return -EPERM;
		}
	}

	return enabled_cdev_count;
}
230
231 static int
232 cperf_verify_devices_capabilities(struct cperf_options *opts,
233                 uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
234 {
235         struct rte_cryptodev_sym_capability_idx cap_idx;
236         const struct rte_cryptodev_symmetric_capability *capability;
237
238         uint8_t i, cdev_id;
239         int ret;
240
241         for (i = 0; i < nb_cryptodevs; i++) {
242
243                 cdev_id = enabled_cdevs[i];
244
245                 if (opts->op_type == CPERF_AUTH_ONLY ||
246                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
247                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
248
249                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
250                         cap_idx.algo.auth = opts->auth_algo;
251
252                         capability = rte_cryptodev_sym_capability_get(cdev_id,
253                                         &cap_idx);
254                         if (capability == NULL)
255                                 return -1;
256
257                         ret = rte_cryptodev_sym_capability_check_auth(
258                                         capability,
259                                         opts->auth_key_sz,
260                                         opts->digest_sz,
261                                         opts->auth_iv_sz);
262                         if (ret != 0)
263                                 return ret;
264                 }
265
266                 if (opts->op_type == CPERF_CIPHER_ONLY ||
267                                 opts->op_type == CPERF_CIPHER_THEN_AUTH ||
268                                 opts->op_type == CPERF_AUTH_THEN_CIPHER) {
269
270                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
271                         cap_idx.algo.cipher = opts->cipher_algo;
272
273                         capability = rte_cryptodev_sym_capability_get(cdev_id,
274                                         &cap_idx);
275                         if (capability == NULL)
276                                 return -1;
277
278                         ret = rte_cryptodev_sym_capability_check_cipher(
279                                         capability,
280                                         opts->cipher_key_sz,
281                                         opts->cipher_iv_sz);
282                         if (ret != 0)
283                                 return ret;
284                 }
285
286                 if (opts->op_type == CPERF_AEAD) {
287
288                         cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
289                         cap_idx.algo.aead = opts->aead_algo;
290
291                         capability = rte_cryptodev_sym_capability_get(cdev_id,
292                                         &cap_idx);
293                         if (capability == NULL)
294                                 return -1;
295
296                         ret = rte_cryptodev_sym_capability_check_aead(
297                                         capability,
298                                         opts->aead_key_sz,
299                                         opts->digest_sz,
300                                         opts->aead_aad_sz,
301                                         opts->aead_iv_sz);
302                         if (ret != 0)
303                                 return ret;
304                 }
305         }
306
307         return 0;
308 }
309
310 static int
311 cperf_check_test_vector(struct cperf_options *opts,
312                 struct cperf_test_vector *test_vec)
313 {
314         if (opts->op_type == CPERF_CIPHER_ONLY) {
315                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
316                         if (test_vec->plaintext.data == NULL)
317                                 return -1;
318                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
319                         if (test_vec->plaintext.data == NULL)
320                                 return -1;
321                         if (test_vec->plaintext.length < opts->max_buffer_size)
322                                 return -1;
323                         if (test_vec->ciphertext.data == NULL)
324                                 return -1;
325                         if (test_vec->ciphertext.length < opts->max_buffer_size)
326                                 return -1;
327                         /* Cipher IV is only required for some algorithms */
328                         if (opts->cipher_iv_sz &&
329                                         test_vec->cipher_iv.data == NULL)
330                                 return -1;
331                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
332                                 return -1;
333                         if (test_vec->cipher_key.data == NULL)
334                                 return -1;
335                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
336                                 return -1;
337                 }
338         } else if (opts->op_type == CPERF_AUTH_ONLY) {
339                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
340                         if (test_vec->plaintext.data == NULL)
341                                 return -1;
342                         if (test_vec->plaintext.length < opts->max_buffer_size)
343                                 return -1;
344                         /* Auth key is only required for some algorithms */
345                         if (opts->auth_key_sz &&
346                                         test_vec->auth_key.data == NULL)
347                                 return -1;
348                         if (test_vec->auth_key.length != opts->auth_key_sz)
349                                 return -1;
350                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
351                                 return -1;
352                         /* Auth IV is only required for some algorithms */
353                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
354                                 return -1;
355                         if (test_vec->digest.data == NULL)
356                                 return -1;
357                         if (test_vec->digest.length < opts->digest_sz)
358                                 return -1;
359                 }
360
361         } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
362                         opts->op_type == CPERF_AUTH_THEN_CIPHER) {
363                 if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
364                         if (test_vec->plaintext.data == NULL)
365                                 return -1;
366                         if (test_vec->plaintext.length < opts->max_buffer_size)
367                                 return -1;
368                 } else if (opts->cipher_algo != RTE_CRYPTO_CIPHER_NULL) {
369                         if (test_vec->plaintext.data == NULL)
370                                 return -1;
371                         if (test_vec->plaintext.length < opts->max_buffer_size)
372                                 return -1;
373                         if (test_vec->ciphertext.data == NULL)
374                                 return -1;
375                         if (test_vec->ciphertext.length < opts->max_buffer_size)
376                                 return -1;
377                         if (test_vec->cipher_iv.data == NULL)
378                                 return -1;
379                         if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
380                                 return -1;
381                         if (test_vec->cipher_key.data == NULL)
382                                 return -1;
383                         if (test_vec->cipher_key.length != opts->cipher_key_sz)
384                                 return -1;
385                 }
386                 if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
387                         if (test_vec->auth_key.data == NULL)
388                                 return -1;
389                         if (test_vec->auth_key.length != opts->auth_key_sz)
390                                 return -1;
391                         if (test_vec->auth_iv.length != opts->auth_iv_sz)
392                                 return -1;
393                         /* Auth IV is only required for some algorithms */
394                         if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
395                                 return -1;
396                         if (test_vec->digest.data == NULL)
397                                 return -1;
398                         if (test_vec->digest.length < opts->digest_sz)
399                                 return -1;
400                 }
401         } else if (opts->op_type == CPERF_AEAD) {
402                 if (test_vec->plaintext.data == NULL)
403                         return -1;
404                 if (test_vec->plaintext.length < opts->max_buffer_size)
405                         return -1;
406                 if (test_vec->ciphertext.data == NULL)
407                         return -1;
408                 if (test_vec->ciphertext.length < opts->max_buffer_size)
409                         return -1;
410                 if (test_vec->aead_iv.data == NULL)
411                         return -1;
412                 if (test_vec->aead_iv.length != opts->aead_iv_sz)
413                         return -1;
414                 if (test_vec->aad.data == NULL)
415                         return -1;
416                 if (test_vec->aad.length != opts->aead_aad_sz)
417                         return -1;
418                 if (test_vec->digest.data == NULL)
419                         return -1;
420                 if (test_vec->digest.length < opts->digest_sz)
421                         return -1;
422         }
423         return 0;
424 }
425
426 int
427 main(int argc, char **argv)
428 {
429         struct cperf_options opts = {0};
430         struct cperf_test_vector *t_vec = NULL;
431         struct cperf_op_fns op_fns;
432
433         void *ctx[RTE_MAX_LCORE] = { };
434         struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };
435
436         int nb_cryptodevs = 0;
437         uint16_t total_nb_qps = 0;
438         uint8_t cdev_id, i;
439         uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };
440
441         uint8_t buffer_size_idx = 0;
442
443         int ret;
444         uint32_t lcore_id;
445
446         /* Initialise DPDK EAL */
447         ret = rte_eal_init(argc, argv);
448         if (ret < 0)
449                 rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
450         argc -= ret;
451         argv += ret;
452
453         cperf_options_default(&opts);
454
455         ret = cperf_options_parse(&opts, argc, argv);
456         if (ret) {
457                 RTE_LOG(ERR, USER1, "Parsing on or more user options failed\n");
458                 goto err;
459         }
460
461         ret = cperf_options_check(&opts);
462         if (ret) {
463                 RTE_LOG(ERR, USER1,
464                                 "Checking on or more user options failed\n");
465                 goto err;
466         }
467
468         nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
469                         session_pool_socket);
470
471         if (!opts.silent)
472                 cperf_options_dump(&opts);
473
474         if (nb_cryptodevs < 1) {
475                 RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
476                                 "device type\n");
477                 nb_cryptodevs = 0;
478                 goto err;
479         }
480
481         ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
482                         nb_cryptodevs);
483         if (ret) {
484                 RTE_LOG(ERR, USER1, "Crypto device type does not support "
485                                 "capabilities requested\n");
486                 goto err;
487         }
488
489         if (opts.test_file != NULL) {
490                 t_vec = cperf_test_vector_get_from_file(&opts);
491                 if (t_vec == NULL) {
492                         RTE_LOG(ERR, USER1,
493                                         "Failed to create test vector for"
494                                         " specified file\n");
495                         goto err;
496                 }
497
498                 if (cperf_check_test_vector(&opts, t_vec)) {
499                         RTE_LOG(ERR, USER1, "Incomplete necessary test vectors"
500                                         "\n");
501                         goto err;
502                 }
503         } else {
504                 t_vec = cperf_test_vector_get_dummy(&opts);
505                 if (t_vec == NULL) {
506                         RTE_LOG(ERR, USER1,
507                                         "Failed to create test vector for"
508                                         " specified algorithms\n");
509                         goto err;
510                 }
511         }
512
513         ret = cperf_get_op_functions(&opts, &op_fns);
514         if (ret) {
515                 RTE_LOG(ERR, USER1, "Failed to find function ops set for "
516                                 "specified algorithms combination\n");
517                 goto err;
518         }
519
520         if (!opts.silent)
521                 show_test_vector(t_vec);
522
523         total_nb_qps = nb_cryptodevs * opts.nb_qps;
524
525         i = 0;
526         uint8_t qp_id = 0, cdev_index = 0;
527         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
528
529                 if (i == total_nb_qps)
530                         break;
531
532                 cdev_id = enabled_cdevs[cdev_index];
533
534                 uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
535
536                 ctx[i] = cperf_testmap[opts.test].constructor(
537                                 session_pool_socket[socket_id], cdev_id, qp_id,
538                                 &opts, t_vec, &op_fns);
539                 if (ctx[i] == NULL) {
540                         RTE_LOG(ERR, USER1, "Test run constructor failed\n");
541                         goto err;
542                 }
543                 qp_id = (qp_id + 1) % opts.nb_qps;
544                 if (qp_id == 0)
545                         cdev_index++;
546                 i++;
547         }
548
549         /* Get first size from range or list */
550         if (opts.inc_buffer_size != 0)
551                 opts.test_buffer_size = opts.min_buffer_size;
552         else
553                 opts.test_buffer_size = opts.buffer_size_list[0];
554
555         while (opts.test_buffer_size <= opts.max_buffer_size) {
556                 i = 0;
557                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
558
559                         if (i == total_nb_qps)
560                                 break;
561
562                         rte_eal_remote_launch(cperf_testmap[opts.test].runner,
563                                 ctx[i], lcore_id);
564                         i++;
565                 }
566                 i = 0;
567                 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
568
569                         if (i == total_nb_qps)
570                                 break;
571                         rte_eal_wait_lcore(lcore_id);
572                         i++;
573                 }
574
575                 /* Get next size from range or list */
576                 if (opts.inc_buffer_size != 0)
577                         opts.test_buffer_size += opts.inc_buffer_size;
578                 else {
579                         if (++buffer_size_idx == opts.buffer_size_count)
580                                 break;
581                         opts.test_buffer_size = opts.buffer_size_list[buffer_size_idx];
582                 }
583         }
584
585         i = 0;
586         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
587
588                 if (i == total_nb_qps)
589                         break;
590
591                 cperf_testmap[opts.test].destructor(ctx[i]);
592                 i++;
593         }
594
595         for (i = 0; i < nb_cryptodevs &&
596                         i < RTE_CRYPTO_MAX_DEVS; i++)
597                 rte_cryptodev_stop(enabled_cdevs[i]);
598
599         free_test_vector(t_vec, &opts);
600
601         printf("\n");
602         return EXIT_SUCCESS;
603
604 err:
605         i = 0;
606         RTE_LCORE_FOREACH_SLAVE(lcore_id) {
607                 if (i == total_nb_qps)
608                         break;
609
610                 cdev_id = enabled_cdevs[i];
611
612                 if (ctx[i] && cperf_testmap[opts.test].destructor)
613                         cperf_testmap[opts.test].destructor(ctx[i]);
614                 i++;
615         }
616
617         for (i = 0; i < nb_cryptodevs &&
618                         i < RTE_CRYPTO_MAX_DEVS; i++)
619                 rte_cryptodev_stop(enabled_cdevs[i]);
620
621         free_test_vector(t_vec, &opts);
622
623         printf("\n");
624         return EXIT_FAILURE;
625 }