/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"

#define NUM_SESSIONS 2048
#define SESS_MEMPOOL_CACHE_SIZE 64
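/*
 * NUM_SESSIONS bounds each per-socket session mempool;
 * SESS_MEMPOOL_CACHE_SIZE sets the per-lcore object cache, trading a
 * little memory for cheaper session alloc/free on the hot path.
 */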

const char *cperf_test_type_strs[] = {
        [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
        [CPERF_TEST_TYPE_LATENCY] = "latency",
        [CPERF_TEST_TYPE_VERIFY] = "verify",
        [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
        [CPERF_CIPHER_ONLY] = "cipher-only",
        [CPERF_AUTH_ONLY] = "auth-only",
        [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
        [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
        [CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
                [CPERF_TEST_TYPE_THROUGHPUT] = {
                                cperf_throughput_test_constructor,
                                cperf_throughput_test_runner,
                                cperf_throughput_test_destructor
                },
                [CPERF_TEST_TYPE_LATENCY] = {
                                cperf_latency_test_constructor,
                                cperf_latency_test_runner,
                                cperf_latency_test_destructor
                },
                [CPERF_TEST_TYPE_VERIFY] = {
                                cperf_verify_test_constructor,
                                cperf_verify_test_runner,
                                cperf_verify_test_destructor
                },
                [CPERF_TEST_TYPE_PMDCC] = {
                                cperf_pmd_cyclecount_test_constructor,
                                cperf_pmd_cyclecount_test_runner,
                                cperf_pmd_cyclecount_test_destructor
                }
};

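/*
 * Configure and start every crypto device of the requested type:
 * spread the available slave lcores across the devices as queue pairs
 * and create one session mempool per NUMA socket. Returns the number
 * of enabled devices, or a negative errno on failure.
 */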
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
                        struct rte_mempool *session_pool_socket[])
{
        uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
        unsigned int i, j;
        int ret;

        enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
                        enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
        if (enabled_cdev_count == 0) {
                printf("No crypto devices of type %s available\n",
                                opts->device_type);
                return -EINVAL;
        }

        nb_lcores = rte_lcore_count() - 1;

        if (enabled_cdev_count > nb_lcores) {
                printf("Number of capable crypto devices (%d) "
                                "must be less than or equal to the number "
                                "of slave cores (%d)\n",
                                enabled_cdev_count, nb_lcores);
                return -EINVAL;
        }

        /* Create a mempool shared by all the devices */
        uint32_t max_sess_size = 0, sess_size;

        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                sess_size = rte_cryptodev_get_private_session_size(cdev_id);
                if (sess_size > max_sess_size)
                        max_sess_size = sess_size;
        }

        /*
         * Calculate the number of queue pairs needed, based on the number
         * of available logical cores and crypto devices. For instance,
         * if there are 4 cores and 2 crypto devices, 2 queue pairs will
         * be set up per device.
         */
        opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
                                (nb_lcores / enabled_cdev_count) + 1 :
                                nb_lcores / enabled_cdev_count;
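        /*
         * The division is rounded up when it is not exact: e.g. with
         * 7 slave cores and 2 devices, ceil(7/2) = 4 queue pairs are
         * set up per device.
         */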

        for (i = 0; i < enabled_cdev_count &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                /*
                 * If multi-core scheduler is used, limit the number
                 * of queue pairs to 1, as there is no way to know
                 * how many cores are being used by the PMD, and
                 * how many will be available for the application.
                 */
                if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
                                rte_cryptodev_scheduler_mode_get(cdev_id) ==
                                CDEV_SCHED_MODE_MULTICORE)
                        opts->nb_qps = 1;
#endif

                struct rte_cryptodev_info cdev_info;
                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                rte_cryptodev_info_get(cdev_id, &cdev_info);
                if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
                        printf("Number of needed queue pairs is higher "
                                "than the maximum number of queue pairs "
                                "per device.\n");
                        printf("Lower the number of cores or increase "
                                "the number of crypto devices\n");
                        return -EINVAL;
                }
                struct rte_cryptodev_config conf = {
                        .nb_queue_pairs = opts->nb_qps,
                        .socket_id = socket_id
                };

                struct rte_cryptodev_qp_conf qp_conf = {
                        .nb_descriptors = opts->nb_descriptors
                };

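                /*
                 * One session mempool per NUMA socket, shared by every
                 * device on that socket and created lazily the first time
                 * a device on the socket is configured.
                 */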
                if (session_pool_socket[socket_id] == NULL) {
                        char mp_name[RTE_MEMPOOL_NAMESIZE];
                        struct rte_mempool *sess_mp;

                        snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                                "sess_mp_%u", socket_id);

                        sess_mp = rte_mempool_create(mp_name,
                                                NUM_SESSIONS,
                                                max_sess_size,
                                                SESS_MEMPOOL_CACHE_SIZE,
                                                0, NULL, NULL, NULL,
                                                NULL, socket_id,
                                                0);

                        if (sess_mp == NULL) {
                                printf("Cannot create session pool on socket %d\n",
                                        socket_id);
                                return -ENOMEM;
                        }

                        printf("Allocated session pool on socket %d\n", socket_id);
                        session_pool_socket[socket_id] = sess_mp;
                }

                ret = rte_cryptodev_configure(cdev_id, &conf);
                if (ret < 0) {
                        printf("Failed to configure cryptodev %u\n", cdev_id);
                        return -EINVAL;
                }

                for (j = 0; j < opts->nb_qps; j++) {
                        ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
                                &qp_conf, socket_id,
                                session_pool_socket[socket_id]);
                        if (ret < 0) {
                                printf("Failed to set up queue pair %u on "
                                        "cryptodev %u\n", j, cdev_id);
                                return -EINVAL;
                        }
                }

                ret = rte_cryptodev_start(cdev_id);
                if (ret < 0) {
                        printf("Failed to start device %u: error %d\n",
                                        cdev_id, ret);
                        return -EPERM;
                }
        }

        return enabled_cdev_count;
}

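/*
 * Check that every enabled device supports the requested algorithms
 * and the key, digest, IV and AAD sizes that go with them.
 */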
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
                uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
        struct rte_cryptodev_sym_capability_idx cap_idx;
        const struct rte_cryptodev_symmetric_capability *capability;

        uint8_t i, cdev_id;
        int ret;

        for (i = 0; i < nb_cryptodevs; i++) {

                cdev_id = enabled_cdevs[i];

                if (opts->op_type == CPERF_AUTH_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                        cap_idx.algo.auth = opts->auth_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_auth(
                                        capability,
                                        opts->auth_key_sz,
                                        opts->digest_sz,
                                        opts->auth_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_CIPHER_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        cap_idx.algo.cipher = opts->cipher_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_cipher(
                                        capability,
                                        opts->cipher_key_sz,
                                        opts->cipher_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_AEAD) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
                        cap_idx.algo.aead = opts->aead_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_aead(
                                        capability,
                                        opts->aead_key_sz,
                                        opts->digest_sz,
                                        opts->aead_aad_sz,
                                        opts->aead_iv_sz);
                        if (ret != 0)
                                return ret;
                }
        }

        return 0;
}

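/*
 * Validate that a test vector parsed from a file provides every field
 * the selected operation type needs, with lengths matching the options.
 */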
static int
cperf_check_test_vector(struct cperf_options *opts,
                struct cperf_test_vector *test_vec)
{
        if (opts->op_type == CPERF_CIPHER_ONLY) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AUTH_ONLY) {
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }

        } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                        opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AEAD) {
                if (test_vec->plaintext.data == NULL)
                        return -1;
                if (test_vec->plaintext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->ciphertext.data == NULL)
                        return -1;
                if (test_vec->ciphertext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->aead_iv.data == NULL)
                        return -1;
                if (test_vec->aead_iv.length != opts->aead_iv_sz)
                        return -1;
                if (test_vec->aad.data == NULL)
                        return -1;
                if (test_vec->aad.length != opts->aead_aad_sz)
                        return -1;
                if (test_vec->digest.data == NULL)
                        return -1;
                if (test_vec->digest.length < opts->digest_sz)
                        return -1;
        }
        return 0;
}

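/*
 * Entry point. A typical invocation (the flags shown are illustrative;
 * see cperf_options_parse() for the authoritative set) might be:
 *
 *   ./dpdk-test-crypto-perf -l 0-3 --vdev crypto_aesni_mb -- \
 *       --ptest throughput --devtype crypto_aesni_mb \
 *       --optype aead --aead-algo aes-gcm --buffer-sz 64,256,1024
 */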
int
main(int argc, char **argv)
{
        struct cperf_options opts = {0};
        struct cperf_test_vector *t_vec = NULL;
        struct cperf_op_fns op_fns;

        void *ctx[RTE_MAX_LCORE] = { };
        struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

        int nb_cryptodevs = 0;
        uint16_t total_nb_qps = 0;
        uint8_t cdev_id, i;
        uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

        uint8_t buffer_size_idx = 0;

        int ret;
        uint32_t lcore_id;

        /* Initialise DPDK EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
        argc -= ret;
        argv += ret;

        cperf_options_default(&opts);

        ret = cperf_options_parse(&opts, argc, argv);
        if (ret) {
                RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
                goto err;
        }

        ret = cperf_options_check(&opts);
        if (ret) {
                RTE_LOG(ERR, USER1,
                                "Checking one or more user options failed\n");
                goto err;
        }

        nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
                        session_pool_socket);

        if (!opts.silent)
                cperf_options_dump(&opts);

        if (nb_cryptodevs < 1) {
                RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
                                "device type\n");
                nb_cryptodevs = 0;
                goto err;
        }

        ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
                        nb_cryptodevs);
        if (ret) {
                RTE_LOG(ERR, USER1, "Crypto device type does not support "
                                "the requested capabilities\n");
                goto err;
        }

        if (opts.test_file != NULL) {
                t_vec = cperf_test_vector_get_from_file(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified file\n");
                        goto err;
                }

                if (cperf_check_test_vector(&opts, t_vec)) {
                        RTE_LOG(ERR, USER1, "Test vector is incomplete for"
                                        " the requested operation\n");
                        goto err;
                }
        } else {
                t_vec = cperf_test_vector_get_dummy(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified algorithms\n");
                        goto err;
                }
        }

        ret = cperf_get_op_functions(&opts, &op_fns);
        if (ret) {
                RTE_LOG(ERR, USER1, "Failed to find function ops set for the "
                                "specified algorithm combination\n");
                goto err;
        }

        if (!opts.silent)
                show_test_vector(t_vec);

        total_nb_qps = nb_cryptodevs * opts.nb_qps;

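        /*
         * Build one test context per (device, queue pair), one per slave
         * lcore: all queue pairs of a device are claimed before moving
         * on to the next enabled device.
         */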
        i = 0;
        uint8_t qp_id = 0, cdev_index = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cdev_id = enabled_cdevs[cdev_index];

                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                ctx[i] = cperf_testmap[opts.test].constructor(
                                session_pool_socket[socket_id], cdev_id, qp_id,
                                &opts, t_vec, &op_fns);
                if (ctx[i] == NULL) {
                        RTE_LOG(ERR, USER1, "Test run constructor failed\n");
                        goto err;
                }
                qp_id = (qp_id + 1) % opts.nb_qps;
                if (qp_id == 0)
                        cdev_index++;
                i++;
        }

        if (opts.imix_distribution_count != 0) {
                uint8_t buffer_size_count = opts.buffer_size_count;
                uint16_t distribution_total[buffer_size_count];
                uint32_t op_idx;
                uint32_t test_average_size = 0;
                const uint32_t *buffer_size_list = opts.buffer_size_list;
                const uint32_t *imix_distribution_list = opts.imix_distribution_list;

                opts.imix_buffer_sizes = rte_malloc(NULL,
                                        sizeof(uint32_t) * opts.pool_sz,
                                        0);
                if (opts.imix_buffer_sizes == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to allocate IMIX buffer size list\n");
                        goto err;
                }
                /*
                 * Calculate accumulated distribution of
                 * probabilities per packet size
                 */
                distribution_total[0] = imix_distribution_list[0];
                for (i = 1; i < buffer_size_count; i++)
                        distribution_total[i] = imix_distribution_list[i] +
                                distribution_total[i-1];
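                /*
                 * Example: sizes {64, 1518} with weights {7, 3} give
                 * distribution_total = {7, 10}; a draw in [0, 7) selects
                 * 64 bytes and a draw in [7, 10) selects 1518 bytes.
                 */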

                /* Calculate a random sequence of packet sizes, based on distribution */
                for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
                        uint16_t random_number = rte_rand() %
                                distribution_total[buffer_size_count - 1];
                        for (i = 0; i < buffer_size_count; i++)
                                if (random_number < distribution_total[i])
                                        break;

                        opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
                }

                /* Calculate average buffer size for the IMIX distribution */
                for (i = 0; i < buffer_size_count; i++)
                        test_average_size += buffer_size_list[i] *
                                imix_distribution_list[i];

                opts.test_buffer_size = test_average_size /
                                distribution_total[buffer_size_count - 1];
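                /*
                 * With the example weights above this is
                 * (64 * 7 + 1518 * 3) / 10 = 500 bytes.
                 */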

                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;

                        rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                ctx[i], lcore_id);
                        i++;
                }
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;
                        rte_eal_wait_lcore(lcore_id);
                        i++;
                }
        } else {

                /* Get the first size from range or list */
                if (opts.inc_buffer_size != 0)
                        opts.test_buffer_size = opts.min_buffer_size;
                else
                        opts.test_buffer_size = opts.buffer_size_list[0];

                while (opts.test_buffer_size <= opts.max_buffer_size) {
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;

                                rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                        ctx[i], lcore_id);
                                i++;
                        }
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;
                                rte_eal_wait_lcore(lcore_id);
                                i++;
                        }

                        /* Get the next size from range or list */
                        if (opts.inc_buffer_size != 0)
                                opts.test_buffer_size += opts.inc_buffer_size;
                        else {
                                if (++buffer_size_idx == opts.buffer_size_count)
                                        break;
                                opts.test_buffer_size =
                                        opts.buffer_size_list[buffer_size_idx];
                        }
                }
        }

        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);

        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_SUCCESS;

err:
        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (i == total_nb_qps)
                        break;

                if (ctx[i] && cperf_testmap[opts.test].destructor)
                        cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);
        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_FAILURE;
}