/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
#include <rte_cryptodev_scheduler.h>
#endif

#include "cperf.h"
#include "cperf_options.h"
#include "cperf_test_vector_parsing.h"
#include "cperf_test_throughput.h"
#include "cperf_test_latency.h"
#include "cperf_test_verify.h"
#include "cperf_test_pmd_cyclecount.h"


const char *cperf_test_type_strs[] = {
        [CPERF_TEST_TYPE_THROUGHPUT] = "throughput",
        [CPERF_TEST_TYPE_LATENCY] = "latency",
        [CPERF_TEST_TYPE_VERIFY] = "verify",
        [CPERF_TEST_TYPE_PMDCC] = "pmd-cyclecount"
};

const char *cperf_op_type_strs[] = {
        [CPERF_CIPHER_ONLY] = "cipher-only",
        [CPERF_AUTH_ONLY] = "auth-only",
        [CPERF_CIPHER_THEN_AUTH] = "cipher-then-auth",
        [CPERF_AUTH_THEN_CIPHER] = "auth-then-cipher",
        [CPERF_AEAD] = "aead"
};

const struct cperf_test cperf_testmap[] = {
                [CPERF_TEST_TYPE_THROUGHPUT] = {
                                cperf_throughput_test_constructor,
                                cperf_throughput_test_runner,
                                cperf_throughput_test_destructor
                },
                [CPERF_TEST_TYPE_LATENCY] = {
                                cperf_latency_test_constructor,
                                cperf_latency_test_runner,
                                cperf_latency_test_destructor
                },
                [CPERF_TEST_TYPE_VERIFY] = {
                                cperf_verify_test_constructor,
                                cperf_verify_test_runner,
                                cperf_verify_test_destructor
                },
                [CPERF_TEST_TYPE_PMDCC] = {
                                cperf_pmd_cyclecount_test_constructor,
                                cperf_pmd_cyclecount_test_runner,
                                cperf_pmd_cyclecount_test_destructor
                }
};

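/*
 * Initialize and start all requested crypto devices, creating a session
 * mempool on each NUMA socket in use (shared by every device on that
 * socket) and enough queue pairs to give each worker lcore its own.
 */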
static int
cperf_initialize_cryptodev(struct cperf_options *opts, uint8_t *enabled_cdevs,
                        struct rte_mempool *session_pool_socket[])
{
        uint8_t enabled_cdev_count = 0, nb_lcores, cdev_id;
        uint32_t sessions_needed = 0;
        unsigned int i, j;
        int ret;

        enabled_cdev_count = rte_cryptodev_devices_get(opts->device_type,
                        enabled_cdevs, RTE_CRYPTO_MAX_DEVS);
        if (enabled_cdev_count == 0) {
                printf("No crypto devices of type %s available\n",
                                opts->device_type);
                return -EINVAL;
        }

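        /* One lcore is reserved for the application master; only the
         * remaining worker lcores run the tests.
         */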
        nb_lcores = rte_lcore_count() - 1;

        if (nb_lcores < 1) {
                RTE_LOG(ERR, USER1,
                        "Number of enabled cores needs to be higher than 1\n");
                return -EINVAL;
        }

        /* Use fewer devices if more are available than cores. */
        if (enabled_cdev_count > nb_lcores)
                enabled_cdev_count = nb_lcores;

        /*
         * Determine the largest private session size across all devices;
         * the per-socket session mempools created below are shared by
         * all devices and must fit the largest session.
         */
        uint32_t max_sess_size = 0, sess_size;

        for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
                sess_size = rte_cryptodev_sym_get_private_session_size(cdev_id);
                if (sess_size > max_sess_size)
                        max_sess_size = sess_size;
        }

        /*
         * Calculate the number of queue pairs needed per device, based on
         * the number of available logical cores and crypto devices. This
         * is a ceiling division: for instance, with 4 cores and 2 crypto
         * devices, 2 queue pairs will be set up per device.
         */
        opts->nb_qps = (nb_lcores % enabled_cdev_count) ?
                                (nb_lcores / enabled_cdev_count) + 1 :
                                nb_lcores / enabled_cdev_count;

        for (i = 0; i < enabled_cdev_count &&
                        i < RTE_CRYPTO_MAX_DEVS; i++) {
                cdev_id = enabled_cdevs[i];
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                /*
                 * If multi-core scheduler is used, limit the number
                 * of queue pairs to 1, as there is no way to know
                 * how many cores are being used by the PMD, and
                 * how many will be available for the application.
                 */
                if (!strcmp((const char *)opts->device_type, "crypto_scheduler") &&
                                rte_cryptodev_scheduler_mode_get(cdev_id) ==
                                CDEV_SCHED_MODE_MULTICORE)
                        opts->nb_qps = 1;
#endif

                struct rte_cryptodev_info cdev_info;
                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
                /* Range-check the socket_id: negative return values become
                 * large positive ones due to the use of an unsigned type.
                 */
                if (socket_id >= RTE_MAX_NUMA_NODES)
                        socket_id = 0;

                rte_cryptodev_info_get(cdev_id, &cdev_info);
                if (opts->nb_qps > cdev_info.max_nb_queue_pairs) {
                        printf("Number of needed queue pairs is higher "
                                "than the maximum number of queue pairs "
                                "per device.\n");
                        printf("Lower the number of cores or increase "
                                "the number of crypto devices\n");
                        return -EINVAL;
                }
                struct rte_cryptodev_config conf = {
                        .nb_queue_pairs = opts->nb_qps,
                        .socket_id = socket_id
                };

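                /* Each queue pair is created with the descriptor ring
                 * size taken from the command-line options.
                 */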
                struct rte_cryptodev_qp_conf qp_conf = {
                        .nb_descriptors = opts->nb_descriptors
                };

                /*
                 * Device info specifies the min headroom and tailroom
                 * requirement for the crypto PMD. This needs to be
                 * honoured by the application when creating mbufs.
                 */
                if (opts->headroom_sz < cdev_info.min_mbuf_headroom_req) {
                        /* Update headroom */
                        opts->headroom_sz = cdev_info.min_mbuf_headroom_req;
                }
                if (opts->tailroom_sz < cdev_info.min_mbuf_tailroom_req) {
                        /* Update tailroom */
                        opts->tailroom_sz = cdev_info.min_mbuf_tailroom_req;
                }

                /* Update segment size to include headroom & tailroom */
                opts->segment_sz += (opts->headroom_sz + opts->tailroom_sz);

                uint32_t dev_max_nb_sess = cdev_info.sym.max_nb_sessions;
                /*
                 * Two session objects are required for each session
                 * (one for the header, one for the private data)
                 */
                if (!strcmp((const char *)opts->device_type,
                                        "crypto_scheduler")) {
#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
                        uint32_t nb_slaves =
                                rte_cryptodev_scheduler_slaves_get(cdev_id,
                                                                NULL);

                        sessions_needed = 2 * enabled_cdev_count *
                                opts->nb_qps * nb_slaves;
#endif
                } else
                        sessions_needed = 2 * enabled_cdev_count *
                                                opts->nb_qps;

                /*
                 * A single session is required per queue pair
                 * in each device
                 */
                if (dev_max_nb_sess != 0 && dev_max_nb_sess < opts->nb_qps) {
                        RTE_LOG(ERR, USER1,
                                "Device does not support at least "
                                "%u sessions\n", opts->nb_qps);
                        return -ENOTSUP;
                }
                if (session_pool_socket[socket_id] == NULL) {
                        char mp_name[RTE_MEMPOOL_NAMESIZE];
                        struct rte_mempool *sess_mp;

                        snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
                                "sess_mp_%u", socket_id);
                        sess_mp = rte_mempool_create(mp_name,
                                                sessions_needed,
                                                max_sess_size,
                                                0,
                                                0, NULL, NULL, NULL,
                                                NULL, socket_id,
                                                0);

                        if (sess_mp == NULL) {
                                printf("Cannot create session pool on socket %d\n",
                                        socket_id);
                                return -ENOMEM;
                        }

                        printf("Allocated session pool on socket %d\n", socket_id);
                        session_pool_socket[socket_id] = sess_mp;
                }

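                /* Configure the device, bind each queue pair to the
                 * session pool of the device's socket, and start it.
                 */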
                ret = rte_cryptodev_configure(cdev_id, &conf);
                if (ret < 0) {
                        printf("Failed to configure cryptodev %u\n", cdev_id);
                        return -EINVAL;
                }

                for (j = 0; j < opts->nb_qps; j++) {
                        ret = rte_cryptodev_queue_pair_setup(cdev_id, j,
                                &qp_conf, socket_id,
                                session_pool_socket[socket_id]);
                        if (ret < 0) {
                                printf("Failed to setup queue pair %u on "
                                        "cryptodev %u\n", j, cdev_id);
                                return -EINVAL;
                        }
                }

                ret = rte_cryptodev_start(cdev_id);
                if (ret < 0) {
                        printf("Failed to start device %u: error %d\n",
                                        cdev_id, ret);
                        return -EPERM;
                }
        }

        return enabled_cdev_count;
}

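/*
 * Check that every enabled device supports the requested cipher, auth
 * and/or AEAD algorithm together with the key, IV, digest and AAD sizes
 * given on the command line.
 */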
static int
cperf_verify_devices_capabilities(struct cperf_options *opts,
                uint8_t *enabled_cdevs, uint8_t nb_cryptodevs)
{
        struct rte_cryptodev_sym_capability_idx cap_idx;
        const struct rte_cryptodev_symmetric_capability *capability;

        uint8_t i, cdev_id;
        int ret;

        for (i = 0; i < nb_cryptodevs; i++) {

                cdev_id = enabled_cdevs[i];

                if (opts->op_type == CPERF_AUTH_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;
                        cap_idx.algo.auth = opts->auth_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_auth(
                                        capability,
                                        opts->auth_key_sz,
                                        opts->digest_sz,
                                        opts->auth_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_CIPHER_ONLY ||
                                opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                                opts->op_type == CPERF_AUTH_THEN_CIPHER) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
                        cap_idx.algo.cipher = opts->cipher_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_cipher(
                                        capability,
                                        opts->cipher_key_sz,
                                        opts->cipher_iv_sz);
                        if (ret != 0)
                                return ret;
                }

                if (opts->op_type == CPERF_AEAD) {

                        cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD;
                        cap_idx.algo.aead = opts->aead_algo;

                        capability = rte_cryptodev_sym_capability_get(cdev_id,
                                        &cap_idx);
                        if (capability == NULL)
                                return -1;

                        ret = rte_cryptodev_sym_capability_check_aead(
                                        capability,
                                        opts->aead_key_sz,
                                        opts->digest_sz,
                                        opts->aead_aad_sz,
                                        opts->aead_iv_sz);
                        if (ret != 0)
                                return ret;
                }
        }

        return 0;
}

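/*
 * Sanity-check a user-supplied test vector against the options: every
 * field needed by the selected operation type must be present and must
 * match the configured sizes.
 */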
static int
cperf_check_test_vector(struct cperf_options *opts,
                struct cperf_test_vector *test_vec)
{
        if (opts->op_type == CPERF_CIPHER_ONLY) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        /* Cipher IV is only required for some algorithms */
                        if (opts->cipher_iv_sz &&
                                        test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AUTH_ONLY) {
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        /* Auth key is only required for some algorithms */
                        if (opts->auth_key_sz &&
                                        test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }

        } else if (opts->op_type == CPERF_CIPHER_THEN_AUTH ||
                        opts->op_type == CPERF_AUTH_THEN_CIPHER) {
                if (opts->cipher_algo == RTE_CRYPTO_CIPHER_NULL) {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                } else {
                        if (test_vec->plaintext.data == NULL)
                                return -1;
                        if (test_vec->plaintext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->ciphertext.data == NULL)
                                return -1;
                        if (test_vec->ciphertext.length < opts->max_buffer_size)
                                return -1;
                        if (test_vec->cipher_iv.data == NULL)
                                return -1;
                        if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
                                return -1;
                        if (test_vec->cipher_key.data == NULL)
                                return -1;
                        if (test_vec->cipher_key.length != opts->cipher_key_sz)
                                return -1;
                }
                if (opts->auth_algo != RTE_CRYPTO_AUTH_NULL) {
                        if (test_vec->auth_key.data == NULL)
                                return -1;
                        if (test_vec->auth_key.length != opts->auth_key_sz)
                                return -1;
                        if (test_vec->auth_iv.length != opts->auth_iv_sz)
                                return -1;
                        /* Auth IV is only required for some algorithms */
                        if (opts->auth_iv_sz && test_vec->auth_iv.data == NULL)
                                return -1;
                        if (test_vec->digest.data == NULL)
                                return -1;
                        if (test_vec->digest.length < opts->digest_sz)
                                return -1;
                }
        } else if (opts->op_type == CPERF_AEAD) {
                if (test_vec->plaintext.data == NULL)
                        return -1;
                if (test_vec->plaintext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->ciphertext.data == NULL)
                        return -1;
                if (test_vec->ciphertext.length < opts->max_buffer_size)
                        return -1;
                if (test_vec->aead_key.data == NULL)
                        return -1;
                if (test_vec->aead_key.length != opts->aead_key_sz)
                        return -1;
                if (test_vec->aead_iv.data == NULL)
                        return -1;
                if (test_vec->aead_iv.length != opts->aead_iv_sz)
                        return -1;
                if (test_vec->aad.data == NULL)
                        return -1;
                if (test_vec->aad.length != opts->aead_aad_sz)
                        return -1;
                if (test_vec->digest.data == NULL)
                        return -1;
                if (test_vec->digest.length < opts->digest_sz)
                        return -1;
        }
        return 0;
}

int
main(int argc, char **argv)
{
        struct cperf_options opts = {0};
        struct cperf_test_vector *t_vec = NULL;
        struct cperf_op_fns op_fns;

        void *ctx[RTE_MAX_LCORE] = { };
        struct rte_mempool *session_pool_socket[RTE_MAX_NUMA_NODES] = { 0 };

        int nb_cryptodevs = 0;
        uint16_t total_nb_qps = 0;
        uint8_t cdev_id, i;
        uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = { 0 };

        uint8_t buffer_size_idx = 0;

        int ret;
        uint32_t lcore_id;

        /* Initialise DPDK EAL */
        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid EAL arguments!\n");
        argc -= ret;
        argv += ret;

        cperf_options_default(&opts);

        ret = cperf_options_parse(&opts, argc, argv);
        if (ret) {
                RTE_LOG(ERR, USER1, "Parsing one or more user options failed\n");
                goto err;
        }

        ret = cperf_options_check(&opts);
        if (ret) {
                RTE_LOG(ERR, USER1,
                                "Checking one or more user options failed\n");
                goto err;
        }
        nb_cryptodevs = cperf_initialize_cryptodev(&opts, enabled_cdevs,
                        session_pool_socket);

        if (!opts.silent)
                cperf_options_dump(&opts);

        if (nb_cryptodevs < 1) {
                RTE_LOG(ERR, USER1, "Failed to initialise requested crypto "
                                "device type\n");
                nb_cryptodevs = 0;
                goto err;
        }

        ret = cperf_verify_devices_capabilities(&opts, enabled_cdevs,
                        nb_cryptodevs);
        if (ret) {
                RTE_LOG(ERR, USER1, "Crypto device type does not support "
                                "the requested capabilities\n");
                goto err;
        }

        if (opts.test_file != NULL) {
                t_vec = cperf_test_vector_get_from_file(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified file\n");
                        goto err;
                }

                if (cperf_check_test_vector(&opts, t_vec)) {
                        RTE_LOG(ERR, USER1, "Necessary test vectors are "
                                        "incomplete\n");
                        goto err;
                }
        } else {
                t_vec = cperf_test_vector_get_dummy(&opts);
                if (t_vec == NULL) {
                        RTE_LOG(ERR, USER1,
                                        "Failed to create test vector for"
                                        " specified algorithms\n");
                        goto err;
                }
        }

        ret = cperf_get_op_functions(&opts, &op_fns);
        if (ret) {
                RTE_LOG(ERR, USER1, "Failed to find function ops set for "
                                "specified algorithm combination\n");
                goto err;
        }

        if (!opts.silent)
                show_test_vector(t_vec);

        total_nb_qps = nb_cryptodevs * opts.nb_qps;

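        /*
         * Create one test context per worker lcore. Queue pairs are
         * assigned round-robin: once all queue pairs of a device are
         * taken, move on to the next device.
         */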
        i = 0;
        uint8_t qp_id = 0, cdev_index = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cdev_id = enabled_cdevs[cdev_index];

                uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);

                ctx[i] = cperf_testmap[opts.test].constructor(
                                session_pool_socket[socket_id], cdev_id, qp_id,
                                &opts, t_vec, &op_fns);
                if (ctx[i] == NULL) {
                        RTE_LOG(ERR, USER1, "Test run constructor failed\n");
                        goto err;
                }
                qp_id = (qp_id + 1) % opts.nb_qps;
                if (qp_id == 0)
                        cdev_index++;
                i++;
        }

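        /*
         * IMIX mode: pre-compute a randomized sequence of buffer sizes
         * that follows the requested size distribution, then run the
         * test once over that mix.
         */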
        if (opts.imix_distribution_count != 0) {
                uint8_t buffer_size_count = opts.buffer_size_count;
                uint16_t distribution_total[buffer_size_count];
                uint32_t op_idx;
                uint32_t test_average_size = 0;
                const uint32_t *buffer_size_list = opts.buffer_size_list;
                const uint32_t *imix_distribution_list = opts.imix_distribution_list;

                opts.imix_buffer_sizes = rte_malloc(NULL,
                                        sizeof(uint32_t) * opts.pool_sz,
                                        0);
                if (opts.imix_buffer_sizes == NULL) {
                        RTE_LOG(ERR, USER1,
                                "Failed to allocate IMIX buffer size list\n");
                        goto err;
                }
                /*
                 * Calculate accumulated distribution of
                 * probabilities per packet size
                 */
                distribution_total[0] = imix_distribution_list[0];
                for (i = 1; i < buffer_size_count; i++)
                        distribution_total[i] = imix_distribution_list[i] +
                                distribution_total[i-1];

                /* Calculate a random sequence of packet sizes, based on distribution */
                for (op_idx = 0; op_idx < opts.pool_sz; op_idx++) {
                        uint16_t random_number = rte_rand() %
                                distribution_total[buffer_size_count - 1];
                        for (i = 0; i < buffer_size_count; i++)
                                if (random_number < distribution_total[i])
                                        break;

                        opts.imix_buffer_sizes[op_idx] = buffer_size_list[i];
                }

                /* Calculate average buffer size for the IMIX distribution */
                for (i = 0; i < buffer_size_count; i++)
                        test_average_size += buffer_size_list[i] *
                                imix_distribution_list[i];

                opts.test_buffer_size = test_average_size /
                                distribution_total[buffer_size_count - 1];

                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;

                        rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                ctx[i], lcore_id);
                        i++;
                }
                i = 0;
                RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                        if (i == total_nb_qps)
                                break;
                        rte_eal_wait_lcore(lcore_id);
                        i++;
                }
        } else {

                /* Get first size from range or list */
                if (opts.inc_buffer_size != 0)
                        opts.test_buffer_size = opts.min_buffer_size;
                else
                        opts.test_buffer_size = opts.buffer_size_list[0];

                while (opts.test_buffer_size <= opts.max_buffer_size) {
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;

                                rte_eal_remote_launch(cperf_testmap[opts.test].runner,
                                        ctx[i], lcore_id);
                                i++;
                        }
                        i = 0;
                        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                                if (i == total_nb_qps)
                                        break;
                                rte_eal_wait_lcore(lcore_id);
                                i++;
                        }

                        /* Get next size from range or list */
                        if (opts.inc_buffer_size != 0)
                                opts.test_buffer_size += opts.inc_buffer_size;
                        else {
                                if (++buffer_size_idx == opts.buffer_size_count)
                                        break;
                                opts.test_buffer_size =
                                        opts.buffer_size_list[buffer_size_idx];
                        }
                }
        }

        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {

                if (i == total_nb_qps)
                        break;

                cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);

        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_SUCCESS;

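/* Failure path: tear down whatever has been set up so far. */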
err:
        i = 0;
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
                if (i == total_nb_qps)
                        break;

                if (ctx[i] && cperf_testmap[opts.test].destructor)
                        cperf_testmap[opts.test].destructor(ctx[i]);
                i++;
        }

        for (i = 0; i < nb_cryptodevs &&
                        i < RTE_CRYPTO_MAX_DEVS; i++)
                rte_cryptodev_stop(enabled_cdevs[i]);
        rte_free(opts.imix_buffer_sizes);
        free_test_vector(t_vec, &opts);

        printf("\n");
        return EXIT_FAILURE;
}