+ uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+ uint64_t processed = 0, failed_polls = 0, retries = 0;
+ uint64_t tsc_start = 0, tsc_end = 0;
+
+ uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
+
+ struct rte_crypto_op *ops[pparams->burst_size];
+ struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+ struct rte_mbuf *mbufs[pparams->burst_size * 8];
+
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+ static struct rte_cryptodev_sym_session *sess;
+
+ if (rte_cryptodev_count() == 0) {
+ printf("\nNo crypto devices available. Is kernel driver loaded?\n");
+ return TEST_FAILED;
+ }
+
+ /* Create Crypto session*/
+ sess = test_perf_create_aes_sha_session(ts_params->dev_id,
+ pparams->chain, pparams->cipher_algo,
+ pparams->cipher_key_length, pparams->auth_algo);
+ TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+ /* Generate a burst of crypto operations */
+ for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+ mbufs[i] = test_perf_create_pktmbuf(
+ ts_params->mbuf_mp,
+ pparams->buf_size);
+
+ if (mbufs[i] == NULL) {
+ printf("\nFailed to get mbuf - freeing the rest.\n");
+ for (k = 0; k < i; k++)
+ rte_pktmbuf_free(mbufs[k]);
+ return -1;
+ }
+ /* Make room for Digest and IV in mbuf */
+ rte_pktmbuf_append(mbufs[i], digest_length);
+ rte_pktmbuf_prepend(mbufs[i], AES_CIPHER_IV_LENGTH);
+ }
+
+
+ tsc_start = rte_rdtsc_precise();
+
+ while (total_enqueued < pparams->total_operations) {
+ uint16_t burst_size =
+ total_enqueued+pparams->burst_size <= pparams->total_operations ?
+ pparams->burst_size : pparams->total_operations-total_enqueued;
+ uint16_t ops_needed = burst_size-ops_unused;
+
+ if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+ printf("\nFailed to alloc enough ops, finish dequeuing "
+ "and free ops below.");
+ } else {
+ for (i = 0; i < ops_needed; i++)
+ ops[i] = test_perf_set_crypto_op_aes(ops[i],
+ mbufs[i + (pparams->burst_size *
+ (j % NUM_MBUF_SETS))],
+ sess, pparams->buf_size, digest_length);
+
+ /* enqueue burst */
+ burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+ queue_id, ops, burst_size);
+
+ if (burst_enqueued < burst_size)
+ retries++;
+
+ ops_unused = burst_size-burst_enqueued;
+ total_enqueued += burst_enqueued;
+ }
+
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0)
+ failed_polls++;
+ else {
+ processed += burst_dequeued;
+
+ for (l = 0; l < burst_dequeued; l++)
+ rte_crypto_op_free(proc_ops[l]);
+ }
+ j++;
+ }
+
+ /* Dequeue any operations still in the crypto device */
+ while (processed < pparams->total_operations) {
+ /* Sending 0 length burst to flush sw crypto device */
+ rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+ /* dequeue burst */
+ burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+ proc_ops, pparams->burst_size);
+ if (burst_dequeued == 0)
+ failed_polls++;
+ else {
+ processed += burst_dequeued;
+
+ for (m = 0; m < burst_dequeued; m++)
+ rte_crypto_op_free(proc_ops[m]);
+ }
+ }
+
+ tsc_end = rte_rdtsc_precise();
+
+ double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
+ double throughput = (ops_s * pparams->buf_size * 8) / 1000000000;
+
+ printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size, ops_s/1000000,
+ throughput, retries, failed_polls);
+
+ for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+ rte_pktmbuf_free(mbufs[i]);
+ rte_cryptodev_sym_session_free(dev_id, sess);
+
+ printf("\n");
+ return TEST_SUCCESS;
+}
+
+
+/**
+ * Measure SNOW3G cipher-only or hash-only throughput and cycle cost on one
+ * device queue pair, printing one result row per call.
+ *
+ * @param dev_id   cryptodev to enqueue/dequeue on
+ * @param queue_id queue pair index
+ * @param pparams  test parameters: chain mode, algos, buf_size, burst_size,
+ *                 total_operations
+ * @return TEST_SUCCESS on completion; TEST_FAILED/-1/1 on the error paths
+ */
+static int
+test_perf_snow3g(uint8_t dev_id, uint16_t queue_id,
+		struct perf_test_params *pparams)
+{
+	uint16_t i, k, l, m;
+	uint16_t j = 0;
+	/* Ops accepted from the mempool but rejected by enqueue last round;
+	 * they are retried before allocating new ones. */
+	uint16_t ops_unused = 0;
+	uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+	uint64_t processed = 0, failed_polls = 0, retries = 0;
+	uint64_t tsc_start = 0, tsc_end = 0;
+
+	uint16_t digest_length = get_auth_digest_length(pparams->auth_algo);
+
+	struct rte_crypto_op *ops[pparams->burst_size];
+	struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+	/* NUM_MBUF_SETS buffer sets are cycled to avoid cache-hot reuse */
+	struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+	static struct rte_cryptodev_sym_session *sess;
+
+	if (rte_cryptodev_count() == 0) {
+		printf("\nNo crypto devices found. Is PMD build configured?\n");
+		printf("\nAnd is kernel driver loaded for HW PMDs?\n");
+		return TEST_FAILED;
+	}
+
+	/* Create Crypto session*/
+	sess = test_perf_create_snow3g_session(ts_params->dev_id,
+			pparams->chain, pparams->cipher_algo,
+			pparams->cipher_key_length, pparams->auth_algo);
+	TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+	/* Generate a burst of crypto operations */
+	for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+		/*
+		 * Buffer size + iv/aad len is allocated, for perf tests they
+		 * are equal + digest len.
+		 */
+		mbufs[i] = test_perf_create_pktmbuf(
+				ts_params->mbuf_mp,
+				pparams->buf_size + SNOW3G_CIPHER_IV_LENGTH +
+				digest_length);
+
+		if (mbufs[i] == NULL) {
+			printf("\nFailed to get mbuf - freeing the rest.\n");
+			for (k = 0; k < i; k++)
+				rte_pktmbuf_free(mbufs[k]);
+			return -1;
+		}
+
+	}
+
+	tsc_start = rte_rdtsc_precise();
+
+	while (total_enqueued < pparams->total_operations) {
+		/* Clamp the final burst so exactly total_operations are sent */
+		uint16_t burst_size =
+			(total_enqueued+pparams->burst_size)
+					<= pparams->total_operations ?
+			pparams->burst_size : pparams->total_operations-total_enqueued;
+		uint16_t ops_needed = burst_size-ops_unused;
+		/* Handle the last burst correctly */
+		uint16_t op_offset = pparams->burst_size - burst_size;
+
+		if (ops_needed !=
+				rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+						RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+						ops+op_offset, ops_needed)) {
+			printf("\nFailed to alloc enough ops.");
+			/*Don't exit, dequeue, more ops should become available*/
+		} else {
+			for (i = 0; i < ops_needed; i++) {
+				if (pparams->chain == HASH_ONLY)
+					ops[i+op_offset] =
+						test_perf_set_crypto_op_snow3g_hash(ops[i+op_offset],
+						mbufs[i +
+						(pparams->burst_size * (j % NUM_MBUF_SETS))],
+						sess,
+						pparams->buf_size, digest_length);
+				else if (pparams->chain == CIPHER_ONLY)
+					ops[i+op_offset] =
+						test_perf_set_crypto_op_snow3g_cipher(ops[i+op_offset],
+						mbufs[i +
+						(pparams->burst_size * (j % NUM_MBUF_SETS))],
+						sess,
+						pparams->buf_size);
+				else
+					/* NOTE(review): this early return leaks
+					 * the mbufs, the session and the ops
+					 * bulk-allocated above - confirm that
+					 * callers only ever pass HASH_ONLY or
+					 * CIPHER_ONLY here. */
+					return 1;
+			}
+
+			/* enqueue burst */
+			burst_enqueued =
+				rte_cryptodev_enqueue_burst(dev_id, queue_id,
+						ops+op_offset, burst_size);
+
+			if (burst_enqueued < burst_size)
+				retries++;
+
+			/* Ops the device refused are retried next iteration */
+			ops_unused = burst_size-burst_enqueued;
+			total_enqueued += burst_enqueued;
+		}
+
+		/* dequeue burst */
+		burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+				proc_ops, pparams->burst_size);
+		if (burst_dequeued == 0) {
+			failed_polls++;
+		} else {
+			processed += burst_dequeued;
+			for (l = 0; l < burst_dequeued; l++)
+				rte_crypto_op_free(proc_ops[l]);
+		}
+		j++;
+	}
+
+	/* Dequeue any operations still in the crypto device */
+	while (processed < pparams->total_operations) {
+		/* Sending 0 length burst to flush sw crypto device */
+		rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+		/* dequeue burst */
+		burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+				proc_ops, pparams->burst_size);
+		if (burst_dequeued == 0)
+			failed_polls++;
+		else {
+			processed += burst_dequeued;
+			for (m = 0; m < burst_dequeued; m++)
+				rte_crypto_op_free(proc_ops[m]);
+		}
+	}
+
+	tsc_end = rte_rdtsc_precise();
+
+	/* Ops/s from the TSC delta, then per-burst / per-buffer / per-byte
+	 * cycle costs; throughput here is in Mbps (divisor 1e6). */
+	double ops_s = ((double)processed / (tsc_end - tsc_start)) * rte_get_tsc_hz();
+	double cycles_burst = (double) (tsc_end - tsc_start) /
+					(double) processed * pparams->burst_size;
+	double cycles_buff = (double) (tsc_end - tsc_start) / (double) processed;
+	double cycles_B = cycles_buff / pparams->buf_size;
+	double throughput = (ops_s * pparams->buf_size * 8) / 1000000;
+
+	if (gbl_cryptodev_perftest_devtype == RTE_CRYPTODEV_QAT_SYM_PMD) {
+		/* Cycle count misleading on HW devices for this test, so don't print */
+		printf("%4u\t%6.2f\t%10.2f\t n/a \t\t n/a "
+			"\t\t n/a \t\t%8"PRIu64"\t%8"PRIu64,
+			pparams->buf_size, ops_s/1000000,
+			throughput, retries, failed_polls);
+	} else {
+		printf("%4u\t%6.2f\t%10.2f\t%10.2f\t%8.2f"
+			"\t%8.2f\t%8"PRIu64"\t%8"PRIu64,
+			pparams->buf_size, ops_s/1000000, throughput, cycles_burst,
+			cycles_buff, cycles_B, retries, failed_polls);
+	}
+
+	for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+		rte_pktmbuf_free(mbufs[i]);
+	rte_cryptodev_sym_session_free(dev_id, sess);
+
+	printf("\n");
+	return TEST_SUCCESS;
+}
+
+/**
+ * Measure OpenSSL-PMD chained cipher/hash throughput on one queue pair.
+ * The per-cipher op-setup helper is selected from pparams->cipher_algo.
+ *
+ * @param dev_id   cryptodev to enqueue/dequeue on
+ * @param queue_id queue pair index
+ * @param pparams  test parameters: chain mode, algos, buf_size, burst_size,
+ *                 total_operations
+ * @return TEST_SUCCESS on completion; TEST_FAILED/-1 on the error paths
+ */
+static int
+test_perf_openssl(uint8_t dev_id, uint16_t queue_id,
+		struct perf_test_params *pparams)
+{
+	uint16_t i, k, l, m;
+	uint16_t j = 0;
+	/* Ops allocated but not accepted by enqueue last round; retried
+	 * before allocating new ones. */
+	uint16_t ops_unused = 0;
+
+	uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+	uint64_t processed = 0, failed_polls = 0, retries = 0;
+	uint64_t tsc_start = 0, tsc_end = 0;
+
+	unsigned int digest_length = get_auth_digest_length(pparams->auth_algo);
+
+	struct rte_crypto_op *ops[pparams->burst_size];
+	struct rte_crypto_op *proc_ops[pparams->burst_size];
+
+	/* NUM_MBUF_SETS buffer sets are cycled to avoid cache-hot reuse */
+	struct rte_mbuf *mbufs[pparams->burst_size * NUM_MBUF_SETS];
+
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+
+	static struct rte_cryptodev_sym_session *sess;
+
+	/* Per-algorithm op-setup callback, chosen once up front */
+	static struct rte_crypto_op *(*test_perf_set_crypto_op)
+			(struct rte_crypto_op *, struct rte_mbuf *,
+					struct rte_cryptodev_sym_session *,
+					unsigned int, unsigned int);
+
+	switch (pparams->cipher_algo) {
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+	case RTE_CRYPTO_CIPHER_3DES_CTR:
+		test_perf_set_crypto_op = test_perf_set_crypto_op_3des;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		test_perf_set_crypto_op = test_perf_set_crypto_op_aes;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_GCM:
+		test_perf_set_crypto_op = test_perf_set_crypto_op_aes_gcm;
+		break;
+	default:
+		return TEST_FAILED;
+	}
+
+	if (rte_cryptodev_count() == 0) {
+		printf("\nNo crypto devices found. Is PMD build configured?\n");
+		return TEST_FAILED;
+	}
+
+	/* Create Crypto session*/
+	sess = test_perf_create_openssl_session(ts_params->dev_id,
+			pparams->chain, pparams->cipher_algo,
+			pparams->cipher_key_length, pparams->auth_algo);
+	TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+	/* Generate a burst of crypto operations */
+	for (i = 0; i < (pparams->burst_size * NUM_MBUF_SETS); i++) {
+		mbufs[i] = test_perf_create_pktmbuf(
+				ts_params->mbuf_mp,
+				pparams->buf_size);
+
+		if (mbufs[i] == NULL) {
+			printf("\nFailed to get mbuf - freeing the rest.\n");
+			for (k = 0; k < i; k++)
+				rte_pktmbuf_free(mbufs[k]);
+			return -1;
+		}
+	}
+
+	tsc_start = rte_rdtsc_precise();
+
+	while (total_enqueued < pparams->total_operations) {
+		/* Clamp the final burst so exactly total_operations are sent */
+		uint16_t burst_size =
+			total_enqueued + pparams->burst_size <=
+			pparams->total_operations ? pparams->burst_size :
+			pparams->total_operations - total_enqueued;
+		uint16_t ops_needed = burst_size - ops_unused;
+
+		if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+				RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+			printf("\nFailed to alloc enough ops, finish dequeuing "
+				"and free ops below.");
+		} else {
+			for (i = 0; i < ops_needed; i++)
+				ops[i] = test_perf_set_crypto_op(ops[i],
+					mbufs[i + (pparams->burst_size *
+						(j % NUM_MBUF_SETS))],
+					sess, pparams->buf_size, digest_length);
+
+			/* enqueue burst */
+			burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+					queue_id, ops, burst_size);
+
+			if (burst_enqueued < burst_size)
+				retries++;
+
+			ops_unused = burst_size - burst_enqueued;
+			total_enqueued += burst_enqueued;
+		}
+
+		/* dequeue burst */
+		burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+				proc_ops, pparams->burst_size);
+		if (burst_dequeued == 0)
+			failed_polls++;
+		else {
+			processed += burst_dequeued;
+
+			for (l = 0; l < burst_dequeued; l++)
+				rte_crypto_op_free(proc_ops[l]);
+		}
+		j++;
+	}
+
+	/* Dequeue any operations still in the crypto device */
+	while (processed < pparams->total_operations) {
+		/* Sending 0 length burst to flush sw crypto device */
+		rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+		/* dequeue burst */
+		burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+				proc_ops, pparams->burst_size);
+		if (burst_dequeued == 0)
+			failed_polls++;
+		else {
+			processed += burst_dequeued;
+
+			for (m = 0; m < burst_dequeued; m++)
+				rte_crypto_op_free(proc_ops[m]);
+		}
+	}
+
+	tsc_end = rte_rdtsc_precise();
+
+	double ops_s = ((double)processed / (tsc_end - tsc_start))
+			* rte_get_tsc_hz();
+	/* Fix: Gbps = ops/s * bytes * 8 bits/byte / 1e9.  The previous
+	 * multiplier NUM_MBUF_SETS was a copy/paste slip; this now matches
+	 * the Gbps formula used by the AES-SHA perf loop. */
+	double throughput = (ops_s * pparams->buf_size * 8)
+			/ 1000000000;
+
+	printf("\t%u\t%6.2f\t%10.2f\t%8"PRIu64"\t%8"PRIu64, pparams->buf_size,
+			ops_s / 1000000, throughput, retries, failed_polls);
+
+	for (i = 0; i < pparams->burst_size * NUM_MBUF_SETS; i++)
+		rte_pktmbuf_free(mbufs[i]);
+	rte_cryptodev_sym_session_free(dev_id, sess);
+
+	printf("\n");
+	return TEST_SUCCESS;
+}
+
+/*
+
+ perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA1);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_256);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 16, CBC, SHA_512);
+
+ perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA1);
+ perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_256);
+ perf_test_aes_sha("avx2", CIPHER_HASH, 32, CBC, SHA_512);
+
+ perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA1);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_256);
+ perf_test_aes_sha("avx2", HASH_CIPHER, 32, CBC, SHA_512);
+ */
+/*
+ * Sweep AES-CBC + HMAC-SHA{1,256,512}, with 128- and 256-bit cipher keys,
+ * over a range of buffer sizes at a fixed burst size; one throughput table
+ * is printed per parameter combination.
+ */
+static int
+test_perf_aes_cbc_encrypt_digest_vary_pkt_size(void)
+{
+	unsigned total_operations = 1000000;
+	unsigned burst_size = 32;
+	unsigned buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536, 1792, 2048 };
+	uint8_t i, j;
+
+	/* One entry per cipher-key-size / auth-algo combination under test;
+	 * total_operations and burst_size are filled in below. */
+	struct perf_test_params params_set[] = {
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+			.cipher_key_length = 32,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+			.cipher_key_length = 32,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA256_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CBC,
+			.cipher_key_length = 32,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA512_HMAC
+		},
+	};
+
+	for (i = 0; i < RTE_DIM(params_set); i++) {
+
+		params_set[i].total_operations = total_operations;
+		params_set[i].burst_size = burst_size;
+		printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+			" burst_size: %d ops\n",
+			chain_mode_name(params_set[i].chain),
+			cipher_algo_name(params_set[i].cipher_algo),
+			auth_algo_name(params_set[i].auth_algo),
+			params_set[i].cipher_key_length,
+			burst_size);
+		printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
+			"Retries\tEmptyPolls\n");
+		for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+			params_set[i].buf_size = buf_lengths[j];
+			test_perf_aes_sha(testsuite_params.dev_id, 0,
+					&params_set[i]);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Sweep SNOW3G UEA2 (cipher-only) and UIA2 (hash-only) over a range of
+ * buffer sizes for each configured burst size, printing throughput and
+ * cycle-cost tables via test_perf_snow3g().
+ */
+static int
+test_perf_snow3G_vary_pkt_size(void)
+{
+	unsigned total_operations = 1000000;
+	uint8_t i, j;
+	unsigned k;
+	uint16_t burst_sizes[] = { 64 };
+	uint16_t buf_lengths[] = { 40, 64, 80, 120, 240, 256, 400, 512, 600, 1024, 2048 };
+
+	struct perf_test_params params_set[] = {
+		{
+			.chain = CIPHER_ONLY,
+			.cipher_algo  = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+			.cipher_key_length = 16,
+			.auth_algo  = RTE_CRYPTO_AUTH_NULL,
+		},
+		{
+			.chain = HASH_ONLY,
+			.cipher_algo = RTE_CRYPTO_CIPHER_NULL,
+			.auth_algo  = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+			.cipher_key_length = 16
+		},
+	};
+
+	printf("\n\nStart %s.", __func__);
+	printf("\nTest to measure max throughput at various pkt sizes.");
+	printf("\nOn HW devices t'put maximised when high Retries and EmptyPolls"
+			" so cycle cost not relevant (n/a displayed).");
+
+	for (i = 0; i < RTE_DIM(params_set); i++) {
+		printf("\n\n");
+		params_set[i].total_operations = total_operations;
+		for (k = 0; k < RTE_DIM(burst_sizes); k++) {
+			printf("\nOn %s dev%u qp%u, %s, "
+				"cipher algo:%s, auth algo:%s, burst_size: %d ops",
+				pmd_name(gbl_cryptodev_perftest_devtype),
+				testsuite_params.dev_id, 0,
+				chain_mode_name(params_set[i].chain),
+				cipher_algo_name(params_set[i].cipher_algo),
+				auth_algo_name(params_set[i].auth_algo),
+				burst_sizes[k]);
+
+			params_set[i].burst_size = burst_sizes[k];
+			printf("\nPktSzB\tOp/s(M)\tThruput(Mbps)\tCycles/Burst\t"
+				"Cycles/buf\tCycles/B\tRetries\t\tEmptyPolls\n");
+			for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+
+				params_set[i].buf_size = buf_lengths[j];
+
+				test_perf_snow3g(testsuite_params.dev_id, 0, &params_set[i]);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Sweep OpenSSL-PMD chained cipher+hash combinations (3DES, AES-CTR,
+ * AES-GCM) over a range of buffer sizes at a fixed burst size, printing one
+ * throughput table per combination via test_perf_openssl().
+ */
+static int
+test_perf_openssl_vary_pkt_size(void)
+{
+	unsigned int total_operations = 10000;
+	/* Braced scalar initializer - equivalent to plain "= 64" */
+	unsigned int burst_size = { 64 };
+	unsigned int buf_lengths[] = { 64, 128, 256, 512, 768, 1024, 1280, 1536,
+			1792, 2048 };
+	uint8_t i, j;
+
+	struct perf_test_params params_set[] = {
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+			.cipher_key_length = 24,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+			.cipher_key_length = 32,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+			.cipher_key_length = 24,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_GCM,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_AES_GCM
+		},
+	};
+
+	for (i = 0; i < RTE_DIM(params_set); i++) {
+		params_set[i].total_operations = total_operations;
+		params_set[i].burst_size = burst_size;
+		printf("\n%s. cipher algo: %s auth algo: %s cipher key size=%u."
+			" burst_size: %d ops\n",
+			chain_mode_name(params_set[i].chain),
+			cipher_algo_name(params_set[i].cipher_algo),
+			auth_algo_name(params_set[i].auth_algo),
+			params_set[i].cipher_key_length,
+			burst_size);
+		printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\tRetries\t"
+			"EmptyPolls\n");
+		for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+			params_set[i].buf_size = buf_lengths[j];
+			test_perf_openssl(testsuite_params.dev_id, 0,
+					&params_set[i]);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Measure average IA cycle cost for OpenSSL-PMD cipher+hash combinations at
+ * a fixed packet size, delegating the burst-size sweep to
+ * test_perf_openssl_optimise_cyclecount() for each parameter set.
+ */
+static int
+test_perf_openssl_vary_burst_size(void)
+{
+	unsigned int total_operations = 4096;
+	uint16_t buf_lengths[] = { 40 };
+	uint8_t i, j;
+
+	/* burst_size is left unset here; presumably the cyclecount helper
+	 * varies it internally - TODO confirm. */
+	struct perf_test_params params_set[] = {
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CBC,
+			.cipher_key_length = 24,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_CTR,
+			.cipher_key_length = 32,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_3DES_CTR,
+			.cipher_key_length = 24,
+			.auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC
+		},
+		{
+			.chain = CIPHER_HASH,
+
+			.cipher_algo  = RTE_CRYPTO_CIPHER_AES_GCM,
+			.cipher_key_length = 16,
+			.auth_algo = RTE_CRYPTO_AUTH_AES_GCM
+		},
+	};
+
+	printf("\n\nStart %s.", __func__);
+	printf("\nThis Test measures the average IA cycle cost using a "
+			"constant request(packet) size. ");
+	printf("Cycle cost is only valid when indicators show device is not"
+			" busy, i.e. Retries and EmptyPolls = 0");
+
+	for (i = 0; i < RTE_DIM(params_set); i++) {
+		printf("\n");
+		params_set[i].total_operations = total_operations;
+
+		for (j = 0; j < RTE_DIM(buf_lengths); j++) {
+			params_set[i].buf_size = buf_lengths[j];
+			test_perf_openssl_optimise_cyclecount(&params_set[i]);
+		}
+	}
+
+	return 0;
+}
+
+/* Thin wrapper: run the AES-CBC burst-size sweep on the suite's device. */
+static int
+test_perf_aes_cbc_vary_burst_size(void)
+{
+	return test_perf_crypto_qp_vary_burst_size(testsuite_params.dev_id);
+}
+
+
+/**
+ * Build a chained cipher+auth session from pparams->session_attrs.
+ * Keys are copied into local buffers before being referenced by the xforms.
+ *
+ * @return the created session, or NULL on failure.
+ */
+static struct rte_cryptodev_sym_session *
+test_perf_create_session(uint8_t dev_id, struct perf_test_params *pparams)
+{
+	static struct rte_cryptodev_sym_session *sess;
+	struct rte_crypto_sym_xform cipher_xform = { 0 };
+	struct rte_crypto_sym_xform auth_xform = { 0 };
+
+	uint8_t cipher_key[pparams->session_attrs->key_cipher_len];
+	uint8_t auth_key[pparams->session_attrs->key_auth_len];
+
+	memcpy(cipher_key, pparams->session_attrs->key_cipher_data,
+		 pparams->session_attrs->key_cipher_len);
+	memcpy(auth_key, pparams->session_attrs->key_auth_data,
+		 pparams->session_attrs->key_auth_len);
+
+	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	cipher_xform.next = NULL;
+
+	cipher_xform.cipher.algo = pparams->session_attrs->cipher_algorithm;
+	cipher_xform.cipher.op = pparams->session_attrs->cipher;
+	cipher_xform.cipher.key.data = cipher_key;
+	cipher_xform.cipher.key.length = pparams->session_attrs->key_cipher_len;
+
+	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	auth_xform.next = NULL;
+
+	auth_xform.auth.op = pparams->session_attrs->auth;
+	auth_xform.auth.algo = pparams->session_attrs->auth_algorithm;
+
+	auth_xform.auth.digest_length = pparams->session_attrs->digest_len;
+	/* Fix: auth_key was copied above but never attached, leaving
+	 * key.data NULL while key.length was set. */
+	auth_xform.auth.key.data = auth_key;
+	auth_xform.auth.key.length = pparams->session_attrs->key_auth_len;
+
+
+	/* NOTE(review): the op is forced to ENCRYPT here, which overrides the
+	 * session_attrs value set above and makes the else branch below dead
+	 * code - confirm DECRYPT was never intended before removing it. */
+	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	if (cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		cipher_xform.next = &auth_xform;
+		sess = rte_cryptodev_sym_session_create(dev_id,
+				&cipher_xform);
+	} else {
+		auth_xform.next = &cipher_xform;
+		sess = rte_cryptodev_sym_session_create(dev_id,
+				&auth_xform);
+	}
+
+	return sess;
+}
+
+/**
+ * Attach a session to an AES-GCM op and wire up digest/AAD/IV pointers and
+ * data offsets for an mbuf laid out as AAD | padded IV | plaintext | digest
+ * (the layout produced by test_perf_create_pktmbuf_fill()).
+ *
+ * @return op on success; NULL (op freed) if the session cannot be attached.
+ */
+static inline struct rte_crypto_op *
+perf_gcm_set_crypto_op(struct rte_crypto_op *op, struct rte_mbuf *m,
+		struct rte_cryptodev_sym_session *sess,
+		struct crypto_params *m_hlp,
+		struct perf_test_params *params)
+{
+	if (rte_crypto_op_attach_sym_session(op, sess) != 0) {
+		rte_crypto_op_free(op);
+		return NULL;
+	}
+
+	/* IV region is padded up to a 16-byte boundary inside the mbuf */
+	uint16_t iv_pad_len = ALIGN_POW2_ROUNDUP(params->symmetric_op->iv_len,
+			16);
+
+	/* Digest sits after AAD + padded IV + plaintext */
+	op->sym->auth.digest.data = m_hlp->digest;
+	op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(
+					m,
+					params->symmetric_op->aad_len +
+					iv_pad_len +
+					params->symmetric_op->p_len);
+
+	op->sym->auth.digest.length = params->symmetric_op->t_len;
+
+	op->sym->auth.aad.data = m_hlp->aad;
+	op->sym->auth.aad.length = params->symmetric_op->aad_len;
+	op->sym->auth.aad.phys_addr = rte_pktmbuf_mtophys_offset(
+					m,
+					iv_pad_len);
+
+	rte_memcpy(op->sym->auth.aad.data, params->symmetric_op->aad_data,
+			params->symmetric_op->aad_len);
+
+	op->sym->cipher.iv.data = m_hlp->iv;
+	rte_memcpy(op->sym->cipher.iv.data, params->symmetric_op->iv_data,
+			params->symmetric_op->iv_len);
+	/* 96-bit GCM IV: set initial 32-bit counter block to 1 */
+	if (params->symmetric_op->iv_len == 12)
+		op->sym->cipher.iv.data[15] = 1;
+
+	op->sym->cipher.iv.length = params->symmetric_op->iv_len;
+
+	/* Cipher/auth both cover the plaintext, which starts after IV + AAD */
+	op->sym->auth.data.offset =
+			iv_pad_len + params->symmetric_op->aad_len;
+	op->sym->auth.data.length = params->symmetric_op->p_len;
+
+	op->sym->cipher.data.offset =
+			iv_pad_len + params->symmetric_op->aad_len;
+	op->sym->cipher.data.length = params->symmetric_op->p_len;
+
+	op->sym->m_src = m;
+
+	return op;
+}
+
+/**
+ * Allocate an mbuf and lay out AAD | padded IV | plaintext | digest,
+ * recording the pointer to each region in m_hlp for later op setup.
+ *
+ * @param mpool  mbuf mempool to allocate from
+ * @param params supplies AAD/IV/digest lengths and the plaintext source
+ * @param buf_sz plaintext length to copy in
+ * @param m_hlp  out: aad/iv/digest region pointers
+ * @return the prepared mbuf, or NULL on allocation/append failure.
+ */
+static struct rte_mbuf *
+test_perf_create_pktmbuf_fill(struct rte_mempool *mpool,
+		struct perf_test_params *params,
+		unsigned buf_sz, struct crypto_params *m_hlp)
+{
+	struct rte_mbuf *m = rte_pktmbuf_alloc(mpool);
+	uint16_t iv_pad_len =
+			ALIGN_POW2_ROUNDUP(params->symmetric_op->iv_len, 16);
+	uint16_t aad_len = params->symmetric_op->aad_len;
+	uint16_t digest_size = params->symmetric_op->t_len;
+	char *p;
+
+	/* Fix: a failed allocation previously fell straight through into
+	 * rte_pktmbuf_append(NULL, ...). */
+	if (m == NULL)
+		return NULL;
+
+	p = rte_pktmbuf_append(m, aad_len);
+	if (p == NULL) {
+		rte_pktmbuf_free(m);
+		return NULL;
+	}
+	m_hlp->aad = (uint8_t *)p;
+
+	p = rte_pktmbuf_append(m, iv_pad_len);
+	if (p == NULL) {
+		rte_pktmbuf_free(m);
+		return NULL;
+	}
+	m_hlp->iv = (uint8_t *)p;
+
+	p = rte_pktmbuf_append(m, buf_sz);
+	if (p == NULL) {
+		rte_pktmbuf_free(m);
+		return NULL;
+	}
+	rte_memcpy(p, params->symmetric_op->p_data, buf_sz);
+
+	p = rte_pktmbuf_append(m, digest_size);
+	if (p == NULL) {
+		rte_pktmbuf_free(m);
+		return NULL;
+	}
+	m_hlp->digest = (uint8_t *)p;
+
+	return m;
+}
+
+/**
+ * AES-GCM perf/verification loop on one queue pair.
+ *
+ * @param dev_id   cryptodev to enqueue/dequeue on
+ * @param queue_id queue pair index
+ * @param pparams  test parameters (session attrs, symmetric op data, sizes)
+ * @param test_ops when non-zero, overrides total_operations and verifies
+ *                 ciphertext and tag of dequeued ops instead of printing a
+ *                 perf result row
+ * @return 0 on success, TEST_FAILED if no device is present
+ */
+static int
+perf_AES_GCM(uint8_t dev_id, uint16_t queue_id,
+		struct perf_test_params *pparams, uint32_t test_ops)
+{
+	int j = 0;
+	struct crypto_testsuite_params *ts_params = &testsuite_params;
+	struct rte_cryptodev_sym_session *sess;
+	struct rte_crypto_op *ops[pparams->burst_size];
+	struct rte_crypto_op *proc_ops[pparams->burst_size];
+	uint32_t total_operations = pparams->total_operations;
+
+	uint64_t burst_enqueued = 0, total_enqueued = 0, burst_dequeued = 0;
+	uint64_t processed = 0, failed_polls = 0, retries = 0;
+	uint64_t tsc_start = 0, tsc_end = 0;
+
+	uint16_t i = 0, l = 0, m = 0;
+	/* NUM_MBUF_SETS buffer sets are cycled to avoid cache-hot reuse */
+	uint16_t burst = pparams->burst_size * NUM_MBUF_SETS;
+	uint16_t ops_unused = 0;
+
+	struct rte_mbuf *mbufs[burst];
+	struct crypto_params m_hlp[burst];
+
+	if (rte_cryptodev_count() == 0) {
+		printf("\nNo crypto devices available. "
+				"Is kernel driver loaded?\n");
+		return TEST_FAILED;
+	}
+
+	sess = test_perf_create_session(dev_id, pparams);
+	TEST_ASSERT_NOT_NULL(sess, "Session creation failed");
+
+	/* NOTE(review): mbufs[i] is not checked for NULL here; a failed
+	 * allocation would crash later in perf_gcm_set_crypto_op - confirm
+	 * pool sizing guarantees success. */
+	for (i = 0; i < burst; i++) {
+		mbufs[i] = test_perf_create_pktmbuf_fill(
+				ts_params->mbuf_mp,
+				pparams, pparams->symmetric_op->p_len,
+				&m_hlp[i]);
+	}
+
+	if (test_ops)
+		total_operations = test_ops;
+
+	tsc_start = rte_rdtsc_precise();
+	while (total_enqueued < total_operations) {
+		/* Clamp the final burst so exactly total_operations are sent */
+		uint16_t burst_size =
+		total_enqueued+pparams->burst_size <= total_operations ?
+		pparams->burst_size : total_operations-total_enqueued;
+		/* Re-use ops the device refused last round before new allocs */
+		uint16_t ops_needed = burst_size-ops_unused;
+
+		if (ops_needed != rte_crypto_op_bulk_alloc(ts_params->op_mpool,
+				RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, ops_needed)){
+			printf("\nFailed to alloc enough ops, "
+					"finish dequeuing");
+		} else {
+			for (i = 0; i < ops_needed; i++)
+				ops[i] = perf_gcm_set_crypto_op(ops[i],
+					mbufs[i + (pparams->burst_size *
+						(j % NUM_MBUF_SETS))],
+					sess, &m_hlp[i + (pparams->burst_size *
+						(j % NUM_MBUF_SETS))], pparams);
+
+			/* enqueue burst */
+			burst_enqueued = rte_cryptodev_enqueue_burst(dev_id,
+					queue_id, ops, burst_size);
+
+			if (burst_enqueued < burst_size)
+				retries++;
+
+			ops_unused = burst_size-burst_enqueued;
+			total_enqueued += burst_enqueued;
+		}
+
+		/* dequeue burst */
+		burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+				proc_ops, pparams->burst_size);
+		if (burst_dequeued == 0)
+			failed_polls++;
+		else {
+			processed += burst_dequeued;
+
+			for (l = 0; l < burst_dequeued; l++)
+				rte_crypto_op_free(proc_ops[l]);
+		}
+
+		j++;
+	}
+
+	/* Dequeue any operations still in the crypto device */
+	while (processed < total_operations) {
+		/* Sending 0 length burst to flush sw crypto device */
+		rte_cryptodev_enqueue_burst(dev_id, queue_id, NULL, 0);
+
+		/* dequeue burst */
+		burst_dequeued = rte_cryptodev_dequeue_burst(dev_id, queue_id,
+				proc_ops, pparams->burst_size);
+		if (burst_dequeued == 0)
+			failed_polls++;
+		else {
+			processed += burst_dequeued;
+
+			/* In verification mode, compare ciphertext and tag of
+			 * each dequeued op against the known-answer vectors */
+			for (m = 0; m < burst_dequeued; m++) {
+				if (test_ops) {
+					uint16_t iv_pad_len = ALIGN_POW2_ROUNDUP
+						(pparams->symmetric_op->iv_len, 16);
+					uint8_t *pkt = rte_pktmbuf_mtod(
+							proc_ops[m]->sym->m_src,
+							uint8_t *);
+
+					TEST_ASSERT_BUFFERS_ARE_EQUAL(
+						pparams->symmetric_op->c_data,
+						pkt + iv_pad_len +
+						pparams->symmetric_op->aad_len,
+						pparams->symmetric_op->c_len,
+						"GCM Ciphertext data not as expected");
+
+					TEST_ASSERT_BUFFERS_ARE_EQUAL(
+						pparams->symmetric_op->t_data,
+						pkt + iv_pad_len +
+						pparams->symmetric_op->aad_len +
+						pparams->symmetric_op->c_len,
+						pparams->symmetric_op->t_len,
+						"GCM MAC data not as expected");
+
+				}
+				rte_crypto_op_free(proc_ops[m]);
+			}
+		}
+	}
+
+	tsc_end = rte_rdtsc_precise();
+
+	double ops_s = ((double)processed / (tsc_end - tsc_start))
+			* rte_get_tsc_hz();
+	double throughput = (ops_s * pparams->symmetric_op->p_len * 8)
+			/ 1000000000;
+
+	if (!test_ops) {
+		printf("\n%u\t\t%6.2f\t%16.2f\t%8"PRIu64"\t%10"PRIu64,
+		pparams->symmetric_op->p_len,
+		ops_s/1000000, throughput, retries, failed_polls);
+	}
+
+	for (i = 0; i < burst; i++)
+		rte_pktmbuf_free(mbufs[i]);
+	rte_cryptodev_sym_session_free(dev_id, sess);
+
+	return 0;
+}
+
+/**
+ * Drive the AES-GCM perf matrix: build session/op parameter sets from the
+ * known-answer vectors, run each case once in verification mode and then in
+ * perf mode.
+ *
+ * @param continual_buf_len when non-zero, restrict the sweep to this single
+ *                          buffer length (must appear in buf_lengths)
+ * @param continual_size    number of repeated perf loops in continual mode
+ * @return 0 on success, TEST_FAILED if any perf_AES_GCM() call fails
+ */
+static int
+test_perf_AES_GCM(int continual_buf_len, int continual_size)
+{
+	uint16_t i, j, k, loops = 1;
+
+	uint16_t buf_lengths[] = { 64, 128, 256, 512, 1024, 1536, 2048 };
+
+	static const struct cryptodev_perf_test_data *gcm_tests[] = {
+			&AES_GCM_128_12IV_0AAD
+	};
+
+	if (continual_buf_len)
+		loops = continual_size;
+
+	int TEST_CASES_GCM = RTE_DIM(gcm_tests);
+
+	const unsigned burst_size = 32;
+
+	struct symmetric_op ops_set[TEST_CASES_GCM];
+	struct perf_test_params params_set[TEST_CASES_GCM];
+	struct symmetric_session_attrs session_attrs[TEST_CASES_GCM];
+	static const struct cryptodev_perf_test_data *gcm_test;
+
+	/* Translate each known-answer vector into session attrs + op data */
+	for (i = 0; i < TEST_CASES_GCM; ++i) {
+
+		gcm_test = gcm_tests[i];
+
+		session_attrs[i].cipher =
+				RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+		session_attrs[i].cipher_algorithm =
+				RTE_CRYPTO_CIPHER_AES_GCM;
+		session_attrs[i].key_cipher_data =
+				gcm_test->key.data;
+		session_attrs[i].key_cipher_len =
+				gcm_test->key.len;
+		session_attrs[i].auth_algorithm =
+				RTE_CRYPTO_AUTH_AES_GCM;
+		session_attrs[i].auth =
+				RTE_CRYPTO_AUTH_OP_GENERATE;
+		/* GCM derives the auth key from the cipher key: none needed */
+		session_attrs[i].key_auth_data = NULL;
+		session_attrs[i].key_auth_len = 0;
+		session_attrs[i].digest_len =
+				gcm_test->auth_tag.len;
+
+		ops_set[i].aad_data = gcm_test->aad.data;
+		ops_set[i].aad_len = gcm_test->aad.len;
+		ops_set[i].iv_data = gcm_test->iv.data;
+		ops_set[i].iv_len = gcm_test->iv.len;
+		ops_set[i].p_data = gcm_test->plaintext.data;
+		ops_set[i].p_len = buf_lengths[i];
+		ops_set[i].c_data = gcm_test->ciphertext.data;
+		ops_set[i].c_len = buf_lengths[i];
+		ops_set[i].t_data = gcm_test->auth_tags[i].data;
+		ops_set[i].t_len = gcm_test->auth_tags[i].len;
+
+		params_set[i].chain = CIPHER_HASH;
+		params_set[i].session_attrs = &session_attrs[i];
+		params_set[i].symmetric_op = &ops_set[i];
+		if (continual_buf_len)
+			params_set[i].total_operations = 0xFFFFFF;
+		else
+			params_set[i].total_operations = 1000000;
+
+		params_set[i].burst_size = burst_size;
+
+	}
+
+	if (continual_buf_len)
+		printf("\nCipher algo: %s Cipher hash: %s cipher key size: %ub"
+			" burst size: %u", "AES_GCM", "AES_GCM",
+			gcm_test->key.len << 3, burst_size);
+
+	for (i = 0; i < RTE_DIM(gcm_tests); i++) {
+
+		if (!continual_buf_len) {
+			printf("\nCipher algo: %s Cipher hash: %s cipher key size: %ub"
+				" burst size: %u", "AES_GCM", "AES_GCM",
+				gcm_test->key.len << 3, burst_size);
+			printf("\nBuffer Size(B)\tOPS(M)\tThroughput(Gbps)\t"
+				" Retries\tEmptyPolls");
+		}
+
+		uint16_t len = RTE_DIM(buf_lengths);
+		uint16_t p = 0;
+
+		/* In continual mode, narrow the sweep to the single requested
+		 * length if it is present in buf_lengths */
+		if (continual_buf_len) {
+			for (k = 0; k < RTE_DIM(buf_lengths); k++)
+				if (buf_lengths[k] == continual_buf_len) {
+					len = k + 1;
+					p = k;
+					break;
+				}
+		}
+		for (j = p; j < len; ++j) {
+
+			params_set[i].symmetric_op->c_len = buf_lengths[j];
+			params_set[i].symmetric_op->p_len = buf_lengths[j];
+
+			ops_set[i].t_data = gcm_tests[i]->auth_tags[j].data;
+			ops_set[i].t_len = gcm_tests[i]->auth_tags[j].len;
+
+			/* Run is twice, one for encryption/hash checks,
+			 * one for perf
+			 */
+			if (perf_AES_GCM(testsuite_params.dev_id, 0,
+					&params_set[i], 1))
+				return TEST_FAILED;
+
+			for (k = 0; k < loops; k++) {
+				if (continual_buf_len)
+					printf("\n\nBuffer Size(B)\tOPS(M)\t"
+						"Throughput(Gbps)\t"
+						"Retries\tEmptyPolls");
+				if (perf_AES_GCM(testsuite_params.dev_id, 0,
+						&params_set[i], 0))
+					return TEST_FAILED;
+				if (continual_buf_len)
+					printf("\n\nCompleted loop %i of %i ...",
+						k+1, loops);
+			}
+		}
+
+	}
+	printf("\n");
+	return 0;
+}
+
+/* Single-shot AES-GCM perf run: continual mode off, default loop count. */
+static int
+test_cryptodev_perf_AES_GCM(void)
+{
+	int ret;
+
+	ret = test_perf_AES_GCM(0, 0);
+
+	return ret;
+}
+/*
+ * This function runs the AES-GCM performance tests, providing the packet
+ * size as an argument. If the requested size is not present in the
+ * buf_lengths array, all sizes will be used.
+ */
+static int test_continual_perf_AES_GCM(void)
+{
+ return test_perf_AES_GCM(1024, 10);