/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2017 Intel Corporation
 */

#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>

#include "rte_aesni_mb_pmd_private.h"

/*
 * Driver capability table. The per-algorithm block, key, digest, IV and
 * AAD size ranges are elided from this condensed listing. For the HMAC
 * entries, wider key/digest ranges are advertised when the underlying
 * intel-ipsec-mb library is recent enough
 * (IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)).
 */
static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
	{	/* MD5 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_MD5_HMAC, }, }
		}, }
	},
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, }, }
		}, }
	},
	{	/* SHA224 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, }, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, }, }
		}, }
	},
	{	/* SHA384 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, }, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, }, }
		}, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, }, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CBC, }, }
		}, }
	},
	{	/* AES CTR */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_CTR, }, }
		}, }
	},
	{	/* AES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI, }, }
		}, }
	},
	{	/* DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_DES_CBC, }, }
		}, }
	},
	{	/* 3DES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_3DES_CBC, }, }
		}, }
	},
	{	/* DES DOCSIS BPI */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = { .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI, }, }
		}, }
	},
	{	/* AES CCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = { .algo = RTE_CRYPTO_AEAD_AES_CCM, }, }
		}, }
	},
	{	/* AES CMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = { .algo = RTE_CRYPTO_AUTH_AES_CMAC, }, }
		}, }
	},
	{	/* AES GCM */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
			{.aead = { .algo = RTE_CRYPTO_AEAD_AES_GCM, }, }
		}, }
	},

	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
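
/*
 * Illustrative sketch (not part of the driver): applications normally
 * discover these capabilities through the cryptodev API rather than by
 * reading this table directly, e.g.:
 *
 *	struct rte_cryptodev_sym_capability_idx idx = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *	};
 *	const struct rte_cryptodev_symmetric_capability *cap =
 *		rte_cryptodev_sym_capability_get(dev_id, &idx);
 *
 * A non-NULL cap exposes the supported key_size/digest_size ranges;
 * dev_id is an application-provided device identifier.
 */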
/** Configure device */
static int
aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
		__rte_unused struct rte_cryptodev_config *config)
{
	return 0;
}

/** Start device */
static int
aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Stop device */
static void
aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
{
}

/** Close device */
static int
aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
{
	return 0;
}

/** Get device statistics */
static void
aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;

		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

/** Reset device statistics */
static void
aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

/** Get device info */
static void
aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *dev_info)
{
	struct aesni_mb_private *internals = dev->data->dev_private;

	if (dev_info != NULL) {
		dev_info->driver_id = dev->driver_id;
		dev_info->feature_flags = dev->feature_flags;
		dev_info->capabilities = aesni_mb_pmd_capabilities;
		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		/* No limit on the number of sessions */
		dev_info->sym.max_nb_sessions = 0;
	}
}

/** Release queue pair */
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
	struct rte_ring *r = NULL;

	if (qp != NULL) {
		r = rte_ring_lookup(qp->name);
		if (r)
			rte_ring_free(r);
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
		rte_free(qp);
		dev->data->queue_pairs[qp_id] = NULL;
	}
	return 0;
}

/** Set a unique name for the queue pair based on the device ID and qp ID */
static int
aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
		struct aesni_mb_qp *qp)
{
	unsigned int n = snprintf(qp->name, sizeof(qp->name),
			"aesni_mb_pmd_%u_qp_%u",
			dev->data->dev_id, qp->id);

	if (n >= sizeof(qp->name))
		return -1;

	return 0;
}

/** Create a ring to place processed operations on */
static struct rte_ring *
aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
		const char *str, unsigned int ring_size, int socket_id)
{
	struct rte_ring *r;
	char ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	unsigned int n = snprintf(ring_name, sizeof(ring_name), "%s_%s",
			qp->name, str);

	if (n >= sizeof(ring_name))
		return NULL;

	r = rte_ring_lookup(ring_name);
	if (r) {
		if (rte_ring_get_size(r) >= ring_size) {
			AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
					ring_name);
			return r;
		}

		AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
				ring_name);
		return NULL;
	}

	return rte_ring_create(ring_name, ring_size, socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}

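/*
 * Design note: the ring is requested with RING_F_SP_ENQ | RING_F_SC_DEQ
 * (single producer, single consumer), matching the cryptodev threading
 * model in which a given queue pair is only ever accessed by one lcore
 * at a time.
 */
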
/** Setup a queue pair */
static int
aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		const struct rte_cryptodev_qp_conf *qp_conf,
		int socket_id, struct rte_mempool *session_pool)
{
	struct aesni_mb_qp *qp = NULL;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret = -1;

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		aesni_mb_pmd_qp_release(dev, qp_id);

	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (qp == NULL)
		return -ENOMEM;

	qp->id = qp_id;
	dev->data->queue_pairs[qp_id] = qp;

	if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
		goto qp_setup_cleanup;

	qp->mb_mgr = alloc_mb_mgr(0);
	if (qp->mb_mgr == NULL) {
		ret = -ENOMEM;
		goto qp_setup_cleanup;
	}

	qp->op_fns = &job_ops[internals->vector_mode];

	qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
			"ingress", qp_conf->nb_descriptors, socket_id);
	if (qp->ingress_queue == NULL) {
		ret = -1;
		goto qp_setup_cleanup;
	}

	qp->sess_mp = session_pool;

	memset(&qp->stats, 0, sizeof(qp->stats));

	char mp_name[RTE_MEMPOOL_NAMESIZE];

	snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
			"digest_mp_%u_%u", dev->data->dev_id, qp_id);

	/* Initialise multi-buffer manager */
	(*qp->op_fns->job.init_mgr)(qp->mb_mgr);
	return 0;

qp_setup_cleanup:
	if (qp) {
		if (qp->mb_mgr)
			free_mb_mgr(qp->mb_mgr);
		rte_free(qp);
	}

	return ret;
}

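/*
 * Illustrative sketch (not part of the driver): the setup callback above
 * is reached through the public cryptodev API, roughly as follows; dev_id
 * and session_pool are application-provided placeholders.
 *
 *	struct rte_cryptodev_config conf = {
 *		.nb_queue_pairs = 1,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_configure(dev_id, &conf);
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *			rte_socket_id(), session_pool);
 *	rte_cryptodev_start(dev_id);
 */
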
/** Return the number of allocated queue pairs */
static uint32_t
aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
{
	return dev->data->nb_queue_pairs;
}

/** Return the size of the aesni multi-buffer session structure */
static unsigned int
aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct aesni_mb_session);
}

/** Configure an aesni multi-buffer session from a crypto xform chain */
static int
aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct aesni_mb_private *internals = dev->data->dev_private;
	int ret;

	if (unlikely(sess == NULL)) {
		AESNI_MB_LOG(ERR, "invalid session struct");
		return -EINVAL;
	}

	if (rte_mempool_get(mempool, &sess_private_data)) {
		AESNI_MB_LOG(ERR,
				"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
			sess_private_data, xform);
	if (ret != 0) {
		AESNI_MB_LOG(ERR, "failed to configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sym_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

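/*
 * Illustrative sketch (not part of the driver): the configure callback
 * above is invoked via the generic session API, e.g.:
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess != NULL)
 *		rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp);
 *
 * sess_mp, dev_id and xform are application-provided placeholders; the
 * same mempool is shown for the session and its private data for brevity.
 */
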
/** Clear the memory of session so it doesn't leave key material behind */
static void
aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	uint8_t index = dev->driver_id;
	void *sess_priv = get_sym_session_private_data(sess, index);

	/* Zero out the whole structure */
	if (sess_priv) {
		memset(sess_priv, 0, sizeof(struct aesni_mb_session));
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
		set_sym_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

struct rte_cryptodev_ops aesni_mb_pmd_ops = {
		.dev_configure		= aesni_mb_pmd_config,
		.dev_start		= aesni_mb_pmd_start,
		.dev_stop		= aesni_mb_pmd_stop,
		.dev_close		= aesni_mb_pmd_close,

		.stats_get		= aesni_mb_pmd_stats_get,
		.stats_reset		= aesni_mb_pmd_stats_reset,

		.dev_infos_get		= aesni_mb_pmd_info_get,

		.queue_pair_setup	= aesni_mb_pmd_qp_setup,
		.queue_pair_release	= aesni_mb_pmd_qp_release,
		.queue_pair_count	= aesni_mb_pmd_qp_count,

		.sym_session_get_size	= aesni_mb_pmd_sym_session_get_size,
		.sym_session_configure	= aesni_mb_pmd_sym_session_configure,
		.sym_session_clear	= aesni_mb_pmd_sym_session_clear
};

struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
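
/*
 * This ops table is exported for the PMD's device-create path (in
 * rte_aesni_mb_pmd.c), which is expected to wire it up when the vdev is
 * probed, roughly as: dev->dev_ops = rte_aesni_mb_pmd_ops;
 */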