/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <des.h>
+
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include "rte_aesni_mb_pmd_private.h"
+static uint8_t cryptodev_driver_id;
+
typedef void (*hash_one_block_t)(const void *data, void *digest);
typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
break;
default:
MB_LOG_ERR("Unsupported authentication algorithm selection");
- return -1;
+ return -ENOTSUP;
}
/* Calculate Authentication precomputes */
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
+ uint8_t is_aes = 0;
aes_keyexp_t aes_keyexp_fn;
if (xform == NULL) {
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
MB_LOG_ERR("Crypto xform struct not of type cipher");
- return -1;
+ return -EINVAL;
}
/* Select cipher direction */
sess->cipher.direction = DECRYPT;
break;
default:
- MB_LOG_ERR("Unsupported cipher operation parameter");
- return -1;
+ MB_LOG_ERR("Invalid cipher operation parameter");
+ return -EINVAL;
}
/* Select cipher mode */
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
sess->cipher.mode = CBC;
+ is_aes = 1;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
sess->cipher.mode = CNTR;
+ is_aes = 1;
break;
case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
sess->cipher.mode = DOCSIS_SEC_BPI;
+ is_aes = 1;
break;
- default:
- MB_LOG_ERR("Unsupported cipher mode parameter");
- return -1;
- }
-
- /* Check key length and choose key expansion function */
- switch (xform->cipher.key.length) {
- case AES_128_BYTES:
- sess->cipher.key_length_in_bytes = AES_128_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.mode = DES;
break;
- case AES_192_BYTES:
- sess->cipher.key_length_in_bytes = AES_192_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
- break;
- case AES_256_BYTES:
- sess->cipher.key_length_in_bytes = AES_256_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_DES;
break;
default:
- MB_LOG_ERR("Unsupported cipher key length");
- return -1;
+ MB_LOG_ERR("Unsupported cipher mode parameter");
+ return -ENOTSUP;
}
- /* Expanded cipher keys */
- (*aes_keyexp_fn)(xform->cipher.key.data,
- sess->cipher.expanded_aes_keys.encode,
- sess->cipher.expanded_aes_keys.decode);
+ /* Set IV parameters */
+ sess->iv.offset = xform->cipher.iv.offset;
+ sess->iv.length = xform->cipher.iv.length;
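+ /* The IV itself is carried inside the crypto op; iv.offset locates it there. */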
+
+ /* Check key length and choose key expansion function for AES */
+ if (is_aes) {
+ switch (xform->cipher.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ break;
+ default:
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ } else {
+ if (xform->cipher.key.length != 8) {
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+ sess->cipher.key_length_in_bytes = 8;
+
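+ /* Generate the DES key schedule for both directions, reusing the
+ * expanded_aes_keys storage in the session. */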
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
+ xform->cipher.key.data);
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
+ xform->cipher.key.data);
+ }
return 0;
}
{
const struct rte_crypto_sym_xform *auth_xform = NULL;
const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ int ret;
/* Select Crypto operation - hash then cipher / cipher then hash */
switch (aesni_mb_get_chain_order(xform)) {
case AESNI_MB_OP_NOT_SUPPORTED:
default:
MB_LOG_ERR("Unsupported operation chain order parameter");
- return -1;
+ return -ENOTSUP;
}
- if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+
+ ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
+ if (ret != 0) {
MB_LOG_ERR("Invalid/unsupported authentication parameters");
- return -1;
+ return ret;
}
- if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
- cipher_xform)) {
+ ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+ cipher_xform);
+ if (ret != 0) {
MB_LOG_ERR("Invalid/unsupported cipher parameters");
- return -1;
+ return ret;
}
+
return 0;
}
{
struct aesni_mb_session *sess = NULL;
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
- if (unlikely(op->sym->session->dev_type !=
- RTE_CRYPTODEV_AESNI_MB_PMD)) {
- return NULL;
- }
-
- sess = (struct aesni_mb_session *)op->sym->session->_private;
- } else {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session != NULL))
+ sess = (struct aesni_mb_session *)
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ } else {
void *_sess = NULL;
+ void *_sess_private_data = NULL;
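+ /* Session-less op: take two objects from the session mempool, one
+ * for the generic session header and one for this PMD's private
+ * session data. */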
if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
return NULL;
- sess = (struct aesni_mb_session *)
- ((struct rte_cryptodev_sym_session *)_sess)->_private;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ return NULL;
+ }
+
+ sess = (struct aesni_mb_session *)_sess_private_data;
if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
sess, op->sym->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session, cryptodev_driver_id,
+ _sess_private_data);
}
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
return sess;
}
*/
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
- struct rte_crypto_op *op)
+ struct rte_crypto_op *op, uint8_t *digest_idx)
{
struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
struct aesni_mb_session *session;
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
return -1;
}
- op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
/* Set crypto operation */
job->chain_order = session->chain_order;
/* Set digest output location */
if (job->hash_alg != NULL_HASH &&
session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
- get_digest_byte_length(job->hash_alg));
-
- if (job->auth_tag_output == NULL) {
- MB_LOG_ERR("failed to allocate space in output mbuf "
- "for temp digest");
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return -1;
- }
-
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
-
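+ /* Verification digests land in a per-qp scratch slot and are
+ * compared against the op's digest in verify_digest(). */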
+ job->auth_tag_output = qp->temp_digests[*digest_idx];
+ *digest_idx = (*digest_idx + 1) % MAX_JOBS;
} else {
job->auth_tag_output = op->sym->auth.digest.data;
}
get_truncated_digest_byte_length(job->hash_alg);
/* Set IV parameters */
- job->iv = op->sym->cipher.iv.data;
- job->iv_len_in_bytes = op->sym->cipher.iv.length;
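+ /* Fetch the IV from inside the op, using the offset recorded in the session. */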
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ job->iv_len_in_bytes = session->iv.length;
/* Data Parameter */
job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
/* Set user data to be crypto operation data struct */
job->user_data = op;
- job->user_data2 = m_dst;
return 0;
}
static inline void
-verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
- struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
-
- RTE_ASSERT(m_dst == NULL);
-
+verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
+ struct rte_crypto_op *op) {
/* Verify digest if required */
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/**
* Process a completed job and return rte_mbuf which job processed
*
+ * @param qp Queue Pair to process
* @param job JOB_AES_HMAC job to process
*
* @return
- * - Returns processed mbuf which is trimmed of output digest used in
- * verification of supplied digest in the case of a HASH_CIPHER operation
+ * - Returns processed crypto operation.
* - Returns NULL on invalid job
*/
static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
- struct aesni_mb_session *sess;
-
- RTE_ASSERT(op == NULL);
-
+ struct aesni_mb_session *sess = get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+
- if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+ if (likely(op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)) {
switch (job->status) {
case STS_COMPLETED:
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
if (job->hash_alg != NULL_HASH) {
- sess = (struct aesni_mb_session *)
- op->sym->session->_private;
-
if (sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
- verify_digest(job, op);
+ verify_digest(qp, job, op);
}
break;
default:
}
/* Free session if a session-less crypto op */
- if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
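+ /* Zero both objects before handing them back to the mempool. */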
+ memset(sess, 0, sizeof(struct aesni_mb_session));
+ memset(op->sym->session, 0,
+ rte_cryptodev_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
struct rte_crypto_op *op = NULL;
unsigned processed_jobs = 0;
- while (job != NULL && processed_jobs < nb_ops) {
+ while (job != NULL) {
op = post_process_mb_job(qp, job);
if (op) {
ops[processed_jobs++] = op;
qp->stats.dequeued_count++;
} else {
qp->stats.dequeue_err_count++;
break;
}
+ if (processed_jobs == nb_ops)
+ break;
job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
}
}
static inline JOB_AES_HMAC *
-set_job_null_op(JOB_AES_HMAC *job)
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
job->chain_order = HASH_CIPHER;
job->cipher_mode = NULL_CIPHER;
job->hash_alg = NULL_HASH;
job->cipher_direction = DECRYPT;
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+
return job;
}
int retval, processed_jobs = 0;
- do {
- /* Get next operation to process from ingress queue */
- retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
- if (retval < 0)
- break;
+ if (unlikely(nb_ops == 0))
+ return 0;
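+
+ /* Work on a local copy of the scratch digest index; it is written
+ * back to the queue pair once the burst has been submitted. */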
+ uint8_t digest_idx = qp->digest_idx;
+ do {
/* Get next free mb job struct from mb manager */
job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL)) {
/* if no free mb job structs we need to flush mb_mgr */
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
- (nb_ops - processed_jobs) - 1);
+ nb_ops - processed_jobs);
+
+ if (nb_ops == processed_jobs)
+ break;
job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
}
- retval = set_mb_job_params(job, qp, op);
+ /*
+ * Get next operation to process from ingress queue.
+ * There is no need to return the job to the MB_MGR
+ * if there are no more operations to process, since the MB_MGR
+ * can use that pointer again in next get_next calls.
+ */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
+
+ retval = set_mb_job_params(job, qp, op, &digest_idx);
if (unlikely(retval != 0)) {
qp->stats.dequeue_err_count++;
- set_job_null_op(job);
+ set_job_null_op(job, op);
}
/* Submit job to multi-buffer for processing */
} while (processed_jobs < nb_ops);
+ qp->digest_idx = digest_idx;
+
if (processed_jobs < 1)
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
static int
cryptodev_aesni_mb_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct aesni_mb_private *internals;
enum aesni_mb_vector_mode vector_mode;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ MB_LOG_ERR("AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ MB_LOG_ERR("failed to create cryptodev vdev");
+ return -ENODEV;
+ }
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
vector_mode = RTE_AESNI_MB_AVX2;
else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
vector_mode = RTE_AESNI_MB_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
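+ /* SSE is assumed as the baseline, removing the need for an error path. */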
+ else
vector_mode = RTE_AESNI_MB_SSE;
- else {
- MB_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
- dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
- sizeof(struct aesni_mb_private), init_params->socket_id);
- if (dev == NULL) {
- MB_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
- }
-
- dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
+ dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_mb_pmd_ops;
/* register rx/tx burst functions for data path */
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
-init_error:
- MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
- init_params->name);
-
- cryptodev_aesni_mb_remove(vdev);
- return -EFAULT;
}
static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
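+ /* Fields in order: name, private data size, socket id,
+ * max queue pairs, max sessions. */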
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_mb_private),
rte_socket_id(),
- ""
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
- const char *name;
- const char *input_args;
+ const char *name, *args;
+ int retval;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+
+ args = rte_vdev_device_args(vdev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n",
+ args);
+ return -EINVAL;
+ }
return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
- name, rte_socket_id());
- return 0;
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
.remove = cryptodev_aesni_mb_remove
};
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+ cryptodev_aesni_mb_pmd_drv,
+ cryptodev_driver_id);