New upstream version 17.08
[deb_dpdk.git] drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 101ef98..d9c91d0 100644
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
 #include <rte_hexdump.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
 #include <rte_vdev.h>
 #include <rte_malloc.h>
 #include <rte_cpuflags.h>
 
 #include "aesni_gcm_pmd_private.h"
 
-/** GCM encode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_enc[] = {
-               [AESNI_GCM_KEY_128] = {
-                               aesni_gcm128_init,
-                               aesni_gcm128_enc_update,
-                               aesni_gcm128_enc_finalize
-               },
-               [AESNI_GCM_KEY_256] = {
-                               aesni_gcm256_init,
-                               aesni_gcm256_enc_update,
-                               aesni_gcm256_enc_finalize
-               }
-};
-
-/** GCM decode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_dec[] = {
-               [AESNI_GCM_KEY_128] = {
-                               aesni_gcm128_init,
-                               aesni_gcm128_dec_update,
-                               aesni_gcm128_dec_finalize
-               },
-               [AESNI_GCM_KEY_256] = {
-                               aesni_gcm256_init,
-                               aesni_gcm256_dec_update,
-                               aesni_gcm256_dec_finalize
-               }
-};
+static uint8_t cryptodev_driver_id;
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+               struct aesni_gcm_session *sess,
                const struct rte_crypto_sym_xform *xform)
 {
        const struct rte_crypto_sym_xform *auth_xform;
-       const struct rte_crypto_sym_xform *cipher_xform;
-
-       if (xform->next == NULL || xform->next->next != NULL) {
-               GCM_LOG_ERR("Two and only two chained xform required");
-               return -EINVAL;
-       }
+       const struct rte_crypto_sym_xform *aead_xform;
+       uint16_t digest_length;
+       uint8_t key_length;
+       uint8_t *key;
 
-       if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-                       xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-               auth_xform = xform->next;
-               cipher_xform = xform;
-       } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-                       xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+       /* AES-GMAC */
+       if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                auth_xform = xform;
-               cipher_xform = xform->next;
+               if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+                       GCM_LOG_ERR("Only AES GMAC is supported as an "
+                                       "authentication-only algorithm");
+                       return -ENOTSUP;
+               }
+               /* Set IV parameters */
+               sess->iv.offset = auth_xform->auth.iv.offset;
+               sess->iv.length = auth_xform->auth.iv.length;
+
+               /* Select Crypto operation */
+               if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+                       sess->op = AESNI_GMAC_OP_GENERATE;
+               else
+                       sess->op = AESNI_GMAC_OP_VERIFY;
+
+               key_length = auth_xform->auth.key.length;
+               key = auth_xform->auth.key.data;
+               digest_length = auth_xform->auth.digest_length;
+
+       /* AES-GCM */
+       } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+               aead_xform = xform;
+
+               if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+                       GCM_LOG_ERR("The only combined operation "
+                                               "supported is AES GCM");
+                       return -ENOTSUP;
+               }
+
+               /* Set IV parameters */
+               sess->iv.offset = aead_xform->aead.iv.offset;
+               sess->iv.length = aead_xform->aead.iv.length;
+
+               /* Select Crypto operation */
+               if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+                       sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+               else
+                       sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+               key_length = aead_xform->aead.key.length;
+               key = aead_xform->aead.key.data;
+
+               sess->aad_length = aead_xform->aead.aad_length;
+               digest_length = aead_xform->aead.digest_length;
        } else {
-               GCM_LOG_ERR("Cipher and auth xform required");
-               return -EINVAL;
+               GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
+               return -ENOTSUP;
        }
 
-       if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
-               (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
-                       auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
-               GCM_LOG_ERR("We only support AES GCM and AES GMAC");
-               return -EINVAL;
-       }
 
-       /* Select Crypto operation */
-       if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
-                       auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
-               sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
-       else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
-                       auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
-               sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-       else {
-               GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
-                               " Decrypt/Verify are valid only");
+       /* IV check */
+       if (sess->iv.length != 16 && sess->iv.length != 12 &&
+                       sess->iv.length != 0) {
+               GCM_LOG_ERR("Wrong IV length");
                return -EINVAL;
        }
 
        /* Check key length and calculate GCM pre-compute. */
-       switch (cipher_xform->cipher.key.length) {
+       switch (key_length) {
        case 16:
-               aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
                sess->key = AESNI_GCM_KEY_128;
-
+               break;
+       case 24:
+               sess->key = AESNI_GCM_KEY_192;
                break;
        case 32:
-               aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
                sess->key = AESNI_GCM_KEY_256;
-
                break;
        default:
-               GCM_LOG_ERR("Unsupported cipher key length");
+               GCM_LOG_ERR("Invalid key length");
+               return -EINVAL;
+       }
+
+       gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
+       /* Digest check */
+       if (digest_length != 16 &&
+                       digest_length != 12 &&
+                       digest_length != 8) {
+               GCM_LOG_ERR("Invalid digest length");
                return -EINVAL;
        }
+       sess->digest_length = digest_length;
 
        return 0;
 }
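
The rewritten parser accepts a single AEAD xform (AES-GCM) or a single auth
xform (AES-GMAC) in place of the old cipher+auth chain. A minimal sketch of
an AEAD xform that should pass the checks above; the IV_OFFSET constant and
the helper itself are illustrative assumptions, not part of the patch:

    #include <string.h>
    #include <rte_crypto.h>

    /* IV conventionally lives in the op's private area (assumption) */
    #define IV_OFFSET (sizeof(struct rte_crypto_op) + \
                    sizeof(struct rte_crypto_sym_op))

    static void
    fill_gcm_xform(struct rte_crypto_sym_xform *xf, uint8_t *key)
    {
            memset(xf, 0, sizeof(*xf));
            xf->type = RTE_CRYPTO_SYM_XFORM_AEAD;
            xf->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
            xf->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
            xf->aead.key.data = key;
            xf->aead.key.length = 16;    /* 16, 24 or 32 pass the switch */
            xf->aead.iv.offset = IV_OFFSET;
            xf->aead.iv.length = 12;     /* 0, 12 or 16 pass the IV check */
            xf->aead.digest_length = 16; /* 8, 12 or 16 are accepted */
            xf->aead.aad_length = 8;
    }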
 
 /** Get gcm session */
 static struct aesni_gcm_session *
-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 {
        struct aesni_gcm_session *sess = NULL;
-
-       if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
-               if (unlikely(op->session->dev_type
-                                       != RTE_CRYPTODEV_AESNI_GCM_PMD))
-                       return sess;
-
-               sess = (struct aesni_gcm_session *)op->session->_private;
+       struct rte_crypto_sym_op *sym_op = op->sym;
+
+       if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+               if (likely(sym_op->session != NULL))
+                       sess = (struct aesni_gcm_session *)
+                                       get_session_private_data(
+                                       sym_op->session,
+                                       cryptodev_driver_id);
        } else  {
                void *_sess;
+               void *_sess_private_data = NULL;
+
+               if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+                       return NULL;
 
-               if (rte_mempool_get(qp->sess_mp, &_sess))
-                       return sess;
+               if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+                       /* put the session header back to avoid leaking it */
+                       rte_mempool_put(qp->sess_mp, _sess);
+                       return NULL;
+               }
 
-               sess = (struct aesni_gcm_session *)
-                       ((struct rte_cryptodev_sym_session *)_sess)->_private;
+               sess = (struct aesni_gcm_session *)_sess_private_data;
 
-               if (unlikely(aesni_gcm_set_session_parameters(sess,
-                               op->xform) != 0)) {
+               if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+                               sess, sym_op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
+                       rte_mempool_put(qp->sess_mp, _sess_private_data);
                        sess = NULL;
                }
+               sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+               set_session_private_data(sym_op->session, cryptodev_driver_id,
+                       _sess_private_data);
        }
+
+       if (unlikely(sess == NULL))
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
        return sess;
 }
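
get_session_private_data() reflects the 17.08 split of a session into a
generic header plus per-driver private data indexed by cryptodev_driver_id.
A sketch of the application-side counterpart, assuming dev_id and a session
mempool sess_mp exist (one possible flow, not the only valid one):

    #include <rte_cryptodev.h>

    static struct rte_cryptodev_sym_session *
    create_gcm_session(uint8_t dev_id, struct rte_mempool *sess_mp,
                    struct rte_crypto_sym_xform *xform)
    {
            struct rte_cryptodev_sym_session *sess;

            sess = rte_cryptodev_sym_session_create(sess_mp);
            if (sess == NULL)
                    return NULL;
            /* fills the slot that get_session_private_data() reads */
            if (rte_cryptodev_sym_session_init(dev_id, sess, xform,
                            sess_mp) < 0) {
                    rte_cryptodev_sym_session_free(sess);
                    return NULL;
            }
            return sess;
    }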
 
 /**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
  *
  * @param      qp              queue pair
  * @param      op              symmetric crypto operation
@@ -178,14 +201,27 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
  *
  */
 static int
-process_gcm_crypto_op(struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
                struct aesni_gcm_session *session)
 {
        uint8_t *src, *dst;
-       struct rte_mbuf *m_src = op->m_src;
-       uint32_t offset = op->cipher.data.offset;
+       uint8_t *iv_ptr;
+       struct rte_crypto_sym_op *sym_op = op->sym;
+       struct rte_mbuf *m_src = sym_op->m_src;
+       uint32_t offset, data_offset, data_length;
        uint32_t part_len, total_len, data_len;
 
+       if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+                       session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+               offset = sym_op->aead.data.offset;
+               data_offset = offset;
+               data_length = sym_op->aead.data.length;
+       } else {
+               offset = sym_op->auth.data.offset;
+               data_offset = offset;
+               data_length = sym_op->auth.data.length;
+       }
+
        RTE_ASSERT(m_src != NULL);
 
        while (offset >= m_src->data_len) {
@@ -196,60 +232,50 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
        }
 
        data_len = m_src->data_len - offset;
-       part_len = (data_len < op->cipher.data.length) ? data_len :
-                       op->cipher.data.length;
+       part_len = (data_len < data_length) ? data_len :
+                       data_length;
 
        /* Destination buffer is required when the source buffer is segmented */
-       RTE_ASSERT((part_len == op->cipher.data.length) ||
-                       ((part_len != op->cipher.data.length) &&
-                                       (op->m_dst != NULL)));
+       RTE_ASSERT((part_len == data_length) ||
+                       ((part_len != data_length) &&
+                                       (sym_op->m_dst != NULL)));
        /* Segmented destination buffer is not supported */
-       RTE_ASSERT((op->m_dst == NULL) ||
-                       ((op->m_dst != NULL) &&
-                                       rte_pktmbuf_is_contiguous(op->m_dst)));
+       RTE_ASSERT((sym_op->m_dst == NULL) ||
+                       ((sym_op->m_dst != NULL) &&
+                                       rte_pktmbuf_is_contiguous(sym_op->m_dst)));
 
 
-       dst = op->m_dst ?
-                       rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
-                                       op->cipher.data.offset) :
-                       rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
-                                       op->cipher.data.offset);
+       dst = sym_op->m_dst ?
+                       rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+                                       data_offset) :
+                       rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+                                       data_offset);
 
        src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
 
-       /* sanity checks */
-       if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
-                       op->cipher.iv.length != 0) {
-               GCM_LOG_ERR("iv");
-               return -1;
-       }
-
+       iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+                               session->iv.offset);
        /*
         * GCM in 12-byte IV mode uses a 16-byte pre-counter block: set
         * its big-endian LSB word to 1. The driver expects the full
         * 16 bytes to be allocated.
         */
-       if (op->cipher.iv.length == 12) {
-               uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+       if (session->iv.length == 12) {
+               uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
                *iv_padd = rte_bswap32(1);
        }
 
-       if (op->auth.digest.length != 16 &&
-                       op->auth.digest.length != 12 &&
-                       op->auth.digest.length != 8) {
-               GCM_LOG_ERR("digest");
-               return -1;
-       }
-
        if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
 
-               aesni_gcm_enc[session->key].init(&session->gdata,
-                               op->cipher.iv.data,
-                               op->auth.aad.data,
-                               (uint64_t)op->auth.aad.length);
+               qp->ops[session->key].init(&session->gdata_key,
+                               &qp->gdata_ctx,
+                               iv_ptr,
+                               sym_op->aead.aad.data,
+                               (uint64_t)session->aad_length);
 
-               aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+               qp->ops[session->key].update_enc(&session->gdata_key,
+                               &qp->gdata_ctx, dst, src,
                                (uint64_t)part_len);
-               total_len = op->cipher.data.length - part_len;
+               total_len = data_length - part_len;
 
                while (total_len) {
                        dst += part_len;
@@ -261,33 +287,36 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
                        part_len = (m_src->data_len < total_len) ?
                                        m_src->data_len : total_len;
 
-                       aesni_gcm_enc[session->key].update(&session->gdata,
-                                       dst, src,
+                       qp->ops[session->key].update_enc(&session->gdata_key,
+                                       &qp->gdata_ctx, dst, src,
                                        (uint64_t)part_len);
                        total_len -= part_len;
                }
 
-               aesni_gcm_enc[session->key].finalize(&session->gdata,
-                               op->auth.digest.data,
-                               (uint64_t)op->auth.digest.length);
-       } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
-               uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
-                               op->m_dst : op->m_src,
-                               op->auth.digest.length);
+               qp->ops[session->key].finalize(&session->gdata_key,
+                               &qp->gdata_ctx,
+                               sym_op->aead.digest.data,
+                               (uint64_t)session->digest_length);
+       } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+               uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+                               sym_op->m_dst : sym_op->m_src,
+                               session->digest_length);
 
                if (!auth_tag) {
                        GCM_LOG_ERR("auth_tag");
                        return -1;
                }
 
-               aesni_gcm_dec[session->key].init(&session->gdata,
-                               op->cipher.iv.data,
-                               op->auth.aad.data,
-                               (uint64_t)op->auth.aad.length);
+               qp->ops[session->key].init(&session->gdata_key,
+                               &qp->gdata_ctx,
+                               iv_ptr,
+                               sym_op->aead.aad.data,
+                               (uint64_t)session->aad_length);
 
-               aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+               qp->ops[session->key].update_dec(&session->gdata_key,
+                               &qp->gdata_ctx, dst, src,
                                (uint64_t)part_len);
-               total_len = op->cipher.data.length - part_len;
+               total_len = data_length - part_len;
 
                while (total_len) {
                        dst += part_len;
@@ -299,15 +328,47 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
                        part_len = (m_src->data_len < total_len) ?
                                        m_src->data_len : total_len;
 
-                       aesni_gcm_dec[session->key].update(&session->gdata,
+                       qp->ops[session->key].update_dec(&session->gdata_key,
+                                       &qp->gdata_ctx,
                                        dst, src,
                                        (uint64_t)part_len);
                        total_len -= part_len;
                }
 
-               aesni_gcm_dec[session->key].finalize(&session->gdata,
+               qp->ops[session->key].finalize(&session->gdata_key,
+                               &qp->gdata_ctx,
                                auth_tag,
-                               (uint64_t)op->auth.digest.length);
+                               (uint64_t)session->digest_length);
+       } else if (session->op == AESNI_GMAC_OP_GENERATE) {
+               qp->ops[session->key].init(&session->gdata_key,
+                               &qp->gdata_ctx,
+                               iv_ptr,
+                               src,
+                               (uint64_t)data_length);
+               qp->ops[session->key].finalize(&session->gdata_key,
+                               &qp->gdata_ctx,
+                               sym_op->auth.digest.data,
+                               (uint64_t)session->digest_length);
+       } else { /* AESNI_GMAC_OP_VERIFY */
+               uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+                               sym_op->m_dst : sym_op->m_src,
+                               session->digest_length);
+
+               if (!auth_tag) {
+                       GCM_LOG_ERR("auth_tag");
+                       return -1;
+               }
+
+               qp->ops[session->key].init(&session->gdata_key,
+                               &qp->gdata_ctx,
+                               iv_ptr,
+                               src,
+                               (uint64_t)data_length);
+
+               qp->ops[session->key].finalize(&session->gdata_key,
+                               &qp->gdata_ctx,
+                               auth_tag,
+                               (uint64_t)session->digest_length);
        }
 
        return 0;
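
The 12-byte IV handling above follows NIST SP 800-38D: with a 96-bit IV the
pre-counter block is J0 = IV || 0x00000001, so only the last word of the
16-byte block needs to be set. A standalone sketch of the same construction
(x86-only, like this PMD, so rte_bswap32(1) produces the big-endian 1):

    #include <stdint.h>
    #include <string.h>
    #include <rte_byteorder.h>

    /* Illustrative helper, not from the patch */
    static void
    build_j0(uint8_t j0[16], const uint8_t iv[12])
    {
            memcpy(j0, iv, 12);
            /* 32-bit counter starts at 1, stored big-endian */
            *(uint32_t *)&j0[12] = rte_bswap32(1);
    }
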
@@ -324,34 +385,38 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
  * - Returns NULL on invalid job
  */
 static void
-post_process_gcm_crypto_op(struct rte_crypto_op *op)
+post_process_gcm_crypto_op(struct rte_crypto_op *op,
+               struct aesni_gcm_session *session)
 {
        struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
 
-       struct aesni_gcm_session *session =
-               (struct aesni_gcm_session *)op->sym->session->_private;
-
        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
        /* Verify digest if required */
-       if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+       if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+                       session->op == AESNI_GMAC_OP_VERIFY) {
+               uint8_t *digest;
 
                uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
-                               m->data_len - op->sym->auth.digest.length);
+                               m->data_len - session->digest_length);
+
+               if (session->op == AESNI_GMAC_OP_VERIFY)
+                       digest = op->sym->auth.digest.data;
+               else
+                       digest = op->sym->aead.digest.data;
 
 #ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
                rte_hexdump(stdout, "auth tag (orig):",
-                               op->sym->auth.digest.data, op->sym->auth.digest.length);
+                               digest, session->digest_length);
                rte_hexdump(stdout, "auth tag (calc):",
-                               tag, op->sym->auth.digest.length);
+                               tag, session->digest_length);
 #endif
 
-               if (memcmp(tag, op->sym->auth.digest.data,
-                               op->sym->auth.digest.length) != 0)
+               if (memcmp(tag, digest, session->digest_length) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 
                /* trim area used for digest from mbuf */
-               rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+               rte_pktmbuf_trim(m, session->digest_length);
        }
 }
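
For verification the driver appends the computed tag to the mbuf, compares it
against the digest pointer carried in the op, and trims it off again. A sketch
of one possible digest layout for decrypt-verify, with the variables assumed
(here the supplied tag sits in the mbuf right after the ciphertext):

    #include <rte_crypto.h>
    #include <rte_mbuf.h>

    static void
    set_decrypt_digest(struct rte_crypto_op *op, struct rte_mbuf *m,
                    uint32_t ct_len)
    {
            op->sym->m_src = m;
            op->sym->aead.data.offset = 0;
            op->sym->aead.data.length = ct_len;
            op->sym->aead.digest.data =
                    rte_pktmbuf_mtod_offset(m, uint8_t *, ct_len);
            op->sym->aead.digest.phys_addr =
                    rte_pktmbuf_mtophys_offset(m, ct_len);
    }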
 
@@ -359,6 +424,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
  * Process a completed GCM request
  *
  * @param qp           Queue Pair to process
- * @param job          JOB_AES_HMAC job
+ * @param op           Crypto operation
+ * @param sess         Crypto session
  *
  * @return
@@ -366,12 +432,17 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
  */
 static void
 handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
-               struct rte_crypto_op *op)
+               struct rte_crypto_op *op,
+               struct aesni_gcm_session *sess)
 {
-       post_process_gcm_crypto_op(op);
+       post_process_gcm_crypto_op(op, sess);
 
        /* Free session if a session-less crypto op */
-       if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+       if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+               memset(sess, 0, sizeof(struct aesni_gcm_session));
+               memset(op->sym->session, 0,
+                               rte_cryptodev_get_header_session_size());
+               rte_mempool_put(qp->sess_mp, sess);
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }
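
The teardown above mirrors the two rte_mempool_get() calls in
aesni_gcm_get_session(): header and private data both return to qp->sess_mp.
On the submission side, a session-less op only needs the xform attached; a
sketch, with the op pool and xform assumed:

    #include <rte_crypto.h>

    struct rte_crypto_op *op;

    op = rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
    if (op != NULL) {
            /* no session attached: sess_type stays RTE_CRYPTO_OP_SESSIONLESS */
            op->sym->xform = &xform;
            /* ... fill the aead/mbuf fields, then enqueue as usual ... */
    }
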
@@ -392,21 +463,21 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 
        for (i = 0; i < nb_dequeued; i++) {
 
-               sess = aesni_gcm_get_session(qp, ops[i]->sym);
+               sess = aesni_gcm_get_session(qp, ops[i]);
                if (unlikely(sess == NULL)) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.dequeue_err_count++;
                        break;
                }
 
-               retval = process_gcm_crypto_op(ops[i]->sym, sess);
+               retval = process_gcm_crypto_op(qp, ops[i], sess);
                if (retval < 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.dequeue_err_count++;
                        break;
                }
 
-               handle_completed_gcm_crypto_op(qp, ops[i]);
+               handle_completed_gcm_crypto_op(qp, ops[i], sess);
        }
 
        qp->qp_stats.dequeued_count += i;
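
Note that in this PMD the GCM work itself runs at dequeue time: enqueue only
places ops on the queue pair's ring, and each op is processed and
post-processed in the loop above. A typical poll loop, with dev_id, qp_id and
the op array assumed:

    #include <rte_cryptodev.h>

    uint16_t sent, done = 0;

    sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
    while (done < sent)
            /* processing happens inside aesni_gcm_pmd_dequeue_burst() */
            done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
                            ops + done, sent - done);
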
@@ -438,6 +509,7 @@ aesni_gcm_create(const char *name,
 {
        struct rte_cryptodev *dev;
        struct aesni_gcm_private *internals;
+       enum aesni_gcm_vector_mode vector_mode;
 
        if (init_params->name[0] == '\0')
                snprintf(init_params->name, sizeof(init_params->name),
@@ -449,14 +521,23 @@ aesni_gcm_create(const char *name,
                return -EFAULT;
        }
 
-       dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
-                       sizeof(struct aesni_gcm_private), init_params->socket_id);
+       /* Check CPU for supported vector instruction set */
+       if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+               vector_mode = RTE_AESNI_GCM_AVX2;
+       else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+               vector_mode = RTE_AESNI_GCM_AVX;
+       else
+               vector_mode = RTE_AESNI_GCM_SSE;
+
+       dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+                       sizeof(struct aesni_gcm_private), init_params->socket_id,
+                       vdev);
        if (dev == NULL) {
                GCM_LOG_ERR("failed to create cryptodev vdev");
                goto init_error;
        }
 
-       dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+       dev->driver_id = cryptodev_driver_id;
        dev->dev_ops = rte_aesni_gcm_pmd_ops;
 
        /* register rx/tx burst functions for data path */
@@ -468,8 +549,24 @@ aesni_gcm_create(const char *name,
                        RTE_CRYPTODEV_FF_CPU_AESNI |
                        RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
+       switch (vector_mode) {
+       case RTE_AESNI_GCM_SSE:
+               dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+               break;
+       case RTE_AESNI_GCM_AVX:
+               dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+               break;
+       case RTE_AESNI_GCM_AVX2:
+               dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+               break;
+       default:
+               break;
+       }
+
        internals = dev->data->dev_private;
 
+       internals->vector_mode = vector_mode;
+
        internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
        internals->max_nb_sessions = init_params->max_nb_sessions;
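
The selected vector mode is surfaced to applications through the feature
flags set above. A sketch of checking which SIMD path was chosen, assuming
dev_id refers to the probed device:

    #include <stdio.h>
    #include <rte_cryptodev.h>

    struct rte_cryptodev_info info;

    rte_cryptodev_info_get(dev_id, &info);
    if (info.feature_flags & RTE_CRYPTODEV_FF_CPU_AVX2)
            printf("AESNI-GCM PMD: AVX2 code path\n");
    else if (info.feature_flags & RTE_CRYPTODEV_FF_CPU_AVX)
            printf("AESNI-GCM PMD: AVX code path\n");
    else if (info.feature_flags & RTE_CRYPTODEV_FF_CPU_SSE)
            printf("AESNI-GCM PMD: SSE code path\n");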
 
@@ -498,7 +595,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
        if (name == NULL)
                return -EINVAL;
        input_args = rte_vdev_device_args(vdev);
-       rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+       rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
 
        RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
                        init_params.socket_id);
@@ -539,3 +636,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
        "max_nb_queue_pairs=<int> "
        "max_nb_sessions=<int> "
        "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_pmd_drv, cryptodev_driver_id);
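
RTE_PMD_REGISTER_CRYPTO_DRIVER assigns the cryptodev_driver_id used
throughout the file, replacing the old RTE_CRYPTODEV_AESNI_GCM_PMD device
type. Applications can resolve the id by driver name; a sketch, assuming the
name matches CRYPTODEV_NAME_AESNI_GCM_PMD ("crypto_aesni_gcm"):

    #include <stdio.h>
    #include <rte_cryptodev.h>

    int drv_id = rte_cryptodev_driver_id_get("crypto_aesni_gcm");
    if (drv_id >= 0)
            printf("AESNI-GCM driver id: %d\n", drv_id);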