New upstream version 18.02
[deb_dpdk.git] / drivers/crypto/dpaa_sec/dpaa_sec.c
index 16155b1..18681cf 100644
@@ -1,34 +1,8 @@
-/*-
- *   BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2017 NXP.
+ *   Copyright 2017 NXP
  *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of NXP nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <fcntl.h>
@@ -41,6 +15,7 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_crypto.h>
 #include <rte_cryptodev.h>
+#include <rte_security_driver.h>
 #include <rte_cycles.h>
 #include <rte_dev.h>
 #include <rte_kvargs.h>
@@ -69,6 +44,9 @@ static uint8_t cryptodev_driver_id;
 static __thread struct rte_crypto_op **dpaa_sec_ops;
 static __thread int dpaa_sec_op_nb;
 
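+/* Forward declaration: a session is bound to a queue pair lazily, on its
+ * first enqueue (see dpaa_sec_enqueue_burst).
+ */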
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
+
 static inline void
 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
 {
@@ -106,6 +84,8 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
 
        ctx->ctx_pool = ses->ctx_pool;
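+       /* Cache this ctx's virt-to-IOVA offset so per-packet translations
+        * (dpaa_mem_vtop_ctx) avoid a memseg table walk.
+        */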
+       ctx->vtop_offset = (uint64_t) ctx
+                               - rte_mempool_virt2iova(ctx);
 
        return ctx;
 }
@@ -121,7 +101,7 @@ dpaa_mem_vtop(void *vaddr)
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
                if (vaddr_64 >= memseg[i].addr_64 &&
                    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
-                       paddr = memseg[i].phys_addr +
+                       paddr = memseg[i].iova +
                                (vaddr_64 - memseg[i].addr_64);
 
                        return (rte_iova_t)paddr;
@@ -130,6 +110,13 @@ dpaa_mem_vtop(void *vaddr)
        return (rte_iova_t)(NULL);
 }
 
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
+{
+       return (uint64_t)vaddr - ctx->vtop_offset;
+}
+
 static inline void *
 dpaa_mem_ptov(rte_iova_t paddr)
 {
@@ -137,10 +124,10 @@ dpaa_mem_ptov(rte_iova_t paddr)
        int i;
 
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
-               if (paddr >= memseg[i].phys_addr &&
-                   (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+               if (paddr >= memseg[i].iova &&
+                   (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
                        return (void *)(memseg[i].addr_64 +
-                                       (paddr - memseg[i].phys_addr));
+                                       (paddr - memseg[i].iova));
        }
        return NULL;
 }
@@ -168,15 +155,6 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
 
-       flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
-               QMAN_FQ_FLAG_TO_DCPORTAL;
-
-       ret = qman_create_fq(0, flags, fq_in);
-       if (unlikely(ret != 0)) {
-               PMD_INIT_LOG(ERR, "qman_create_fq failed");
-               return ret;
-       }
-
        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;
@@ -188,9 +166,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
 
        fq_in->cb.ern  = ern_sec_fq_handler;
 
+       PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+
        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
-               PMD_INIT_LOG(ERR, "qman_init_fq failed");
+               PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
 
        return ret;
 }
@@ -217,8 +197,19 @@ dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
         * sg[1] for input
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
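+       /* Protocol (IPsec) offload changes the frame size in hardware, so
+        * refresh the mbuf lengths from the output SG entry.
+        */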
+       if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+               struct qm_sg_entry *sg_out;
+               uint32_t len;
+
+               sg_out = &job->sg[0];
+               hw_sg_to_cpu(sg_out);
+               len = sg_out->length;
+               ctx->op->sym->m_src->pkt_len = len;
+               ctx->op->sym->m_src->data_len = len;
+       }
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);
 
@@ -282,7 +273,13 @@ static inline int is_aead(dpaa_sec_session *ses)
 static inline int is_auth_cipher(dpaa_sec_session *ses)
 {
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
-               (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+               (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+               (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int is_proto_ipsec(dpaa_sec_session *ses)
+{
+       return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
 }
 
 static inline int is_encode(dpaa_sec_session *ses)
@@ -303,27 +300,39 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
                ses->digest_length = 0;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
-               alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+               alginfo_a->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
-               alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+               alginfo_a->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
-               alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+               alginfo_a->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
-               alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+               alginfo_a->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
-               alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+               alginfo_a->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
-               alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+               alginfo_a->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        default:
@@ -338,15 +347,21 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
        case RTE_CRYPTO_CIPHER_NULL:
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
-               alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+               alginfo_c->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
-               alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+               alginfo_c->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
-               alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+               alginfo_c->algtype =
+                       (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+                       OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CTR;
                break;
        default:
@@ -374,7 +389,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
 {
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        uint32_t shared_desc_len = 0;
-       struct sec_cdb *cdb = &ses->qp->cdb;
+       struct sec_cdb *cdb = &ses->cdb;
        int err;
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
@@ -492,14 +507,28 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
-
-               /* Auth_only_len is set as 0 here and it will be overwritten
-                *  in fd for each packet.
-                */
-               shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
-                               true, swap, &alginfo_c, &alginfo_a,
-                               ses->iv.length, 0,
-                               ses->digest_length, ses->dir);
+               if (is_proto_ipsec(ses)) {
+                       if (ses->dir == DIR_ENC) {
+                               shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+                                               cdb->sh_desc,
+                                               true, swap, &ses->encap_pdb,
+                                               (uint8_t *)&ses->ip4_hdr,
+                                               &alginfo_c, &alginfo_a);
+                       } else if (ses->dir == DIR_DEC) {
+                               shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+                                               cdb->sh_desc,
+                                               true, swap, &ses->decap_pdb,
+                                               &alginfo_c, &alginfo_a);
+                       }
+               } else {
+                       /* Auth_only_len is set as 0 here and it will be
+                        * overwritten in fd for each packet.
+                        */
+                       shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+                                       true, swap, &alginfo_c, &alginfo_a,
+                                       ses->iv.length, 0,
+                                       ses->digest_length, ses->dir);
+               }
        }
        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
@@ -508,46 +537,146 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
        return 0;
 }
 
-static inline unsigned int
-dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
+/* qp is lockless, should be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
 {
+       struct qman_fq *fq;
        unsigned int pkts = 0;
        int ret;
-       struct qm_mcr_queryfq_np np;
-       enum qman_fq_state state;
-       uint32_t flags;
-       uint32_t vdqcr;
-
-       qman_query_fq_np(fq, &np);
-       if (np.frm_cnt) {
-               vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
-               if (exact)
-                       vdqcr |= QM_VDQCR_EXACT;
-               ret = qman_volatile_dequeue(fq, 0, vdqcr);
-               if (ret)
-                       return 0;
-               do {
-                       pkts += qman_poll_dqrr(len);
-                       qman_fq_state(fq, &state, &flags);
-               } while (flags & QMAN_FQ_STATE_VDQCR);
-       }
+       struct qm_dqrr_entry *dq;
+
+       fq = &qp->outq;
+       ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+                               DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
+       if (ret)
+               return 0;
+
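+       /* Volatile dequeue: pull up to the requested number of frames from
+        * the out FQ and consume each DQRR entry until the VDQCR completes.
+        */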
+       do {
+               const struct qm_fd *fd;
+               struct dpaa_sec_job *job;
+               struct dpaa_sec_op_ctx *ctx;
+               struct rte_crypto_op *op;
+
+               dq = qman_dequeue(fq);
+               if (!dq)
+                       continue;
+
+               fd = &dq->fd;
+               /* sg is embedded in an op ctx,
+                * sg[0] is for output
+                * sg[1] for input
+                */
+               job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+
+               ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+               ctx->fd_status = fd->status;
+               op = ctx->op;
+               if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+                       struct qm_sg_entry *sg_out;
+                       uint32_t len;
+
+                       sg_out = &job->sg[0];
+                       hw_sg_to_cpu(sg_out);
+                       len = sg_out->length;
+                       op->sym->m_src->pkt_len = len;
+                       op->sym->m_src->data_len = len;
+               }
+               if (!ctx->fd_status) {
+                       op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+               } else {
+                       printf("\nSEC return err: 0x%x", ctx->fd_status);
+                       op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+               }
+               ops[pkts++] = op;
+
+               /* report op status to sym->op and then free the ctx memory */
+               rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+
+               qman_dqrr_consume(fq, dq);
+       } while (fq->flags & QMAN_FQ_STATE_VDQCR);
+
        return pkts;
 }
 
-/* qp is lockless, should be accessed by only one thread */
-static int
-dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+static inline struct dpaa_sec_job *
+build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
-       struct qman_fq *fq;
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct rte_mbuf *mbuf = sym->m_src;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       phys_addr_t start_addr;
+       uint8_t *old_digest, extra_segs;
 
-       fq = &qp->outq;
-       dpaa_sec_op_nb = 0;
-       dpaa_sec_ops = ops;
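+       /* Digest verification (decode) needs one extra input SG entry to
+        * carry the received digest to the hardware.
+        */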
+       if (is_decode(ses))
+               extra_segs = 3;
+       else
+               extra_segs = 2;
 
-       if (unlikely(nb_ops > DPAA_SEC_BURST))
-               nb_ops = DPAA_SEC_BURST;
+       if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
+                                                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
 
-       return dpaa_volatile_deq(fq, nb_ops, 1);
+       cf = &ctx->job;
+       ctx->op = op;
+       old_digest = ctx->digest;
+
+       /* output */
+       out_sg = &cf->sg[0];
+       qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
+       out_sg->length = ses->digest_length;
+       cpu_to_hw_sg(out_sg);
+
+       /* input */
+       in_sg = &cf->sg[1];
+       /* need to extend the input to a compound frame */
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       in_sg->length = sym->auth.data.length;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+
+       /* 1st seg */
+       sg = in_sg + 1;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->auth.data.offset;
+       sg->offset = sym->auth.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+
+       if (is_decode(ses)) {
+               /* Digest verification case */
+               cpu_to_hw_sg(sg);
+               sg++;
+               rte_memcpy(old_digest, sym->auth.digest.data,
+                               ses->digest_length);
+               start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+               qm_sg_entry_set64(sg, start_addr);
+               sg->length = ses->digest_length;
+               in_sg->length += ses->digest_length;
+       } else {
+               /* Digest calculation case */
+               sg->length -= ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+       cpu_to_hw_sg(in_sg);
+
+       return cf;
 }
 
 /**
@@ -589,7 +718,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        if (is_decode(ses)) {
                /* need to extend the input to a compound frame */
                sg->extension = 1;
-               qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
                sg->length = sym->auth.data.length + ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
@@ -603,7 +732,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
                cpu_to_hw_sg(sg);
 
                /* let's check digest by hw */
-               start_addr = dpaa_mem_vtop(old_digest);
+               start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
@@ -619,6 +748,101 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       struct rte_mbuf *mbuf;
+       uint8_t req_segs;
+       uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+                       ses->iv.offset);
+
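+       /* Budget SG entries: two compound-frame entries plus one for the IV;
+        * in-place ops walk the source chain twice (output and input).
+        */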
+       if (sym->m_dst) {
+               mbuf = sym->m_dst;
+               req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+       } else {
+               mbuf = sym->m_src;
+               req_segs = mbuf->nb_segs * 2 + 3;
+       }
+
+       if (req_segs > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
+                                                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+
+       cf = &ctx->job;
+       ctx->op = op;
+
+       /* output */
+       out_sg = &cf->sg[0];
+       out_sg->extension = 1;
+       out_sg->length = sym->cipher.data.length;
+       qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+       cpu_to_hw_sg(out_sg);
+
+       /* 1st seg */
+       sg = &cf->sg[2];
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->cipher.data.offset;
+       sg->offset = sym->cipher.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       /* input */
+       mbuf = sym->m_src;
+       in_sg = &cf->sg[1];
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       in_sg->length = sym->cipher.data.length + ses->iv.length;
+
+       sg++;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(in_sg);
+
+       /* IV */
+       qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+       sg->length = ses->iv.length;
+       cpu_to_hw_sg(sg);
+
+       /* 1st seg */
+       sg++;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->cipher.data.offset;
+       sg->offset = sym->cipher.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
@@ -657,7 +881,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        sg->extension = 1;
        sg->final = 1;
        sg->length = sym->cipher.data.length + ses->iv.length;
-       qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+       qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
        cpu_to_hw_sg(sg);
 
        sg = &cf->sg[2];
@@ -674,6 +898,145 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       struct rte_mbuf *mbuf;
+       uint8_t req_segs;
+       uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+                       ses->iv.offset);
+
+       if (sym->m_dst) {
+               mbuf = sym->m_dst;
+               req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+       } else {
+               mbuf = sym->m_src;
+               req_segs = mbuf->nb_segs * 2 + 4;
+       }
+
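+       /* AAD (auth-only data), when present, takes one more input SG entry. */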
+       if (ses->auth_only_len)
+               req_segs++;
+
+       if (req_segs > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+
+       cf = &ctx->job;
+       ctx->op = op;
+
+       rte_prefetch0(cf->sg);
+
+       /* output */
+       out_sg = &cf->sg[0];
+       out_sg->extension = 1;
+       if (is_encode(ses))
+               out_sg->length = sym->aead.data.length + ses->auth_only_len
+                                               + ses->digest_length;
+       else
+               out_sg->length = sym->aead.data.length + ses->auth_only_len;
+
+       /* output sg entries */
+       sg = &cf->sg[2];
+       qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(out_sg);
+
+       /* 1st seg */
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->aead.data.offset +
+                                       ses->auth_only_len;
+       sg->offset = sym->aead.data.offset - ses->auth_only_len;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->length -= ses->digest_length;
+
+       if (is_encode(ses)) {
+               cpu_to_hw_sg(sg);
+               /* set auth output */
+               sg++;
+               qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       /* input */
+       mbuf = sym->m_src;
+       in_sg = &cf->sg[1];
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       if (is_encode(ses))
+               in_sg->length = ses->iv.length + sym->aead.data.length
+                                                       + ses->auth_only_len;
+       else
+               in_sg->length = ses->iv.length + sym->aead.data.length
+                               + ses->auth_only_len + ses->digest_length;
+
+       /* input sg entries */
+       sg++;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(in_sg);
+
+       /* 1st seg IV */
+       qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+       sg->length = ses->iv.length;
+       cpu_to_hw_sg(sg);
+
+       /* 2nd seg auth only */
+       if (ses->auth_only_len) {
+               sg++;
+               qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
+               sg->length = ses->auth_only_len;
+               cpu_to_hw_sg(sg);
+       }
+
+       /* 3rd seg */
+       sg++;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->aead.data.offset;
+       sg->offset = sym->aead.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+
+       if (is_decode(ses)) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               memcpy(ctx->digest, sym->aead.digest.data,
+                       ses->digest_length);
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
@@ -703,7 +1066,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
-       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
@@ -748,7 +1111,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
                       ses->digest_length);
                sg++;
 
-               qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
@@ -762,7 +1125,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
        /* output */
        sg++;
-       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -786,6 +1149,132 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
+static inline struct dpaa_sec_job *
+build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+       struct rte_crypto_sym_op *sym = op->sym;
+       struct dpaa_sec_job *cf;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg, *out_sg, *in_sg;
+       struct rte_mbuf *mbuf;
+       uint8_t req_segs;
+       uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+                       ses->iv.offset);
+
+       if (sym->m_dst) {
+               mbuf = sym->m_dst;
+               req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
+       } else {
+               mbuf = sym->m_src;
+               req_segs = mbuf->nb_segs * 2 + 4;
+       }
+
+       if (req_segs > MAX_SG_ENTRIES) {
+               PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+                               MAX_SG_ENTRIES);
+               return NULL;
+       }
+
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+
+       cf = &ctx->job;
+       ctx->op = op;
+
+       rte_prefetch0(cf->sg);
+
+       /* output */
+       out_sg = &cf->sg[0];
+       out_sg->extension = 1;
+       if (is_encode(ses))
+               out_sg->length = sym->auth.data.length + ses->digest_length;
+       else
+               out_sg->length = sym->auth.data.length;
+
+       /* output sg entries */
+       sg = &cf->sg[2];
+       qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(out_sg);
+
+       /* 1st seg */
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->auth.data.offset;
+       sg->offset = sym->auth.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+       sg->length -= ses->digest_length;
+
+       if (is_encode(ses)) {
+               cpu_to_hw_sg(sg);
+               /* set auth output */
+               sg++;
+               qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       /* input */
+       mbuf = sym->m_src;
+       in_sg = &cf->sg[1];
+       in_sg->extension = 1;
+       in_sg->final = 1;
+       if (is_encode(ses))
+               in_sg->length = ses->iv.length + sym->auth.data.length;
+       else
+               in_sg->length = ses->iv.length + sym->auth.data.length
+                                               + ses->digest_length;
+
+       /* input sg entries */
+       sg++;
+       qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+       cpu_to_hw_sg(in_sg);
+
+       /* 1st seg IV */
+       qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+       sg->length = ses->iv.length;
+       cpu_to_hw_sg(sg);
+
+       /* 2nd seg */
+       sg++;
+       qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+       sg->length = mbuf->data_len - sym->auth.data.offset;
+       sg->offset = sym->auth.data.offset;
+
+       /* Successive segs */
+       mbuf = mbuf->next;
+       while (mbuf) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
+               sg->length = mbuf->data_len;
+               mbuf = mbuf->next;
+       }
+
+       sg->length -= ses->digest_length;
+       if (is_decode(ses)) {
+               cpu_to_hw_sg(sg);
+               sg++;
+               memcpy(ctx->digest, sym->auth.digest.data,
+                       ses->digest_length);
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+               sg->length = ses->digest_length;
+       }
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
+
+       return cf;
+}
+
 static inline struct dpaa_sec_job *
 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
@@ -814,7 +1303,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
-       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
@@ -844,7 +1333,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
                       ses->digest_length);
                sg++;
 
-               qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+               qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
@@ -858,7 +1347,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
 
        /* output */
        sg++;
-       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+       qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;
@@ -881,64 +1370,43 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
        return cf;
 }
 
-static int
-dpaa_sec_enqueue_op(struct rte_crypto_op *op,  struct dpaa_sec_qp *qp)
+static inline struct dpaa_sec_job *
+build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
 {
+       struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
-       dpaa_sec_session *ses;
-       struct qm_fd fd;
-       int ret;
-       uint32_t auth_only_len = op->sym->auth.data.length -
-                               op->sym->cipher.data.length;
+       struct dpaa_sec_op_ctx *ctx;
+       struct qm_sg_entry *sg;
+       phys_addr_t src_start_addr, dst_start_addr;
 
-       ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
-                                       cryptodev_driver_id);
+       ctx = dpaa_sec_alloc_ctx(ses);
+       if (!ctx)
+               return NULL;
+       cf = &ctx->job;
+       ctx->op = op;
 
-       if (unlikely(!qp->ses || qp->ses != ses)) {
-               qp->ses = ses;
-               ses->qp = qp;
-               ret = dpaa_sec_prep_cdb(ses);
-               if (ret)
-                       return ret;
-       }
+       src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
 
-       /*
-        * Segmented buffer is not supported.
-        */
-       if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
-               op->status = RTE_CRYPTO_OP_STATUS_ERROR;
-               return -ENOTSUP;
-       }
-       if (is_auth_only(ses)) {
-               cf = build_auth_only(op, ses);
-       } else if (is_cipher_only(ses)) {
-               cf = build_cipher_only(op, ses);
-       } else if (is_aead(ses)) {
-               cf = build_cipher_auth_gcm(op, ses);
-               auth_only_len = ses->auth_only_len;
-       } else if (is_auth_cipher(ses)) {
-               cf = build_cipher_auth(op, ses);
-       } else {
-               PMD_TX_LOG(ERR, "not supported sec op");
-               return -ENOTSUP;
-       }
-       if (unlikely(!cf))
-               return -ENOMEM;
+       if (sym->m_dst)
+               dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
+       else
+               dst_start_addr = src_start_addr;
 
-       memset(&fd, 0, sizeof(struct qm_fd));
-       qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
-       fd._format1 = qm_fd_compound;
-       fd.length29 = 2 * sizeof(struct qm_sg_entry);
-       /* Auth_only_len is set as 0 in descriptor and it is overwritten
-        * here in the fd.cmd which will update the DPOVRD reg.
-        */
-       if (auth_only_len)
-               fd.cmd = 0x80000000 | auth_only_len;
-       do {
-               ret = qman_enqueue(&qp->inq, &fd, 0);
-       } while (ret != 0);
+       /* input */
+       sg = &cf->sg[1];
+       qm_sg_entry_set64(sg, src_start_addr);
+       sg->length = sym->m_src->pkt_len;
+       sg->final = 1;
+       cpu_to_hw_sg(sg);
 
-       return 0;
+       sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
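+       /* Protocol offload may grow (encap) or shrink (decap) the frame, so
+        * hand SEC the full tailroom; the final length is read back from the
+        * output SG entry on dequeue.
+        */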
+       /* output */
+       sg = &cf->sg[0];
+       qm_sg_entry_set64(sg, dst_start_addr);
+       sg->length = sym->m_src->buf_len - sym->m_src->data_off;
+       cpu_to_hw_sg(sg);
+
+       return cf;
 }
 
 static uint16_t
@@ -947,23 +1415,120 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
 {
        /* Function to transmit the frames to given device and queuepair */
        uint32_t loop;
-       int32_t ret;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
        uint16_t num_tx = 0;
+       struct qm_fd fds[DPAA_SEC_BURST], *fd;
+       uint32_t frames_to_send;
+       struct rte_crypto_op *op;
+       struct dpaa_sec_job *cf;
+       dpaa_sec_session *ses;
+       struct dpaa_sec_op_ctx *ctx;
+       uint32_t auth_only_len;
+       struct qman_fq *inq[DPAA_SEC_BURST];
+
+       while (nb_ops) {
+               frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+                               DPAA_SEC_BURST : nb_ops;
+               for (loop = 0; loop < frames_to_send; loop++) {
+                       op = *(ops++);
+                       switch (op->sess_type) {
+                       case RTE_CRYPTO_OP_WITH_SESSION:
+                               ses = (dpaa_sec_session *)
+                                       get_session_private_data(
+                                                       op->sym->session,
+                                                       cryptodev_driver_id);
+                               break;
+                       case RTE_CRYPTO_OP_SECURITY_SESSION:
+                               ses = (dpaa_sec_session *)
+                                       get_sec_session_private_data(
+                                                       op->sym->sec_session);
+                               break;
+                       default:
+                               PMD_TX_LOG(ERR,
+                                       "sessionless crypto op not supported");
+                               frames_to_send = loop;
+                               nb_ops = loop;
+                               goto send_pkts;
+                       }
+                       if (unlikely(!ses->qp || ses->qp != qp)) {
+                               PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
+                                               ses->qp, qp);
+                               if (dpaa_sec_attach_sess_q(qp, ses)) {
+                                       frames_to_send = loop;
+                                       nb_ops = loop;
+                                       goto send_pkts;
+                               }
+                       }
 
-       if (unlikely(nb_ops == 0))
-               return 0;
+                       auth_only_len = op->sym->auth.data.length -
+                                               op->sym->cipher.data.length;
+                       if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+                               if (is_auth_only(ses)) {
+                                       cf = build_auth_only(op, ses);
+                               } else if (is_cipher_only(ses)) {
+                                       cf = build_cipher_only(op, ses);
+                               } else if (is_aead(ses)) {
+                                       cf = build_cipher_auth_gcm(op, ses);
+                                       auth_only_len = ses->auth_only_len;
+                               } else if (is_auth_cipher(ses)) {
+                                       cf = build_cipher_auth(op, ses);
+                               } else if (is_proto_ipsec(ses)) {
+                                       cf = build_proto(op, ses);
+                               } else {
+                                       PMD_TX_LOG(ERR, "not supported sec op");
+                                       frames_to_send = loop;
+                                       nb_ops = loop;
+                                       goto send_pkts;
+                               }
+                       } else {
+                               if (is_auth_only(ses)) {
+                                       cf = build_auth_only_sg(op, ses);
+                               } else if (is_cipher_only(ses)) {
+                                       cf = build_cipher_only_sg(op, ses);
+                               } else if (is_aead(ses)) {
+                                       cf = build_cipher_auth_gcm_sg(op, ses);
+                                       auth_only_len = ses->auth_only_len;
+                               } else if (is_auth_cipher(ses)) {
+                                       cf = build_cipher_auth_sg(op, ses);
+                               } else {
+                                       PMD_TX_LOG(ERR, "not supported sec op");
+                                       frames_to_send = loop;
+                                       nb_ops = loop;
+                                       goto send_pkts;
+                               }
+                       }
+                       if (unlikely(!cf)) {
+                               frames_to_send = loop;
+                               nb_ops = loop;
+                               goto send_pkts;
+                       }
 
-       /*Prepare each packet which is to be sent*/
-       for (loop = 0; loop < nb_ops; loop++) {
-               if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
-                       PMD_TX_LOG(ERR, "sessionless crypto op not supported");
-                       return 0;
+                       fd = &fds[loop];
+                       inq[loop] = ses->inq;
+                       fd->opaque_addr = 0;
+                       fd->cmd = 0;
+                       ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
+                       qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+                       fd->_format1 = qm_fd_compound;
+                       fd->length29 = 2 * sizeof(struct qm_sg_entry);
+                       /* Auth_only_len is set as 0 in descriptor and it is
+                        * overwritten here in the fd.cmd which will update
+                        * the DPOVRD reg.
+                        */
+                       if (auth_only_len)
+                               fd->cmd = 0x80000000 | auth_only_len;
+
+               }
+send_pkts:
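+               /* qman_enqueue_multi_fq() may accept fewer frames than
+                * requested; retry until the prepared batch is fully sent.
+                */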
+               loop = 0;
+               while (loop < frames_to_send) {
+                       loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
+                                       frames_to_send - loop);
                }
-               ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
-               if (!ret)
-                       num_tx++;
+               nb_ops -= frames_to_send;
+               num_tx += frames_to_send;
        }
+
        dpaa_qp->tx_pkts += num_tx;
        dpaa_qp->tx_errs += nb_ops - num_tx;
 
@@ -1151,43 +1716,84 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
        return 0;
 }
 
-static int
-dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+static struct qman_fq *
+dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
 {
-       dpaa_sec_session *sess = ses;
-       struct dpaa_sec_qp *qp;
+       unsigned int i;
 
-       PMD_INIT_FUNC_TRACE();
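+       /* Hand out the first free per-session Rx FQ; one FQ per supported
+        * session was created at device init.
+        */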
+       for (i = 0; i < qi->max_nb_sessions; i++) {
+               if (qi->inq_attach[i] == 0) {
+                       qi->inq_attach[i] = 1;
+                       return &qi->inq[i];
+               }
+       }
+       PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions);
 
-       qp = dev->data->queue_pairs[qp_id];
-       if (qp->ses != NULL) {
-               PMD_INIT_LOG(ERR, "qp in-use by another session\n");
-               return -EBUSY;
+       return NULL;
+}
+
+static int
+dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
+{
+       unsigned int i;
+
+       for (i = 0; i < qi->max_nb_sessions; i++) {
+               if (&qi->inq[i] == fq) {
+                       qman_retire_fq(fq, NULL);
+                       qman_oos_fq(fq);
+                       qi->inq_attach[i] = 0;
+                       return 0;
+               }
        }
+       return -1;
+}
+
+static int
+dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
+{
+       int ret;
 
-       qp->ses = sess;
        sess->qp = qp;
+       ret = dpaa_sec_prep_cdb(sess);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+               return -1;
+       }
+
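+       /* Point the session Rx FQ at this session's shared descriptor
+        * (CONTEXT_A) and route SEC output to the QP's out FQ.
+        */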
+       ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
+                              qman_fq_fqid(&qp->outq));
+       if (ret)
+               PMD_DRV_LOG(ERR, "Unable to init sec queue");
+
+       return ret;
+}
 
-       return dpaa_sec_prep_cdb(sess);
+static int
+dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
+                       uint16_t qp_id __rte_unused,
+                       void *ses __rte_unused)
+{
+       PMD_INIT_FUNC_TRACE();
+       return 0;
 }
 
 static int
-dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
+                       uint16_t qp_id  __rte_unused,
+                       void *ses)
 {
        dpaa_sec_session *sess = ses;
-       struct dpaa_sec_qp *qp;
+       struct dpaa_sec_dev_private *qi = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
-       qp = dev->data->queue_pairs[qp_id];
-       if (qp->ses != NULL) {
-               qp->ses = NULL;
-               sess->qp = NULL;
-               return 0;
-       }
+       if (sess->inq)
+               dpaa_sec_detach_rxq(qi, sess->inq);
+       sess->inq = NULL;
 
-       PMD_DRV_LOG(ERR, "No session attached to qp");
-       return -EINVAL;
+       sess->qp = NULL;
+
+       return 0;
 }
 
 static int
@@ -1250,8 +1856,20 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
                return -EINVAL;
        }
        session->ctx_pool = internals->ctx_pool;
+       session->inq = dpaa_sec_attach_rxq(internals);
+       if (session->inq == NULL) {
+               PMD_DRV_LOG(ERR, "unable to attach sec queue");
+               goto err1;
+       }
 
        return 0;
+
+err1:
+       rte_free(session->cipher_key.data);
+       rte_free(session->auth_key.data);
+       memset(session, 0, sizeof(dpaa_sec_session));
+
+       return -EINVAL;
 }
 
 static int
@@ -1284,6 +1902,7 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
        set_session_private_data(sess, dev->driver_id,
                        sess_private_data);
 
+
        return 0;
 }
 
@@ -1292,21 +1911,257 @@ static void
 dpaa_sec_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
 {
-       PMD_INIT_FUNC_TRACE();
+       struct dpaa_sec_dev_private *qi = dev->data->dev_private;
        uint8_t index = dev->driver_id;
        void *sess_priv = get_session_private_data(sess, index);
+
+       PMD_INIT_FUNC_TRACE();
+
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
 
        if (sess_priv) {
+               struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+               if (s->inq)
+                       dpaa_sec_detach_rxq(qi, s->inq);
                rte_free(s->cipher_key.data);
                rte_free(s->auth_key.data);
                memset(s, 0, sizeof(dpaa_sec_session));
-               struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
                set_session_private_data(sess, index, NULL);
                rte_mempool_put(sess_mp, sess_priv);
        }
 }
 
+static int
+dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
+                          struct rte_security_session_conf *conf,
+                          void *sess)
+{
+       struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+       struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+       struct rte_crypto_auth_xform *auth_xform;
+       struct rte_crypto_cipher_xform *cipher_xform;
+       dpaa_sec_session *session = (dpaa_sec_session *)sess;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+               cipher_xform = &conf->crypto_xform->cipher;
+               auth_xform = &conf->crypto_xform->next->auth;
+       } else {
+               auth_xform = &conf->crypto_xform->auth;
+               cipher_xform = &conf->crypto_xform->next->cipher;
+       }
+       session->proto_alg = conf->protocol;
+       session->cipher_key.data = rte_zmalloc(NULL,
+                                              cipher_xform->key.length,
+                                              RTE_CACHE_LINE_SIZE);
+       if (session->cipher_key.data == NULL &&
+                       cipher_xform->key.length > 0) {
+               RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+               return -ENOMEM;
+       }
+
+       session->cipher_key.length = cipher_xform->key.length;
+       session->auth_key.data = rte_zmalloc(NULL,
+                                       auth_xform->key.length,
+                                       RTE_CACHE_LINE_SIZE);
+       if (session->auth_key.data == NULL &&
+                       auth_xform->key.length > 0) {
+               RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+               rte_free(session->cipher_key.data);
+               return -ENOMEM;
+       }
+       session->auth_key.length = auth_xform->key.length;
+       memcpy(session->cipher_key.data, cipher_xform->key.data,
+                       cipher_xform->key.length);
+       memcpy(session->auth_key.data, auth_xform->key.data,
+                       auth_xform->key.length);
+
+       switch (auth_xform->algo) {
+       case RTE_CRYPTO_AUTH_SHA1_HMAC:
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_MD5_HMAC:
+               session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA256_HMAC:
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA384_HMAC:
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_SHA512_HMAC:
+               session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+               break;
+       case RTE_CRYPTO_AUTH_AES_CMAC:
+               session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+               break;
+       case RTE_CRYPTO_AUTH_NULL:
+               session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+               break;
+       case RTE_CRYPTO_AUTH_SHA224_HMAC:
+       case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+       case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+       case RTE_CRYPTO_AUTH_SHA1:
+       case RTE_CRYPTO_AUTH_SHA256:
+       case RTE_CRYPTO_AUTH_SHA512:
+       case RTE_CRYPTO_AUTH_SHA224:
+       case RTE_CRYPTO_AUTH_SHA384:
+       case RTE_CRYPTO_AUTH_MD5:
+       case RTE_CRYPTO_AUTH_AES_GMAC:
+       case RTE_CRYPTO_AUTH_KASUMI_F9:
+       case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+       case RTE_CRYPTO_AUTH_ZUC_EIA3:
+               RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+                       auth_xform->algo);
+               goto out;
+       default:
+               RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+                       auth_xform->algo);
+               goto out;
+       }
+
+       switch (cipher_xform->algo) {
+       case RTE_CRYPTO_CIPHER_AES_CBC:
+               session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+               break;
+       case RTE_CRYPTO_CIPHER_3DES_CBC:
+               session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+               break;
+       case RTE_CRYPTO_CIPHER_AES_CTR:
+               session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+               break;
+       case RTE_CRYPTO_CIPHER_NULL:
+       case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+       case RTE_CRYPTO_CIPHER_3DES_ECB:
+       case RTE_CRYPTO_CIPHER_AES_ECB:
+       case RTE_CRYPTO_CIPHER_KASUMI_F8:
+               RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+                       cipher_xform->algo);
+               goto out;
+       default:
+               RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+                       cipher_xform->algo);
+               goto out;
+       }
+
+       if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+               memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+                               sizeof(session->ip4_hdr));
+               session->ip4_hdr.ip_v = IPVERSION;
+               session->ip4_hdr.ip_hl = 5;
+               session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+                                               sizeof(session->ip4_hdr));
+               session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+               session->ip4_hdr.ip_id = 0;
+               session->ip4_hdr.ip_off = 0;
+               session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+               session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+                               RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+                               : IPPROTO_AH;
+               session->ip4_hdr.ip_sum = 0;
+               session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+               session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+               session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+                                               (void *)&session->ip4_hdr,
+                                               sizeof(struct ip));
+
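+               /* The outer IPv4 header built above is carried inline in the
+                * encap PDB (PDBOPTS_ESP_OIHI_PDB_INL) and prepended by SEC.
+                */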
+               session->encap_pdb.options =
+                       (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+                       PDBOPTS_ESP_OIHI_PDB_INL |
+                       PDBOPTS_ESP_IVSRC |
+                       PDBHMO_ESP_ENCAP_DTTL;
+               session->encap_pdb.spi = ipsec_xform->spi;
+               session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+               session->dir = DIR_ENC;
+       } else if (ipsec_xform->direction ==
+                       RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+               memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+               session->decap_pdb.options = sizeof(struct ip) << 16;
+               session->dir = DIR_DEC;
+       } else
+               goto out;
+       session->ctx_pool = internals->ctx_pool;
+       session->inq = dpaa_sec_attach_rxq(internals);
+       if (session->inq == NULL) {
+               PMD_DRV_LOG(ERR, "unable to attach sec queue");
+               goto out;
+       }
+
+       return 0;
+out:
+       rte_free(session->auth_key.data);
+       rte_free(session->cipher_key.data);
+       memset(session, 0, sizeof(dpaa_sec_session));
+       return -1;
+}
+
+static int
+dpaa_sec_security_session_create(void *dev,
+                                struct rte_security_session_conf *conf,
+                                struct rte_security_session *sess,
+                                struct rte_mempool *mempool)
+{
+       void *sess_private_data;
+       struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+       int ret;
+
+       if (rte_mempool_get(mempool, &sess_private_data)) {
+               CDEV_LOG_ERR(
+                       "Couldn't get object from session mempool");
+               return -ENOMEM;
+       }
+
+       switch (conf->protocol) {
+       case RTE_SECURITY_PROTOCOL_IPSEC:
+               ret = dpaa_sec_set_ipsec_session(cdev, conf,
+                               sess_private_data);
+               break;
+       case RTE_SECURITY_PROTOCOL_MACSEC:
+               return -ENOTSUP;
+       default:
+               return -EINVAL;
+       }
+       if (ret != 0) {
+               PMD_DRV_LOG(ERR,
+                       "DPAA2 PMD: failed to configure session parameters");
+
+               /* Return session to mempool */
+               rte_mempool_put(mempool, sess_private_data);
+               return ret;
+       }
+
+       set_sec_session_private_data(sess, sess_private_data);
+
+       return ret;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static int
+dpaa_sec_security_session_destroy(void *dev __rte_unused,
+               struct rte_security_session *sess)
+{
+       PMD_INIT_FUNC_TRACE();
+       void *sess_priv = get_sec_session_private_data(sess);
+
+       dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+       if (sess_priv) {
+               struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+               rte_free(s->cipher_key.data);
+               rte_free(s->auth_key.data);
+               memset(s, 0, sizeof(dpaa_sec_session));
+               set_sec_session_private_data(sess, NULL);
+               rte_mempool_put(sess_mp, sess_priv);
+       }
+       return 0;
+}
+
 static int
 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
                       struct rte_cryptodev_config *config __rte_unused)
@@ -1349,7 +2204,8 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
                info->capabilities = dpaa_sec_capabilities;
                info->sym.max_nb_sessions = internals->max_nb_sessions;
                info->sym.max_nb_sessions_per_qp =
-                       RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
+                       RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
+                       RTE_DPAA_MAX_NB_SEC_QPS;
                info->driver_id = cryptodev_driver_id;
        }
 }
@@ -1372,6 +2228,21 @@ static struct rte_cryptodev_ops crypto_ops = {
        .qp_detach_session    = dpaa_sec_qp_detach_sess,
 };
 
+static const struct rte_security_capability *
+dpaa_sec_capabilities_get(void *device __rte_unused)
+{
+       return dpaa_sec_security_cap;
+}
+
+struct rte_security_ops dpaa_sec_security_ops = {
+       .session_create = dpaa_sec_security_session_create,
+       .session_update = NULL,
+       .session_stats_get = NULL,
+       .session_destroy = dpaa_sec_security_session_destroy,
+       .set_pkt_metadata = NULL,
+       .capabilities_get = dpaa_sec_capabilities_get
+};
+
 static int
 dpaa_sec_uninit(struct rte_cryptodev *dev)
 {
@@ -1380,6 +2251,8 @@ dpaa_sec_uninit(struct rte_cryptodev *dev)
        if (dev == NULL)
                return -ENODEV;
 
+       rte_free(dev->security_ctx);
+
        rte_mempool_free(internals->ctx_pool);
        rte_free(internals);
 
@@ -1393,8 +2266,9 @@ static int
 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
 {
        struct dpaa_sec_dev_private *internals;
+       struct rte_security_ctx *security_instance;
        struct dpaa_sec_qp *qp;
-       uint32_t i;
+       uint32_t i, flags;
        int ret;
        char str[20];
 
@@ -1407,12 +2281,34 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
        cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
-                       RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+                       RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+                       RTE_CRYPTODEV_FF_SECURITY |
+                       RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
        internals = cryptodev->data->dev_private;
-       internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
+       internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
        internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
 
+       /*
+        * For secondary processes, we don't initialise any further as the
+        * primary has already done this work.
+        */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+               return 0;
+       }
+
+       /* Initialize security_ctx only for primary process */
+       security_instance = rte_malloc("rte_security_instances_ops",
+                               sizeof(struct rte_security_ctx), 0);
+       if (security_instance == NULL)
+               return -ENOMEM;
+       security_instance->device = (void *)cryptodev;
+       security_instance->ops = &dpaa_sec_security_ops;
+       security_instance->sess_cnt = 0;
+       cryptodev->security_ctx = security_instance;
+
        for (i = 0; i < internals->max_nb_queue_pairs; i++) {
                /* init qman fq for queue pair */
                qp = &internals->qps[i];
@@ -1421,10 +2317,15 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
                        PMD_INIT_LOG(ERR, "config tx of queue pair  %d", i);
                        goto init_error;
                }
-               ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
-                                      qman_fq_fqid(&qp->outq));
-               if (ret) {
-                       PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
+       }
+
+       flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+               QMAN_FQ_FLAG_TO_DCPORTAL;
+       for (i = 0; i < internals->max_nb_sessions; i++) {
+               /* create Rx qman FQs for sessions */
+               ret = qman_create_fq(0, flags, &internals->inq[i]);
+               if (unlikely(ret != 0)) {
+                       PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
                        goto init_error;
                }
        }