/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of NXP nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }

        /* report op status to the crypto op and then free the ctx memory */
        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
        struct dpaa_sec_op_ctx *ctx;
        int retval;

        retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
        if (!ctx || retval) {
                PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes, so it is called four
         * times to clear all the SG entries. dpaa_sec_alloc_ctx() is
         * called for each packet, and memset() is costlier than
         * dcbz_64().
         */
        dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

        ctx->ctx_pool = ses->ctx_pool;

        return ctx;
}

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
        const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
        uint64_t vaddr_64, paddr;
        int i;

        vaddr_64 = (uint64_t)vaddr;
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
                if (vaddr_64 >= memseg[i].addr_64 &&
                    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
                        paddr = memseg[i].iova +
                                (vaddr_64 - memseg[i].addr_64);

                        return (rte_iova_t)paddr;
                }
        }
        return (rte_iova_t)(NULL);
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
        const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
        int i;

        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
                if (paddr >= memseg[i].iova &&
                    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
                        return (void *)(memseg[i].addr_64 +
                                        (paddr - memseg[i].iova));
        }
        return NULL;
}

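/* Editor's annotation (not upstream code): the two helpers above do a
 * linear walk of the EAL memseg table to translate between virtual and
 * IOVA addresses. A minimal usage sketch, assuming 'vaddr' points into
 * DPDK-managed memory:
 *
 *        rte_iova_t iova = dpaa_mem_vtop(vaddr);  // 0 if vaddr unmapped
 *        void *va = dpaa_mem_ptov(iova);          // NULL if not found
 *
 * The scan is O(number of memsegs), which is assumed to stay small.
 */
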
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
                   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as its destination channel
 * so that all packets enqueued to it are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
                QMAN_FQ_FLAG_TO_DCPORTAL;

        ret = qman_create_fq(0, flags, fq_in);
        if (unlikely(ret != 0)) {
                PMD_INIT_LOG(ERR, "qman_create_fq failed");
                return ret;
        }

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = qm_channel_caam;
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                PMD_INIT_LOG(ERR, "qman_init_fq failed");

        return ret;
}

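/* Editor's annotation (not upstream code): a sketch of the data path set
 * up by dpaa_sec_init_rx()/dpaa_sec_init_tx() for each queue pair:
 *
 *        enqueue_burst --> inq (dest = qm_channel_caam) --> SEC/CAAM
 *        dequeue_burst <-- outq (dqrr_out_fq_cb_rx)     <-- SEC/CAAM
 *
 * The shared-descriptor address (hwdesc) goes into context_a of the
 * input FQ and the output FQID into context_b, which is how CAAM knows
 * where to return the results.
 */
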
/* Jobs are enqueued to in_fq and CAAM puts the crypto result into out_fq. */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx,
         * sg[0] is for output
         * sg[1] for input
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                PMD_INIT_LOG(ERR, "qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                PMD_INIT_LOG(ERR, "unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
        switch (ses->auth_alg) {
        case RTE_CRYPTO_AUTH_NULL:
                ses->digest_length = 0;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
        }
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_NULL:
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                alginfo_c->algtype = OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                alginfo_c->algtype = OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CTR;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
        }
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
        switch (ses->aead_alg) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                alginfo->algtype = OP_ALG_ALGSEL_AES;
                alginfo->algmode = OP_ALG_AAI_GCM;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
        }
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        uint32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->qp->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        if (is_cipher_only(ses)) {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported cipher alg\n");
                        return -ENOTSUP;
                }

                alginfo_c.key = (uint64_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_blkcipher(
                                                cdb->sh_desc, true,
                                                swap, &alginfo_c,
                                                NULL,
                                                ses->iv.length,
                                                ses->dir);
        } else if (is_auth_only(ses)) {
                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported auth alg\n");
                        return -ENOTSUP;
                }

                alginfo_a.key = (uint64_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
                                                   swap, &alginfo_a,
                                                   !ses->dir,
                                                   ses->digest_length);
        } else if (is_aead(ses)) {
                caam_aead_alg(ses, &alginfo);
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported aead alg\n");
                        return -ENOTSUP;
                }
                alginfo.key = (uint64_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
        } else {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported cipher alg\n");
                        return -ENOTSUP;
                }

                alginfo_c.key = (uint64_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported auth alg\n");
                        return -ENOTSUP;
                }

                alginfo_a.key = (uint64_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       MIN_JOB_DESC_SIZE,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (uint64_t)dpaa_mem_vtop(
                                                        (void *)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1 << 1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (uint64_t)dpaa_mem_vtop(
                                                        (void *)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;

                /* Auth_only_len is set as 0 here and it will be overwritten
                 * in the fd for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, &alginfo_c, &alginfo_a,
                                ses->iv.length, 0,
                                ses->digest_length, ses->dir);
        }
        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

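/* Editor's annotation (not upstream code): a mental model of the CDB
 * built above, derived from the field accesses in this file:
 *
 *        cdb->sh_hdr     header; hi.field.idlen = shared descriptor length
 *        cdb->sh_desc[]  RTA-constructed shared descriptor commands
 *
 * For the cipher+auth case, rta_inline_query() decides per key whether
 * it fits inline in the descriptor (RTA_DATA_IMM) or must be referenced
 * by IOVA pointer (RTA_DATA_PTR).
 */
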
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
        unsigned int pkts = 0;
        int ret;
        struct qm_mcr_queryfq_np np;
        enum qman_fq_state state;
        uint32_t flags;
        uint32_t vdqcr;

        qman_query_fq_np(fq, &np);
        if (np.frm_cnt) {
                vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
                if (exact)
                        vdqcr |= QM_VDQCR_EXACT;
                ret = qman_volatile_dequeue(fq, 0, vdqcr);
                if (ret)
                        return 0;
                do {
                        pkts += qman_poll_dqrr(len);
                        qman_fq_state(fq, &state, &flags);
                } while (flags & QMAN_FQ_STATE_VDQCR);
        }
        return pkts;
}

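/* Editor's annotation: dpaa_volatile_deq() issues a volatile dequeue
 * command (VDQCR) for up to 'len' frames and polls the portal DQRR until
 * the VDQCR completes; each completed frame is delivered through
 * dqrr_out_fq_cb_rx() into the per-thread dpaa_sec_ops[] array.
 */
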
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;

        fq = &qp->outq;
        dpaa_sec_op_nb = 0;
        dpaa_sec_ops = ops;

        if (unlikely(nb_ops > DPAA_SEC_BURST))
                nb_ops = DPAA_SEC_BURST;

        return dpaa_volatile_deq(fq, nb_ops, 1);
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         start of mbuf data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];
        if (is_decode(ses)) {
                /* need to extend the input to a compound frame */
                sg->extension = 1;
                qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
                sg->length = sym->auth.data.length + ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);

                sg = &cf->sg[2];
                /* hash result or digest, save digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                           ses->digest_length);
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                cpu_to_hw_sg(sg);

                /* let's check digest by hw */
                start_addr = dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }

        return cf;
}

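/* Editor's annotation (not upstream code): SG layout produced by
 * build_auth_only(), using the compound-frame convention sg[0] = output,
 * sg[1] = input:
 *
 *        generate: sg[0] -> digest buffer (written by SEC)
 *                  sg[1] -> data to authenticate (final)
 *        verify:   sg[0] -> digest buffer
 *                  sg[1] -> extension -> sg[2] = data,
 *                                       sg[3] = saved digest copy (final)
 *
 * so on verify the hardware can compare against the received ICV.
 */
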
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = sym->cipher.data.length + ses->iv.length;
        qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

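/* Editor's annotation (not upstream code): cipher-only jobs always use
 * an extended input frame so the IV can be prepended without copying
 * packet data:
 *
 *        sg[0] (output) -> dst + cipher offset, len = IV + data
 *        sg[1] (input)  -> extension -> sg[2] = IV, sg[3] = src (final)
 */
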
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

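/* Editor's annotation (not upstream code): for GCM the input side
 * optionally carries the AAD between the IV and the payload, and on
 * decode the received tag is copied into ctx->digest and appended so the
 * hardware can verify it:
 *
 *        input : IV [+ AAD] + payload [+ digest copy on decode]
 *        output: payload area widened by auth_only_len in front
 *                [+ digest on encode]
 */
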
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint32_t length = 0;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;

                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->auth.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
        struct dpaa_sec_job *cf;
        dpaa_sec_session *ses;
        struct qm_fd fd;
        int ret;
        uint32_t auth_only_len = op->sym->auth.data.length -
                                op->sym->cipher.data.length;

        ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
                                        cryptodev_driver_id);

        if (unlikely(!qp->ses || qp->ses != ses)) {
                qp->ses = ses;
                ses->qp = qp;
                ret = dpaa_sec_prep_cdb(ses);
                if (ret)
                        return ret;
        }

        /* Segmented buffers are not supported. */
        if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
                op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                return -ENOTSUP;
        }
        if (is_auth_only(ses)) {
                cf = build_auth_only(op, ses);
        } else if (is_cipher_only(ses)) {
                cf = build_cipher_only(op, ses);
        } else if (is_aead(ses)) {
                cf = build_cipher_auth_gcm(op, ses);
                auth_only_len = ses->auth_only_len;
        } else if (is_auth_cipher(ses)) {
                cf = build_cipher_auth(op, ses);
        } else {
                PMD_TX_LOG(ERR, "not supported sec op");
                return -ENOTSUP;
        }
        if (unlikely(!cf))
                return -ENOMEM;

        memset(&fd, 0, sizeof(struct qm_fd));
        qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
        fd._format1 = qm_fd_compound;
        fd.length29 = 2 * sizeof(struct qm_sg_entry);
        /* Auth_only_len is set as 0 in the descriptor and is overwritten
         * here in fd.cmd, which updates the DPOVRD register.
         */
        if (auth_only_len)
                fd.cmd = 0x80000000 | auth_only_len;
        do {
                ret = qman_enqueue(&qp->inq, &fd, 0);
        } while (ret != 0);

        return 0;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
{
        /* Function to transmit the frames to given device and queue pair */
        uint32_t loop;
        int32_t ret;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
        uint16_t num_tx = 0;

        if (unlikely(nb_ops == 0))
                return 0;

        /* Prepare each packet which is to be sent */
        for (loop = 0; loop < nb_ops; loop++) {
                if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
                        PMD_TX_LOG(ERR, "sessionless crypto op not supported");
                        return 0;
                }
                ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
                if (!ret)
                        num_tx++;
        }
        dpaa_qp->tx_pkts += num_tx;
        dpaa_qp->tx_errs += nb_ops - num_tx;

        return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
{
        uint16_t num_rx;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

        num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

        dpaa_qp->rx_pkts += num_rx;
        dpaa_qp->rx_errs += nb_ops - num_rx;

        PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

        return num_rx;
}

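/* Editor's hedged usage sketch (not part of the driver): applications
 * reach these handlers through the generic cryptodev burst API, e.g.
 *
 *        uint16_t n;
 *        n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *        ...
 *        n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * where dev_id, qp_id, ops and nb_ops are application-side names used
 * here for illustration only. Since the qp is lockless (see
 * dpaa_sec_deq()), each qp must be driven by a single lcore at a time.
 */
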
/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
                            uint16_t qp_id)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp = NULL;

        PMD_INIT_FUNC_TRACE();

        PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

        internals = dev->data->dev_private;
        if (qp_id >= internals->max_nb_queue_pairs) {
                PMD_INIT_LOG(ERR, "Max supported qpid %d",
                             internals->max_nb_queue_pairs);
                return -EINVAL;
        }

        qp = &internals->qps[qp_id];
        qp->internals = NULL;
        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
                __rte_unused int socket_id,
                __rte_unused struct rte_mempool *session_pool)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp = NULL;

        PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
                     dev, qp_id, qp_conf);

        internals = dev->data->dev_private;
        if (qp_id >= internals->max_nb_queue_pairs) {
                PMD_INIT_LOG(ERR, "Max supported qpid %d",
                             internals->max_nb_queue_pairs);
                return -EINVAL;
        }

        qp = &internals->qps[qp_id];
        qp->internals = internals;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
                          __rte_unused uint16_t queue_pair_id)
{
        PMD_INIT_FUNC_TRACE();

        return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
                         __rte_unused uint16_t queue_pair_id)
{
        PMD_INIT_FUNC_TRACE();

        return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
        PMD_INIT_FUNC_TRACE();

        return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
                     struct rte_crypto_sym_xform *xform,
                     dpaa_sec_session *session)
{
        session->cipher_alg = xform->cipher.algo;
        session->iv.length = xform->cipher.iv.length;
        session->iv.offset = xform->cipher.iv.offset;
        session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
                                               RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
                PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
                return -ENOMEM;
        }
        session->cipher_key.length = xform->cipher.key.length;

        memcpy(session->cipher_key.data, xform->cipher.key.data,
               xform->cipher.key.length);
        session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
                   struct rte_crypto_sym_xform *xform,
                   dpaa_sec_session *session)
{
        session->auth_alg = xform->auth.algo;
        session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
                                             RTE_CACHE_LINE_SIZE);
        if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
                PMD_INIT_LOG(ERR, "No Memory for auth key\n");
                return -ENOMEM;
        }
        session->auth_key.length = xform->auth.key.length;
        session->digest_length = xform->auth.digest_length;

        memcpy(session->auth_key.data, xform->auth.key.data,
               xform->auth.key.length);
        session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
                   struct rte_crypto_sym_xform *xform,
                   dpaa_sec_session *session)
{
        session->aead_alg = xform->aead.algo;
        session->iv.length = xform->aead.iv.length;
        session->iv.offset = xform->aead.iv.offset;
        session->auth_only_len = xform->aead.aad_length;
        session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
                                             RTE_CACHE_LINE_SIZE);
        if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
                PMD_INIT_LOG(ERR, "No Memory for aead key\n");
                return -ENOMEM;
        }
        session->aead_key.length = xform->aead.key.length;
        session->digest_length = xform->aead.digest_length;

        memcpy(session->aead_key.data, xform->aead.key.data,
               xform->aead.key.length);
        session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
        dpaa_sec_session *sess = ses;
        struct dpaa_sec_qp *qp;

        PMD_INIT_FUNC_TRACE();

        qp = dev->data->queue_pairs[qp_id];
        if (qp->ses != NULL) {
                PMD_INIT_LOG(ERR, "qp in-use by another session\n");
                return -EBUSY;
        }

        qp->ses = sess;
        sess->qp = qp;

        return dpaa_sec_prep_cdb(sess);
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
        dpaa_sec_session *sess = ses;
        struct dpaa_sec_qp *qp;

        PMD_INIT_FUNC_TRACE();

        qp = dev->data->queue_pairs[qp_id];
        if (qp->ses != NULL) {
                qp->ses = NULL;
                sess->qp = NULL;
                return 0;
        }

        PMD_DRV_LOG(ERR, "No session attached to qp");
        return -EINVAL;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
                            struct rte_crypto_sym_xform *xform, void *sess)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
        dpaa_sec_session *session = sess;

        PMD_INIT_FUNC_TRACE();

        if (unlikely(sess == NULL)) {
                RTE_LOG(ERR, PMD, "invalid session struct\n");
                return -EINVAL;
        }

        /* Default IV length = 0 */
        session->iv.length = 0;

        /* Cipher Only */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
                session->auth_alg = RTE_CRYPTO_AUTH_NULL;
                dpaa_sec_cipher_init(dev, xform, session);

        /* Authentication Only */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next == NULL) {
                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
                dpaa_sec_auth_init(dev, xform, session);

        /* Cipher then Authenticate */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        dpaa_sec_cipher_init(dev, xform, session);
                        dpaa_sec_auth_init(dev, xform->next, session);
                } else {
                        PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
                        return -EINVAL;
                }

        /* Authenticate then Cipher */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                        dpaa_sec_auth_init(dev, xform, session);
                        dpaa_sec_cipher_init(dev, xform->next, session);
                } else {
                        PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
                        return -EINVAL;
                }

        /* AEAD operation for AES-GCM kind of Algorithms */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                   xform->next == NULL) {
                dpaa_sec_aead_init(dev, xform, session);

        } else {
                PMD_DRV_LOG(ERR, "Invalid crypto type");
                return -EINVAL;
        }
        session->ctx_pool = internals->ctx_pool;

        return 0;
}

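/* Editor's hedged usage sketch (not part of the driver): with the DPDK
 * 17.11 session API, a session that ends up in
 * dpaa_sec_set_session_parameters() is typically created as
 *
 *        struct rte_cryptodev_sym_session *sess =
 *                rte_cryptodev_sym_session_create(sess_mp);
 *        rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp);
 *
 * where sess_mp, dev_id and xform are application-side names used for
 * illustration only; the init call lands in
 * dpaa_sec_session_configure() below.
 */
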
static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform,
                struct rte_cryptodev_sym_session *sess,
                struct rte_mempool *mempool)
{
        void *sess_private_data;
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (rte_mempool_get(mempool, &sess_private_data)) {
                CDEV_LOG_ERR(
                        "Couldn't get object from session mempool");
                return -ENOMEM;
        }

        ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
                                "session parameters");

                /* Return session to mempool */
                rte_mempool_put(mempool, sess_private_data);
                return ret;
        }

        set_session_private_data(sess, dev->driver_id,
                        sess_private_data);

        return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
{
        PMD_INIT_FUNC_TRACE();
        uint8_t index = dev->driver_id;
        void *sess_priv = get_session_private_data(sess, index);
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        if (sess_priv) {
                rte_free(s->cipher_key.data);
                rte_free(s->auth_key.data);
                memset(s, 0, sizeof(dpaa_sec_session));
                struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
                set_session_private_data(sess, index, NULL);
                rte_mempool_put(sess_mp, sess_priv);
        }
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
                       struct rte_cryptodev_config *config __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
                       struct rte_cryptodev_info *info)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();
        if (info != NULL) {
                info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
                info->feature_flags = dev->feature_flags;
                info->capabilities = dpaa_sec_capabilities;
                info->sym.max_nb_sessions = internals->max_nb_sessions;
                info->sym.max_nb_sessions_per_qp =
                        RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
                info->driver_id = cryptodev_driver_id;
        }
}

static struct rte_cryptodev_ops crypto_ops = {
        .dev_configure        = dpaa_sec_dev_configure,
        .dev_start            = dpaa_sec_dev_start,
        .dev_stop             = dpaa_sec_dev_stop,
        .dev_close            = dpaa_sec_dev_close,
        .dev_infos_get        = dpaa_sec_dev_infos_get,
        .queue_pair_setup     = dpaa_sec_queue_pair_setup,
        .queue_pair_release   = dpaa_sec_queue_pair_release,
        .queue_pair_start     = dpaa_sec_queue_pair_start,
        .queue_pair_stop      = dpaa_sec_queue_pair_stop,
        .queue_pair_count     = dpaa_sec_queue_pair_count,
        .session_get_size     = dpaa_sec_session_get_size,
        .session_configure    = dpaa_sec_session_configure,
        .session_clear        = dpaa_sec_session_clear,
        .qp_attach_session    = dpaa_sec_qp_attach_sess,
        .qp_detach_session    = dpaa_sec_qp_detach_sess,
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
        struct dpaa_sec_dev_private *internals;

        /* check dev before dereferencing dev->data */
        if (dev == NULL)
                return -ENODEV;

        internals = dev->data->dev_private;
        rte_mempool_free(internals->ctx_pool);
        rte_free(internals);

        PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
                     dev->data->name, rte_socket_id());

        return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp;
        uint32_t i;
        int ret;
        char str[20];

        PMD_INIT_FUNC_TRACE();

        cryptodev->driver_id = cryptodev_driver_id;
        cryptodev->dev_ops = &crypto_ops;

        cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
        cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

        internals = cryptodev->data->dev_private;
        internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
        internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

        for (i = 0; i < internals->max_nb_queue_pairs; i++) {
                /* init qman fq for queue pair */
                qp = &internals->qps[i];
                ret = dpaa_sec_init_tx(&qp->outq);
                if (ret) {
                        PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
                        goto init_error;
                }
                ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
                                       qman_fq_fqid(&qp->outq));
                if (ret) {
                        PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
                        goto init_error;
                }
        }

        sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
        internals->ctx_pool = rte_mempool_create((const char *)str,
                        CTX_POOL_NUM_BUFS,
                        CTX_POOL_BUF_SIZE,
                        CTX_POOL_CACHE_SIZE, 0,
                        NULL, NULL, NULL, NULL,
                        SOCKET_ID_ANY, 0);
        if (!internals->ctx_pool) {
                RTE_LOG(ERR, PMD, "%s create failed\n", str);
                goto init_error;
        }

        PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
        return 0;

init_error:
        PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

        dpaa_sec_uninit(cryptodev);
        return -EFAULT;
}

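/* Editor's annotation: dpaa_sec_dev_init() pre-creates one inq/outq FQ
 * pair per queue pair (up to RTE_MAX_NB_SEC_QPS) plus a per-device
 * mempool of CTX_POOL_NUM_BUFS dpaa_sec_op_ctx objects;
 * dpaa_sec_queue_pair_setup() later only hands out these
 * pre-initialized qps.
 */
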
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
                                struct rte_dpaa_device *dpaa_dev)
{
        struct rte_cryptodev *cryptodev;
        char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

        int retval;

        sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

        cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
        if (cryptodev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                cryptodev->data->dev_private = rte_zmalloc_socket(
                                        "cryptodev private structure",
                                        sizeof(struct dpaa_sec_dev_private),
                                        RTE_CACHE_LINE_SIZE,
                                        rte_socket_id());

                if (cryptodev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private "
                                        "device data");
        }

        dpaa_dev->crypto_dev = cryptodev;
        cryptodev->device = &dpaa_dev->device;
        cryptodev->device->driver = &dpaa_drv->driver;

        /* init user callbacks */
        TAILQ_INIT(&(cryptodev->link_intr_cbs));

        /* if sec device version is not configured */
        if (!rta_get_sec_era()) {
                const struct device_node *caam_node;

                for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
                        const uint32_t *prop = of_get_property(caam_node,
                                        "fsl,sec-era",
                                        NULL);
                        if (prop) {
                                rta_set_sec_era(
                                        INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
                                break;
                        }
                }
        }

        /* Invoke PMD device initialization function */
        retval = dpaa_sec_dev_init(cryptodev);
        if (retval == 0)
                return 0;

        /* In case of error, cleanup is done */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(cryptodev->data->dev_private);

        rte_cryptodev_pmd_release_device(cryptodev);

        return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
        struct rte_cryptodev *cryptodev;
        int ret;

        cryptodev = dpaa_dev->crypto_dev;
        if (cryptodev == NULL)
                return -ENODEV;

        ret = dpaa_sec_uninit(cryptodev);
        if (ret)
                return ret;

        return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
        .drv_type = FSL_DPAA_CRYPTO,
        .driver = {
                .name = "DPAA SEC PMD"
        },
        .probe = cryptodev_dpaa_sec_probe,
        .remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
                cryptodev_driver_id);