deb_dpdk.git: drivers/crypto/dpaa_sec/dpaa_sec.c (new upstream version 18.11-rc1)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2018 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

int dpaa_logtype_sec;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }

        /* report op status to sym->op and then free the ctx memory */
        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
        struct dpaa_sec_op_ctx *ctx;
        int retval;

        retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear SG memory. There are 16 SG entries of 16 bytes each;
         * one call to dcbz_64() clears 64 bytes, so it is called 4 times
         * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
         * each packet, and memset() is costlier than dcbz_64().
         */
        dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

        ctx->ctx_pool = ses->ctx_pool;
        ctx->vtop_offset = (size_t) ctx
                                - rte_mempool_virt2iova(ctx);

        return ctx;
}

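/*
 * Translate a virtual address to its IOVA by locating the memseg that
 * contains it and applying the offset within that segment.
 */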
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
        const struct rte_memseg *ms;

        ms = rte_mem_virt2memseg(vaddr, NULL);
        if (ms)
                return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
        return (size_t)NULL;
}

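/*
 * Translate an IOVA back to a virtual address. The dpaax IOVA table is
 * tried first as the fast path; on a miss we fall back to the slower
 * EAL memseg lookup.
 */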
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
        void *va;

        va = (void *)dpaax_iova_table_get_va(paddr);
        if (likely(va))
                return va;

        return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the caam channel as destination so that
 * all packets in this queue can be dispatched to caam.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = qm_channel_caam;
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                DPAA_SEC_ERR("qman_init_fq failed %d", ret);

        return ret;
}

/* Frames enqueued on in_fq are processed by caam, which puts the crypto
 * result into out_fq.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx:
         * sg[0] is for output,
         * sg[1] is for input.
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;

                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                ctx->op->sym->m_src->pkt_len = len;
                ctx->op->sym->m_src->data_len = len;
        }
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* The caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("unable to init caam source fq!");
                return ret;
        }

        return ret;
}

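/*
 * Session-type predicates, used to select which shared descriptor to
 * build and which per-op frame builder to call.
 */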
static inline int is_cipher_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
                (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
        return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

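/*
 * Map the session auth algorithm onto the CAAM algorithm selector:
 * IPsec protocol offload uses the OP_PCL_IPSEC_* codes, plain crypto
 * uses OP_ALG_ALGSEL_*.
 */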
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
        switch (ses->auth_alg) {
        case RTE_CRYPTO_AUTH_NULL:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_NULL : 0;
                ses->digest_length = 0;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        default:
                DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
        }
}

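/* Map the session cipher algorithm onto the CAAM algorithm selector. */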
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_NULL:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_NULL : 0;
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CTR;
                break;
        default:
                DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
        }
}

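/* Map the session AEAD algorithm onto the CAAM algorithm selector. */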
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
        switch (ses->aead_alg) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                alginfo->algtype = OP_ALG_ALGSEL_AES;
                alginfo->algmode = OP_ALG_AAI_GCM;
                break;
        default:
                DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
        }
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        caam_cipher_alg(ses, &cipherdata);
        if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                DPAA_SEC_ERR("not supported cipher alg");
                return -ENOTSUP;
        }

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;

        caam_auth_alg(ses, &authdata);
        if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                DPAA_SEC_ERR("not supported auth alg");
                return -ENOTSUP;
        }

        authdata.key = (size_t)ses->auth_key.data;
        authdata.keylen = ses->auth_key.length;
        authdata.key_enc_flags = 0;
        authdata.key_type = RTA_DATA_IMM;

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        /*
         * rta_inline_query() reports, via bits 0 and 1 of sh_desc[2],
         * whether the cipher and auth keys fit inline in the shared
         * descriptor (RTA_DATA_IMM) or must be referenced by pointer
         * (RTA_DATA_PTR).
         */
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               MIN_JOB_DESC_SIZE,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1<<1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}

/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        int32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        if (is_proto_ipsec(ses)) {
                shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
        } else if (is_cipher_only(ses)) {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported cipher alg");
                        return -ENOTSUP;
                }

                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_blkcipher(
                                                cdb->sh_desc, true,
                                                swap, &alginfo_c,
                                                NULL,
                                                ses->iv.length,
                                                ses->dir);
        } else if (is_auth_only(ses)) {
                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported auth alg");
                        return -ENOTSUP;
                }

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
                                                   swap, &alginfo_a,
                                                   !ses->dir,
                                                   ses->digest_length);
        } else if (is_aead(ses)) {
                caam_aead_alg(ses, &alginfo);
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported aead alg");
                        return -ENOTSUP;
                }
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
        } else {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported cipher alg");
                        return -ENOTSUP;
                }

                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported auth alg");
                        return -ENOTSUP;
                }

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                /* see the note on rta_inline_query() above */
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       MIN_JOB_DESC_SIZE,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1<<1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                /* auth_only_len is set to 0 here; it will be
                 * overwritten in the fd for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, &alginfo_c, &alginfo_a,
                                ses->iv.length, 0,
                                ses->digest_length, ses->dir);
        }

        if (shared_desc_len < 0) {
                DPAA_SEC_ERR("error in preparing command block");
                return shared_desc_len;
        }

        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

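/*
 * A minimal polling sketch (hypothetical application code, not part of
 * this driver): completed ops are pulled back through the cryptodev
 * burst dequeue API, which reaches dpaa_sec_deq() below via the PMD's
 * dequeue callback. dev_id and qp_id are assumed application values.
 *
 *      struct rte_crypto_op *deq_ops[DPAA_SEC_BURST];
 *      uint16_t nb = rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *                                                deq_ops, DPAA_SEC_BURST);
 */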
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, we provide exactly the
         * number requested and set the QM_VDQCR_EXACT flag. Otherwise we
         * do not set the flag; without it the dequeue can return up to two
         * more buffers than requested, so we request two less in that case.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* sg is embedded in an op ctx:
                 * sg[0] is for output,
                 * sg[1] is for input.
                 */
                job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        op->sym->m_src->pkt_len = len;
                        op->sym->m_src->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

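/*
 * Build a compound frame for an auth-only op on a scatter-gather mbuf:
 * sg[0] points to the digest output, sg[1] is an extension entry whose
 * table (starting at sg[2]) covers the mbuf segments and, for
 * verification, a saved copy of the expected digest.
 */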
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = sym->auth.data.length;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        } else {
                /* Digest calculation case */
                sg->length -= ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];
        if (is_decode(ses)) {
                /* need to extend the input to a compound frame */
                sg->extension = 1;
                qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
                sg->length = sym->auth.data.length + ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);

                sg = &cf->sg[2];
                /* hash result or digest, save digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                           ses->digest_length);
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                cpu_to_hw_sg(sg);

                /* let the hw verify the digest */
                start_addr = dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }

        return cf;
}

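/*
 * Build a compound frame for a cipher-only op on a scatter-gather
 * mbuf: the input table carries the IV followed by the payload
 * segments; the output table covers the destination segments.
 */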
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }

        if (req_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = sym->cipher.data.length;
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->cipher.data.offset;
        sg->offset = sym->cipher.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = sym->cipher.data.length + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->cipher.data.offset;
        sg->offset = sym->cipher.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

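/* Build a compound frame for a cipher-only op on a contiguous mbuf. */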
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = sym->cipher.data.length + ses->iv.length;
        qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

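/*
 * Build a compound frame for an AEAD (GCM) op on a scatter-gather
 * mbuf: the input covers IV, optional AAD and payload (plus the
 * expected digest on decrypt); the output covers the payload and, on
 * encrypt, the digest.
 */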
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (ses->auth_only_len)
                req_segs++;

        if (req_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->auth_only_len
                                                + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length + ses->auth_only_len;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset +
                                        ses->auth_only_len;
        sg->offset = sym->aead.data.offset - ses->auth_only_len;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->aead.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

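/* Build a compound frame for an AEAD (GCM) op on a contiguous mbuf. */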
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

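/*
 * Build a compound frame for a chained cipher+auth op on a
 * scatter-gather mbuf.
 */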
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (req_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->auth.data.length + ses->digest_length;
        else
                out_sg->length = sym->auth.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->auth.data.length;
        else
                in_sg->length = ses->iv.length + sym->auth.data.length
                                                + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        sg->length -= ses->digest_length;
        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->auth.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

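/*
 * Build a compound frame for a chained cipher+auth op on a contiguous
 * mbuf.
 */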
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint32_t length = 0;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;

                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->auth.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

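/*
 * Build a compound frame for IPsec protocol offload: the whole packet
 * is handed to SEC, which performs encap/decap according to the PDB
 * programmed into the session's shared descriptor.
 */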
1450 static inline struct dpaa_sec_job *
1451 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1452 {
1453         struct rte_crypto_sym_op *sym = op->sym;
1454         struct dpaa_sec_job *cf;
1455         struct dpaa_sec_op_ctx *ctx;
1456         struct qm_sg_entry *sg;
1457         phys_addr_t src_start_addr, dst_start_addr;
1458
1459         ctx = dpaa_sec_alloc_ctx(ses);
1460         if (!ctx)
1461                 return NULL;
1462         cf = &ctx->job;
1463         ctx->op = op;
1464
1465         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1466
1467         if (sym->m_dst)
1468                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1469         else
1470                 dst_start_addr = src_start_addr;
1471
1472         /* input */
1473         sg = &cf->sg[1];
1474         qm_sg_entry_set64(sg, src_start_addr);
1475         sg->length = sym->m_src->pkt_len;
1476         sg->final = 1;
1477         cpu_to_hw_sg(sg);
1478
1479         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1480         /* output */
1481         sg = &cf->sg[0];
1482         qm_sg_entry_set64(sg, dst_start_addr);
1483         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1484         cpu_to_hw_sg(sg);
1485
1486         return cf;
1487 }
1488
1489 static uint16_t
1490 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1491                        uint16_t nb_ops)
1492 {
1493         /* Function to transmit the frames to given device and queuepair */
1494         uint32_t loop;
1495         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1496         uint16_t num_tx = 0;
1497         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1498         uint32_t frames_to_send;
1499         struct rte_crypto_op *op;
1500         struct dpaa_sec_job *cf;
1501         dpaa_sec_session *ses;
1502         uint32_t auth_only_len;
1503         struct qman_fq *inq[DPAA_SEC_BURST];
1504
        while (nb_ops) {
                frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
                                DPAA_SEC_BURST : nb_ops;
                for (loop = 0; loop < frames_to_send; loop++) {
                        op = *(ops++);
                        switch (op->sess_type) {
                        case RTE_CRYPTO_OP_WITH_SESSION:
                                ses = (dpaa_sec_session *)
                                        get_sym_session_private_data(
                                                        op->sym->session,
                                                        cryptodev_driver_id);
                                break;
                        case RTE_CRYPTO_OP_SECURITY_SESSION:
                                ses = (dpaa_sec_session *)
                                        get_sec_session_private_data(
                                                        op->sym->sec_session);
                                break;
                        default:
                                DPAA_SEC_DP_ERR(
                                        "sessionless crypto op not supported");
                                frames_to_send = loop;
                                nb_ops = loop;
                                goto send_pkts;
                        }
                        if (unlikely(!ses->qp)) {
                                if (dpaa_sec_attach_sess_q(qp, ses)) {
                                        frames_to_send = loop;
                                        nb_ops = loop;
                                        goto send_pkts;
                                }
                        } else if (unlikely(ses->qp != qp)) {
                                DPAA_SEC_DP_ERR("Session attached to qp %p, "
                                        "cannot enqueue on qp %p", ses->qp, qp);
                                frames_to_send = loop;
                                nb_ops = loop;
                                goto send_pkts;
                        }

                        auth_only_len = op->sym->auth.data.length -
                                                op->sym->cipher.data.length;
                        if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
                                if (is_proto_ipsec(ses)) {
                                        cf = build_proto(op, ses);
                                } else if (is_auth_only(ses)) {
                                        cf = build_auth_only(op, ses);
                                } else if (is_cipher_only(ses)) {
                                        cf = build_cipher_only(op, ses);
                                } else if (is_aead(ses)) {
                                        cf = build_cipher_auth_gcm(op, ses);
                                        auth_only_len = ses->auth_only_len;
                                } else if (is_auth_cipher(ses)) {
                                        cf = build_cipher_auth(op, ses);
                                } else {
                                        DPAA_SEC_DP_ERR("unsupported crypto op");
                                        frames_to_send = loop;
                                        nb_ops = loop;
                                        goto send_pkts;
                                }
                        } else {
                                if (is_auth_only(ses)) {
                                        cf = build_auth_only_sg(op, ses);
                                } else if (is_cipher_only(ses)) {
                                        cf = build_cipher_only_sg(op, ses);
                                } else if (is_aead(ses)) {
                                        cf = build_cipher_auth_gcm_sg(op, ses);
                                        auth_only_len = ses->auth_only_len;
                                } else if (is_auth_cipher(ses)) {
                                        cf = build_cipher_auth_sg(op, ses);
                                } else {
                                        DPAA_SEC_DP_ERR("unsupported crypto op");
                                        frames_to_send = loop;
                                        nb_ops = loop;
                                        goto send_pkts;
                                }
                        }
                        if (unlikely(!cf)) {
                                frames_to_send = loop;
                                nb_ops = loop;
                                goto send_pkts;
                        }

                        fd = &fds[loop];
                        inq[loop] = ses->inq;
                        fd->opaque_addr = 0;
                        fd->cmd = 0;
                        qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
                        fd->_format1 = qm_fd_compound;
                        fd->length29 = 2 * sizeof(struct qm_sg_entry);
                        /* Auth_only_len is set as 0 in descriptor and it is
                         * overwritten here in the fd.cmd which will update
                         * the DPOVRD reg.
                         */
                        if (auth_only_len)
                                fd->cmd = 0x80000000 | auth_only_len;
                }
send_pkts:
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
                                        frames_to_send - loop);
                }
                nb_ops -= frames_to_send;
                num_tx += frames_to_send;
        }

        dpaa_qp->tx_pkts += num_tx;
        /* nb_ops has been drained to zero by the loop above, so count the
         * requests that were not transmitted against the original total.
         */
        dpaa_qp->tx_errs += nb_ops_in - num_tx;

        return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
{
        uint16_t num_rx;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

        num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

        dpaa_qp->rx_pkts += num_rx;
        dpaa_qp->rx_errs += nb_ops - num_rx;

        DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);

        return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
                            uint16_t qp_id)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp = NULL;

        PMD_INIT_FUNC_TRACE();

        DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);

        internals = dev->data->dev_private;
        if (qp_id >= internals->max_nb_queue_pairs) {
                DPAA_SEC_ERR("Invalid qp_id %u, max supported is %u",
                             qp_id, internals->max_nb_queue_pairs);
                return -EINVAL;
        }

        qp = &internals->qps[qp_id];
        qp->internals = NULL;
        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
                __rte_unused int socket_id,
                __rte_unused struct rte_mempool *session_pool)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp = NULL;

        DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);

        internals = dev->data->dev_private;
        if (qp_id >= internals->max_nb_queue_pairs) {
                DPAA_SEC_ERR("Invalid qp_id %u, max supported is %u",
                             qp_id, internals->max_nb_queue_pairs);
                return -EINVAL;
        }

        qp = &internals->qps[qp_id];
        qp->internals = internals;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
        PMD_INIT_FUNC_TRACE();

        return dev->data->nb_queue_pairs;
}

/** Returns the size of session structure */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        return sizeof(dpaa_sec_session);
}

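/*
 * Session initialization helpers: each copies the algorithm parameters
 * and key material from the xform into the private session, using a
 * zeroed, cache-line aligned buffer for the key. The key copies are
 * released in the session clear/destroy paths.
 */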
static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
                     struct rte_crypto_sym_xform *xform,
                     dpaa_sec_session *session)
{
        session->cipher_alg = xform->cipher.algo;
        session->iv.length = xform->cipher.iv.length;
        session->iv.offset = xform->cipher.iv.offset;
        session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
                                               RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
                DPAA_SEC_ERR("No Memory for cipher key");
                return -ENOMEM;
        }
        session->cipher_key.length = xform->cipher.key.length;

        memcpy(session->cipher_key.data, xform->cipher.key.data,
               xform->cipher.key.length);
        session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
                   struct rte_crypto_sym_xform *xform,
                   dpaa_sec_session *session)
{
        session->auth_alg = xform->auth.algo;
        session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
                                             RTE_CACHE_LINE_SIZE);
        if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
                DPAA_SEC_ERR("No Memory for auth key");
                return -ENOMEM;
        }
        session->auth_key.length = xform->auth.key.length;
        session->digest_length = xform->auth.digest_length;

        memcpy(session->auth_key.data, xform->auth.key.data,
               xform->auth.key.length);
        session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
                   struct rte_crypto_sym_xform *xform,
                   dpaa_sec_session *session)
{
        session->aead_alg = xform->aead.algo;
        session->iv.length = xform->aead.iv.length;
        session->iv.offset = xform->aead.iv.offset;
        session->auth_only_len = xform->aead.aad_length;
        session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
                                             RTE_CACHE_LINE_SIZE);
        if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
                DPAA_SEC_ERR("No Memory for aead key");
                return -ENOMEM;
        }
        session->aead_key.length = xform->aead.key.length;
        session->digest_length = xform->aead.digest_length;

        memcpy(session->aead_key.data, xform->aead.key.data,
               xform->aead.key.length);
        session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

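/*
 * Each session owns one ingress frame queue taken from a fixed per-device
 * pool of max_nb_sessions queues; inq_attach[] tracks which entries are
 * in use. Callers serialize attachment with the device spinlock.
 */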
static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
        unsigned int i;

        for (i = 0; i < qi->max_nb_sessions; i++) {
                if (qi->inq_attach[i] == 0) {
                        qi->inq_attach[i] = 1;
                        return &qi->inq[i];
                }
        }
        DPAA_SEC_WARN("All %u session queues in use", qi->max_nb_sessions);

        return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
        unsigned int i;

        for (i = 0; i < qi->max_nb_sessions; i++) {
                if (&qi->inq[i] == fq) {
                        qman_retire_fq(fq, NULL);
                        qman_oos_fq(fq);
                        qi->inq_attach[i] = 0;
                        return 0;
                }
        }
        return -1;
}

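/*
 * Bind a session to a queue pair: prepare the shared descriptor (CDB),
 * make sure the calling lcore has an affined QMAN portal, then point the
 * session ingress queue at the CDB and at the queue pair's egress queue.
 */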
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
        int ret;

        sess->qp = qp;
        ret = dpaa_sec_prep_cdb(sess);
        if (ret) {
                DPAA_SEC_ERR("Unable to prepare sec cdb");
                return -1;
        }
        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init((void *)0);
                if (ret) {
                        DPAA_SEC_ERR("Failure in affining portal");
                        return ret;
                }
        }
        ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
                               qman_fq_fqid(&qp->outq));
        if (ret)
                DPAA_SEC_ERR("Unable to init sec queue");

        return ret;
}

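/*
 * Parse the xform chain into the private session. Supported chains are
 * cipher only, auth only, cipher-then-auth (encrypt), auth-then-cipher
 * (decrypt) and single AEAD transforms; anything else is rejected.
 */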
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
                            struct rte_crypto_sym_xform *xform, void *sess)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
        dpaa_sec_session *session = sess;

        PMD_INIT_FUNC_TRACE();

        if (unlikely(sess == NULL)) {
                DPAA_SEC_ERR("invalid session struct");
                return -EINVAL;
        }
        memset(session, 0, sizeof(dpaa_sec_session));

        /* Default IV length = 0 */
        session->iv.length = 0;

        /* Cipher Only */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
                session->auth_alg = RTE_CRYPTO_AUTH_NULL;
                dpaa_sec_cipher_init(dev, xform, session);

        /* Authentication Only */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next == NULL) {
                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
                dpaa_sec_auth_init(dev, xform, session);

        /* Cipher then Authenticate */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        dpaa_sec_cipher_init(dev, xform, session);
                        dpaa_sec_auth_init(dev, xform->next, session);
                } else {
                        DPAA_SEC_ERR("Not supported: decrypt in Cipher then Auth chain");
                        return -EINVAL;
                }

        /* Authenticate then Cipher */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                        dpaa_sec_auth_init(dev, xform, session);
                        dpaa_sec_cipher_init(dev, xform->next, session);
                } else {
                        DPAA_SEC_ERR("Not supported: encrypt in Auth then Cipher chain");
                        return -EINVAL;
                }

        /* AEAD operation for AES-GCM kind of Algorithms */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                   xform->next == NULL) {
                dpaa_sec_aead_init(dev, xform, session);

        } else {
                DPAA_SEC_ERR("Invalid crypto type");
                return -EINVAL;
        }
        session->ctx_pool = internals->ctx_pool;
        rte_spinlock_lock(&internals->lock);
        session->inq = dpaa_sec_attach_rxq(internals);
        rte_spinlock_unlock(&internals->lock);
        if (session->inq == NULL) {
                DPAA_SEC_ERR("unable to attach sec queue");
                goto err1;
        }

        return 0;

err1:
        rte_free(session->cipher_key.data);
        rte_free(session->auth_key.data);
        memset(session, 0, sizeof(dpaa_sec_session));

        return -EINVAL;
}

static int
dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform,
                struct rte_cryptodev_sym_session *sess,
                struct rte_mempool *mempool)
{
        void *sess_private_data;
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (rte_mempool_get(mempool, &sess_private_data)) {
                DPAA_SEC_ERR("Couldn't get object from session mempool");
                return -ENOMEM;
        }

        ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
        if (ret != 0) {
                DPAA_SEC_ERR("failed to configure session parameters");

                /* Return session to mempool */
                rte_mempool_put(mempool, sess_private_data);
                return ret;
        }

        set_sym_session_private_data(sess, dev->driver_id,
                        sess_private_data);

        return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
{
        struct dpaa_sec_dev_private *qi = dev->data->dev_private;
        uint8_t index = dev->driver_id;
        void *sess_priv = get_sym_session_private_data(sess, index);
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        PMD_INIT_FUNC_TRACE();

        if (sess_priv) {
                struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

                if (s->inq)
                        dpaa_sec_detach_rxq(qi, s->inq);
                rte_free(s->cipher_key.data);
                rte_free(s->auth_key.data);
                memset(s, 0, sizeof(dpaa_sec_session));
                set_sym_session_private_data(sess, index, NULL);
                rte_mempool_put(sess_mp, sess_priv);
        }
}

static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
                           struct rte_security_session_conf *conf,
                           void *sess)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
        struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
        struct rte_crypto_auth_xform *auth_xform = NULL;
        struct rte_crypto_cipher_xform *cipher_xform = NULL;
        dpaa_sec_session *session = (dpaa_sec_session *)sess;

        PMD_INIT_FUNC_TRACE();

        memset(session, 0, sizeof(dpaa_sec_session));
        if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
                cipher_xform = &conf->crypto_xform->cipher;
                if (conf->crypto_xform->next)
                        auth_xform = &conf->crypto_xform->next->auth;
        } else {
                auth_xform = &conf->crypto_xform->auth;
                if (conf->crypto_xform->next)
                        cipher_xform = &conf->crypto_xform->next->cipher;
        }
        session->proto_alg = conf->protocol;

        if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
                session->cipher_key.data = rte_zmalloc(NULL,
                                                       cipher_xform->key.length,
                                                       RTE_CACHE_LINE_SIZE);
                if (session->cipher_key.data == NULL &&
                                cipher_xform->key.length > 0) {
                        DPAA_SEC_ERR("No Memory for cipher key");
                        return -ENOMEM;
                }
                memcpy(session->cipher_key.data, cipher_xform->key.data,
                                cipher_xform->key.length);
                session->cipher_key.length = cipher_xform->key.length;

                switch (cipher_xform->algo) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                case RTE_CRYPTO_CIPHER_3DES_CBC:
                case RTE_CRYPTO_CIPHER_AES_CTR:
                        break;
                default:
                        DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
                                cipher_xform->algo);
                        goto out;
                }
                session->cipher_alg = cipher_xform->algo;
        } else {
                session->cipher_key.data = NULL;
                session->cipher_key.length = 0;
                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
        }

        if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
                session->auth_key.data = rte_zmalloc(NULL,
                                                auth_xform->key.length,
                                                RTE_CACHE_LINE_SIZE);
                if (session->auth_key.data == NULL &&
                                auth_xform->key.length > 0) {
                        DPAA_SEC_ERR("No Memory for auth key");
                        rte_free(session->cipher_key.data);
                        return -ENOMEM;
                }
                memcpy(session->auth_key.data, auth_xform->key.data,
                                auth_xform->key.length);
                session->auth_key.length = auth_xform->key.length;

                switch (auth_xform->algo) {
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                case RTE_CRYPTO_AUTH_MD5_HMAC:
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                case RTE_CRYPTO_AUTH_AES_CMAC:
                        break;
                default:
                        DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
                                auth_xform->algo);
                        goto out;
                }
                session->auth_alg = auth_xform->algo;
        } else {
                session->auth_key.data = NULL;
                session->auth_key.length = 0;
                session->auth_alg = RTE_CRYPTO_AUTH_NULL;
        }

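        /*
         * Egress: a prebuilt outer IPv4 header is inlined into the encap
         * PDB (PDBOPTS_ESP_OIHI_PDB_INL), with its checksum computed up
         * front, so SEC can prepend it without per-packet work.
         */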
        if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
                memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
                                sizeof(session->ip4_hdr));
                session->ip4_hdr.ip_v = IPVERSION;
                session->ip4_hdr.ip_hl = 5;
                session->ip4_hdr.ip_len = rte_cpu_to_be_16(
                                                sizeof(session->ip4_hdr));
                session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
                session->ip4_hdr.ip_id = 0;
                session->ip4_hdr.ip_off = 0;
                session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
                session->ip4_hdr.ip_p = (ipsec_xform->proto ==
                                RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
                                : IPPROTO_AH;
                session->ip4_hdr.ip_sum = 0;
                session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
                session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
                session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
                                                (void *)&session->ip4_hdr,
                                                sizeof(struct ip));

                session->encap_pdb.options =
                        (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
                        PDBOPTS_ESP_OIHI_PDB_INL |
                        PDBOPTS_ESP_IVSRC |
                        PDBHMO_ESP_ENCAP_DTTL |
                        PDBHMO_ESP_SNR;
                session->encap_pdb.spi = ipsec_xform->spi;
                session->encap_pdb.ip_hdr_len = sizeof(struct ip);

                session->dir = DIR_ENC;
        } else if (ipsec_xform->direction ==
                        RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
                memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
                /* The outer IP header length is carried in the upper half
                 * of the decap PDB options word.
                 */
                session->decap_pdb.options = sizeof(struct ip) << 16;
                session->dir = DIR_DEC;
        } else {
                goto out;
        }
        session->ctx_pool = internals->ctx_pool;
        rte_spinlock_lock(&internals->lock);
        session->inq = dpaa_sec_attach_rxq(internals);
        rte_spinlock_unlock(&internals->lock);
        if (session->inq == NULL) {
                DPAA_SEC_ERR("unable to attach sec queue");
                goto out;
        }

        return 0;
out:
        rte_free(session->auth_key.data);
        rte_free(session->cipher_key.data);
        memset(session, 0, sizeof(dpaa_sec_session));
        return -1;
}

static int
dpaa_sec_security_session_create(void *dev,
                                 struct rte_security_session_conf *conf,
                                 struct rte_security_session *sess,
                                 struct rte_mempool *mempool)
{
        void *sess_private_data;
        struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
        int ret;

        if (rte_mempool_get(mempool, &sess_private_data)) {
                DPAA_SEC_ERR("Couldn't get object from session mempool");
                return -ENOMEM;
        }

        switch (conf->protocol) {
        case RTE_SECURITY_PROTOCOL_IPSEC:
                ret = dpaa_sec_set_ipsec_session(cdev, conf,
                                sess_private_data);
                break;
        case RTE_SECURITY_PROTOCOL_MACSEC:
                /* Return the object to the mempool before bailing out */
                rte_mempool_put(mempool, sess_private_data);
                return -ENOTSUP;
        default:
                rte_mempool_put(mempool, sess_private_data);
                return -EINVAL;
        }
        if (ret != 0) {
                DPAA_SEC_ERR("failed to configure session parameters");
                /* Return session to mempool */
                rte_mempool_put(mempool, sess_private_data);
                return ret;
        }

        set_sec_session_private_data(sess, sess_private_data);

        return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev __rte_unused,
                struct rte_security_session *sess)
{
        void *sess_priv = get_sec_session_private_data(sess);
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        PMD_INIT_FUNC_TRACE();

        if (sess_priv) {
                struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

                rte_free(s->cipher_key.data);
                rte_free(s->auth_key.data);
                /* Zero the driver-private session, not the security
                 * session handle.
                 */
                memset(s, 0, sizeof(dpaa_sec_session));
                set_sec_session_private_data(sess, NULL);
                rte_mempool_put(sess_mp, sess_priv);
        }
        return 0;
}

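/*
 * Device configure: create the per-device context pool used to carry op
 * state (dpaa_sec_op_ctx) through the hardware queues. The pool is
 * created once and reused if the device is reconfigured.
 */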
static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev,
                       struct rte_cryptodev_config *config __rte_unused)
{
        char str[20];
        struct dpaa_sec_dev_private *internals;

        PMD_INIT_FUNC_TRACE();

        internals = dev->data->dev_private;
        snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
        if (!internals->ctx_pool) {
                internals->ctx_pool = rte_mempool_create((const char *)str,
                                                        CTX_POOL_NUM_BUFS,
                                                        CTX_POOL_BUF_SIZE,
                                                        CTX_POOL_CACHE_SIZE, 0,
                                                        NULL, NULL, NULL, NULL,
                                                        SOCKET_ID_ANY, 0);
                if (!internals->ctx_pool) {
                        DPAA_SEC_ERR("%s create failed", str);
                        return -ENOMEM;
                }
        } else {
                DPAA_SEC_INFO("mempool already created for dev_id : %d",
                                dev->data->dev_id);
        }

        return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
        struct dpaa_sec_dev_private *internals;

        PMD_INIT_FUNC_TRACE();

        if (dev == NULL)
                return -ENODEV;

        internals = dev->data->dev_private;
        rte_mempool_free(internals->ctx_pool);
        internals->ctx_pool = NULL;

        return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
                       struct rte_cryptodev_info *info)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();
        if (info != NULL) {
                info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
                info->feature_flags = dev->feature_flags;
                info->capabilities = dpaa_sec_capabilities;
                info->sym.max_nb_sessions = internals->max_nb_sessions;
                info->driver_id = cryptodev_driver_id;
        }
}

static struct rte_cryptodev_ops crypto_ops = {
        .dev_configure        = dpaa_sec_dev_configure,
        .dev_start            = dpaa_sec_dev_start,
        .dev_stop             = dpaa_sec_dev_stop,
        .dev_close            = dpaa_sec_dev_close,
        .dev_infos_get        = dpaa_sec_dev_infos_get,
        .queue_pair_setup     = dpaa_sec_queue_pair_setup,
        .queue_pair_release   = dpaa_sec_queue_pair_release,
        .queue_pair_count     = dpaa_sec_queue_pair_count,
        .sym_session_get_size     = dpaa_sec_sym_session_get_size,
        .sym_session_configure    = dpaa_sec_sym_session_configure,
        .sym_session_clear        = dpaa_sec_sym_session_clear
};

static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
        return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
        .session_create = dpaa_sec_security_session_create,
        .session_update = NULL,
        .session_stats_get = NULL,
        .session_destroy = dpaa_sec_security_session_destroy,
        .set_pkt_metadata = NULL,
        .capabilities_get = dpaa_sec_capabilities_get
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
        struct dpaa_sec_dev_private *internals;

        if (dev == NULL)
                return -ENODEV;

        internals = dev->data->dev_private;
        rte_free(dev->security_ctx);

        /* In case close has been called, internals->ctx_pool would be NULL */
        rte_mempool_free(internals->ctx_pool);
        rte_free(internals);

        DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
                      dev->data->name, rte_socket_id());

        return 0;
}

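/*
 * One-time device initialization: wire up the burst ops and feature
 * flags, and, in the primary process only, allocate the security context,
 * the per-queue-pair egress queues and the pool of session ingress frame
 * queues.
 */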
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
        struct dpaa_sec_dev_private *internals;
        struct rte_security_ctx *security_instance;
        struct dpaa_sec_qp *qp;
        uint32_t i, flags;
        int ret;

        PMD_INIT_FUNC_TRACE();

        cryptodev->driver_id = cryptodev_driver_id;
        cryptodev->dev_ops = &crypto_ops;

        cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
        cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_SECURITY |
                        RTE_CRYPTODEV_FF_IN_PLACE_SGL |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
                        RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
                        RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
                        RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

        internals = cryptodev->data->dev_private;
        internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
        internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work. Only check we don't need a different
         * RX function
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                DPAA_SEC_WARN("Device already initialized by primary process");
                return 0;
        }

        /* Initialize security_ctx only for primary process*/
        security_instance = rte_malloc("rte_security_instances_ops",
                                sizeof(struct rte_security_ctx), 0);
        if (security_instance == NULL)
                return -ENOMEM;
        security_instance->device = (void *)cryptodev;
        security_instance->ops = &dpaa_sec_security_ops;
        security_instance->sess_cnt = 0;
        cryptodev->security_ctx = security_instance;

        rte_spinlock_init(&internals->lock);
        for (i = 0; i < internals->max_nb_queue_pairs; i++) {
                /* init qman fq for queue pair */
                qp = &internals->qps[i];
                ret = dpaa_sec_init_tx(&qp->outq);
                if (ret) {
                        DPAA_SEC_ERR("config tx of queue pair %d failed", i);
                        goto init_error;
                }
        }

        flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
                QMAN_FQ_FLAG_TO_DCPORTAL;
        for (i = 0; i < internals->max_nb_sessions; i++) {
                /* create rx qman fq for sessions*/
                ret = qman_create_fq(0, flags, &internals->inq[i]);
                if (unlikely(ret != 0)) {
                        DPAA_SEC_ERR("sec qman_create_fq failed");
                        goto init_error;
                }
        }

        RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
        return 0;

init_error:
        DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);

        dpaa_sec_uninit(cryptodev);
        return -EFAULT;
}

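/*
 * Bus probe: allocate and register the cryptodev. The RTA descriptor
 * library needs the SEC era; if it is not already set, it is read from
 * the "fsl,sec-era" property of the CAAM node in the device tree.
 */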
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
                                struct rte_dpaa_device *dpaa_dev)
{
        struct rte_cryptodev *cryptodev;
        char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        int retval;

        snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
                 dpaa_dev->id.dev_id);

        cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
        if (cryptodev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                cryptodev->data->dev_private = rte_zmalloc_socket(
                                        "cryptodev private structure",
                                        sizeof(struct dpaa_sec_dev_private),
                                        RTE_CACHE_LINE_SIZE,
                                        rte_socket_id());

                if (cryptodev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private "
                                        "device data");
        }

        dpaa_dev->crypto_dev = cryptodev;
        cryptodev->device = &dpaa_dev->device;

        /* init user callbacks */
        TAILQ_INIT(&(cryptodev->link_intr_cbs));

        /* if sec device version is not configured */
        if (!rta_get_sec_era()) {
                const struct device_node *caam_node;

                for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
                        const uint32_t *prop = of_get_property(caam_node,
                                        "fsl,sec-era",
                                        NULL);
                        if (prop) {
                                rta_set_sec_era(
                                        INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
                                break;
                        }
                }
        }

        /* Invoke PMD device initialization function */
        retval = dpaa_sec_dev_init(cryptodev);
        if (retval == 0)
                return 0;

        /* In case of error, cleanup is done */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(cryptodev->data->dev_private);

        rte_cryptodev_pmd_release_device(cryptodev);

        return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
        struct rte_cryptodev *cryptodev;
        int ret;

        cryptodev = dpaa_dev->crypto_dev;
        if (cryptodev == NULL)
                return -ENODEV;

        ret = dpaa_sec_uninit(cryptodev);
        if (ret)
                return ret;

        return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
        .drv_type = FSL_DPAA_CRYPTO,
        .driver = {
                .name = "DPAA SEC PMD"
        },
        .probe = cryptodev_dpaa_sec_probe,
        .remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
                cryptodev_driver_id);

RTE_INIT(dpaa_sec_init_log)
{
        dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
        if (dpaa_logtype_sec >= 0)
                rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
}