Imported Upstream version 16.04
[deb_dpdk.git] / drivers / crypto / qat / qat_crypto.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *       * Redistributions of source code must retain the above copyright
12  *         notice, this list of conditions and the following disclaimer.
13  *       * Redistributions in binary form must reproduce the above copyright
14  *         notice, this list of conditions and the following disclaimer in
15  *         the documentation and/or other materials provided with the
16  *         distribution.
17  *       * Neither the name of Intel Corporation nor the names of its
18  *         contributors may be used to endorse or promote products derived
19  *         from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <string.h>
38 #include <inttypes.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_memory.h>
47 #include <rte_memzone.h>
48 #include <rte_tailq.h>
49 #include <rte_ether.h>
50 #include <rte_malloc.h>
51 #include <rte_launch.h>
52 #include <rte_eal.h>
53 #include <rte_per_lcore.h>
54 #include <rte_lcore.h>
55 #include <rte_atomic.h>
56 #include <rte_branch_prediction.h>
57 #include <rte_ring.h>
58 #include <rte_mempool.h>
59 #include <rte_mbuf.h>
60 #include <rte_string_fns.h>
61 #include <rte_spinlock.h>
62 #include <rte_hexdump.h>
63
64 #include "qat_logs.h"
65 #include "qat_algs.h"
66 #include "qat_crypto.h"
67 #include "adf_transport_access_macros.h"
68
69 #define BYTE_LENGTH    8
70
/*
 * Capability table advertised to the cryptodev framework.
 * One entry per (algorithm, xform-type) pair this PMD supports; the
 * min/max/increment triples describe the key, digest, IV and AAD sizes
 * accepted by the hardware. Terminated by the END_OF_CAPABILITIES marker.
 */
static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 20,
					.max = 20,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA256 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
				.block_size = 64,
				.key_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.digest_size = {
					.min = 32,
					.max = 32,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* SHA512 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
				.block_size = 128,
				.key_size = {
					.min = 128,
					.max = 128,
					.increment = 0
				},
				.digest_size = {
					.min = 64,
					.max = 64,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES XCBC MAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.aad_size = { 0 }
			}, }
		}, }
	},
	{	/* AES GCM (AUTH) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_AES_GCM,
				/* 128/192/256-bit keys, hence increment 8 */
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.digest_size = {
					.min = 8,
					.max = 16,
					.increment = 4
				},
				.aad_size = {
					.min = 8,
					.max = 12,
					.increment = 4
				}
			}, }
		}, }
	},
	{	/* SNOW3G (UIA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.digest_size = {
					.min = 4,
					.max = 4,
					.increment = 0
				},
				.aad_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES GCM (CIPHER) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 32,
					.increment = 8
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	{	/* SNOW3G (UEA2) */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
				.block_size = 16,
				.key_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
268
/* Ring-index wrap helper: returns data masked to the ring size (see below). */
static inline uint32_t
adf_modulo(uint32_t data, uint32_t shift);

/* Builds one firmware request descriptor for op at out_msg (see below). */
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
275 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
276                 void *session)
277 {
278         struct qat_session *sess = session;
279         phys_addr_t cd_paddr = sess->cd_paddr;
280
281         PMD_INIT_FUNC_TRACE();
282         if (session) {
283                 memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
284
285                 sess->cd_paddr = cd_paddr;
286         }
287 }
288
289 static int
290 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
291 {
292         /* Cipher Only */
293         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
294                 return ICP_QAT_FW_LA_CMD_CIPHER;
295
296         /* Authentication Only */
297         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
298                 return ICP_QAT_FW_LA_CMD_AUTH;
299
300         if (xform->next == NULL)
301                 return -1;
302
303         /* Cipher then Authenticate */
304         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
305                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
306                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
307
308         /* Authenticate then Cipher */
309         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
310                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
311                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
312
313         return -1;
314 }
315
316 static struct rte_crypto_auth_xform *
317 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
318 {
319         do {
320                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
321                         return &xform->auth;
322
323                 xform = xform->next;
324         } while (xform);
325
326         return NULL;
327 }
328
329 static struct rte_crypto_cipher_xform *
330 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
331 {
332         do {
333                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
334                         return &xform->cipher;
335
336                 xform = xform->next;
337         } while (xform);
338
339         return NULL;
340 }
341 void *
342 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
343                 struct rte_crypto_sym_xform *xform, void *session_private)
344 {
345         struct qat_pmd_private *internals = dev->data->dev_private;
346
347         struct qat_session *session = session_private;
348
349         struct rte_crypto_cipher_xform *cipher_xform = NULL;
350
351         /* Get cipher xform from crypto xform chain */
352         cipher_xform = qat_get_cipher_xform(xform);
353
354         switch (cipher_xform->algo) {
355         case RTE_CRYPTO_CIPHER_AES_CBC:
356                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
357                                 &session->qat_cipher_alg) != 0) {
358                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
359                         goto error_out;
360                 }
361                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
362                 break;
363         case RTE_CRYPTO_CIPHER_AES_GCM:
364                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
365                                 &session->qat_cipher_alg) != 0) {
366                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
367                         goto error_out;
368                 }
369                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
370                 break;
371         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
372                 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
373                                         &session->qat_cipher_alg) != 0) {
374                         PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size");
375                         goto error_out;
376                 }
377                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
378                 break;
379         case RTE_CRYPTO_CIPHER_NULL:
380         case RTE_CRYPTO_CIPHER_3DES_ECB:
381         case RTE_CRYPTO_CIPHER_3DES_CBC:
382         case RTE_CRYPTO_CIPHER_AES_ECB:
383         case RTE_CRYPTO_CIPHER_AES_CTR:
384         case RTE_CRYPTO_CIPHER_AES_CCM:
385         case RTE_CRYPTO_CIPHER_KASUMI_F8:
386                 PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
387                                 cipher_xform->algo);
388                 goto error_out;
389         default:
390                 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
391                                 cipher_xform->algo);
392                 goto error_out;
393         }
394
395         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
396                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
397         else
398                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
399
400         if (qat_alg_aead_session_create_content_desc_cipher(session,
401                                                 cipher_xform->key.data,
402                                                 cipher_xform->key.length))
403                 goto error_out;
404
405         return session;
406
407 error_out:
408         rte_mempool_put(internals->sess_mp, session);
409         return NULL;
410 }
411
412
413 void *
414 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
415                 struct rte_crypto_sym_xform *xform, void *session_private)
416 {
417         struct qat_pmd_private *internals = dev->data->dev_private;
418
419         struct qat_session *session = session_private;
420
421         int qat_cmd_id;
422
423         PMD_INIT_FUNC_TRACE();
424
425         /* Get requested QAT command id */
426         qat_cmd_id = qat_get_cmd_id(xform);
427         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
428                 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
429                 goto error_out;
430         }
431         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
432         switch (session->qat_cmd) {
433         case ICP_QAT_FW_LA_CMD_CIPHER:
434         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
435                 break;
436         case ICP_QAT_FW_LA_CMD_AUTH:
437         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
438                 break;
439         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
440         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
441         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
442                 break;
443         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
444         session = qat_crypto_sym_configure_session_auth(dev, xform, session);
445         session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
446                 break;
447         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
448         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
449         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
450         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
451         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
452         case ICP_QAT_FW_LA_CMD_MGF1:
453         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
454         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
455         case ICP_QAT_FW_LA_CMD_DELIMITER:
456         PMD_DRV_LOG(ERR, "Unsupported Service %u",
457                 session->qat_cmd);
458                 goto error_out;
459         default:
460         PMD_DRV_LOG(ERR, "Unsupported Service %u",
461                 session->qat_cmd);
462                 goto error_out;
463         }
464         return session;
465
466 error_out:
467         rte_mempool_put(internals->sess_mp, session);
468         return NULL;
469 }
470
471 struct qat_session *
472 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
473                                 struct rte_crypto_sym_xform *xform,
474                                 struct qat_session *session_private)
475 {
476
477         struct qat_pmd_private *internals = dev->data->dev_private;
478         struct qat_session *session = session_private;
479         struct rte_crypto_auth_xform *auth_xform = NULL;
480         struct rte_crypto_cipher_xform *cipher_xform = NULL;
481         auth_xform = qat_get_auth_xform(xform);
482
483         switch (auth_xform->algo) {
484         case RTE_CRYPTO_AUTH_SHA1_HMAC:
485                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
486                 break;
487         case RTE_CRYPTO_AUTH_SHA256_HMAC:
488                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
489                 break;
490         case RTE_CRYPTO_AUTH_SHA512_HMAC:
491                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
492                 break;
493         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
494                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
495                 break;
496         case RTE_CRYPTO_AUTH_AES_GCM:
497                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
498                 break;
499         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
500                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
501                 break;
502         case RTE_CRYPTO_AUTH_NULL:
503         case RTE_CRYPTO_AUTH_SHA1:
504         case RTE_CRYPTO_AUTH_SHA256:
505         case RTE_CRYPTO_AUTH_SHA512:
506         case RTE_CRYPTO_AUTH_SHA224:
507         case RTE_CRYPTO_AUTH_SHA224_HMAC:
508         case RTE_CRYPTO_AUTH_SHA384:
509         case RTE_CRYPTO_AUTH_SHA384_HMAC:
510         case RTE_CRYPTO_AUTH_MD5:
511         case RTE_CRYPTO_AUTH_MD5_HMAC:
512         case RTE_CRYPTO_AUTH_AES_CCM:
513         case RTE_CRYPTO_AUTH_AES_GMAC:
514         case RTE_CRYPTO_AUTH_KASUMI_F9:
515         case RTE_CRYPTO_AUTH_AES_CMAC:
516         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
517         case RTE_CRYPTO_AUTH_ZUC_EIA3:
518                 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
519                                 auth_xform->algo);
520                 goto error_out;
521         default:
522                 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
523                                 auth_xform->algo);
524                 goto error_out;
525         }
526         cipher_xform = qat_get_cipher_xform(xform);
527
528         if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
529                         (session->qat_hash_alg ==
530                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_64))  {
531                 if (qat_alg_aead_session_create_content_desc_auth(session,
532                                 cipher_xform->key.data,
533                                 cipher_xform->key.length,
534                                 auth_xform->add_auth_data_length,
535                                 auth_xform->digest_length))
536                         goto error_out;
537         } else {
538                 if (qat_alg_aead_session_create_content_desc_auth(session,
539                                 auth_xform->key.data,
540                                 auth_xform->key.length,
541                                 auth_xform->add_auth_data_length,
542                                 auth_xform->digest_length))
543                         goto error_out;
544         }
545         return session;
546
547 error_out:
548         rte_mempool_put(internals->sess_mp, session);
549         return NULL;
550 }
551
552 unsigned qat_crypto_sym_get_session_private_size(
553                 struct rte_cryptodev *dev __rte_unused)
554 {
555         return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
556 }
557
558
559 uint16_t
560 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
561                 uint16_t nb_ops)
562 {
563         register struct qat_queue *queue;
564         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
565         register uint32_t nb_ops_sent = 0;
566         register struct rte_crypto_op **cur_op = ops;
567         register int ret;
568         uint16_t nb_ops_possible = nb_ops;
569         register uint8_t *base_addr;
570         register uint32_t tail;
571         int overflow;
572
573         if (unlikely(nb_ops == 0))
574                 return 0;
575
576         /* read params used a lot in main loop into registers */
577         queue = &(tmp_qp->tx_q);
578         base_addr = (uint8_t *)queue->base_addr;
579         tail = queue->tail;
580
581         /* Find how many can actually fit on the ring */
582         overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
583                                 - queue->max_inflights;
584         if (overflow > 0) {
585                 rte_atomic16_sub(&tmp_qp->inflights16, overflow);
586                 nb_ops_possible = nb_ops - overflow;
587                 if (nb_ops_possible == 0)
588                         return 0;
589         }
590
591         while (nb_ops_sent != nb_ops_possible) {
592                 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
593                 if (ret != 0) {
594                         tmp_qp->stats.enqueue_err_count++;
595                         if (nb_ops_sent == 0)
596                                 return 0;
597                         goto kick_tail;
598                 }
599
600                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
601                 nb_ops_sent++;
602                 cur_op++;
603         }
604 kick_tail:
605         WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
606                         queue->hw_queue_number, tail);
607         queue->tail = tail;
608         tmp_qp->stats.enqueued_count += nb_ops_sent;
609         return nb_ops_sent;
610 }
611
/**
 * Dequeue up to nb_ops completed crypto ops from the QAT RX ring.
 *
 * @param qp		queue pair (struct qat_qp *)
 * @param ops		output array for completed ops
 * @param nb_ops	capacity of ops
 *
 * @return number of ops written to ops.
 *
 * Each response slot holds the original op pointer in opaque_data.
 * Consumed slots are re-stamped with ADF_RING_EMPTY_SIG so the
 * empty-ring test above keeps working; the head CSR and the inflight
 * counter are updated once at the end of the burst.
 */
uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	struct qat_queue *queue;
	struct qat_qp *tmp_qp = (struct qat_qp *)qp;
	uint32_t msg_counter = 0;
	struct rte_crypto_op *rx_op;
	struct icp_qat_fw_comn_resp *resp_msg;

	queue = &(tmp_qp->rx_q);
	resp_msg = (struct icp_qat_fw_comn_resp *)
			((uint8_t *)queue->base_addr + queue->head);

	/* An empty slot carries the ring-empty signature in its first word */
	while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
			msg_counter != nb_ops) {
		/* Recover the op pointer stashed at enqueue time */
		rx_op = (struct rte_crypto_op *)(uintptr_t)
				(resp_msg->opaque_data);

#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
				sizeof(struct icp_qat_fw_comn_resp));
#endif
		/* Map firmware status onto the cryptodev op status */
		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
					resp_msg->comn_hdr.comn_status)) {
			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		}
		/* Mark the slot free before advancing past it */
		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
		queue->head = adf_modulo(queue->head +
				queue->msg_size,
				ADF_RING_SIZE_MODULO(queue->queue_size));
		resp_msg = (struct icp_qat_fw_comn_resp *)
					((uint8_t *)queue->base_addr +
							queue->head);
		*ops = rx_op;
		ops++;
		msg_counter++;
	}
	if (msg_counter > 0) {
		/* Tell hardware how far we consumed, release inflight slots */
		WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
					queue->hw_bundle_number,
					queue->hw_queue_number, queue->head);
		rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
		tmp_qp->stats.dequeued_count += msg_counter;
	}
	return msg_counter;
}
662
663 static inline int
664 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
665 {
666         struct qat_session *ctx;
667         struct icp_qat_fw_la_cipher_req_params *cipher_param;
668         struct icp_qat_fw_la_auth_req_params *auth_param;
669         register struct icp_qat_fw_la_bulk_req *qat_req;
670
671 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
672         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
673                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
674                                 "operation requests, op (%p) is not a "
675                                 "symmetric operation.", op);
676                 return -EINVAL;
677         }
678 #endif
679         if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
680                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
681                                 " requests, op (%p) is sessionless.", op);
682                 return -EINVAL;
683         }
684
685         if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
686                 PMD_DRV_LOG(ERR, "Session was not created for this device");
687                 return -EINVAL;
688         }
689
690         ctx = (struct qat_session *)op->sym->session->_private;
691         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
692         *qat_req = ctx->fw_req;
693         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
694
695         qat_req->comn_mid.dst_length =
696                 qat_req->comn_mid.src_length =
697                                 rte_pktmbuf_data_len(op->sym->m_src);
698
699         qat_req->comn_mid.dest_data_addr =
700                 qat_req->comn_mid.src_data_addr =
701                             rte_pktmbuf_mtophys(op->sym->m_src);
702
703         if (unlikely(op->sym->m_dst != NULL)) {
704                 qat_req->comn_mid.dest_data_addr =
705                                 rte_pktmbuf_mtophys(op->sym->m_dst);
706                 qat_req->comn_mid.dst_length =
707                                 rte_pktmbuf_data_len(op->sym->m_dst);
708         }
709
710         cipher_param = (void *)&qat_req->serv_specif_rqpars;
711         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
712
713         cipher_param->cipher_length = op->sym->cipher.data.length;
714         cipher_param->cipher_offset = op->sym->cipher.data.offset;
715         if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
716                 if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
717                                 (cipher_param->cipher_offset
718                                         % BYTE_LENGTH != 0))) {
719                         PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
720                                 "supports byte aligned values");
721                         op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
722                         return -EINVAL;
723                 }
724                 cipher_param->cipher_length >>= 3;
725                 cipher_param->cipher_offset >>= 3;
726         }
727
728         if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
729                         sizeof(cipher_param->u.cipher_IV_array))) {
730                 rte_memcpy(cipher_param->u.cipher_IV_array,
731                                 op->sym->cipher.iv.data,
732                                 op->sym->cipher.iv.length);
733         } else {
734                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
735                                 qat_req->comn_hdr.serv_specif_flags,
736                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
737                 cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
738         }
739         if (op->sym->auth.digest.phys_addr) {
740                 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
741                                 qat_req->comn_hdr.serv_specif_flags,
742                                 ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
743                 auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
744         }
745         auth_param->auth_off = op->sym->auth.data.offset;
746         auth_param->auth_len = op->sym->auth.data.length;
747         if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
748                 if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
749                                 (auth_param->auth_len % BYTE_LENGTH != 0))) {
750                         PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
751                                 "supports byte aligned values");
752                         op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
753                         return -EINVAL;
754                 }
755                 auth_param->auth_off >>= 3;
756                 auth_param->auth_len >>= 3;
757         }
758         auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
759         /* (GCM) aad length(240 max) will be at this location after precompute */
760         if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
761                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
762                 struct icp_qat_hw_auth_algo_blk *hash;
763
764                 if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)
765                         hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd);
766                 else
767                         hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd +
768                                 sizeof(struct icp_qat_hw_cipher_algo_blk));
769
770                 auth_param->u2.aad_sz = ALIGN_POW2_ROUNDUP(hash->sha.state1[
771                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
772                                         ICP_QAT_HW_GALOIS_H_SZ + 3], 16);
773                 if (op->sym->cipher.iv.length == 12) {
774                         /*
775                          * For GCM a 12 bit IV is allowed,
776                          * but we need to inform the f/w
777                          */
778                         ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
779                                 qat_req->comn_hdr.serv_specif_flags,
780                                 ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
781                 }
782         }
783         auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
784
785
786 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
787         rte_hexdump(stdout, "qat_req:", qat_req,
788                         sizeof(struct icp_qat_fw_la_bulk_req));
789         rte_hexdump(stdout, "src_data:",
790                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
791                         rte_pktmbuf_data_len(op->sym->m_src));
792         rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
793                         op->sym->cipher.iv.length);
794         rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
795                         op->sym->auth.digest.length);
796         rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
797                         op->sym->auth.aad.length);
798 #endif
799         return 0;
800 }
801
/**
 * Compute data mod (1 << shift) without a divide instruction.
 * Works for any shift < 32; the ring sizes QAT uses are powers of two,
 * so clearing the low bits this way is exact.
 */
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
	return data - ((data >> shift) << shift);
}
809
810 void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess)
811 {
812         struct qat_session *s = priv_sess;
813
814         PMD_INIT_FUNC_TRACE();
815         s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd);
816 }
817
818 int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
819 {
820         PMD_INIT_FUNC_TRACE();
821         return -ENOTSUP;
822 }
823
824 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
825 {
826         PMD_INIT_FUNC_TRACE();
827         return 0;
828 }
829
830 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
831 {
832         PMD_INIT_FUNC_TRACE();
833 }
834
835 int qat_dev_close(struct rte_cryptodev *dev)
836 {
837         int i, ret;
838
839         PMD_INIT_FUNC_TRACE();
840
841         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
842                 ret = qat_crypto_sym_qp_release(dev, i);
843                 if (ret < 0)
844                         return ret;
845         }
846
847         return 0;
848 }
849
850 void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
851                                 struct rte_cryptodev_info *info)
852 {
853         struct qat_pmd_private *internals = dev->data->dev_private;
854
855         PMD_INIT_FUNC_TRACE();
856         if (info != NULL) {
857                 info->max_nb_queue_pairs =
858                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
859                                 ADF_NUM_BUNDLES_PER_DEV;
860                 info->feature_flags = dev->feature_flags;
861                 info->capabilities = qat_pmd_capabilities;
862                 info->sym.max_nb_sessions = internals->max_nb_sessions;
863                 info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
864         }
865 }
866
867 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
868                 struct rte_cryptodev_stats *stats)
869 {
870         int i;
871         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
872
873         PMD_INIT_FUNC_TRACE();
874         if (stats == NULL) {
875                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
876                 return;
877         }
878         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
879                 if (qp[i] == NULL) {
880                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
881                         continue;
882                 }
883
884                 stats->enqueued_count += qp[i]->stats.enqueued_count;
885                 stats->dequeued_count += qp[i]->stats.enqueued_count;
886                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
887                 stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
888         }
889 }
890
891 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
892 {
893         int i;
894         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
895
896         PMD_INIT_FUNC_TRACE();
897         for (i = 0; i < dev->data->nb_queue_pairs; i++)
898                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
899         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
900 }