1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *       * Redistributions of source code must retain the above copyright
12  *         notice, this list of conditions and the following disclaimer.
13  *       * Redistributions in binary form must reproduce the above copyright
14  *         notice, this list of conditions and the following disclaimer in
15  *         the documentation and/or other materials provided with the
16  *         distribution.
17  *       * Neither the name of Intel Corporation nor the names of its
18  *         contributors may be used to endorse or promote products derived
19  *         from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdio.h>
35 #include <stdlib.h>
36 #include <strings.h>
37 #include <string.h>
38 #include <inttypes.h>
39 #include <errno.h>
40 #include <sys/queue.h>
41 #include <stdarg.h>
42
43 #include <rte_common.h>
44 #include <rte_log.h>
45 #include <rte_debug.h>
46 #include <rte_memory.h>
47 #include <rte_tailq.h>
48 #include <rte_malloc.h>
49 #include <rte_launch.h>
50 #include <rte_eal.h>
51 #include <rte_per_lcore.h>
52 #include <rte_lcore.h>
53 #include <rte_branch_prediction.h>
54 #include <rte_mempool.h>
55 #include <rte_mbuf.h>
56 #include <rte_string_fns.h>
57 #include <rte_spinlock.h>
58 #include <rte_hexdump.h>
59 #include <rte_crypto_sym.h>
60 #include <rte_byteorder.h>
61 #include <rte_pci.h>
62 #include <rte_bus_pci.h>
63
64 #include <openssl/evp.h>
65
66 #include "qat_logs.h"
67 #include "qat_algs.h"
68 #include "qat_crypto.h"
69 #include "adf_transport_access_macros.h"
70
71 #define BYTE_LENGTH    8
72
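/** Check the device capability list for support of a symmetric
 *  cipher algorithm
 */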
73 static int
74 qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
75                 struct qat_pmd_private *internals) {
76         int i = 0;
77         const struct rte_cryptodev_capabilities *capability;
78
79         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
80                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
81                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
82                         continue;
83
84                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
85                         continue;
86
87                 if (capability->sym.cipher.algo == algo)
88                         return 1;
89         }
90         return 0;
91 }
92
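/** Check the device capability list for support of a symmetric
 *  auth algorithm
 */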
93 static int
94 qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
95                 struct qat_pmd_private *internals) {
96         int i = 0;
97         const struct rte_cryptodev_capabilities *capability;
98
99         while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
100                         RTE_CRYPTO_OP_TYPE_UNDEFINED) {
101                 if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
102                         continue;
103
104                 if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
105                         continue;
106
107                 if (capability->sym.auth.algo == algo)
108                         return 1;
109         }
110         return 0;
111 }
112
113 /** Encrypt a single partial block
114  *  Depends on openssl libcrypto
115  *  Uses ECB+XOR to do CFB encryption, same result, more performant
116  */
117 static inline int
118 bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
119                 uint8_t *iv, int ivlen, int srclen,
120                 void *bpi_ctx)
121 {
122         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
123         int encrypted_ivlen;
124         uint8_t encrypted_iv[16];
125         int i;
126
127         /* ECB method: encrypt the IV, then XOR this with plaintext */
128         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
129                                                                 <= 0)
130                 goto cipher_encrypt_err;
131
132         for (i = 0; i < srclen; i++)
133                 *(dst+i) = *(src+i)^(encrypted_iv[i]);
134
135         return 0;
136
137 cipher_encrypt_err:
138         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
139         return -EINVAL;
140 }
141
142 /** Decrypt a single partial block
143  *  Depends on openssl libcrypto
144  *  Uses ECB+XOR to do CFB decryption, same result, more performant
145  */
146 static inline int
147 bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
148                 uint8_t *iv, int ivlen, int srclen,
149                 void *bpi_ctx)
150 {
151         EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
152         int encrypted_ivlen;
153         uint8_t encrypted_iv[16];
154         int i;
155
156                 /* ECB method: encrypt (not decrypt!) the IV, then XOR with ciphertext */
157         if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
158                                                                 <= 0)
159                 goto cipher_decrypt_err;
160
161         for (i = 0; i < srclen; i++)
162                 *(dst+i) = *(src+i)^(encrypted_iv[i]);
163
164         return 0;
165
166 cipher_decrypt_err:
167         PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
168         return -EINVAL;
169 }
170
171 /** Creates an AES or DES context in ECB mode
172  *  Depends on openssl libcrypto
173  */
174 static int
175 bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
176                 enum rte_crypto_cipher_operation direction __rte_unused,
177                 uint8_t *key, void **ctx)
178 {
179         const EVP_CIPHER *algo = NULL;
180         int ret;
181         *ctx = EVP_CIPHER_CTX_new();
182
183         if (*ctx == NULL) {
184                 ret = -ENOMEM;
185                 goto ctx_init_err;
186         }
187
188         if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
189                 algo = EVP_des_ecb();
190         else
191                 algo = EVP_aes_128_ecb();
192
193         /* IV will be ECB encrypted whether direction is encrypt or decrypt */
194         if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
195                 ret = -EINVAL;
196                 goto ctx_init_err;
197         }
198
199         return 0;
200
201 ctx_init_err:
202         if (*ctx != NULL)
203                 EVP_CIPHER_CTX_free(*ctx);
204         return ret;
205 }
206
207 /** Frees a context previously created
208  *  Depends on openssl libcrypto
209  */
210 static void
211 bpi_cipher_ctx_free(void *bpi_ctx)
212 {
213         if (bpi_ctx != NULL)
214                 EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
215 }
216
217 static inline uint32_t
218 adf_modulo(uint32_t data, uint32_t shift);
219
220 static inline int
221 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
222                 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
223
224 void
225 qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
226                 struct rte_cryptodev_sym_session *sess)
227 {
228         PMD_INIT_FUNC_TRACE();
229         uint8_t index = dev->driver_id;
230         void *sess_priv = get_session_private_data(sess, index);
231         struct qat_session *s = (struct qat_session *)sess_priv;
232
233         if (sess_priv) {
234                 if (s->bpi_ctx)
235                         bpi_cipher_ctx_free(s->bpi_ctx);
236                 memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
237                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
238                 set_session_private_data(sess, index, NULL);
239                 rte_mempool_put(sess_mp, sess_priv);
240         }
241 }
242
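/** Map a crypto xform chain to the matching QAT firmware LA command id,
 *  or return -1 if the chain is not supported
 */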
243 static int
244 qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
245 {
246         /* Cipher Only */
247         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
248                 return ICP_QAT_FW_LA_CMD_CIPHER;
249
250         /* Authentication Only */
251         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
252                 return ICP_QAT_FW_LA_CMD_AUTH;
253
254         /* AEAD */
255         if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
256                 /* AES-GCM and AES-CCM work in different orders:
257                  * GCM encrypts first and then generates the hash, whereas
258                  * CCM generates the hash first and then encrypts.
259                  * A similar relation applies to decryption.
260                  */
261                 if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
262                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
263                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
264                         else
265                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
266                 else
267                         if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
268                                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
269                         else
270                                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
271         }
272
273         if (xform->next == NULL)
274                 return -1;
275
276         /* Cipher then Authenticate */
277         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
278                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
279                 return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
280
281         /* Authenticate then Cipher */
282         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
283                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
284                 return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
285
286         return -1;
287 }
288
289 static struct rte_crypto_auth_xform *
290 qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
291 {
292         do {
293                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
294                         return &xform->auth;
295
296                 xform = xform->next;
297         } while (xform);
298
299         return NULL;
300 }
301
302 static struct rte_crypto_cipher_xform *
303 qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
304 {
305         do {
306                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
307                         return &xform->cipher;
308
309                 xform = xform->next;
310         } while (xform);
311
312         return NULL;
313 }
314
315 int
316 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
317                 struct rte_crypto_sym_xform *xform,
318                 struct qat_session *session)
319 {
320         struct qat_pmd_private *internals = dev->data->dev_private;
321         struct rte_crypto_cipher_xform *cipher_xform = NULL;
322         int ret;
323
324         /* Get cipher xform from crypto xform chain */
325         cipher_xform = qat_get_cipher_xform(xform);
326
327         session->cipher_iv.offset = cipher_xform->iv.offset;
328         session->cipher_iv.length = cipher_xform->iv.length;
329
330         switch (cipher_xform->algo) {
331         case RTE_CRYPTO_CIPHER_AES_CBC:
332                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
333                                 &session->qat_cipher_alg) != 0) {
334                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
335                         ret = -EINVAL;
336                         goto error_out;
337                 }
338                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
339                 break;
340         case RTE_CRYPTO_CIPHER_AES_CTR:
341                 if (qat_alg_validate_aes_key(cipher_xform->key.length,
342                                 &session->qat_cipher_alg) != 0) {
343                         PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
344                         ret = -EINVAL;
345                         goto error_out;
346                 }
347                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
348                 break;
349         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
350                 if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
351                                         &session->qat_cipher_alg) != 0) {
352                         PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
353                         ret = -EINVAL;
354                         goto error_out;
355                 }
356                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
357                 break;
358         case RTE_CRYPTO_CIPHER_NULL:
359                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
360                 break;
361         case RTE_CRYPTO_CIPHER_KASUMI_F8:
362                 if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
363                                         &session->qat_cipher_alg) != 0) {
364                         PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
365                         ret = -EINVAL;
366                         goto error_out;
367                 }
368                 session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
369                 break;
370         case RTE_CRYPTO_CIPHER_3DES_CBC:
371                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
372                                 &session->qat_cipher_alg) != 0) {
373                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
374                         ret = -EINVAL;
375                         goto error_out;
376                 }
377                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
378                 break;
379         case RTE_CRYPTO_CIPHER_DES_CBC:
380                 if (qat_alg_validate_des_key(cipher_xform->key.length,
381                                 &session->qat_cipher_alg) != 0) {
382                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
383                         ret = -EINVAL;
384                         goto error_out;
385                 }
386                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
387                 break;
388         case RTE_CRYPTO_CIPHER_3DES_CTR:
389                 if (qat_alg_validate_3des_key(cipher_xform->key.length,
390                                 &session->qat_cipher_alg) != 0) {
391                         PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
392                         ret = -EINVAL;
393                         goto error_out;
394                 }
395                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
396                 break;
397         case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
398                 ret = bpi_cipher_ctx_init(
399                                         cipher_xform->algo,
400                                         cipher_xform->op,
401                                         cipher_xform->key.data,
402                                         &session->bpi_ctx);
403                 if (ret != 0) {
404                         PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
405                         goto error_out;
406                 }
407                 if (qat_alg_validate_des_key(cipher_xform->key.length,
408                                 &session->qat_cipher_alg) != 0) {
409                         PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
410                         ret = -EINVAL;
411                         goto error_out;
412                 }
413                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
414                 break;
415         case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
416                 ret = bpi_cipher_ctx_init(
417                                         cipher_xform->algo,
418                                         cipher_xform->op,
419                                         cipher_xform->key.data,
420                                         &session->bpi_ctx);
421                 if (ret != 0) {
422                         PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
423                         goto error_out;
424                 }
425                 if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
426                                 &session->qat_cipher_alg) != 0) {
427                         PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
428                         ret = -EINVAL;
429                         goto error_out;
430                 }
431                 session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
432                 break;
433         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
434                 if (!qat_is_cipher_alg_supported(
435                         cipher_xform->algo, internals)) {
436                         PMD_DRV_LOG(ERR, "%s not supported on this device",
437                                 rte_crypto_cipher_algorithm_strings
438                                         [cipher_xform->algo]);
439                         ret = -ENOTSUP;
440                         goto error_out;
441                 }
442                 if (qat_alg_validate_zuc_key(cipher_xform->key.length,
443                                 &session->qat_cipher_alg) != 0) {
444                         PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
445                         ret = -EINVAL;
446                         goto error_out;
447                 }
448                 session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
449                 break;
450         case RTE_CRYPTO_CIPHER_3DES_ECB:
451         case RTE_CRYPTO_CIPHER_AES_ECB:
452         case RTE_CRYPTO_CIPHER_AES_F8:
453         case RTE_CRYPTO_CIPHER_AES_XTS:
454         case RTE_CRYPTO_CIPHER_ARC4:
455                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
456                                 cipher_xform->algo);
457                 ret = -ENOTSUP;
458                 goto error_out;
459         default:
460                 PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
461                                 cipher_xform->algo);
462                 ret = -EINVAL;
463                 goto error_out;
464         }
465
466         if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
467                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
468         else
469                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
470
471         if (qat_alg_aead_session_create_content_desc_cipher(session,
472                                                 cipher_xform->key.data,
473                                                 cipher_xform->key.length)) {
474                 ret = -EINVAL;
475                 goto error_out;
476         }
477
478         return 0;
479
480 error_out:
481         if (session->bpi_ctx) {
482                 bpi_cipher_ctx_free(session->bpi_ctx);
483                 session->bpi_ctx = NULL;
484         }
485         return ret;
486 }
487
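/** Get a session private data object from the mempool, populate it from
 *  the xform chain and attach it to the generic session. The object is
 *  returned to the mempool on failure.
 */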
488 int
489 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
490                 struct rte_crypto_sym_xform *xform,
491                 struct rte_cryptodev_sym_session *sess,
492                 struct rte_mempool *mempool)
493 {
494         void *sess_private_data;
495         int ret;
496
497         if (rte_mempool_get(mempool, &sess_private_data)) {
498                 CDEV_LOG_ERR(
499                         "Couldn't get object from session mempool");
500                 return -ENOMEM;
501         }
502
503         ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
504         if (ret != 0) {
505                 PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
506                                 "session parameters");
507
508                 /* Return session to mempool */
509                 rte_mempool_put(mempool, sess_private_data);
510                 return ret;
511         }
512
513         set_session_private_data(sess, dev->driver_id,
514                 sess_private_data);
515
516         return 0;
517 }
518
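/** Parse the xform chain into the QAT session: set the content descriptor
 *  physical address, select the firmware command id and configure the
 *  cipher/auth/AEAD parts accordingly.
 */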
519 int
520 qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
521                 struct rte_crypto_sym_xform *xform, void *session_private)
522 {
523         struct qat_session *session = session_private;
524         int ret;
525
526         int qat_cmd_id;
527         PMD_INIT_FUNC_TRACE();
528
529         /* Set context descriptor physical address */
530         session->cd_paddr = rte_mempool_virt2iova(session) +
531                         offsetof(struct qat_session, cd);
532
533         session->min_qat_dev_gen = QAT_GEN1;
534
535         /* Get requested QAT command id */
536         qat_cmd_id = qat_get_cmd_id(xform);
537         if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
538                 PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
539                 return -ENOTSUP;
540         }
541         session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
542         switch (session->qat_cmd) {
543         case ICP_QAT_FW_LA_CMD_CIPHER:
544                 ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
545                 if (ret < 0)
546                         return ret;
547                 break;
548         case ICP_QAT_FW_LA_CMD_AUTH:
549                 ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
550                 if (ret < 0)
551                         return ret;
552                 break;
553         case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
554                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
555                         ret = qat_crypto_sym_configure_session_aead(xform,
556                                         session);
557                         if (ret < 0)
558                                 return ret;
559                 } else {
560                         ret = qat_crypto_sym_configure_session_cipher(dev,
561                                         xform, session);
562                         if (ret < 0)
563                                 return ret;
564                         ret = qat_crypto_sym_configure_session_auth(dev,
565                                         xform, session);
566                         if (ret < 0)
567                                 return ret;
568                 }
569                 break;
570         case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
571                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
572                         ret = qat_crypto_sym_configure_session_aead(xform,
573                                         session);
574                         if (ret < 0)
575                                 return ret;
576                 } else {
577                         ret = qat_crypto_sym_configure_session_auth(dev,
578                                         xform, session);
579                         if (ret < 0)
580                                 return ret;
581                         ret = qat_crypto_sym_configure_session_cipher(dev,
582                                         xform, session);
583                         if (ret < 0)
584                                 return ret;
585                 }
586                 break;
587         case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
588         case ICP_QAT_FW_LA_CMD_TRNG_TEST:
589         case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
590         case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
591         case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
592         case ICP_QAT_FW_LA_CMD_MGF1:
593         case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
594         case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
595         case ICP_QAT_FW_LA_CMD_DELIMITER:
596                 PMD_DRV_LOG(ERR, "Unsupported Service %u",
597                                 session->qat_cmd);
598                 return -ENOTSUP;
599         default:
600                 PMD_DRV_LOG(ERR, "Unsupported Service %u",
601                                 session->qat_cmd);
602                 return -ENOTSUP;
603         }
604
605         return 0;
606 }
607
608 int
609 qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
610                                 struct rte_crypto_sym_xform *xform,
611                                 struct qat_session *session)
612 {
613         struct rte_crypto_auth_xform *auth_xform = NULL;
614         struct qat_pmd_private *internals = dev->data->dev_private;
615         auth_xform = qat_get_auth_xform(xform);
616         uint8_t *key_data = auth_xform->key.data;
617         uint8_t key_length = auth_xform->key.length;
618
619         switch (auth_xform->algo) {
620         case RTE_CRYPTO_AUTH_SHA1_HMAC:
621                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
622                 break;
623         case RTE_CRYPTO_AUTH_SHA224_HMAC:
624                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
625                 break;
626         case RTE_CRYPTO_AUTH_SHA256_HMAC:
627                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
628                 break;
629         case RTE_CRYPTO_AUTH_SHA384_HMAC:
630                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
631                 break;
632         case RTE_CRYPTO_AUTH_SHA512_HMAC:
633                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
634                 break;
635         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
636                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
637                 break;
638         case RTE_CRYPTO_AUTH_AES_GMAC:
639                 if (qat_alg_validate_aes_key(auth_xform->key.length,
640                                 &session->qat_cipher_alg) != 0) {
641                         PMD_DRV_LOG(ERR, "Invalid AES key size");
642                         return -EINVAL;
643                 }
644                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
645                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
646
647                 break;
648         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
649                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
650                 break;
651         case RTE_CRYPTO_AUTH_MD5_HMAC:
652                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
653                 break;
654         case RTE_CRYPTO_AUTH_NULL:
655                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
656                 break;
657         case RTE_CRYPTO_AUTH_KASUMI_F9:
658                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
659                 break;
660         case RTE_CRYPTO_AUTH_ZUC_EIA3:
661                 if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
662                         PMD_DRV_LOG(ERR, "%s not supported on this device",
663                                 rte_crypto_auth_algorithm_strings
664                                 [auth_xform->algo]);
665                         return -ENOTSUP;
666                 }
667                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
668                 break;
669         case RTE_CRYPTO_AUTH_SHA1:
670         case RTE_CRYPTO_AUTH_SHA256:
671         case RTE_CRYPTO_AUTH_SHA512:
672         case RTE_CRYPTO_AUTH_SHA224:
673         case RTE_CRYPTO_AUTH_SHA384:
674         case RTE_CRYPTO_AUTH_MD5:
675         case RTE_CRYPTO_AUTH_AES_CMAC:
676         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
677                 PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
678                                 auth_xform->algo);
679                 return -ENOTSUP;
680         default:
681                 PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
682                                 auth_xform->algo);
683                 return -EINVAL;
684         }
685
686         session->auth_iv.offset = auth_xform->iv.offset;
687         session->auth_iv.length = auth_xform->iv.length;
688
689         if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
690                 if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
691                         session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
692                         session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
693                         /*
694                          * The cipher descriptor content must be created
695                          * first, then the authentication content
696                          */
697                         if (qat_alg_aead_session_create_content_desc_cipher(session,
698                                                 auth_xform->key.data,
699                                                 auth_xform->key.length))
700                                 return -EINVAL;
701
702                         if (qat_alg_aead_session_create_content_desc_auth(session,
703                                                 key_data,
704                                                 key_length,
705                                                 0,
706                                                 auth_xform->digest_length,
707                                                 auth_xform->op))
708                                 return -EINVAL;
709                 } else {
710                         session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
711                         session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
712                         /*
713                          * The authentication descriptor content must be
714                          * created first, then the cipher content
715                          */
716                         if (qat_alg_aead_session_create_content_desc_auth(session,
717                                         key_data,
718                                         key_length,
719                                         0,
720                                         auth_xform->digest_length,
721                                         auth_xform->op))
722                                 return -EINVAL;
723
724                         if (qat_alg_aead_session_create_content_desc_cipher(session,
725                                                 auth_xform->key.data,
726                                                 auth_xform->key.length))
727                                 return -EINVAL;
728                 }
729                 /* Restore to authentication only */
730                 session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
731         } else {
732                 if (qat_alg_aead_session_create_content_desc_auth(session,
733                                 key_data,
734                                 key_length,
735                                 0,
736                                 auth_xform->digest_length,
737                                 auth_xform->op))
738                         return -EINVAL;
739         }
740
741         session->digest_length = auth_xform->digest_length;
742         return 0;
743 }
744
745 int
746 qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
747                                 struct qat_session *session)
748 {
749         struct rte_crypto_aead_xform *aead_xform = &xform->aead;
750         enum rte_crypto_auth_operation crypto_operation;
751
752         /*
753          * Store AEAD IV parameters as cipher IV,
754          * to avoid unnecessary memory usage
755          */
756         session->cipher_iv.offset = xform->aead.iv.offset;
757         session->cipher_iv.length = xform->aead.iv.length;
758
759         switch (aead_xform->algo) {
760         case RTE_CRYPTO_AEAD_AES_GCM:
761                 if (qat_alg_validate_aes_key(aead_xform->key.length,
762                                 &session->qat_cipher_alg) != 0) {
763                         PMD_DRV_LOG(ERR, "Invalid AES key size");
764                         return -EINVAL;
765                 }
766                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
767                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
768                 break;
769         case RTE_CRYPTO_AEAD_AES_CCM:
770                 if (qat_alg_validate_aes_key(aead_xform->key.length,
771                                 &session->qat_cipher_alg) != 0) {
772                         PMD_DRV_LOG(ERR, "Invalid AES key size");
773                         return -EINVAL;
774                 }
775                 session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
776                 session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
777                 break;
778         default:
779                 PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
780                                 aead_xform->algo);
781                 return -EINVAL;
782         }
783
784         if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
785                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
786                         (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
787                         aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
788                 session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
789                 /*
790                  * The cipher descriptor content must be created first,
791                  * then the authentication content
792                  */
793
794                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
795                         RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
796
797                 if (qat_alg_aead_session_create_content_desc_cipher(session,
798                                         aead_xform->key.data,
799                                         aead_xform->key.length))
800                         return -EINVAL;
801
802                 if (qat_alg_aead_session_create_content_desc_auth(session,
803                                         aead_xform->key.data,
804                                         aead_xform->key.length,
805                                         aead_xform->aad_length,
806                                         aead_xform->digest_length,
807                                         crypto_operation))
808                         return -EINVAL;
809         } else {
810                 session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
811                 /*
812                  * The authentication descriptor content must be created
813                  * first, then the cipher content
814                  */
815
816                 crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
817                         RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
818
819                 if (qat_alg_aead_session_create_content_desc_auth(session,
820                                         aead_xform->key.data,
821                                         aead_xform->key.length,
822                                         aead_xform->aad_length,
823                                         aead_xform->digest_length,
824                                         crypto_operation))
825                         return -EINVAL;
826
827                 if (qat_alg_aead_session_create_content_desc_cipher(session,
828                                         aead_xform->key.data,
829                                         aead_xform->key.length))
830                         return -EINVAL;
831         }
832
833         session->digest_length = aead_xform->digest_length;
834         return 0;
835 }
836
837 unsigned qat_crypto_sym_get_session_private_size(
838                 struct rte_cryptodev *dev __rte_unused)
839 {
840         return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
841 }
842
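/** DOCSIS BPI decryption pre-processing: decrypt any trailing partial
 *  block in software (CFB built from ECB + XOR) and return the length of
 *  the complete blocks to be sent to the device.
 */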
843 static inline uint32_t
844 qat_bpicipher_preprocess(struct qat_session *ctx,
845                                 struct rte_crypto_op *op)
846 {
847         uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
848         struct rte_crypto_sym_op *sym_op = op->sym;
849         uint8_t last_block_len = block_len > 0 ?
850                         sym_op->cipher.data.length % block_len : 0;
851
852         if (last_block_len &&
853                         ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
854
855                 /* Decrypt last block */
856                 uint8_t *last_block, *dst, *iv;
857                 uint32_t last_block_offset = sym_op->cipher.data.offset +
858                                 sym_op->cipher.data.length - last_block_len;
859                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
860                                 uint8_t *, last_block_offset);
861
862                 if (unlikely(sym_op->m_dst != NULL))
863                         /* out-of-place operation (OOP) */
864                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
865                                                 uint8_t *, last_block_offset);
866                 else
867                         dst = last_block;
868
869                 if (last_block_len < sym_op->cipher.data.length)
870                         /* use previous block ciphertext as IV */
871                         iv = last_block - block_len;
872                 else
873                         /* runt block, i.e. less than one full block */
874                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
875                                         ctx->cipher_iv.offset);
876
877 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
878                 rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
879                         last_block_len);
880                 if (sym_op->m_dst != NULL)
881                         rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
882                                 last_block_len);
883 #endif
884                 bpi_cipher_decrypt(last_block, dst, iv, block_len,
885                                 last_block_len, ctx->bpi_ctx);
886 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
887                 rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
888                         last_block_len);
889                 if (sym_op->m_dst != NULL)
890                         rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
891                                 last_block_len);
892 #endif
893         }
894
895         return sym_op->cipher.data.length - last_block_len;
896 }
897
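/** DOCSIS BPI encryption post-processing: encrypt any trailing partial
 *  block in software once the device has processed the complete blocks.
 */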
898 static inline uint32_t
899 qat_bpicipher_postprocess(struct qat_session *ctx,
900                                 struct rte_crypto_op *op)
901 {
902         uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
903         struct rte_crypto_sym_op *sym_op = op->sym;
904         uint8_t last_block_len = block_len > 0 ?
905                         sym_op->cipher.data.length % block_len : 0;
906
907         if (last_block_len > 0 &&
908                         ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
909
910                 /* Encrypt last block */
911                 uint8_t *last_block, *dst, *iv;
912                 uint32_t last_block_offset;
913
914                 last_block_offset = sym_op->cipher.data.offset +
915                                 sym_op->cipher.data.length - last_block_len;
916                 last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
917                                 uint8_t *, last_block_offset);
918
919                 if (unlikely(sym_op->m_dst != NULL))
920                         /* out-of-place operation (OOP) */
921                         dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
922                                                 uint8_t *, last_block_offset);
923                 else
924                         dst = last_block;
925
926                 if (last_block_len < sym_op->cipher.data.length)
927                         /* use previous block ciphertext as IV */
928                         iv = dst - block_len;
929                 else
930                         /* runt block, i.e. less than one full block */
931                         iv = rte_crypto_op_ctod_offset(op, uint8_t *,
932                                         ctx->cipher_iv.offset);
933
934 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
935                 rte_hexdump(stdout, "BPI: src before post-process:", last_block,
936                         last_block_len);
937                 if (sym_op->m_dst != NULL)
938                         rte_hexdump(stdout, "BPI: dst before post-process:",
939                                         dst, last_block_len);
940 #endif
941                 bpi_cipher_encrypt(last_block, dst, iv, block_len,
942                                 last_block_len, ctx->bpi_ctx);
943 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
944                 rte_hexdump(stdout, "BPI: src after post-process:", last_block,
945                         last_block_len);
946                 if (sym_op->m_dst != NULL)
947                         rte_hexdump(stdout, "BPI: dst after post-process:", dst,
948                                 last_block_len);
949 #endif
950         }
951         return sym_op->cipher.data.length - last_block_len;
952 }
953
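/** Write the TX queue tail to the hardware CSR and reset the
 *  pending-request counter.
 */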
954 static inline void
955 txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
956         WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
957                         q->hw_queue_number, q->tail);
958         q->nb_pending_requests = 0;
959         q->csr_tail = q->tail;
960 }
961
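/** Enqueue a burst of crypto ops on the TX ring. The tail CSR write is
 *  coalesced: it is only issued when the in-flight count is low or enough
 *  requests are pending.
 */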
962 uint16_t
963 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
964                 uint16_t nb_ops)
965 {
966         register struct qat_queue *queue;
967         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
968         register uint32_t nb_ops_sent = 0;
969         register struct rte_crypto_op **cur_op = ops;
970         register int ret;
971         uint16_t nb_ops_possible = nb_ops;
972         register uint8_t *base_addr;
973         register uint32_t tail;
974         int overflow;
975
976         if (unlikely(nb_ops == 0))
977                 return 0;
978
979         /* read params used a lot in main loop into registers */
980         queue = &(tmp_qp->tx_q);
981         base_addr = (uint8_t *)queue->base_addr;
982         tail = queue->tail;
983
984         /* Find how many can actually fit on the ring */
985         tmp_qp->inflights16 += nb_ops;
986         overflow = tmp_qp->inflights16 - queue->max_inflights;
987         if (overflow > 0) {
988                 tmp_qp->inflights16 -= overflow;
989                 nb_ops_possible = nb_ops - overflow;
990                 if (nb_ops_possible == 0)
991                         return 0;
992         }
993
994         while (nb_ops_sent != nb_ops_possible) {
995                 ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
996                         tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
997                 if (ret != 0) {
998                         tmp_qp->stats.enqueue_err_count++;
999                         /*
1000                          * This message cannot be enqueued, so remove the
1001                          * unsent ops from the in-flight count
1002                          */
1003                         tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
1004                         if (nb_ops_sent == 0)
1005                                 return 0;
1006                         goto kick_tail;
1007                 }
1008
1009                 tail = adf_modulo(tail + queue->msg_size, queue->modulo);
1010                 nb_ops_sent++;
1011                 cur_op++;
1012         }
1013 kick_tail:
1014         queue->tail = tail;
1015         tmp_qp->stats.enqueued_count += nb_ops_sent;
1016         queue->nb_pending_requests += nb_ops_sent;
1017         if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
1018                         queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
1019                 txq_write_tail(tmp_qp, queue);
1020         }
1021         return nb_ops_sent;
1022 }
1023
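/** Mark processed response descriptors as empty and write the new RX
 *  head to the hardware CSR.
 */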
1024 static inline
1025 void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
1026 {
1027         uint32_t old_head, new_head;
1028         uint32_t max_head;
1029
1030         old_head = q->csr_head;
1031         new_head = q->head;
1032         max_head = qp->nb_descriptors * q->msg_size;
1033
1034         /* write out free descriptors */
1035         void *cur_desc = (uint8_t *)q->base_addr + old_head;
1036
1037         if (new_head < old_head) {
1038                 memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
1039                 memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
1040         } else {
1041                 memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
1042         }
1043         q->nb_processed_responses = 0;
1044         q->csr_head = new_head;
1045
1046         /* write current head to CSR */
1047         WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
1048                             q->hw_queue_number, new_head);
1049 }
1050
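/** Dequeue completed ops from the RX ring, setting each op status from
 *  the firmware response. CSR head/tail updates are batched using the
 *  processed-response and in-flight thresholds.
 */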
1051 uint16_t
1052 qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
1053                 uint16_t nb_ops)
1054 {
1055         struct qat_queue *rx_queue, *tx_queue;
1056         struct qat_qp *tmp_qp = (struct qat_qp *)qp;
1057         uint32_t msg_counter = 0;
1058         struct rte_crypto_op *rx_op;
1059         struct icp_qat_fw_comn_resp *resp_msg;
1060         uint32_t head;
1061
1062         rx_queue = &(tmp_qp->rx_q);
1063         tx_queue = &(tmp_qp->tx_q);
1064         head = rx_queue->head;
1065         resp_msg = (struct icp_qat_fw_comn_resp *)
1066                         ((uint8_t *)rx_queue->base_addr + head);
1067
1068         while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
1069                         msg_counter != nb_ops) {
1070                 rx_op = (struct rte_crypto_op *)(uintptr_t)
1071                                 (resp_msg->opaque_data);
1072
1073 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
1074                 rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
1075                         sizeof(struct icp_qat_fw_comn_resp));
1076 #endif
1077                 if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
1078                                 ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
1079                                         resp_msg->comn_hdr.comn_status)) {
1080                         rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
1081                 } else {
1082                         struct qat_session *sess = (struct qat_session *)
1083                                         get_session_private_data(
1084                                         rx_op->sym->session,
1085                                         cryptodev_qat_driver_id);
1086
1087                         if (sess->bpi_ctx)
1088                                 qat_bpicipher_postprocess(sess, rx_op);
1089                         rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
1090                 }
1091
1092                 head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
1093                 resp_msg = (struct icp_qat_fw_comn_resp *)
1094                                 ((uint8_t *)rx_queue->base_addr + head);
1095                 *ops = rx_op;
1096                 ops++;
1097                 msg_counter++;
1098         }
1099         if (msg_counter > 0) {
1100                 rx_queue->head = head;
1101                 tmp_qp->stats.dequeued_count += msg_counter;
1102                 rx_queue->nb_processed_responses += msg_counter;
1103                 tmp_qp->inflights16 -= msg_counter;
1104
1105                 if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
1106                         rxq_free_desc(tmp_qp, rx_queue);
1107         }
1108         /* also check if tail needs to be advanced */
1109         if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
1110                         tx_queue->tail != tx_queue->csr_tail) {
1111                 txq_write_tail(tmp_qp, tx_queue);
1112         }
1113         return msg_counter;
1114 }
1115
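/** Build a QAT scatter-gather list covering data_len bytes of an mbuf
 *  chain starting at buff_start; fails if more than QAT_SGL_MAX_NUMBER
 *  segments are needed.
 */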
1116 static inline int
1117 qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
1118                 struct qat_alg_buf_list *list, uint32_t data_len)
1119 {
1120         int nr = 1;
1121
1122         uint32_t buf_len = rte_pktmbuf_iova(buf) -
1123                         buff_start + rte_pktmbuf_data_len(buf);
1124
1125         list->bufers[0].addr = buff_start;
1126         list->bufers[0].resrvd = 0;
1127         list->bufers[0].len = buf_len;
1128
1129         if (data_len <= buf_len) {
1130                 list->num_bufs = nr;
1131                 list->bufers[0].len = data_len;
1132                 return 0;
1133         }
1134
1135         buf = buf->next;
1136         while (buf) {
1137                 if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
1138                         PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
1139                                         " entry(%u)",
1140                                         QAT_SGL_MAX_NUMBER);
1141                         return -EINVAL;
1142                 }
1143
1144                 list->bufers[nr].len = rte_pktmbuf_data_len(buf);
1145                 list->bufers[nr].resrvd = 0;
1146                 list->bufers[nr].addr = rte_pktmbuf_iova(buf);
1147
1148                 buf_len += list->bufers[nr].len;
1149                 buf = buf->next;
1150
1151                 if (buf_len > data_len) {
1152                         list->bufers[nr].len -=
1153                                 buf_len - data_len;
1154                         buf = NULL;
1155                 }
1156                 ++nr;
1157         }
1158         list->num_bufs = nr;
1159
1160         return 0;
1161 }
1162
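/** Copy the cipher IV into the request if it fits in the IV array,
 *  otherwise set the 64-bit IV pointer flag and pass its physical address.
 */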
1163 static inline void
1164 set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
1165                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1166                 struct rte_crypto_op *op,
1167                 struct icp_qat_fw_la_bulk_req *qat_req)
1168 {
1169         /* copy IV into request if it fits */
1170         if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
1171                 rte_memcpy(cipher_param->u.cipher_IV_array,
1172                                 rte_crypto_op_ctod_offset(op, uint8_t *,
1173                                         iv_offset),
1174                                 iv_length);
1175         } else {
1176                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
1177                                 qat_req->comn_hdr.serv_specif_flags,
1178                                 ICP_QAT_FW_CIPH_IV_64BIT_PTR);
1179                 cipher_param->u.s.cipher_IV_ptr =
1180                                 rte_crypto_op_ctophys_offset(op,
1181                                         iv_offset);
1182         }
1183 }
1184
1185 /** Setting the IV for CCM is a special case: byte 0 is set to q-1,
1186  *  where q is the padding of the nonce within the 16-byte block
1187  */
1188 static inline void
1189 set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
1190                 struct icp_qat_fw_la_cipher_req_params *cipher_param,
1191                 struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
1192 {
1193         rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
1194                         ICP_QAT_HW_CCM_NONCE_OFFSET,
1195                         rte_crypto_op_ctod_offset(op, uint8_t *,
1196                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1197                         iv_length);
1198         *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
1199                         q - ICP_QAT_HW_CCM_NONCE_OFFSET;
1200
1201         if (aad_len_field_sz)
1202                 rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
1203                         rte_crypto_op_ctod_offset(op, uint8_t *,
1204                                 iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
1205                         iv_length);
1206 }
1207
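/** Build one firmware request descriptor in the TX ring from a crypto op,
 *  according to the session's cipher/auth/AEAD configuration.
 */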
1208 static inline int
1209 qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
1210                 struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
1211 {
1212         int ret = 0;
1213         struct qat_session *ctx;
1214         struct icp_qat_fw_la_cipher_req_params *cipher_param;
1215         struct icp_qat_fw_la_auth_req_params *auth_param;
1216         register struct icp_qat_fw_la_bulk_req *qat_req;
1217         uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
1218         uint32_t cipher_len = 0, cipher_ofs = 0;
1219         uint32_t auth_len = 0, auth_ofs = 0;
1220         uint32_t min_ofs = 0;
1221         uint64_t src_buf_start = 0, dst_buf_start = 0;
1222         uint8_t do_sgl = 0;
1223
1224 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1225         if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
1226                 PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
1227                                 "operation requests, op (%p) is not a "
1228                                 "symmetric operation.", op);
1229                 return -EINVAL;
1230         }
1231 #endif
1232         if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
1233                 PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
1234                                 " requests, op (%p) is sessionless.", op);
1235                 return -EINVAL;
1236         }
1237
1238         ctx = (struct qat_session *)get_session_private_data(
1239                         op->sym->session, cryptodev_qat_driver_id);
1240
1241         if (unlikely(ctx == NULL)) {
1242                 PMD_DRV_LOG(ERR, "Session was not created for this device");
1243                 return -EINVAL;
1244         }
1245
1246         if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
1247                 PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
1248                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
1249                 return -EINVAL;
1250         }
1251
1252
1253
1254         qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
1255         rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
1256         qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
1257         cipher_param = (void *)&qat_req->serv_specif_rqpars;
1258         auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
1259
1260         if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
1261                         ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
1262                 /* AES-GCM or AES-CCM */
1263                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1264                                 ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
1265                                 (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
1266                                 && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
1267                                 && ctx->qat_hash_alg ==
1268                                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
1269                         do_aead = 1;
1270                 } else {
1271                         do_auth = 1;
1272                         do_cipher = 1;
1273                 }
1274         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
1275                 do_auth = 1;
1276                 do_cipher = 0;
1277         } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
1278                 do_auth = 0;
1279                 do_cipher = 1;
1280         }
1281
1282         if (do_cipher) {
1283
1284                 if (ctx->qat_cipher_alg ==
1285                                          ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
1286                         ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
1287                         ctx->qat_cipher_alg ==
1288                                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
1289
1290                         if (unlikely(
1291                                 (cipher_param->cipher_length % BYTE_LENGTH != 0)
1292                                  || (cipher_param->cipher_offset
1293                                                         % BYTE_LENGTH != 0))) {
1294                                 PMD_DRV_LOG(ERR,
1295                   "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
1296                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1297                                 return -EINVAL;
1298                         }
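                        /*
                         * For SNOW3G/KASUMI/ZUC the cipher length and offset
                         * are given in bits; convert them to bytes for the
                         * request descriptor.
                         */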
1299                         cipher_len = op->sym->cipher.data.length >> 3;
1300                         cipher_ofs = op->sym->cipher.data.offset >> 3;
1301
1302                 } else if (ctx->bpi_ctx) {
1303                         /* DOCSIS BPI - only send complete blocks to the device;
1304                          * any trailing partial block is processed in software
1305                          * using CFB mode. Even with 0 complete blocks, still send
1306                          * the op to the device so it reaches the rx queue for
1307                          * post-processing and dequeue.  */
1308                         cipher_len = qat_bpicipher_preprocess(ctx, op);
1309                         cipher_ofs = op->sym->cipher.data.offset;
1310                 } else {
1311                         cipher_len = op->sym->cipher.data.length;
1312                         cipher_ofs = op->sym->cipher.data.offset;
1313                 }
1314
1315                 set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
1316                                 cipher_param, op, qat_req);
1317                 min_ofs = cipher_ofs;
1318         }
1319
1320         if (do_auth) {
1321
1322                 if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
1323                         ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
1324                         ctx->qat_hash_alg ==
1325                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
1326                         if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
1327                                 || (auth_param->auth_len % BYTE_LENGTH != 0))) {
1328                                 PMD_DRV_LOG(ERR,
1329                 "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
1330                                 op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
1331                                 return -EINVAL;
1332                         }
1333                         auth_ofs = op->sym->auth.data.offset >> 3;
1334                         auth_len = op->sym->auth.data.length >> 3;
1335
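                        /*
                         * For these algorithms the auth IV is passed to the
                         * device through the AAD pointer field of the request.
                         */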
1336                         auth_param->u1.aad_adr =
1337                                         rte_crypto_op_ctophys_offset(op,
1338                                                         ctx->auth_iv.offset);
1339
1340                 } else if (ctx->qat_hash_alg ==
1341                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1342                                 ctx->qat_hash_alg ==
1343                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1344                         /* AES-GMAC */
1345                         set_cipher_iv(ctx->auth_iv.length,
1346                                 ctx->auth_iv.offset,
1347                                 cipher_param, op, qat_req);
1348                         auth_ofs = op->sym->auth.data.offset;
1349                         auth_len = op->sym->auth.data.length;
1350
1351                         auth_param->u1.aad_adr = 0;
1352                         auth_param->u2.aad_sz = 0;
1353
1354                         /*
1355                          * If len(IV) == 12 bytes the firmware computes J0 itself
1356                          */
1357                         if (ctx->auth_iv.length == 12) {
1358                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1359                                         qat_req->comn_hdr.serv_specif_flags,
1360                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1361
1362                         }
1363                 } else {
1364                         auth_ofs = op->sym->auth.data.offset;
1365                         auth_len = op->sym->auth.data.length;
1366
1367                 }
1368                 min_ofs = auth_ofs;
1369
1370                 auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
1371
1372         }
1373
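        /*
         * AEAD path (GCM/CCM): cipher and auth cover the same data region;
         * the AAD is supplied separately via its own pointer.
         */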
1374         if (do_aead) {
1375                 /*
1376                  * This address may be used as the AAD physical pointer; for
1377                  * CCM with no real AAD it is redirected to the IV offset in the op
1378                  */
1379                 rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
1380                 if (ctx->qat_hash_alg ==
1381                                 ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
1382                                 ctx->qat_hash_alg ==
1383                                         ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
1384                         /*
1385                          * If len(IV) == 12 bytes the firmware computes J0 itself
1386                          */
1387                         if (ctx->cipher_iv.length == 12) {
1388                                 ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
1389                                         qat_req->comn_hdr.serv_specif_flags,
1390                                         ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
1391                         }
1392
1393                         set_cipher_iv(ctx->cipher_iv.length,
1394                                         ctx->cipher_iv.offset,
1395                                         cipher_param, op, qat_req);
1396
1397                 } else if (ctx->qat_hash_alg ==
1398                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
1399
1400                         /* For AES-CCM this may point to user-supplied memory
1401                          * or to the IV offset in the crypto op
1402                          */
1403                         uint8_t *aad_data = op->sym->aead.aad.data;
1404                         /* The real AAD length; it does not include the 18 bytes
1405                          * of preceding data (B0 block plus AAD length field)
1406                          */
1407                         uint8_t aad_ccm_real_len = 0;
1408
1409                         uint8_t aad_len_field_sz = 0;
1410                         uint32_t msg_len_be =
1411                                         rte_bswap32(op->sym->aead.data.length);
1412
1413                         if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
1414                                 aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
1415                                 aad_ccm_real_len = ctx->aad_len -
1416                                         ICP_QAT_HW_CCM_AAD_B0_LEN -
1417                                         ICP_QAT_HW_CCM_AAD_LEN_INFO;
1418                         } else {
1419                                 /*
1420                                  * aad_len is not greater than 18, so there is no
1421                                  * actual AAD data; use the IV area after the op for B0
1422                                  */
1423                                 aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
1424                                                 ctx->cipher_iv.offset);
1425                                 aad_phys_addr_aead =
1426                                                 rte_crypto_op_ctophys_offset(op,
1427                                                                 ctx->cipher_iv.offset);
1428                         }
1429
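                        /*
                         * Build the CCM B0 block at the start of the AAD
                         * buffer: a flags byte encoding the AAD-present bit,
                         * tag length and q, followed by the nonce, with the
                         * message length stored big-endian in the final q
                         * bytes (see NIST SP 800-38C, A.2.1).
                         */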
1430                         uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
1431
1432                         aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
1433                                                         ctx->digest_length, q);
1434
1435                         if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
1436                                 memcpy(aad_data + ctx->cipher_iv.length +
1437                                         ICP_QAT_HW_CCM_NONCE_OFFSET
1438                                         + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
1439                                         (uint8_t *)&msg_len_be,
1440                                         ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
1441                         } else {
1442                                 memcpy(aad_data + ctx->cipher_iv.length +
1443                                         ICP_QAT_HW_CCM_NONCE_OFFSET,
1444                                         (uint8_t *)&msg_len_be
1445                                         + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
1446                                         - q), q);
1447                         }
1448
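                        /*
                         * When real AAD is present, prepend its length as a
                         * 16-bit big-endian value and zero-pad the AAD so the
                         * associated data ends on a 16-byte block boundary.
                         */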
1449                         if (aad_len_field_sz > 0) {
1450                                 *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
1451                                                 = rte_bswap16(aad_ccm_real_len);
1452
1453                                 if ((aad_ccm_real_len + aad_len_field_sz)
1454                                                 % ICP_QAT_HW_CCM_AAD_B0_LEN) {
1455                                         uint8_t pad_len = 0;
1456                                         uint8_t pad_idx = 0;
1457
1458                                         pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
1459                                                 ((aad_ccm_real_len + aad_len_field_sz) %
1460                                                         ICP_QAT_HW_CCM_AAD_B0_LEN);
1461                                         pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
1462                                                 aad_ccm_real_len + aad_len_field_sz;
1463                                         memset(&aad_data[pad_idx],
1464                                                         0, pad_len);
1465                                 }
1466
1467                         }
1468
1469                         set_cipher_iv_ccm(ctx->cipher_iv.length,
1470                                         ctx->cipher_iv.offset,
1471                                         cipher_param, op, q,
1472                                         aad_len_field_sz);
1473
1474                 }
1475
1476                 cipher_len = op->sym->aead.data.length;
1477                 cipher_ofs = op->sym->aead.data.offset;
1478                 auth_len = op->sym->aead.data.length;
1479                 auth_ofs = op->sym->aead.data.offset;
1480
1481                 auth_param->u1.aad_adr = aad_phys_addr_aead;
1482                 auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
1483                 min_ofs = op->sym->aead.data.offset;
1484         }
1485
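        /* A multi-segment mbuf on either the source or destination side
         * forces the scatter-gather (SGL) descriptor format.
         */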
1486         if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
1487                 do_sgl = 1;
1488
1489         /* adjust for chain case */
1490         if (do_cipher && do_auth)
1491                 min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
1492
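        /* rte_pktmbuf_iova_offset() only accounts for the first segment, so
         * if the region of interest starts beyond it, DMA from the start of
         * the chain instead; the SGL entries describe the rest.
         */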
1493         if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
1494                 min_ofs = 0;
1495
1496         if (unlikely(op->sym->m_dst != NULL)) {
1497                 /* Out-of-place operation (OOP)
1498                  * Don't align DMA start. DMA the minimum data-set
1499                  * so as not to overwrite data in dest buffer
1500                  */
1501                 src_buf_start =
1502                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
1503                 dst_buf_start =
1504                         rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
1505
1506         } else {
1507                 /* In-place operation
1508                  * Start DMA at nearest aligned address below min_ofs
1509                  */
1510                 src_buf_start =
1511                         rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
1512                                                 & QAT_64_BTYE_ALIGN_MASK;
1513
1514                 if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
1515                                         rte_pktmbuf_headroom(op->sym->m_src))
1516                                                         > src_buf_start)) {
1517                         /* alignment has pushed the address below the start of
1518                          * the mbuf buffer, so revert and take the performance hit
1519                          */
1520                         src_buf_start =
1521                                 rte_pktmbuf_iova_offset(op->sym->m_src,
1522                                                                 min_ofs);
1523                 }
1524                 dst_buf_start = src_buf_start;
1525         }
1526
1527         if (do_cipher || do_aead) {
1528                 cipher_param->cipher_offset =
1529                                 (uint32_t)rte_pktmbuf_iova_offset(
1530                                 op->sym->m_src, cipher_ofs) - src_buf_start;
1531                 cipher_param->cipher_length = cipher_len;
1532         } else {
1533                 cipher_param->cipher_offset = 0;
1534                 cipher_param->cipher_length = 0;
1535         }
1536
1537         if (do_auth || do_aead) {
1538                 auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
1539                                 op->sym->m_src, auth_ofs) - src_buf_start;
1540                 auth_param->auth_len = auth_len;
1541         } else {
1542                 auth_param->auth_off = 0;
1543                 auth_param->auth_len = 0;
1544         }
1545
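        /* The DMA length must cover whichever of the cipher or auth regions
         * extends furthest from the buffer start.
         */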
1546         qat_req->comn_mid.dst_length =
1547                 qat_req->comn_mid.src_length =
1548                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1549                 > (auth_param->auth_off + auth_param->auth_len) ?
1550                 (cipher_param->cipher_offset + cipher_param->cipher_length)
1551                 : (auth_param->auth_off + auth_param->auth_len);
1552
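        /*
         * SGL case: mark the request as using scatter-gather pointers and
         * fill the per-op cookie's SGL arrays; otherwise program the flat
         * source/destination buffer addresses directly.
         */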
1553         if (do_sgl) {
1554
1555                 ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
1556                                 QAT_COMN_PTR_TYPE_SGL);
1557                 ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
1558                                 &qat_op_cookie->qat_sgl_list_src,
1559                                 qat_req->comn_mid.src_length);
1560                 if (ret) {
1561                         PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
1562                         return ret;
1563                 }
1564
1565                 if (likely(op->sym->m_dst == NULL))
1566                         qat_req->comn_mid.dest_data_addr =
1567                                 qat_req->comn_mid.src_data_addr =
1568                                 qat_op_cookie->qat_sgl_src_phys_addr;
1569                 else {
1570                         ret = qat_sgl_fill_array(op->sym->m_dst,
1571                                         dst_buf_start,
1572                                         &qat_op_cookie->qat_sgl_list_dst,
1573                                                 qat_req->comn_mid.dst_length);
1574
1575                         if (ret) {
1576                                 PMD_DRV_LOG(ERR, "QAT PMD Cannot "
1577                                                 "fill sgl array");
1578                                 return ret;
1579                         }
1580
1581                         qat_req->comn_mid.src_data_addr =
1582                                 qat_op_cookie->qat_sgl_src_phys_addr;
1583                         qat_req->comn_mid.dest_data_addr =
1584                                         qat_op_cookie->qat_sgl_dst_phys_addr;
1585                 }
1586         } else {
1587                 qat_req->comn_mid.src_data_addr = src_buf_start;
1588                 qat_req->comn_mid.dest_data_addr = dst_buf_start;
1589         }
1590
1591 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
1592         rte_hexdump(stdout, "qat_req:", qat_req,
1593                         sizeof(struct icp_qat_fw_la_bulk_req));
1594         rte_hexdump(stdout, "src_data:",
1595                         rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
1596                         rte_pktmbuf_data_len(op->sym->m_src));
1597         if (do_cipher) {
1598                 uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
1599                                                 uint8_t *,
1600                                                 ctx->cipher_iv.offset);
1601                 rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
1602                                 ctx->cipher_iv.length);
1603         }
1604
1605         if (do_auth) {
1606                 if (ctx->auth_iv.length) {
1607                         uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
1608                                                         uint8_t *,
1609                                                         ctx->auth_iv.offset);
1610                         rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
1611                                                 ctx->auth_iv.length);
1612                 }
1613                 rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
1614                                 ctx->digest_length);
1615         }
1616
1617         if (do_aead) {
1618                 rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
1619                                 ctx->digest_length);
1620                 rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
1621                                 ctx->aad_len);
1622         }
1623 #endif
1624         return 0;
1625 }
1626
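/* Fast modulo for power-of-two divisors: returns data % (1 << shift). */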
1627 static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
1628 {
1629         uint32_t div = data >> shift;
1630         uint32_t mult = div << shift;
1631
1632         return data - mult;
1633 }
1634
1635 int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
1636                 __rte_unused struct rte_cryptodev_config *config)
1637 {
1638         PMD_INIT_FUNC_TRACE();
1639         return 0;
1640 }
1641
1642 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
1643 {
1644         PMD_INIT_FUNC_TRACE();
1645         return 0;
1646 }
1647
1648 void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
1649 {
1650         PMD_INIT_FUNC_TRACE();
1651 }
1652
1653 int qat_dev_close(struct rte_cryptodev *dev)
1654 {
1655         int i, ret;
1656
1657         PMD_INIT_FUNC_TRACE();
1658
1659         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1660                 ret = qat_crypto_sym_qp_release(dev, i);
1661                 if (ret < 0)
1662                         return ret;
1663         }
1664
1665         return 0;
1666 }
1667
1668 void qat_dev_info_get(struct rte_cryptodev *dev,
1669                         struct rte_cryptodev_info *info)
1670 {
1671         struct qat_pmd_private *internals = dev->data->dev_private;
1672
1673         PMD_INIT_FUNC_TRACE();
1674         if (info != NULL) {
1675                 info->max_nb_queue_pairs =
1676                                 ADF_NUM_SYM_QPS_PER_BUNDLE *
1677                                 ADF_NUM_BUNDLES_PER_DEV;
1678                 info->feature_flags = dev->feature_flags;
1679                 info->capabilities = internals->qat_dev_capabilities;
1680                 info->sym.max_nb_sessions = internals->max_nb_sessions;
1681                 info->driver_id = cryptodev_qat_driver_id;
1682                 info->pci_dev = RTE_DEV_TO_PCI(dev->device);
1683         }
1684 }
1685
1686 void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
1687                 struct rte_cryptodev_stats *stats)
1688 {
1689         int i;
1690         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1691
1692         PMD_INIT_FUNC_TRACE();
1693         if (stats == NULL) {
1694                 PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
1695                 return;
1696         }
1697         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
1698                 if (qp[i] == NULL) {
1699                         PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
1700                         continue;
1701                 }
1702
1703                 stats->enqueued_count += qp[i]->stats.enqueued_count;
1704                 stats->dequeued_count += qp[i]->stats.dequeued_count;
1705                 stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
1706                 stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
1707         }
1708 }
1709
1710 void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
1711 {
1712         int i;
1713         struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
1714
1715         PMD_INIT_FUNC_TRACE();
1716         for (i = 0; i < dev->data->nb_queue_pairs; i++)
1717                 memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
1718         PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
1719 }