/*
 *  This file is provided under a dual BSD/GPLv2 license.  When using or
 *  redistributing this file, you may do so under either license.
 *
 *  GPL LICENSE SUMMARY
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *  qat-linux@intel.com
 *
 *  BSD LICENSE
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      * Redistributions in binary form must reproduce the above copyright
 *        notice, this list of conditions and the following disclaimer in
 *        the documentation and/or other materials provided with the
 *        distribution.
 *      * Neither the name of Intel Corporation nor the names of its
 *        contributors may be used to endorse or promote products derived
 *        from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"

#include <openssl/sha.h>        /* Needed to calculate pre-compute values */
#include <openssl/aes.h>        /* Needed to calculate pre-compute values */
#include <openssl/md5.h>        /* Needed to calculate pre-compute values */

#include "qat_algs.h"

/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
{
        switch (qat_cipher_alg) {
        case ICP_QAT_HW_CIPHER_ALGO_DES:
                return ICP_QAT_HW_DES_BLK_SZ;
        case ICP_QAT_HW_CIPHER_ALGO_3DES:
                return ICP_QAT_HW_3DES_BLK_SZ;
        case ICP_QAT_HW_CIPHER_ALGO_AES128:
        case ICP_QAT_HW_CIPHER_ALGO_AES192:
        case ICP_QAT_HW_CIPHER_ALGO_AES256:
                return ICP_QAT_HW_AES_BLK_SZ;
        default:
                PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
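/*
 * Worked example (assuming QAT_HW_DEFAULT_ALIGNMENT is 8): SHA-1 has a
 * 20-byte state1, which QAT_HW_ROUND_UP() pads to the next quadword
 * boundary, i.e. 24 bytes.
 */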
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum state1 size in this case */
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return ICP_QAT_HW_SHA224_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return ICP_QAT_HW_SHA384_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return ICP_QAT_HW_MD5_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum digest size in this case */
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return SHA_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return SHA256_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return SHA256_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
                return 16;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return MD5_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum block size in this case */
                return SHA512_CBLOCK;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

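/*
 * The partial_hash_* helpers below run a single compression round of the
 * relevant hash over one input block via OpenSSL's *_Transform() calls
 * (no padding, no finalisation) and copy the resulting internal state out
 * of the context. partial_hash_compute() then byte-swaps that state into
 * the big-endian layout expected in the content descriptor (MD5 is copied
 * through unchanged, as its state is little-endian).
 */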
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
        SHA_CTX ctx;

        if (!SHA1_Init(&ctx))
                return -EFAULT;
        SHA1_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
        SHA256_CTX ctx;

        if (!SHA224_Init(&ctx))
                return -EFAULT;
        SHA256_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
        SHA256_CTX ctx;

        if (!SHA256_Init(&ctx))
                return -EFAULT;
        SHA256_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
        SHA512_CTX ctx;

        if (!SHA384_Init(&ctx))
                return -EFAULT;
        SHA512_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
        SHA512_CTX ctx;

        if (!SHA512_Init(&ctx))
                return -EFAULT;
        SHA512_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
        MD5_CTX ctx;

        if (!MD5_Init(&ctx))
                return -EFAULT;
        MD5_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

        return 0;
}

static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
                        uint8_t *data_in,
                        uint8_t *data_out)
{
        int digest_size;
        uint8_t digest[qat_hash_get_digest_size(
                        ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint32_t *hash_state_out_be32;
        uint64_t *hash_state_out_be64;
        int i;

        PMD_INIT_FUNC_TRACE();
        digest_size = qat_hash_get_digest_size(hash_alg);
        if (digest_size <= 0)
                return -EFAULT;

        hash_state_out_be32 = (uint32_t *)data_out;
        hash_state_out_be64 = (uint64_t *)data_out;

        switch (hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (partial_hash_sha1(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                if (partial_hash_sha224(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (partial_hash_sha256(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                if (partial_hash_sha384(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
                        *hash_state_out_be64 =
                                rte_bswap64(*(((uint64_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (partial_hash_sha512(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
                        *hash_state_out_be64 =
                                rte_bswap64(*(((uint64_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                if (partial_hash_md5(data_in, data_out))
                        return -EFAULT;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
                return -EFAULT;
        }

        return 0;
}
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

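/*
 * For plain HMAC algorithms, qat_alg_do_precomputes() follows the usual
 * HMAC construction: it writes the partial hash of (key XOR ipad) into
 * state1 and the partial hash of (key XOR opad) immediately after it (the
 * state2 area), so the hardware can continue the inner and outer hashes
 * from those midstates. AES-XCBC-MAC and Galois/GCM take the dedicated
 * key-derivation paths below instead.
 */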
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
                                const uint8_t *auth_key,
                                uint16_t auth_keylen,
                                uint8_t *p_state_buf,
                                uint16_t *p_state_len)
{
        int block_size;
        uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        int i;

        PMD_INIT_FUNC_TRACE();
        if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
                static uint8_t qat_aes_xcbc_key_seed[
                                        ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                };

                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                int x;
                AES_KEY enc_key;

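                /*
                 * Per RFC 3566 the three AES-XCBC-MAC subkeys are derived by
                 * encrypting the constant blocks 0x01..01, 0x02..02 and
                 * 0x03..03 with the user-supplied key; the loop below does
                 * exactly that, one ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ block at
                 * a time, writing the results straight into state2.
                 */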
                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory");
                        return -ENOMEM;
                }
                rte_memcpy(in, qat_aes_xcbc_key_seed,
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
                        if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                                &enc_key) != 0) {
                                rte_free(in -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
                                memset(out -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
                                        0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                                return -EFAULT;
                        }
                        AES_encrypt(in, out, &enc_key);
                        in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                        out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                }
                *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
                rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
                return 0;
        } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
                (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                AES_KEY enc_key;

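                /*
                 * For GCM the hash key H is AES-K(0^128) (NIST SP 800-38D);
                 * AES_encrypt() below produces it and it is written at the
                 * start of state2. The rest of the state2 area (the len(A)
                 * and E_CTR0 slots) is zeroed here.
                 */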
                memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ);
                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_GALOIS_H_SZ, 16);
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to alloc memory");
                        return -ENOMEM;
                }
                memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
                if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                        &enc_key) != 0) {
                        rte_free(in);
                        return -EFAULT;
                }
                AES_encrypt(in, out, &enc_key);
                *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ;
                rte_free(in);
                return 0;
        }

        block_size = qat_hash_get_block_size(hash_alg);
        if (block_size <= 0)
                return -EFAULT;
        /* init ipad and opad from key and xor with fixed values */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);

        if (auth_keylen > (unsigned int)block_size) {
                PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
                return -EFAULT;
        }
        rte_memcpy(ipad, auth_key, auth_keylen);
        rte_memcpy(opad, auth_key, auth_keylen);

        for (i = 0; i < block_size; i++) {
                uint8_t *ipad_ptr = ipad + i;
                uint8_t *opad_ptr = opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        /* do partial hash of ipad and copy to state1 */
        if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "ipad precompute failed");
                return -EFAULT;
        }

        /*
         * State len is a multiple of 8, so may be larger than the digest.
         * Put the partial hash of opad state_len bytes after state1
         */
        *p_state_len = qat_hash_get_state1_size(hash_alg);
        if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "opad precompute failed");
                return -EFAULT;
        }

        /* don't leave data lying around */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        return 0;
}

void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
                enum qat_crypto_proto_flag proto_flags)
{
        PMD_INIT_FUNC_TRACE();
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                        QAT_COMN_PTR_TYPE_FLAT);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

        switch (proto_flags) {
        case QAT_CRYPTO_PROTO_FLAG_NONE:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_CCM:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_CCM_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_GCM:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_GCM_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_SNOW_3G_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_ZUC:
                ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
                        ICP_QAT_FW_LA_ZUC_3G_PROTO);
                break;
        }

        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_UPDATE_STATE);
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}

/*
 *      SNOW 3G and ZUC never use this function; they set their protocol
 *      flag directly in both the cipher and auth parts of the content
 *      descriptor build functions.
 */
static enum qat_crypto_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
        int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
        enum qat_crypto_proto_flag qat_proto_flag =
                        QAT_CRYPTO_PROTO_FLAG_NONE;

        switch (proto) {
        case ICP_QAT_FW_LA_GCM_PROTO:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
                break;
        case ICP_QAT_FW_LA_CCM_PROTO:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
                break;
        }

        return qat_proto_flag;
}

int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
                                                uint8_t *cipherkey,
                                                uint32_t cipherkeylen)
{
        struct icp_qat_hw_cipher_algo_blk *cipher;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        enum icp_qat_hw_cipher_convert key_convert;
        enum qat_crypto_proto_flag qat_proto_flag =
                QAT_CRYPTO_PROTO_FLAG_NONE;
        uint32_t total_key_size;
        uint16_t cipher_offset, cd_size;
        uint32_t wordIndex = 0;
        uint32_t *temp_key = NULL;
        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
                return -EFAULT;
        }

        if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
                /*
                 * CTR Streaming ciphers are a special case. Decrypt = encrypt
                 * Overriding default values previously set
                 */
                cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
                || cdesc->qat_cipher_alg ==
                        ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
        else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        else
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

        if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
                total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                cipher_cd_ctrl->cipher_state_sz =
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
                total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
                cipher_cd_ctrl->cipher_padding_sz =
                                        (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
                total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
                qat_proto_flag =
                        qat_get_crypto_proto_flag(header->serv_specif_flags);
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
                total_key_size = ICP_QAT_HW_DES_KEY_SZ;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
                qat_proto_flag =
                        qat_get_crypto_proto_flag(header->serv_specif_flags);
        } else if (cdesc->qat_cipher_alg ==
                ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
                total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
                        ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
                cipher_cd_ctrl->cipher_state_sz =
                        ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
        } else {
                total_key_size = cipherkeylen;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
                qat_proto_flag =
                        qat_get_crypto_proto_flag(header->serv_specif_flags);
        }
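        /*
         * Sizes and offsets in the cd_ctrl block are expressed in 8-byte
         * quadwords, hence the '>> 3' conversions from byte counts below.
         */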
        cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
        cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

        header->service_cmd_id = cdesc->qat_cmd;
        qat_alg_init_common_hdr(header, qat_proto_flag);

        cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
        cipher->cipher_config.val =
            ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
                                        cdesc->qat_cipher_alg, key_convert,
                                        cdesc->qat_dir);

        if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
                temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
                                        sizeof(struct icp_qat_hw_cipher_config)
                                        + cipherkeylen);
                memcpy(cipher->key, cipherkey, cipherkeylen);
                memcpy(temp_key, cipherkey, cipherkeylen);

                /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
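                /*
                 * Per 3GPP TS 35.201 the f8 key modifier KM is the byte 0x55
                 * repeated across the key, so the content descriptor ends up
                 * holding CK followed by CK XOR KM.
                 */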
                for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
                                                                wordIndex++)
                        temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

                cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                                        cipherkeylen + cipherkeylen;
        } else {
                memcpy(cipher->key, cipherkey, cipherkeylen);
                cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                                        cipherkeylen;
        }

        if (total_key_size > cipherkeylen) {
                uint32_t padding_size = total_key_size - cipherkeylen;

                if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
                        && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
                        /* K3 not provided, so use K1 as K3 */
                        memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
                else
                        memset(cdesc->cd_cur_ptr, 0, padding_size);
                cdesc->cd_cur_ptr += padding_size;
        }
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}

int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
                                                uint8_t *authkey,
                                                uint32_t authkeylen,
                                                uint32_t add_auth_data_length,
                                                uint32_t digestsize,
                                                unsigned int operation)
{
        struct icp_qat_hw_auth_setup *hash;
        struct icp_qat_hw_cipher_algo_blk *cipherconfig;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));
        uint16_t state1_size = 0, state2_size = 0;
        uint16_t hash_offset, cd_size;
        uint32_t *aad_len = NULL;
        uint32_t wordIndex = 0;
        uint32_t *pTempKey;
        enum qat_crypto_proto_flag qat_proto_flag =
                QAT_CRYPTO_PROTO_FLAG_NONE;

        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
                return -EFAULT;
        }

        if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_CMP_AUTH_RES);
                cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
        } else {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
                cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
        }

        /*
         * Setup the inner hash config
         */
        hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
        hash->auth_config.reserved = 0;
        hash->auth_config.config =
                        ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                cdesc->qat_hash_alg, digestsize);

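        /*
         * The auth counter is pre-loaded with the hash block size (stored
         * big-endian) for the block-oriented MACs; the 3GPP algorithms
         * (SNOW 3G UIA2, KASUMI F9, ZUC EIA3) do not use it, so it is left
         * at zero.
         */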
        if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
                || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
                || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
                hash->auth_counter.counter = 0;
        else
                hash->auth_counter.counter = rte_bswap32(
                                qat_hash_get_block_size(cdesc->qat_hash_alg));

        cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

        /*
         * cd_cur_ptr now points at the state1 information.
         */
        switch (cdesc->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
                        authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
                        &state2_size)) {
                        PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
                        return -EFAULT;
                }
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
                state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
                if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
                        authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
                        &state2_size)) {
                        PMD_DRV_LOG(ERR, "(GCM)precompute failed");
                        return -EFAULT;
                }
                /*
                 * Write the AAD length into bytes 16-19 of state2 in
                 * big-endian format; the len(A) field itself is 8 bytes
                 * wide, only its low 32 bits are populated here.
                 */
                auth_param->u2.aad_sz =
                                RTE_ALIGN_CEIL(add_auth_data_length, 16);
                auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

                aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
                                        ICP_QAT_HW_GALOIS_128_STATE1_SZ +
                                        ICP_QAT_HW_GALOIS_H_SZ);
                *aad_len = rte_bswap32(add_auth_data_length);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
                state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
                memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

                cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
                                (cdesc->cd_cur_ptr + state1_size + state2_size);
                cipherconfig->cipher_config.val =
                ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
                        ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
                        ICP_QAT_HW_CIPHER_KEY_CONVERT,
                        ICP_QAT_HW_CIPHER_ENCRYPT);
                memcpy(cipherconfig->key, authkey, authkeylen);
                memset(cipherconfig->key + authkeylen,
                                0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
                cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                                authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                auth_param->hash_state_sz =
                                RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
                break;
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                hash->auth_config.config =
                        ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
                                cdesc->qat_hash_alg, digestsize);
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
                state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
                memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
                        + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);

                memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
                cdesc->cd_cur_ptr += state1_size + state2_size
                        + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
                auth_param->hash_state_sz =
                                RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;

                break;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
                        authkey, authkeylen, cdesc->cd_cur_ptr,
                        &state1_size)) {
                        PMD_DRV_LOG(ERR, "(MD5)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_NULL:
                break;
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
                state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
                memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
                pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
                                                        + authkeylen);
                /*
                 * The Inner Hash Initial State2 block must contain IK
                 * (Initialisation Key), followed by IK XOR-ed with KM
                 * (Key Modifier): IK||(IK^KM).
                 */
                /* write the auth key */
                memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
                /* initialise temp key with auth key */
                memcpy(pTempKey, authkey, authkeylen);
                /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
                for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
                        pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
                return -EFAULT;
        }

        /* Request template setup */
        qat_alg_init_common_hdr(header, qat_proto_flag);
        header->service_cmd_id = cdesc->qat_cmd;

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;
        hash_cd_ctrl->inner_state1_sz = state1_size;
        auth_param->auth_res_sz = digestsize;

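        /*
         * state2 follows the auth setup struct and the (8-byte aligned)
         * state1; like the other cd_ctrl fields, the offset below is
         * expressed in quadwords.
         */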
        hash_cd_ctrl->inner_state2_sz = state2_size;
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
                                        >> 3);

        cdesc->cd_cur_ptr += state1_size + state2_size;
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}

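/*
 * The qat_alg_validate_*_key() helpers below map a raw key length onto the
 * matching hardware algorithm enum. A typical (illustrative) call from
 * session setup looks like:
 *
 *      enum icp_qat_hw_cipher_algo alg;
 *
 *      if (qat_alg_validate_aes_key(cipher_xform->key.length, &alg) != 0)
 *              return -EINVAL;
 *
 * where cipher_xform is assumed to be the rte_crypto_cipher_xform being
 * processed by the caller.
 */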
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_AES_128_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        case ICP_QAT_HW_AES_192_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                break;
        case ICP_QAT_HW_AES_256_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_aes_docsisbpi_key(int key_len,
                enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_AES_128_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_KASUMI_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_DES_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case QAT_3DES_KEY_SZ_OPT1:
        case QAT_3DES_KEY_SZ_OPT2:
                *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}