1 /*
2  *  This file is provided under a dual BSD/GPLv2 license.  When using or
3  *  redistributing this file, you may do so under either license.
4  *
5  *  GPL LICENSE SUMMARY
6  *  Copyright(c) 2015-2016 Intel Corporation.
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of version 2 of the GNU General Public License as
9  *  published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope that it will be useful, but
12  *  WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  General Public License for more details.
15  *
16  *  Contact Information:
17  *  qat-linux@intel.com
18  *
19  *  BSD LICENSE
20  *  Copyright(c) 2015-2017 Intel Corporation.
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *      * Redistributions of source code must retain the above copyright
26  *        notice, this list of conditions and the following disclaimer.
27  *      * Redistributions in binary form must reproduce the above copyright
28  *        notice, this list of conditions and the following disclaimer in
29  *        the documentation and/or other materials provided with the
30  *        distribution.
31  *      * Neither the name of Intel Corporation nor the names of its
32  *        contributors may be used to endorse or promote products derived
33  *        from this software without specific prior written permission.
34  *
35  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46  */
47
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
52 #include <rte_log.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
55
56 #include "../qat_logs.h"
57
58 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
59 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
60 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
61
62 #include "qat_algs.h"
63
64 /* returns block size in bytes per cipher algo */
65 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
66 {
67         switch (qat_cipher_alg) {
68         case ICP_QAT_HW_CIPHER_ALGO_DES:
69                 return ICP_QAT_HW_DES_BLK_SZ;
70         case ICP_QAT_HW_CIPHER_ALGO_3DES:
71                 return ICP_QAT_HW_3DES_BLK_SZ;
72         case ICP_QAT_HW_CIPHER_ALGO_AES128:
73         case ICP_QAT_HW_CIPHER_ALGO_AES192:
74         case ICP_QAT_HW_CIPHER_ALGO_AES256:
75                 return ICP_QAT_HW_AES_BLK_SZ;
76         default:
77                 PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
78                 return -EFAULT;
79         };
80         return -EFAULT;
81 }
82
83 /*
84  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
85  * This is digest size rounded up to nearest quadword
86  */
87 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
88 {
89         switch (qat_hash_alg) {
90         case ICP_QAT_HW_AUTH_ALGO_SHA1:
91                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
92                                                 QAT_HW_DEFAULT_ALIGNMENT);
93         case ICP_QAT_HW_AUTH_ALGO_SHA224:
94                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
95                                                 QAT_HW_DEFAULT_ALIGNMENT);
96         case ICP_QAT_HW_AUTH_ALGO_SHA256:
97                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
98                                                 QAT_HW_DEFAULT_ALIGNMENT);
99         case ICP_QAT_HW_AUTH_ALGO_SHA384:
100                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
101                                                 QAT_HW_DEFAULT_ALIGNMENT);
102         case ICP_QAT_HW_AUTH_ALGO_SHA512:
103                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
104                                                 QAT_HW_DEFAULT_ALIGNMENT);
105         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
106                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
107                                                 QAT_HW_DEFAULT_ALIGNMENT);
108         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
109         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
110                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
111                                                 QAT_HW_DEFAULT_ALIGNMENT);
112         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
113                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
114                                                 QAT_HW_DEFAULT_ALIGNMENT);
115         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
116                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
117                                                 QAT_HW_DEFAULT_ALIGNMENT);
118         case ICP_QAT_HW_AUTH_ALGO_MD5:
119                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
120                                                 QAT_HW_DEFAULT_ALIGNMENT);
121         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
122                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
123                                                 QAT_HW_DEFAULT_ALIGNMENT);
124         case ICP_QAT_HW_AUTH_ALGO_NULL:
125                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
126                                                 QAT_HW_DEFAULT_ALIGNMENT);
127         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
128                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
129                                                 QAT_HW_DEFAULT_ALIGNMENT);
130         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
131                 /* return maximum state1 size in this case */
132                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
133                                                 QAT_HW_DEFAULT_ALIGNMENT);
134         default:
135                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
136                 return -EFAULT;
137         };
138         return -EFAULT;
139 }
140
141 /* returns digest size in bytes per hash algo */
142 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
143 {
144         switch (qat_hash_alg) {
145         case ICP_QAT_HW_AUTH_ALGO_SHA1:
146                 return ICP_QAT_HW_SHA1_STATE1_SZ;
147         case ICP_QAT_HW_AUTH_ALGO_SHA224:
148                 return ICP_QAT_HW_SHA224_STATE1_SZ;
149         case ICP_QAT_HW_AUTH_ALGO_SHA256:
150                 return ICP_QAT_HW_SHA256_STATE1_SZ;
151         case ICP_QAT_HW_AUTH_ALGO_SHA384:
152                 return ICP_QAT_HW_SHA384_STATE1_SZ;
153         case ICP_QAT_HW_AUTH_ALGO_SHA512:
154                 return ICP_QAT_HW_SHA512_STATE1_SZ;
155         case ICP_QAT_HW_AUTH_ALGO_MD5:
156                 return ICP_QAT_HW_MD5_STATE1_SZ;
157         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
158                 /* return maximum digest size in this case */
159                 return ICP_QAT_HW_SHA512_STATE1_SZ;
160         default:
161                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
162                 return -EFAULT;
163         };
164         return -EFAULT;
165 }
166
167 /* returns block size in bytes per hash algo */
168 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
169 {
170         switch (qat_hash_alg) {
171         case ICP_QAT_HW_AUTH_ALGO_SHA1:
172                 return SHA_CBLOCK;
173         case ICP_QAT_HW_AUTH_ALGO_SHA224:
174                 return SHA256_CBLOCK;
175         case ICP_QAT_HW_AUTH_ALGO_SHA256:
176                 return SHA256_CBLOCK;
177         case ICP_QAT_HW_AUTH_ALGO_SHA384:
178                 return SHA512_CBLOCK;
179         case ICP_QAT_HW_AUTH_ALGO_SHA512:
180                 return SHA512_CBLOCK;
181         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
182                 return 16;
183         case ICP_QAT_HW_AUTH_ALGO_MD5:
184                 return MD5_CBLOCK;
185         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
186                 /* return maximum block size in this case */
187                 return SHA512_CBLOCK;
188         default:
189                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
190                 return -EFAULT;
191         };
192         return -EFAULT;
193 }
194
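/*
 * The partial_hash_* helpers below run a single compression-function block
 * (SHA*_Transform / MD5_Transform) over one block of input and copy the
 * resulting intermediate state out of the OpenSSL context. This relies on
 * the OpenSSL *_CTX layouts keeping the chaining variables at the start of
 * the structure; the copied intermediate state (not a finalised digest) is
 * what qat_alg_do_precomputes() loads into the CD as state1/state2.
 */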
195 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
196 {
197         SHA_CTX ctx;
198
199         if (!SHA1_Init(&ctx))
200                 return -EFAULT;
201         SHA1_Transform(&ctx, data_in);
202         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
203         return 0;
204 }
205
206 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
207 {
208         SHA256_CTX ctx;
209
210         if (!SHA224_Init(&ctx))
211                 return -EFAULT;
212         SHA256_Transform(&ctx, data_in);
213         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
214         return 0;
215 }
216
217 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
218 {
219         SHA256_CTX ctx;
220
221         if (!SHA256_Init(&ctx))
222                 return -EFAULT;
223         SHA256_Transform(&ctx, data_in);
224         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
225         return 0;
226 }
227
228 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
229 {
230         SHA512_CTX ctx;
231
232         if (!SHA384_Init(&ctx))
233                 return -EFAULT;
234         SHA512_Transform(&ctx, data_in);
235         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
236         return 0;
237 }
238
239 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
240 {
241         SHA512_CTX ctx;
242
243         if (!SHA512_Init(&ctx))
244                 return -EFAULT;
245         SHA512_Transform(&ctx, data_in);
246         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
247         return 0;
248 }
249
250 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
251 {
252         MD5_CTX ctx;
253
254         if (!MD5_Init(&ctx))
255                 return -EFAULT;
256         MD5_Transform(&ctx, data_in);
257         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
258
259         return 0;
260 }
261
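/*
 * partial_hash_compute() byte-swaps the intermediate SHA state words
 * (32-bit words for SHA-1/224/256, 64-bit words for SHA-384/512) into the
 * big-endian layout the hardware state uses. MD5 keeps its natural
 * little-endian word order, so its state is copied through unchanged.
 */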
262 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
263                         uint8_t *data_in,
264                         uint8_t *data_out)
265 {
266         int digest_size;
267         uint8_t digest[qat_hash_get_digest_size(
268                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
269         uint32_t *hash_state_out_be32;
270         uint64_t *hash_state_out_be64;
271         int i;
272
273         PMD_INIT_FUNC_TRACE();
274         digest_size = qat_hash_get_digest_size(hash_alg);
275         if (digest_size <= 0)
276                 return -EFAULT;
277
278         hash_state_out_be32 = (uint32_t *)data_out;
279         hash_state_out_be64 = (uint64_t *)data_out;
280
281         switch (hash_alg) {
282         case ICP_QAT_HW_AUTH_ALGO_SHA1:
283                 if (partial_hash_sha1(data_in, digest))
284                         return -EFAULT;
285                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
286                         *hash_state_out_be32 =
287                                 rte_bswap32(*(((uint32_t *)digest)+i));
288                 break;
289         case ICP_QAT_HW_AUTH_ALGO_SHA224:
290                 if (partial_hash_sha224(data_in, digest))
291                         return -EFAULT;
292                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
293                         *hash_state_out_be32 =
294                                 rte_bswap32(*(((uint32_t *)digest)+i));
295                 break;
296         case ICP_QAT_HW_AUTH_ALGO_SHA256:
297                 if (partial_hash_sha256(data_in, digest))
298                         return -EFAULT;
299                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
300                         *hash_state_out_be32 =
301                                 rte_bswap32(*(((uint32_t *)digest)+i));
302                 break;
303         case ICP_QAT_HW_AUTH_ALGO_SHA384:
304                 if (partial_hash_sha384(data_in, digest))
305                         return -EFAULT;
306                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
307                         *hash_state_out_be64 =
308                                 rte_bswap64(*(((uint64_t *)digest)+i));
309                 break;
310         case ICP_QAT_HW_AUTH_ALGO_SHA512:
311                 if (partial_hash_sha512(data_in, digest))
312                         return -EFAULT;
313                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
314                         *hash_state_out_be64 =
315                                 rte_bswap64(*(((uint64_t *)digest)+i));
316                 break;
317         case ICP_QAT_HW_AUTH_ALGO_MD5:
318                 if (partial_hash_md5(data_in, data_out))
319                         return -EFAULT;
320                 break;
321         default:
322                 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
323                 return -EFAULT;
324         }
325
326         return 0;
327 }
328 #define HMAC_IPAD_VALUE 0x36
329 #define HMAC_OPAD_VALUE 0x5c
330 #define HASH_XCBC_PRECOMP_KEY_NUM 3
331
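/*
 * Pre-computes the authentication state written into the content descriptor:
 *  - HMAC algos: state1 = partial hash of (key XOR ipad), state2 = partial
 *    hash of (key XOR opad), per RFC 2104.
 *  - AES-XCBC-MAC: derives the K1/K2/K3 subkeys from the key (RFC 3566).
 *  - GCM/GMAC: derives the GHASH key H from the key.
 */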
332 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
333                                 const uint8_t *auth_key,
334                                 uint16_t auth_keylen,
335                                 uint8_t *p_state_buf,
336                                 uint16_t *p_state_len)
337 {
338         int block_size;
339         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
340         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
341         int i;
342
343         PMD_INIT_FUNC_TRACE();
344         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
345                 static uint8_t qat_aes_xcbc_key_seed[
346                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
347                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
348                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
349                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
350                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
351                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
352                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
353                 };
354
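                /*
                 * AES-XCBC-MAC (RFC 3566) derives three subkeys by encrypting
                 * the constant blocks 0x01..01, 0x02..02 and 0x03..03 with the
                 * user key; the loop below writes K1 || K2 || K3 into state2.
                 */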
355                 uint8_t *in = NULL;
356                 uint8_t *out = p_state_buf;
357                 int x;
358                 AES_KEY enc_key;
359
360                 in = rte_zmalloc("working mem for key",
361                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
362                 if (in == NULL) {
363                         PMD_DRV_LOG(ERR, "Failed to alloc memory");
364                         return -ENOMEM;
365                 }
366
367                 rte_memcpy(in, qat_aes_xcbc_key_seed,
368                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
369                 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
370                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
371                                 &enc_key) != 0) {
372                                 rte_free(in -
373                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
374                                 memset(out -
375                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
376                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
377                                 return -EFAULT;
378                         }
379                         AES_encrypt(in, out, &enc_key);
380                         in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
381                         out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
382                 }
383                 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
384                 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
385                 return 0;
386         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
387                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
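                /*
                 * For GCM/GMAC the hash (GHASH) key is H = AES-ECB(key, 0^128),
                 * i.e. the encryption of an all-zero block. H is written at
                 * the start of state2; the len(A) and E(K, CTR0) areas that
                 * follow are only zeroed here.
                 */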
388                 uint8_t *in = NULL;
389                 uint8_t *out = p_state_buf;
390                 AES_KEY enc_key;
391
392                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
393                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
394                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
395                 in = rte_zmalloc("working mem for key",
396                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
397                 if (in == NULL) {
398                         PMD_DRV_LOG(ERR, "Failed to alloc memory");
399                         return -ENOMEM;
400                 }
401
402                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
403                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
404                         &enc_key) != 0) {
                        rte_free(in);
405                         return -EFAULT;
406                 }
407                 AES_encrypt(in, out, &enc_key);
408                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
409                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
410                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
411                 rte_free(in);
412                 return 0;
413         }
414
415         block_size = qat_hash_get_block_size(hash_alg);
416         if (block_size <= 0)
417                 return -EFAULT;
418         /* init ipad and opad from key and xor with fixed values */
419         memset(ipad, 0, block_size);
420         memset(opad, 0, block_size);
421
422         if (auth_keylen > (unsigned int)block_size) {
423                 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
424                 return -EFAULT;
425         }
426         rte_memcpy(ipad, auth_key, auth_keylen);
427         rte_memcpy(opad, auth_key, auth_keylen);
428
429         for (i = 0; i < block_size; i++) {
430                 uint8_t *ipad_ptr = ipad + i;
431                 uint8_t *opad_ptr = opad + i;
432                 *ipad_ptr ^= HMAC_IPAD_VALUE;
433                 *opad_ptr ^= HMAC_OPAD_VALUE;
434         }
435
436         /* do partial hash of ipad and copy to state1 */
437         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
438                 memset(ipad, 0, block_size);
439                 memset(opad, 0, block_size);
440                 PMD_DRV_LOG(ERR, "ipad precompute failed");
441                 return -EFAULT;
442         }
443
444         /*
445          * The state1 size is a multiple of 8, so it may be larger than the
446          * digest; put the partial hash of opad state1_size bytes after state1.
447          */
448         *p_state_len = qat_hash_get_state1_size(hash_alg);
449         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
450                 memset(ipad, 0, block_size);
451                 memset(opad, 0, block_size);
452                 PMD_DRV_LOG(ERR, "opad precompute failed");
453                 return -EFAULT;
454         }
455
456         /* don't leave data lying around */
457         memset(ipad, 0, block_size);
458         memset(opad, 0, block_size);
459         return 0;
460 }
461
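/*
 * Fills in the request-header fields shared by all QAT crypto requests:
 * LA service type, flat (non-SGL) pointer type, 64-bit content-descriptor
 * address, no partial packets, 16-byte IV field, and the LA protocol bits
 * matching the given proto_flags (CCM/GCM/SNOW 3G/ZUC or none); state update
 * and digest-in-buffer are disabled by default.
 */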
462 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
463                 enum qat_crypto_proto_flag proto_flags)
464 {
465         PMD_INIT_FUNC_TRACE();
466         header->hdr_flags =
467                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
468         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
469         header->comn_req_flags =
470                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
471                                         QAT_COMN_PTR_TYPE_FLAT);
472         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
473                                   ICP_QAT_FW_LA_PARTIAL_NONE);
474         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
475                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
476
477         switch (proto_flags) {
478         case QAT_CRYPTO_PROTO_FLAG_NONE:
479                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
480                                         ICP_QAT_FW_LA_NO_PROTO);
481                 break;
482         case QAT_CRYPTO_PROTO_FLAG_CCM:
483                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
484                                         ICP_QAT_FW_LA_CCM_PROTO);
485                 break;
486         case QAT_CRYPTO_PROTO_FLAG_GCM:
487                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
488                                         ICP_QAT_FW_LA_GCM_PROTO);
489                 break;
490         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
491                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
492                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
493                 break;
494         case QAT_CRYPTO_PROTO_FLAG_ZUC:
495                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
496                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
497                 break;
498         }
499
500         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
501                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
502         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
503                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
504 }
505
506 /*
507  *      Snow3G and ZUC should never use this function; they set their
508  *      protocol flag directly in both the cipher and auth parts of the
509  *      content descriptor building functions.
510  */
511 static enum qat_crypto_proto_flag
512 qat_get_crypto_proto_flag(uint16_t flags)
513 {
514         int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
515         enum qat_crypto_proto_flag qat_proto_flag =
516                         QAT_CRYPTO_PROTO_FLAG_NONE;
517
518         switch (proto) {
519         case ICP_QAT_FW_LA_GCM_PROTO:
520                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
521                 break;
522         case ICP_QAT_FW_LA_CCM_PROTO:
523                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
524                 break;
525         }
526
527         return qat_proto_flag;
528 }
529
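/*
 * Builds the cipher half of the content descriptor: programs the slice chain
 * (cipher -> DRAM write, or cipher -> auth for chained operations), selects
 * whether the hardware should convert the key, writes the cipher config word
 * and key material into the CD, and records the resulting CD size.
 */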
530 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
531                                                 uint8_t *cipherkey,
532                                                 uint32_t cipherkeylen)
533 {
534         struct icp_qat_hw_cipher_algo_blk *cipher;
535         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
536         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
537         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
538         void *ptr = &req_tmpl->cd_ctrl;
539         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
540         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
541         enum icp_qat_hw_cipher_convert key_convert;
542         enum qat_crypto_proto_flag qat_proto_flag =
543                 QAT_CRYPTO_PROTO_FLAG_NONE;
544         uint32_t total_key_size;
545         uint16_t cipher_offset, cd_size;
546         uint32_t wordIndex  = 0;
547         uint32_t *temp_key = NULL;
548         PMD_INIT_FUNC_TRACE();
549
550         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
551                 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
552                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
553                                         ICP_QAT_FW_SLICE_CIPHER);
554                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
555                                         ICP_QAT_FW_SLICE_DRAM_WR);
556                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
557                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
558                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
559                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
560                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
561         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
562                 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
563                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
564                                         ICP_QAT_FW_SLICE_CIPHER);
565                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
566                                         ICP_QAT_FW_SLICE_AUTH);
567                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
568                                         ICP_QAT_FW_SLICE_AUTH);
569                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
570                                         ICP_QAT_FW_SLICE_DRAM_WR);
571                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
572         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
573                 PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
574                 return -EFAULT;
575         }
576
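        /*
         * Choose the key-conversion setting for the cipher config word:
         * CTR-based modes always run the encrypt direction with no key
         * conversion, SNOW 3G and ZUC always request conversion, and other
         * ciphers request conversion only when decrypting.
         */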
577         if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
578                 /*
579                  * CTR streaming ciphers are a special case: decrypt = encrypt.
580                  * Override the default values set previously.
581                  */
582                 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
583                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
584         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
585                 || cdesc->qat_cipher_alg ==
586                         ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
587                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
588         else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
589                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
590         else
591                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
592
593         if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
594                 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
595                         ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
596                 cipher_cd_ctrl->cipher_state_sz =
597                         ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
598                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
599
600         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
601                 total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
602                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
603                 cipher_cd_ctrl->cipher_padding_sz =
604                                         (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
605         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
606                 total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
607                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
608                 qat_proto_flag =
609                         qat_get_crypto_proto_flag(header->serv_specif_flags);
610         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
611                 total_key_size = ICP_QAT_HW_DES_KEY_SZ;
612                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
613                 qat_proto_flag =
614                         qat_get_crypto_proto_flag(header->serv_specif_flags);
615         } else if (cdesc->qat_cipher_alg ==
616                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
617                 total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
618                         ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
619                 cipher_cd_ctrl->cipher_state_sz =
620                         ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
621                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
622                 cdesc->min_qat_dev_gen = QAT_GEN2;
623         } else {
624                 total_key_size = cipherkeylen;
625                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
626                 qat_proto_flag =
627                         qat_get_crypto_proto_flag(header->serv_specif_flags);
628         }
629         cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
630         cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
631         cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
632
633         header->service_cmd_id = cdesc->qat_cmd;
634         qat_alg_init_common_hdr(header, qat_proto_flag);
635
636         cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
637         cipher->cipher_config.val =
638             ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
639                                         cdesc->qat_cipher_alg, key_convert,
640                                         cdesc->qat_dir);
641
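        /*
         * KASUMI F8 expects the key followed by the key XOR-ed with the F8
         * key modifier, so the CD carries two key-sized fields for KASUMI;
         * every other algorithm stores a single copy of the key.
         */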
642         if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
643                 temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
644                                         sizeof(struct icp_qat_hw_cipher_config)
645                                         + cipherkeylen);
646                 memcpy(cipher->key, cipherkey, cipherkeylen);
647                 memcpy(temp_key, cipherkey, cipherkeylen);
648
649                 /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
650                 for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
651                                                                 wordIndex++)
652                         temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
653
654                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
655                                         cipherkeylen + cipherkeylen;
656         } else {
657                 memcpy(cipher->key, cipherkey, cipherkeylen);
658                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
659                                         cipherkeylen;
660         }
661
662         if (total_key_size > cipherkeylen) {
663                 uint32_t padding_size =  total_key_size-cipherkeylen;
664                 if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
665                         && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
666                         /* K3 not provided, so use K1 as K3 */
667                         memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
668                 else
669                         memset(cdesc->cd_cur_ptr, 0, padding_size);
670                 cdesc->cd_cur_ptr += padding_size;
671         }
672         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
673         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
674
675         return 0;
676 }
677
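/*
 * Builds the authentication half of the content descriptor: programs the
 * slice chain, sets verify vs. generate flags, writes the inner hash config,
 * fills state1/state2 per algorithm (HMAC/XCBC/GCM pre-computes, or the raw
 * key for SNOW 3G, ZUC, KASUMI F9 and AES-CBC-MAC/CCM), and finally sets the
 * hash CD control sizes and offsets.
 */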
678 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
679                                                 uint8_t *authkey,
680                                                 uint32_t authkeylen,
681                                                 uint32_t aad_length,
682                                                 uint32_t digestsize,
683                                                 unsigned int operation)
684 {
685         struct icp_qat_hw_auth_setup *hash;
686         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
687         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
688         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
689         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
690         void *ptr = &req_tmpl->cd_ctrl;
691         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
692         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
693         struct icp_qat_fw_la_auth_req_params *auth_param =
694                 (struct icp_qat_fw_la_auth_req_params *)
695                 ((char *)&req_tmpl->serv_specif_rqpars +
696                 sizeof(struct icp_qat_fw_la_cipher_req_params));
697         uint16_t state1_size = 0, state2_size = 0;
698         uint16_t hash_offset, cd_size;
699         uint32_t *aad_len = NULL;
700         uint32_t wordIndex  = 0;
701         uint32_t *pTempKey;
702         enum qat_crypto_proto_flag qat_proto_flag =
703                 QAT_CRYPTO_PROTO_FLAG_NONE;
704
705         PMD_INIT_FUNC_TRACE();
706
707         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
708                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
709                                         ICP_QAT_FW_SLICE_AUTH);
710                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
711                                         ICP_QAT_FW_SLICE_DRAM_WR);
712                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
713         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
714                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
715                                 ICP_QAT_FW_SLICE_AUTH);
716                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
717                                 ICP_QAT_FW_SLICE_CIPHER);
718                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
719                                 ICP_QAT_FW_SLICE_CIPHER);
720                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
721                                 ICP_QAT_FW_SLICE_DRAM_WR);
722                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
723         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
724                 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
725                 return -EFAULT;
726         }
727
728         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
729                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
730                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
731                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
732                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
733                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
734         } else {
735                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
736                                            ICP_QAT_FW_LA_RET_AUTH_RES);
737                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
738                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
739                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
740         }
741
742         /*
743          * Setup the inner hash config
744          */
745         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
746         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
747         hash->auth_config.reserved = 0;
748         hash->auth_config.config =
749                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
750                                 cdesc->qat_hash_alg, digestsize);
751
752         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
753                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
754                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
755                 hash->auth_counter.counter = 0;
756         else
757                 hash->auth_counter.counter = rte_bswap32(
758                                 qat_hash_get_block_size(cdesc->qat_hash_alg));
759
760         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
761
762         /*
763          * cd_cur_ptr now points at the state1 information.
764          */
765         switch (cdesc->qat_hash_alg) {
766         case ICP_QAT_HW_AUTH_ALGO_SHA1:
767                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
768                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
769                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
770                         return -EFAULT;
771                 }
772                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
773                 break;
774         case ICP_QAT_HW_AUTH_ALGO_SHA224:
775                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
776                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
777                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
778                         return -EFAULT;
779                 }
780                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
781                 break;
782         case ICP_QAT_HW_AUTH_ALGO_SHA256:
783                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
784                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
785                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
786                         return -EFAULT;
787                 }
788                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
789                 break;
790         case ICP_QAT_HW_AUTH_ALGO_SHA384:
791                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
792                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
793                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
794                         return -EFAULT;
795                 }
796                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
797                 break;
798         case ICP_QAT_HW_AUTH_ALGO_SHA512:
799                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
800                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
801                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
802                         return -EFAULT;
803                 }
804                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
805                 break;
806         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
807                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
808                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
809                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
810                         &state2_size)) {
811                         PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
812                         return -EFAULT;
813                 }
814                 break;
815         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
816         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
817                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
818                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
819                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
820                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
821                         &state2_size)) {
822                         PMD_DRV_LOG(ERR, "(GCM)precompute failed");
823                         return -EFAULT;
824                 }
825                 /*
826                  * Write the AAD length into bytes 16-19 of state2 in big-endian
827                  * format; the length field itself is 8 bytes, the rest stays zero.
828                  */
829                 auth_param->u2.aad_sz =
830                                 RTE_ALIGN_CEIL(aad_length, 16);
831                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
832
833                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
834                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
835                                         ICP_QAT_HW_GALOIS_H_SZ);
836                 *aad_len = rte_bswap32(aad_length);
837                 cdesc->aad_len = aad_length;
838                 break;
839         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
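                /*
                 * SNOW 3G UIA2 keeps its key in a cipher config block (UEA2,
                 * key-convert, encrypt) appended after state1/state2; the
                 * IV-sized area after the key is zeroed here and
                 * auth_param->hash_state_sz is set to the IV size in 8-byte
                 * words.
                 */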
840                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
841                 state1_size = qat_hash_get_state1_size(
842                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
843                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
844                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
845
846                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
847                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
848                 cipherconfig->cipher_config.val =
849                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
850                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
851                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
852                         ICP_QAT_HW_CIPHER_ENCRYPT);
853                 memcpy(cipherconfig->key, authkey, authkeylen);
854                 memset(cipherconfig->key + authkeylen,
855                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
856                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
857                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
858                 auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
859                 break;
860         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
861                 hash->auth_config.config =
862                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
863                                 cdesc->qat_hash_alg, digestsize);
864                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
865                 state1_size = qat_hash_get_state1_size(
866                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
867                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
868                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
869                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
870
871                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
872                 cdesc->cd_cur_ptr += state1_size + state2_size
873                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
874                 auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
875                 cdesc->min_qat_dev_gen = QAT_GEN2;
876
877                 break;
878         case ICP_QAT_HW_AUTH_ALGO_MD5:
879                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
880                         authkey, authkeylen, cdesc->cd_cur_ptr,
881                         &state1_size)) {
882                         PMD_DRV_LOG(ERR, "(MD5)precompute failed");
883                         return -EFAULT;
884                 }
885                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
886                 break;
887         case ICP_QAT_HW_AUTH_ALGO_NULL:
888                 state1_size = qat_hash_get_state1_size(
889                                 ICP_QAT_HW_AUTH_ALGO_NULL);
890                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
891                 break;
892         case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
893                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
894                 state1_size = qat_hash_get_state1_size(
895                                 ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
896                 state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
897                                 ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
898
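                /*
                 * For CCM the AAD buffer also carries the B0 block and the
                 * AAD length encoding, so grow aad_length accordingly and
                 * round it up to the CCM AAD alignment; with no AAD, only
                 * room for B0 is reserved.
                 */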
899                 if (aad_length > 0) {
900                         aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
901                                 ICP_QAT_HW_CCM_AAD_LEN_INFO;
902                         auth_param->u2.aad_sz =
903                                         RTE_ALIGN_CEIL(aad_length,
904                                         ICP_QAT_HW_CCM_AAD_ALIGNMENT);
905                 } else {
906                         auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
907                 }
908
909                 cdesc->aad_len = aad_length;
910                 hash->auth_counter.counter = 0;
911
912                 hash_cd_ctrl->outer_prefix_sz = digestsize;
913                 auth_param->hash_state_sz = digestsize;
914
915                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
916                 break;
917         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
918                 state1_size = qat_hash_get_state1_size(
919                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
920                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
921                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
922                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
923                                                         + authkeylen);
924                 /*
925                  * The Inner Hash Initial State2 block must contain IK
926                  * (Initialisation Key), followed by IK XOR-ed with KM
927                  * (Key Modifier): IK||(IK^KM).
928                  */
929                 /* write the auth key */
930                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
931                 /* initialise temp key with auth key */
932                 memcpy(pTempKey, authkey, authkeylen);
933                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
934                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
935                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
936                 break;
937         default:
938                 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
939                 return -EFAULT;
940         }
941
942         /* Request template setup */
943         qat_alg_init_common_hdr(header, qat_proto_flag);
944         header->service_cmd_id = cdesc->qat_cmd;
945
946         /* Auth CD config setup */
947         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
948         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
949         hash_cd_ctrl->inner_res_sz = digestsize;
950         hash_cd_ctrl->final_sz = digestsize;
951         hash_cd_ctrl->inner_state1_sz = state1_size;
952         auth_param->auth_res_sz = digestsize;
953
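        /*
         * state2 follows the auth setup block and the 8-byte-aligned state1;
         * offsets in the CD control are expressed in 8-byte quadwords, hence
         * the >> 3 and RTE_ALIGN_CEIL(..., 8) below.
         */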
954         hash_cd_ctrl->inner_state2_sz  = state2_size;
955         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
956                         ((sizeof(struct icp_qat_hw_auth_setup) +
957                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
958                                         >> 3);
959
960         cdesc->cd_cur_ptr += state1_size + state2_size;
961         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
962
963         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
964         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
965
966         return 0;
967 }
968
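/*
 * The qat_alg_validate_*_key() helpers below map a key length onto the
 * matching hardware cipher-algorithm enum and return -EINVAL for key sizes
 * the algorithm does not support.
 */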
969 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
970 {
971         switch (key_len) {
972         case ICP_QAT_HW_AES_128_KEY_SZ:
973                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
974                 break;
975         case ICP_QAT_HW_AES_192_KEY_SZ:
976                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
977                 break;
978         case ICP_QAT_HW_AES_256_KEY_SZ:
979                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
980                 break;
981         default:
982                 return -EINVAL;
983         }
984         return 0;
985 }
986
987 int qat_alg_validate_aes_docsisbpi_key(int key_len,
988                 enum icp_qat_hw_cipher_algo *alg)
989 {
990         switch (key_len) {
991         case ICP_QAT_HW_AES_128_KEY_SZ:
992                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
993                 break;
994         default:
995                 return -EINVAL;
996         }
997         return 0;
998 }
999
1000 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
1001 {
1002         switch (key_len) {
1003         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
1004                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
1005                 break;
1006         default:
1007                 return -EINVAL;
1008         }
1009         return 0;
1010 }
1011
1012 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
1013 {
1014         switch (key_len) {
1015         case ICP_QAT_HW_KASUMI_KEY_SZ:
1016                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
1017                 break;
1018         default:
1019                 return -EINVAL;
1020         }
1021         return 0;
1022 }
1023
1024 int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
1025 {
1026         switch (key_len) {
1027         case ICP_QAT_HW_DES_KEY_SZ:
1028                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
1029                 break;
1030         default:
1031                 return -EINVAL;
1032         }
1033         return 0;
1034 }
1035
1036 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
1037 {
1038         switch (key_len) {
1039         case QAT_3DES_KEY_SZ_OPT1:
1040         case QAT_3DES_KEY_SZ_OPT2:
1041                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
1042                 break;
1043         default:
1044                 return -EINVAL;
1045         }
1046         return 0;
1047 }
1048
1049 int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
1050 {
1051         switch (key_len) {
1052         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
1053                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
1054                 break;
1055         default:
1056                 return -EINVAL;
1057         }
1058         return 0;
1059 }