/*
 *  This file is provided under a dual BSD/GPLv2 license.  When using or
 *  redistributing this file, you may do so under either license.
 *
 *  GPL LICENSE SUMMARY
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *  qat-linux@intel.com
 *
 *  BSD LICENSE
 *  Copyright(c) 2015-2017 Intel Corporation.
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      * Redistributions in binary form must reproduce the above copyright
 *        notice, this list of conditions and the following disclaimer in
 *        the documentation and/or other materials provided with the
 *        distribution.
 *      * Neither the name of Intel Corporation nor the names of its
 *        contributors may be used to endorse or promote products derived
 *        from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"

#include <openssl/sha.h>        /* Needed to calculate pre-compute values */
#include <openssl/aes.h>        /* Needed to calculate pre-compute values */
#include <openssl/md5.h>        /* Needed to calculate pre-compute values */

#include "qat_algs.h"

/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
{
        switch (qat_cipher_alg) {
        case ICP_QAT_HW_CIPHER_ALGO_DES:
                return ICP_QAT_HW_DES_BLK_SZ;
        case ICP_QAT_HW_CIPHER_ALGO_3DES:
                return ICP_QAT_HW_3DES_BLK_SZ;
        case ICP_QAT_HW_CIPHER_ALGO_AES128:
        case ICP_QAT_HW_CIPHER_ALGO_AES192:
        case ICP_QAT_HW_CIPHER_ALGO_AES256:
                return ICP_QAT_HW_AES_BLK_SZ;
        default:
                PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
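/*
 * Worked example (for illustration only): SHA-1 keeps a 20-byte state1
 * (ICP_QAT_HW_SHA1_STATE1_SZ), so with the default 8-byte alignment
 * QAT_HW_ROUND_UP(20, 8) pads the field to 24 bytes, keeping the
 * hardware state quadword aligned.
 */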
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_NULL:
                return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum state1 size in this case */
                return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
                                                QAT_HW_DEFAULT_ALIGNMENT);
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return ICP_QAT_HW_SHA1_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return ICP_QAT_HW_SHA224_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return ICP_QAT_HW_SHA256_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return ICP_QAT_HW_SHA384_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return ICP_QAT_HW_MD5_STATE1_SZ;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum digest size in this case */
                return ICP_QAT_HW_SHA512_STATE1_SZ;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
        switch (qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                return SHA_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                return SHA256_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                return SHA256_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                return SHA512_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
                return 16;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                return MD5_CBLOCK;
        case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
                /* return maximum block size in this case */
                return SHA512_CBLOCK;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
                return -EFAULT;
        };
        return -EFAULT;
}

static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
        SHA_CTX ctx;

        if (!SHA1_Init(&ctx))
                return -EFAULT;
        SHA1_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
        return 0;
}
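
/*
 * Note on the partial_hash_* helpers: each one runs the underlying
 * compression function over exactly one input block (SHA*_Transform and
 * MD5_Transform add no length padding) and then copies the context's
 * leading hash-state words (h0..h4 for SHA-1, the full h[8] array for
 * SHA-384/512). E.g. partial_hash_sha1(block, out) leaves out[] holding
 * SHA-1's chaining state after compressing that single 64-byte block,
 * which is the intermediate state the QAT hardware resumes from. This
 * relies on OpenSSL laying those words out at the start of the *_CTX
 * structures, which holds for the OpenSSL versions this PMD builds
 * against.
 */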

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
        SHA256_CTX ctx;

        if (!SHA224_Init(&ctx))
                return -EFAULT;
        SHA256_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
        SHA256_CTX ctx;

        if (!SHA256_Init(&ctx))
                return -EFAULT;
        SHA256_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
        SHA512_CTX ctx;

        if (!SHA384_Init(&ctx))
                return -EFAULT;
        SHA512_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
        SHA512_CTX ctx;

        if (!SHA512_Init(&ctx))
                return -EFAULT;
        SHA512_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
        return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
        MD5_CTX ctx;

        if (!MD5_Init(&ctx))
                return -EFAULT;
        MD5_Transform(&ctx, data_in);
        rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

        return 0;
}

static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
                        uint8_t *data_in,
                        uint8_t *data_out)
{
        int digest_size;
        uint8_t digest[qat_hash_get_digest_size(
                        ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint32_t *hash_state_out_be32;
        uint64_t *hash_state_out_be64;
        int i;

        PMD_INIT_FUNC_TRACE();
        digest_size = qat_hash_get_digest_size(hash_alg);
        if (digest_size <= 0)
                return -EFAULT;

        hash_state_out_be32 = (uint32_t *)data_out;
        hash_state_out_be64 = (uint64_t *)data_out;

        switch (hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (partial_hash_sha1(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                if (partial_hash_sha224(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (partial_hash_sha256(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
                        *hash_state_out_be32 =
                                rte_bswap32(*(((uint32_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                if (partial_hash_sha384(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
                        *hash_state_out_be64 =
                                rte_bswap64(*(((uint64_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (partial_hash_sha512(data_in, digest))
                        return -EFAULT;
                for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
                        *hash_state_out_be64 =
                                rte_bswap64(*(((uint64_t *)digest)+i));
                break;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                if (partial_hash_md5(data_in, data_out))
                        return -EFAULT;
                break;
        default:
                PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
                return -EFAULT;
        }

        return 0;
}
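
/*
 * The byte-swap loops above store the SHA chaining words big-endian, as
 * the SHA algorithms (and the QAT hardware state) define them, while
 * OpenSSL keeps them as native integers on the little-endian hosts this
 * PMD runs on. MD5 state is little-endian by definition, so it is
 * copied unchanged by partial_hash_md5().
 */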
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

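/*
 * Key material precompute, sketched (illustrative only):
 *  - AES-XCBC-MAC: K1/K2/K3 are derived by AES-encrypting the constant
 *    blocks 0x01..01, 0x02..02 and 0x03..03 with the user key (RFC 3566).
 *  - GCM/GMAC: the first 16 bytes of state2 receive the GHASH key
 *    H = AES_K(0^128); the rest of the GALOIS state area is zeroed here.
 *  - HMAC: state1 and the following state_len bytes hold the partial
 *    hashes of (key ^ ipad) and (key ^ opad), i.e. the precomputed inner
 *    and outer states of HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 */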
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
                                const uint8_t *auth_key,
                                uint16_t auth_keylen,
                                uint8_t *p_state_buf,
                                uint16_t *p_state_len)
{
        int block_size;
        uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
        int i;

        PMD_INIT_FUNC_TRACE();
        if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
                static uint8_t qat_aes_xcbc_key_seed[
                                        ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                        0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
                };

                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                int x;
                AES_KEY enc_key;

                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory");
                        return -ENOMEM;
                }
                rte_memcpy(in, qat_aes_xcbc_key_seed,
                                ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
                        if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                                &enc_key) != 0) {
                                rte_free(in -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
                                memset(out -
                                        (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
                                        0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
                                return -EFAULT;
                        }
                        AES_encrypt(in, out, &enc_key);
                        in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                        out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
                }
                *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
                rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
                return 0;
        } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
                (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
                uint8_t *in = NULL;
                uint8_t *out = p_state_buf;
                AES_KEY enc_key;

                memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ);
                in = rte_zmalloc("working mem for key",
                                ICP_QAT_HW_GALOIS_H_SZ, 16);
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory");
                        return -ENOMEM;
                }
                memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
                if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
                        &enc_key) != 0) {
                        rte_free(in);
                        return -EFAULT;
                }
                AES_encrypt(in, out, &enc_key);
                *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
                                ICP_QAT_HW_GALOIS_LEN_A_SZ +
                                ICP_QAT_HW_GALOIS_E_CTR0_SZ;
                rte_free(in);
                return 0;
        }

        block_size = qat_hash_get_block_size(hash_alg);
        if (block_size <= 0)
                return -EFAULT;
        /* init ipad and opad from key and xor with fixed values */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);

        if (auth_keylen > (unsigned int)block_size) {
                PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
                return -EFAULT;
        }
        rte_memcpy(ipad, auth_key, auth_keylen);
        rte_memcpy(opad, auth_key, auth_keylen);

        for (i = 0; i < block_size; i++) {
                uint8_t *ipad_ptr = ipad + i;
                uint8_t *opad_ptr = opad + i;
                *ipad_ptr ^= HMAC_IPAD_VALUE;
                *opad_ptr ^= HMAC_OPAD_VALUE;
        }

        /* do partial hash of ipad and copy to state1 */
        if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "ipad precompute failed");
                return -EFAULT;
        }

        /*
         * State len is a multiple of 8, so may be larger than the digest.
         * Put the partial hash of opad state_len bytes after state1
         */
        *p_state_len = qat_hash_get_state1_size(hash_alg);
        if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
                memset(ipad, 0, block_size);
                memset(opad, 0, block_size);
                PMD_DRV_LOG(ERR, "opad precompute failed");
                return -EFAULT;
        }

        /*  don't leave data lying around */
        memset(ipad, 0, block_size);
        memset(opad, 0, block_size);
        return 0;
}

void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
                enum qat_crypto_proto_flag proto_flags)
{
        PMD_INIT_FUNC_TRACE();
        header->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
        header->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                        QAT_COMN_PTR_TYPE_FLAT);
        ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
                                  ICP_QAT_FW_LA_PARTIAL_NONE);
        ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_CIPH_IV_16BYTE_DATA);

        switch (proto_flags) {
        case QAT_CRYPTO_PROTO_FLAG_NONE:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_CCM:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_CCM_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_GCM:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_GCM_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
                ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_SNOW_3G_PROTO);
                break;
        case QAT_CRYPTO_PROTO_FLAG_ZUC:
                ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
                        ICP_QAT_FW_LA_ZUC_3G_PROTO);
                break;
        }

        ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_UPDATE_STATE);
        ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
}

/*
 *      Snow3G and ZUC must never use this function; they set their
 *      protocol flag directly in both the cipher and auth parts of the
 *      content descriptor build functions.
 */
static enum qat_crypto_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
        int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
        enum qat_crypto_proto_flag qat_proto_flag =
                        QAT_CRYPTO_PROTO_FLAG_NONE;

        switch (proto) {
        case ICP_QAT_FW_LA_GCM_PROTO:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
                break;
        case ICP_QAT_FW_LA_CCM_PROTO:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
                break;
        }

        return qat_proto_flag;
}

int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
                                                uint8_t *cipherkey,
                                                uint32_t cipherkeylen)
{
        struct icp_qat_hw_cipher_algo_blk *cipher;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        enum icp_qat_hw_cipher_convert key_convert;
        enum qat_crypto_proto_flag qat_proto_flag =
                QAT_CRYPTO_PROTO_FLAG_NONE;
        uint32_t total_key_size;
        uint16_t cipher_offset, cd_size;
        uint32_t wordIndex  = 0;
        uint32_t *temp_key = NULL;
        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                        ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
                return -EFAULT;
        }

        if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
                /*
                 * CTR Streaming ciphers are a special case. Decrypt = encrypt
                 * Overriding default values previously set
                 */
                cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
                || cdesc->qat_cipher_alg ==
                        ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
        else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
                key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
        else
                key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

        if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
                total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                cipher_cd_ctrl->cipher_state_sz =
                        ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;

        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
                total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
                cipher_cd_ctrl->cipher_padding_sz =
                                        (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
                total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
                qat_proto_flag =
                        qat_get_crypto_proto_flag(header->serv_specif_flags);
        } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
                total_key_size = ICP_QAT_HW_DES_KEY_SZ;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
                qat_proto_flag =
                        qat_get_crypto_proto_flag(header->serv_specif_flags);
        } else if (cdesc->qat_cipher_alg ==
                ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
                total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
                        ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
                cipher_cd_ctrl->cipher_state_sz =
                        ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
                cdesc->min_qat_dev_gen = QAT_GEN2;
        } else {
                total_key_size = cipherkeylen;
                cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
                qat_proto_flag =
                        qat_get_crypto_proto_flag(header->serv_specif_flags);
        }
        cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
        cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

        header->service_cmd_id = cdesc->qat_cmd;
        qat_alg_init_common_hdr(header, qat_proto_flag);

        cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
        cipher->cipher_config.val =
            ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
                                        cdesc->qat_cipher_alg, key_convert,
                                        cdesc->qat_dir);

        if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
                temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
                                        sizeof(struct icp_qat_hw_cipher_config)
                                        + cipherkeylen);
                memcpy(cipher->key, cipherkey, cipherkeylen);
                memcpy(temp_key, cipherkey, cipherkeylen);

                /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
                for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
                                                                wordIndex++)
                        temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

                cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                                        cipherkeylen + cipherkeylen;
        } else {
                memcpy(cipher->key, cipherkey, cipherkeylen);
                cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                                        cipherkeylen;
        }

        if (total_key_size > cipherkeylen) {
                uint32_t padding_size =  total_key_size-cipherkeylen;
                if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
                        && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
                        /* K3 not provided so use K1 = K3 */
                        memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
                else
                        memset(cdesc->cd_cur_ptr, 0, padding_size);
                cdesc->cd_cur_ptr += padding_size;
        }
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}
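
/*
 * Resulting cipher half of the content descriptor (sketch):
 *   [icp_qat_hw_cipher_config][cipher key][key padding or F8 key copy]
 * cipher_cfg_offset and cipher_key_sz are expressed in quadwords (>> 3)
 * and the descriptor size reported to the firmware is rounded up to an
 * 8-byte multiple.
 */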

int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
                                                uint8_t *authkey,
                                                uint32_t authkeylen,
                                                uint32_t aad_length,
                                                uint32_t digestsize,
                                                unsigned int operation)
{
        struct icp_qat_hw_auth_setup *hash;
        struct icp_qat_hw_cipher_algo_blk *cipherconfig;
        struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
        struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
        struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
        void *ptr = &req_tmpl->cd_ctrl;
        struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
        struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
        struct icp_qat_fw_la_auth_req_params *auth_param =
                (struct icp_qat_fw_la_auth_req_params *)
                ((char *)&req_tmpl->serv_specif_rqpars +
                sizeof(struct icp_qat_fw_la_cipher_req_params));
        uint16_t state1_size = 0, state2_size = 0;
        uint16_t hash_offset, cd_size;
        uint32_t *aad_len = NULL;
        uint32_t wordIndex  = 0;
        uint32_t *pTempKey;
        enum qat_crypto_proto_flag qat_proto_flag =
                QAT_CRYPTO_PROTO_FLAG_NONE;

        PMD_INIT_FUNC_TRACE();

        if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                        ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
                ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_AUTH);
                ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_CIPHER);
                ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
                                ICP_QAT_FW_SLICE_DRAM_WR);
                cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
        } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
                PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
                return -EFAULT;
        }

        if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_NO_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                ICP_QAT_FW_LA_CMP_AUTH_RES);
                cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
        } else {
                ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_RET_AUTH_RES);
                ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
                                           ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
                cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
        }

        /*
         * Setup the inner hash config
         */
        hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
        hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
        hash->auth_config.reserved = 0;
        hash->auth_config.config =
                        ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
                                cdesc->qat_hash_alg, digestsize);

        if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
                || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
                || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
                hash->auth_counter.counter = 0;
        else
                hash->auth_counter.counter = rte_bswap32(
                                qat_hash_get_block_size(cdesc->qat_hash_alg));

        cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

        /*
         * cd_cur_ptr now points at the state1 information.
         */
        switch (cdesc->qat_hash_alg) {
        case ICP_QAT_HW_AUTH_ALGO_SHA1:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA224:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA256:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA384:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SHA512:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
                        authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
                        PMD_DRV_LOG(ERR, "(SHA)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
                state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
                        authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
                        &state2_size)) {
                        PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
                        return -EFAULT;
                }
                break;
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
        case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
                state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
                if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
                        authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
                        &state2_size)) {
                        PMD_DRV_LOG(ERR, "(GCM)precompute failed");
                        return -EFAULT;
                }
                /*
                 * Write (the length of AAD) into bytes 16-19 of state2
                 * in big-endian format. This field is 8 bytes
                 */
                auth_param->u2.aad_sz =
                                RTE_ALIGN_CEIL(aad_length, 16);
                auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

                aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
                                        ICP_QAT_HW_GALOIS_128_STATE1_SZ +
                                        ICP_QAT_HW_GALOIS_H_SZ);
                *aad_len = rte_bswap32(aad_length);
                cdesc->aad_len = aad_length;
                break;
        case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
                state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
                memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

                cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
                                (cdesc->cd_cur_ptr + state1_size + state2_size);
                cipherconfig->cipher_config.val =
                ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
                        ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
                        ICP_QAT_HW_CIPHER_KEY_CONVERT,
                        ICP_QAT_HW_CIPHER_ENCRYPT);
                memcpy(cipherconfig->key, authkey, authkeylen);
                memset(cipherconfig->key + authkeylen,
                                0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
                cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
                                authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
                auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
                break;
        case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
                hash->auth_config.config =
                        ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
                                cdesc->qat_hash_alg, digestsize);
                qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
                state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
                memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
                        + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);

                memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
                cdesc->cd_cur_ptr += state1_size + state2_size
                        + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
                auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
                cdesc->min_qat_dev_gen = QAT_GEN2;

                break;
        case ICP_QAT_HW_AUTH_ALGO_MD5:
                if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
                        authkey, authkeylen, cdesc->cd_cur_ptr,
                        &state1_size)) {
                        PMD_DRV_LOG(ERR, "(MD5)precompute failed");
                        return -EFAULT;
                }
                state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_NULL:
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_NULL);
                state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
                break;
        case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
                state1_size = qat_hash_get_state1_size(
                                ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
                state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
                memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
                pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
                                                        + authkeylen);
                /*
                 * The Inner Hash Initial State2 block must contain IK
                 * (Initialisation Key), followed by IK XOR-ed with KM
                 * (Key Modifier): IK||(IK^KM).
                 */
                /* write the auth key */
                memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
                /* initialise temp key with auth key */
                memcpy(pTempKey, authkey, authkeylen);
                /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
                for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
                        pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
                break;
        default:
                PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
                return -EFAULT;
        }

        /* Request template setup */
        qat_alg_init_common_hdr(header, qat_proto_flag);
        header->service_cmd_id = cdesc->qat_cmd;

        /* Auth CD config setup */
        hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
        hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
        hash_cd_ctrl->inner_res_sz = digestsize;
        hash_cd_ctrl->final_sz = digestsize;
        hash_cd_ctrl->inner_state1_sz = state1_size;
        auth_param->auth_res_sz = digestsize;

        hash_cd_ctrl->inner_state2_sz  = state2_size;
        hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
                        ((sizeof(struct icp_qat_hw_auth_setup) +
                         RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
                                        >> 3);

        cdesc->cd_cur_ptr += state1_size + state2_size;
        cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;

        cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
        cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

        return 0;
}
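
/*
 * Resulting auth half of the content descriptor (sketch):
 *   [icp_qat_hw_auth_setup][state1][state2][algo-specific trailer]
 * state1 is padded to an 8-byte boundary before state2, and
 * inner_state2_offset is measured in quadwords from hash_cfg_offset.
 */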

int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_AES_128_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        case ICP_QAT_HW_AES_192_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
                break;
        case ICP_QAT_HW_AES_256_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_aes_docsisbpi_key(int key_len,
                enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_AES_128_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_KASUMI_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_DES_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case QAT_3DES_KEY_SZ_OPT1:
        case QAT_3DES_KEY_SZ_OPT2:
                *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
        switch (key_len) {
        case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
                *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
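
/*
 * Illustrative caller-side sketch (not part of this file; the real
 * callers live in the PMD's session configuration code). The local
 * names below are hypothetical and for illustration only:
 *
 *      enum icp_qat_hw_cipher_algo alg;
 *
 *      if (qat_alg_validate_aes_key(key_len, &alg) != 0)
 *              return -EINVAL;            <-- unsupported key size
 *      session->qat_cipher_alg = alg;
 *      qat_alg_aead_session_create_content_desc_cipher(session,
 *                      cipher_key, key_len);
 */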