drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
1 /*
2  *  This file is provided under a dual BSD/GPLv2 license.  When using or
3  *  redistributing this file, you may do so under either license.
4  *
5  *  GPL LICENSE SUMMARY
6  *  Copyright(c) 2015-2016 Intel Corporation.
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of version 2 of the GNU General Public License as
9  *  published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope that it will be useful, but
12  *  WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  General Public License for more details.
15  *
16  *  Contact Information:
17  *  qat-linux@intel.com
18  *
19  *  BSD LICENSE
20  *  Copyright(c) 2015-2016 Intel Corporation.
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *      * Redistributions of source code must retain the above copyright
26  *        notice, this list of conditions and the following disclaimer.
27  *      * Redistributions in binary form must reproduce the above copyright
28  *        notice, this list of conditions and the following disclaimer in
29  *        the documentation and/or other materials provided with the
30  *        distribution.
31  *      * Neither the name of Intel Corporation nor the names of its
32  *        contributors may be used to endorse or promote products derived
33  *        from this software without specific prior written permission.
34  *
35  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46  */
47
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
52 #include <rte_log.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
55
56 #include "../qat_logs.h"
57
58 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
59 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
60 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
61
62 #include "qat_algs.h"
63
64 /* returns block size in bytes per cipher algo */
65 int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
66 {
67         switch (qat_cipher_alg) {
68         case ICP_QAT_HW_CIPHER_ALGO_DES:
69                 return ICP_QAT_HW_DES_BLK_SZ;
70         case ICP_QAT_HW_CIPHER_ALGO_3DES:
71                 return ICP_QAT_HW_3DES_BLK_SZ;
72         case ICP_QAT_HW_CIPHER_ALGO_AES128:
73         case ICP_QAT_HW_CIPHER_ALGO_AES192:
74         case ICP_QAT_HW_CIPHER_ALGO_AES256:
75                 return ICP_QAT_HW_AES_BLK_SZ;
76         default:
77                 PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
78                 return -EFAULT;
79         }
80         return -EFAULT;
81 }
82
83 /*
84  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
85  * This is the digest size rounded up to the nearest quadword.
86  */
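/*
 * Worked example (assuming ICP_QAT_HW_SHA1_STATE1_SZ is 20 and
 * QAT_HW_DEFAULT_ALIGNMENT is 8): the 20-byte SHA1 digest is rounded
 * up to a 24-byte state1 size.
 */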
87 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
88 {
89         switch (qat_hash_alg) {
90         case ICP_QAT_HW_AUTH_ALGO_SHA1:
91                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
92                                                 QAT_HW_DEFAULT_ALIGNMENT);
93         case ICP_QAT_HW_AUTH_ALGO_SHA224:
94                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
95                                                 QAT_HW_DEFAULT_ALIGNMENT);
96         case ICP_QAT_HW_AUTH_ALGO_SHA256:
97                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
98                                                 QAT_HW_DEFAULT_ALIGNMENT);
99         case ICP_QAT_HW_AUTH_ALGO_SHA384:
100                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
101                                                 QAT_HW_DEFAULT_ALIGNMENT);
102         case ICP_QAT_HW_AUTH_ALGO_SHA512:
103                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
104                                                 QAT_HW_DEFAULT_ALIGNMENT);
105         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
106                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
107                                                 QAT_HW_DEFAULT_ALIGNMENT);
108         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
109         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
110                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
111                                                 QAT_HW_DEFAULT_ALIGNMENT);
112         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
113                 return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
114                                                 QAT_HW_DEFAULT_ALIGNMENT);
115         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
116                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
117                                                 QAT_HW_DEFAULT_ALIGNMENT);
118         case ICP_QAT_HW_AUTH_ALGO_MD5:
119                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
120                                                 QAT_HW_DEFAULT_ALIGNMENT);
121         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
122                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
123                                                 QAT_HW_DEFAULT_ALIGNMENT);
124         case ICP_QAT_HW_AUTH_ALGO_NULL:
125                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
126                                                 QAT_HW_DEFAULT_ALIGNMENT);
127         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
128                 /* return maximum state1 size in this case */
129                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
130                                                 QAT_HW_DEFAULT_ALIGNMENT);
131         default:
132                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
133                 return -EFAULT;
134         }
135         return -EFAULT;
136 }
137
138 /* returns digest size in bytes  per hash algo */
139 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
140 {
141         switch (qat_hash_alg) {
142         case ICP_QAT_HW_AUTH_ALGO_SHA1:
143                 return ICP_QAT_HW_SHA1_STATE1_SZ;
144         case ICP_QAT_HW_AUTH_ALGO_SHA224:
145                 return ICP_QAT_HW_SHA224_STATE1_SZ;
146         case ICP_QAT_HW_AUTH_ALGO_SHA256:
147                 return ICP_QAT_HW_SHA256_STATE1_SZ;
148         case ICP_QAT_HW_AUTH_ALGO_SHA384:
149                 return ICP_QAT_HW_SHA384_STATE1_SZ;
150         case ICP_QAT_HW_AUTH_ALGO_SHA512:
151                 return ICP_QAT_HW_SHA512_STATE1_SZ;
152         case ICP_QAT_HW_AUTH_ALGO_MD5:
153                 return ICP_QAT_HW_MD5_STATE1_SZ;
154         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
155                 /* return maximum digest size in this case */
156                 return ICP_QAT_HW_SHA512_STATE1_SZ;
157         default:
158                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
159                 return -EFAULT;
160         }
161         return -EFAULT;
162 }
163
164 /* returns block size in bytes per hash algo */
165 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
166 {
167         switch (qat_hash_alg) {
168         case ICP_QAT_HW_AUTH_ALGO_SHA1:
169                 return SHA_CBLOCK;
170         case ICP_QAT_HW_AUTH_ALGO_SHA224:
171                 return SHA256_CBLOCK;
172         case ICP_QAT_HW_AUTH_ALGO_SHA256:
173                 return SHA256_CBLOCK;
174         case ICP_QAT_HW_AUTH_ALGO_SHA384:
175                 return SHA512_CBLOCK;
176         case ICP_QAT_HW_AUTH_ALGO_SHA512:
177                 return SHA512_CBLOCK;
178         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
179                 return 16;
180         case ICP_QAT_HW_AUTH_ALGO_MD5:
181                 return MD5_CBLOCK;
182         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
183                 /* return maximum block size in this case */
184                 return SHA512_CBLOCK;
185         default:
186                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
187                 return -EFAULT;
188         }
189         return -EFAULT;
190 }
191
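/*
 * The partial_hash_* helpers below run a single compression-function pass
 * (the OpenSSL *_Transform entry points) over exactly one input block,
 * with no padding or finalisation, and copy out the raw intermediate
 * state. That intermediate state is what is stored as the HMAC ipad/opad
 * precompute values.
 */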
192 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
193 {
194         SHA_CTX ctx;
195
196         if (!SHA1_Init(&ctx))
197                 return -EFAULT;
198         SHA1_Transform(&ctx, data_in);
199         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
200         return 0;
201 }
202
203 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
204 {
205         SHA256_CTX ctx;
206
207         if (!SHA224_Init(&ctx))
208                 return -EFAULT;
209         SHA256_Transform(&ctx, data_in);
210         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
211         return 0;
212 }
213
214 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
215 {
216         SHA256_CTX ctx;
217
218         if (!SHA256_Init(&ctx))
219                 return -EFAULT;
220         SHA256_Transform(&ctx, data_in);
221         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
222         return 0;
223 }
224
225 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
226 {
227         SHA512_CTX ctx;
228
229         if (!SHA384_Init(&ctx))
230                 return -EFAULT;
231         SHA512_Transform(&ctx, data_in);
232         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
233         return 0;
234 }
235
236 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
237 {
238         SHA512_CTX ctx;
239
240         if (!SHA512_Init(&ctx))
241                 return -EFAULT;
242         SHA512_Transform(&ctx, data_in);
243         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
244         return 0;
245 }
246
247 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
248 {
249         MD5_CTX ctx;
250
251         if (!MD5_Init(&ctx))
252                 return -EFAULT;
253         MD5_Transform(&ctx, data_in);
254         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
255
256         return 0;
257 }
258
259 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
260                         uint8_t *data_in,
261                         uint8_t *data_out)
262 {
263         int digest_size;
264         uint8_t digest[qat_hash_get_digest_size(
265                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
266         uint32_t *hash_state_out_be32;
267         uint64_t *hash_state_out_be64;
268         int i;
269
270         PMD_INIT_FUNC_TRACE();
271         digest_size = qat_hash_get_digest_size(hash_alg);
272         if (digest_size <= 0)
273                 return -EFAULT;
274
275         hash_state_out_be32 = (uint32_t *)data_out;
276         hash_state_out_be64 = (uint64_t *)data_out;
277
278         switch (hash_alg) {
279         case ICP_QAT_HW_AUTH_ALGO_SHA1:
280                 if (partial_hash_sha1(data_in, digest))
281                         return -EFAULT;
282                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
283                         *hash_state_out_be32 =
284                                 rte_bswap32(*(((uint32_t *)digest)+i));
285                 break;
286         case ICP_QAT_HW_AUTH_ALGO_SHA224:
287                 if (partial_hash_sha224(data_in, digest))
288                         return -EFAULT;
289                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
290                         *hash_state_out_be32 =
291                                 rte_bswap32(*(((uint32_t *)digest)+i));
292                 break;
293         case ICP_QAT_HW_AUTH_ALGO_SHA256:
294                 if (partial_hash_sha256(data_in, digest))
295                         return -EFAULT;
296                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
297                         *hash_state_out_be32 =
298                                 rte_bswap32(*(((uint32_t *)digest)+i));
299                 break;
300         case ICP_QAT_HW_AUTH_ALGO_SHA384:
301                 if (partial_hash_sha384(data_in, digest))
302                         return -EFAULT;
303                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
304                         *hash_state_out_be64 =
305                                 rte_bswap64(*(((uint64_t *)digest)+i));
306                 break;
307         case ICP_QAT_HW_AUTH_ALGO_SHA512:
308                 if (partial_hash_sha512(data_in, digest))
309                         return -EFAULT;
310                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
311                         *hash_state_out_be64 =
312                                 rte_bswap64(*(((uint64_t *)digest)+i));
313                 break;
314         case ICP_QAT_HW_AUTH_ALGO_MD5:
315                 if (partial_hash_md5(data_in, data_out))
316                         return -EFAULT;
317                 break;
318         default:
319                 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
320                 return -EFAULT;
321         }
322
323         return 0;
324 }
325 #define HMAC_IPAD_VALUE 0x36
326 #define HMAC_OPAD_VALUE 0x5c
327 #define HASH_XCBC_PRECOMP_KEY_NUM 3
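/*
 * HMAC (RFC 2104) is H((K ^ opad) || H((K ^ ipad) || m)), with ipad and
 * opad being 0x36 and 0x5c repeated to the hash block size. The precompute
 * below stores the intermediate hash of (K ^ ipad) as state1 and the
 * intermediate hash of (K ^ opad) directly after it, so the hardware can
 * resume both hashes for each request.
 */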
328
329 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
330                                 const uint8_t *auth_key,
331                                 uint16_t auth_keylen,
332                                 uint8_t *p_state_buf,
333                                 uint16_t *p_state_len)
334 {
335         int block_size;
336         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
337         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
338         int i;
339
340         PMD_INIT_FUNC_TRACE();
341         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
342                 static uint8_t qat_aes_xcbc_key_seed[
343                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
344                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
345                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
346                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
347                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
348                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
349                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
350                 };
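                /*
                 * Per RFC 3566, AES-XCBC-MAC derives three subkeys from the
                 * authentication key K: K1 = AES-K(0x01..01),
                 * K2 = AES-K(0x02..02), K3 = AES-K(0x03..03). The seed block
                 * above supplies the three constant plaintexts; the loop below
                 * encrypts each with K and stores the results contiguously in
                 * the state2 buffer.
                 */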
351
352                 uint8_t *in = NULL;
353                 uint8_t *out = p_state_buf;
354                 int x;
355                 AES_KEY enc_key;
356
357                 in = rte_zmalloc("working mem for key",
358                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
                /* guard against allocation failure before using the buffer */
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory for precompute");
                        return -ENOMEM;
                }
359                 rte_memcpy(in, qat_aes_xcbc_key_seed,
360                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
361                 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
362                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
363                                 &enc_key) != 0) {
364                                 rte_free(in -
365                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
366                                 memset(out -
367                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
368                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
369                                 return -EFAULT;
370                         }
371                         AES_encrypt(in, out, &enc_key);
372                         in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
373                         out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
374                 }
375                 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
376                 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
377                 return 0;
378         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
379                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
380                 uint8_t *in = NULL;
381                 uint8_t *out = p_state_buf;
382                 AES_KEY enc_key;
383
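                /*
                 * For GCM/GMAC the precompute is the GHASH hash key
                 * H = AES-K(0^128), i.e. one all-zero block encrypted with
                 * the cipher key. The LEN_A and E(CTR0) areas that follow H
                 * in state2 are only zeroed here.
                 */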
384                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
385                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
386                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
387                 in = rte_zmalloc("working mem for key",
388                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
                /* guard against allocation failure before using the buffer */
                if (in == NULL) {
                        PMD_DRV_LOG(ERR, "Failed to allocate memory for precompute");
                        return -ENOMEM;
                }
389                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
390                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
391                         &enc_key) != 0) {
392                         return -EFAULT;
393                 }
394                 AES_encrypt(in, out, &enc_key);
395                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
396                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
397                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
398                 rte_free(in);
399                 return 0;
400         }
401
402         block_size = qat_hash_get_block_size(hash_alg);
403         if (block_size <= 0)
404                 return -EFAULT;
405         /* init ipad and opad from key and xor with fixed values */
406         memset(ipad, 0, block_size);
407         memset(opad, 0, block_size);
408
409         if (auth_keylen > (unsigned int)block_size) {
410                 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
411                 return -EFAULT;
412         }
413         rte_memcpy(ipad, auth_key, auth_keylen);
414         rte_memcpy(opad, auth_key, auth_keylen);
415
416         for (i = 0; i < block_size; i++) {
417                 uint8_t *ipad_ptr = ipad + i;
418                 uint8_t *opad_ptr = opad + i;
419                 *ipad_ptr ^= HMAC_IPAD_VALUE;
420                 *opad_ptr ^= HMAC_OPAD_VALUE;
421         }
422
423         /* do partial hash of ipad and copy to state1 */
424         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
425                 memset(ipad, 0, block_size);
426                 memset(opad, 0, block_size);
427                 PMD_DRV_LOG(ERR, "ipad precompute failed");
428                 return -EFAULT;
429         }
430
431         /*
432          * State1 length is a multiple of 8, so it may be larger than the digest.
433          * Put the partial hash of opad at state_len bytes after state1.
434          */
435         *p_state_len = qat_hash_get_state1_size(hash_alg);
436         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
437                 memset(ipad, 0, block_size);
438                 memset(opad, 0, block_size);
439                 PMD_DRV_LOG(ERR, "opad precompute failed");
440                 return -EFAULT;
441         }
442
443         /*  don't leave data lying around */
444         memset(ipad, 0, block_size);
445         memset(opad, 0, block_size);
446         return 0;
447 }
448
449 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
450                 enum qat_crypto_proto_flag proto_flags)
451 {
452         PMD_INIT_FUNC_TRACE();
453         header->hdr_flags =
454                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
455         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
456         header->comn_req_flags =
457                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
458                                         QAT_COMN_PTR_TYPE_FLAT);
459         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
460                                   ICP_QAT_FW_LA_PARTIAL_NONE);
461         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
462                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
463
464         switch (proto_flags) {
465         case QAT_CRYPTO_PROTO_FLAG_NONE:
466                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
467                                         ICP_QAT_FW_LA_NO_PROTO);
468                 break;
469         case QAT_CRYPTO_PROTO_FLAG_CCM:
470                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
471                                         ICP_QAT_FW_LA_CCM_PROTO);
472                 break;
473         case QAT_CRYPTO_PROTO_FLAG_GCM:
474                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
475                                         ICP_QAT_FW_LA_GCM_PROTO);
476                 break;
477         case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
478                 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
479                                         ICP_QAT_FW_LA_SNOW_3G_PROTO);
480                 break;
481         case QAT_CRYPTO_PROTO_FLAG_ZUC:
482                 ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
483                         ICP_QAT_FW_LA_ZUC_3G_PROTO);
484                 break;
485         }
486
487         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
488                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
489         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
490                                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
491 }
492
493 /*
494  *      Snow3G and ZUC should never use this function; they set their
495  *      protocol flag directly in both the cipher and auth parts of the
496  *      content descriptor building functions.
497  */
498 static enum qat_crypto_proto_flag
499 qat_get_crypto_proto_flag(uint16_t flags)
500 {
501         int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
502         enum qat_crypto_proto_flag qat_proto_flag =
503                         QAT_CRYPTO_PROTO_FLAG_NONE;
504
505         switch (proto) {
506         case ICP_QAT_FW_LA_GCM_PROTO:
507                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
508                 break;
509         case ICP_QAT_FW_LA_CCM_PROTO:
510                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
511                 break;
512         }
513
514         return qat_proto_flag;
515 }
516
517 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
518                                                 uint8_t *cipherkey,
519                                                 uint32_t cipherkeylen)
520 {
521         struct icp_qat_hw_cipher_algo_blk *cipher;
522         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
523         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
524         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
525         void *ptr = &req_tmpl->cd_ctrl;
526         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
527         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
528         enum icp_qat_hw_cipher_convert key_convert;
529         enum qat_crypto_proto_flag qat_proto_flag =
530                 QAT_CRYPTO_PROTO_FLAG_NONE;
531         uint32_t total_key_size;
532         uint16_t cipher_offset, cd_size;
533         uint32_t wordIndex  = 0;
534         uint32_t *temp_key = NULL;
535         PMD_INIT_FUNC_TRACE();
536
537         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
538                 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
539                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
540                                         ICP_QAT_FW_SLICE_CIPHER);
541                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
542                                         ICP_QAT_FW_SLICE_DRAM_WR);
543                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
544                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
545                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
546                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
547                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
548         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
549                 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
550                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
551                                         ICP_QAT_FW_SLICE_CIPHER);
552                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
553                                         ICP_QAT_FW_SLICE_AUTH);
554                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
555                                         ICP_QAT_FW_SLICE_AUTH);
556                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
557                                         ICP_QAT_FW_SLICE_DRAM_WR);
558                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
559         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
560                 PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
561                 return -EFAULT;
562         }
563
564         if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
565                 /*
566                  * CTR Streaming ciphers are a special case. Decrypt = encrypt
567                  * Overriding default values previously set
568                  */
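                /*
                 * In CTR mode both directions XOR the same keystream,
                 * C = P ^ E_K(counter), so the block cipher always runs
                 * forward and no key conversion is required.
                 */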
569                 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
570                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
571         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
572                 || cdesc->qat_cipher_alg ==
573                         ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
574                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
575         else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
576                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
577         else
578                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
579
580         if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
581                 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
582                         ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
583                 cipher_cd_ctrl->cipher_state_sz =
584                         ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
585                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
586
587         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
588                 total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
589                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
590                 cipher_cd_ctrl->cipher_padding_sz =
591                                         (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
592         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
593                 total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
594                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
595                 qat_proto_flag =
596                         qat_get_crypto_proto_flag(header->serv_specif_flags);
597         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
598                 total_key_size = ICP_QAT_HW_DES_KEY_SZ;
599                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
600                 qat_proto_flag =
601                         qat_get_crypto_proto_flag(header->serv_specif_flags);
602         } else if (cdesc->qat_cipher_alg ==
603                 ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
604                 total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
605                         ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
606                 cipher_cd_ctrl->cipher_state_sz =
607                         ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
608                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
609         } else {
610                 total_key_size = cipherkeylen;
611                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
612                 qat_proto_flag =
613                         qat_get_crypto_proto_flag(header->serv_specif_flags);
614         }
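        /*
         * The content descriptor control fields below are expressed in
         * quadwords (8-byte units), hence the >> 3 conversions from byte
         * counts.
         */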
615         cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
616         cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
617         cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
618
619         header->service_cmd_id = cdesc->qat_cmd;
620         qat_alg_init_common_hdr(header, qat_proto_flag);
621
622         cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
623         cipher->cipher_config.val =
624             ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
625                                         cdesc->qat_cipher_alg, key_convert,
626                                         cdesc->qat_dir);
627
628         if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
629                 temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
630                                         sizeof(struct icp_qat_hw_cipher_config)
631                                         + cipherkeylen);
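                /*
                 * For KASUMI f8 the content descriptor holds the key K
                 * followed by K XOR-ed with the f8 key modifier, so the key
                 * is copied twice and the second copy is modified below.
                 */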
632                 memcpy(cipher->key, cipherkey, cipherkeylen);
633                 memcpy(temp_key, cipherkey, cipherkeylen);
634
635                 /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
636                 for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
637                                                                 wordIndex++)
638                         temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
639
640                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
641                                         cipherkeylen + cipherkeylen;
642         } else {
643                 memcpy(cipher->key, cipherkey, cipherkeylen);
644                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
645                                         cipherkeylen;
646         }
647
648         if (total_key_size > cipherkeylen) {
649                 uint32_t padding_size =  total_key_size-cipherkeylen;
650                 if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
651                         && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
652                         /* K3 not provided, so use K1 as K3 */
653                         memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
654                 else
655                         memset(cdesc->cd_cur_ptr, 0, padding_size);
656                 cdesc->cd_cur_ptr += padding_size;
657         }
658         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
659         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
660
661         return 0;
662 }
663
664 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
665                                                 uint8_t *authkey,
666                                                 uint32_t authkeylen,
667                                                 uint32_t add_auth_data_length,
668                                                 uint32_t digestsize,
669                                                 unsigned int operation)
670 {
671         struct icp_qat_hw_auth_setup *hash;
672         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
673         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
674         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
675         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
676         void *ptr = &req_tmpl->cd_ctrl;
677         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
678         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
679         struct icp_qat_fw_la_auth_req_params *auth_param =
680                 (struct icp_qat_fw_la_auth_req_params *)
681                 ((char *)&req_tmpl->serv_specif_rqpars +
682                 sizeof(struct icp_qat_fw_la_cipher_req_params));
683         uint16_t state1_size = 0, state2_size = 0;
684         uint16_t hash_offset, cd_size;
685         uint32_t *aad_len = NULL;
686         uint32_t wordIndex  = 0;
687         uint32_t *pTempKey;
688         enum qat_crypto_proto_flag qat_proto_flag =
689                 QAT_CRYPTO_PROTO_FLAG_NONE;
690
691         PMD_INIT_FUNC_TRACE();
692
693         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
694                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
695                                         ICP_QAT_FW_SLICE_AUTH);
696                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
697                                         ICP_QAT_FW_SLICE_DRAM_WR);
698                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
699         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
700                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
701                                 ICP_QAT_FW_SLICE_AUTH);
702                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
703                                 ICP_QAT_FW_SLICE_CIPHER);
704                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
705                                 ICP_QAT_FW_SLICE_CIPHER);
706                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
707                                 ICP_QAT_FW_SLICE_DRAM_WR);
708                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
709         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
710                 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
711                 return -EFAULT;
712         }
713
714         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
715                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
716                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
717                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
718                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
719                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
720         } else {
721                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
722                                            ICP_QAT_FW_LA_RET_AUTH_RES);
723                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
724                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
725                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
726         }
727
728         /*
729          * Setup the inner hash config
730          */
731         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
732         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
733         hash->auth_config.reserved = 0;
734         hash->auth_config.config =
735                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
736                                 cdesc->qat_hash_alg, digestsize);
737
738         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
739                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
740                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
741                 hash->auth_counter.counter = 0;
742         else
743                 hash->auth_counter.counter = rte_bswap32(
744                                 qat_hash_get_block_size(cdesc->qat_hash_alg));
745
746         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
747
748         /*
749          * cd_cur_ptr now points at the state1 information.
750          */
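        /*
         * Each case below fills in state1 (and usually state2 directly
         * after it) and reports their sizes, from which the cd_ctrl
         * offsets are derived further down.
         */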
751         switch (cdesc->qat_hash_alg) {
752         case ICP_QAT_HW_AUTH_ALGO_SHA1:
753                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
754                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
755                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
756                         return -EFAULT;
757                 }
758                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
759                 break;
760         case ICP_QAT_HW_AUTH_ALGO_SHA224:
761                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
762                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
763                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
764                         return -EFAULT;
765                 }
766                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
767                 break;
768         case ICP_QAT_HW_AUTH_ALGO_SHA256:
769                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
770                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
771                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
772                         return -EFAULT;
773                 }
774                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
775                 break;
776         case ICP_QAT_HW_AUTH_ALGO_SHA384:
777                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
778                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
779                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
780                         return -EFAULT;
781                 }
782                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
783                 break;
784         case ICP_QAT_HW_AUTH_ALGO_SHA512:
785                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
786                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
787                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
788                         return -EFAULT;
789                 }
790                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
791                 break;
792         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
793                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
794                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
795                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
796                         &state2_size)) {
797                         PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
798                         return -EFAULT;
799                 }
800                 break;
801         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
802         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
803                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
804                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
805                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
806                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
807                         &state2_size)) {
808                         PMD_DRV_LOG(ERR, "(GCM)precompute failed");
809                         return -EFAULT;
810                 }
811                 /*
812                  * Write the AAD length into bytes 16-19 of state2 in
813                  * big-endian format (the length field itself is 8 bytes).
814                  */
815                 auth_param->u2.aad_sz =
816                                 RTE_ALIGN_CEIL(add_auth_data_length, 16);
817                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
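                /*
                 * For example, 12 bytes of AAD gives u2.aad_sz = 16 and
                 * hash_state_sz = 2 quadwords.
                 */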
818
819                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
820                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
821                                         ICP_QAT_HW_GALOIS_H_SZ);
822                 *aad_len = rte_bswap32(add_auth_data_length);
823                 break;
824         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
825                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
826                 state1_size = qat_hash_get_state1_size(
827                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
828                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
829                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
830
831                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
832                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
833                 cipherconfig->cipher_config.val =
834                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
835                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
836                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
837                         ICP_QAT_HW_CIPHER_ENCRYPT);
838                 memcpy(cipherconfig->key, authkey, authkeylen);
839                 memset(cipherconfig->key + authkeylen,
840                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
841                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
842                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
843                 auth_param->hash_state_sz =
844                                 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
845                 break;
846         case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
847                 hash->auth_config.config =
848                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
849                                 cdesc->qat_hash_alg, digestsize);
850                 qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
851                 state1_size = qat_hash_get_state1_size(
852                                 ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
853                 state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
854                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
855                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
856
857                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
858                 cdesc->cd_cur_ptr += state1_size + state2_size
859                         + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
860                 auth_param->hash_state_sz =
861                                 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
862
863                 break;
864         case ICP_QAT_HW_AUTH_ALGO_MD5:
865                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
866                         authkey, authkeylen, cdesc->cd_cur_ptr,
867                         &state1_size)) {
868                         PMD_DRV_LOG(ERR, "(MD5)precompute failed");
869                         return -EFAULT;
870                 }
871                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
872                 break;
873         case ICP_QAT_HW_AUTH_ALGO_NULL:
874                 state1_size = qat_hash_get_state1_size(
875                                 ICP_QAT_HW_AUTH_ALGO_NULL);
876                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
877                 break;
878         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
879                 state1_size = qat_hash_get_state1_size(
880                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
881                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
882                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
883                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
884                                                         + authkeylen);
885                 /*
886                  * The Inner Hash Initial State2 block must contain IK
887                  * (Initialisation Key), followed by IK XOR-ed with KM
888                  * (Key Modifier): IK||(IK^KM).
889                  */
890                 /* write the auth key */
891                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
892                 /* initialise temp key with auth key */
893                 memcpy(pTempKey, authkey, authkeylen);
894                 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
895                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
896                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
897                 break;
898         default:
899                 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
900                 return -EFAULT;
901         }
902
903         /* Request template setup */
904         qat_alg_init_common_hdr(header, qat_proto_flag);
905         header->service_cmd_id = cdesc->qat_cmd;
906
907         /* Auth CD config setup */
908         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
909         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
910         hash_cd_ctrl->inner_res_sz = digestsize;
911         hash_cd_ctrl->final_sz = digestsize;
912         hash_cd_ctrl->inner_state1_sz = state1_size;
913         auth_param->auth_res_sz = digestsize;
914
915         hash_cd_ctrl->inner_state2_sz  = state2_size;
916         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
917                         ((sizeof(struct icp_qat_hw_auth_setup) +
918                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
919                                         >> 3);
920
921         cdesc->cd_cur_ptr += state1_size + state2_size;
922         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
923
924         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
925         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
926
927         return 0;
928 }
929
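/*
 * The qat_alg_validate_*_key helpers below map a raw key length onto the
 * matching hardware algorithm enum and return -EINVAL for unsupported
 * lengths; e.g. a 32-byte key selects ICP_QAT_HW_CIPHER_ALGO_AES256
 * (assuming ICP_QAT_HW_AES_256_KEY_SZ is 32).
 */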
930 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
931 {
932         switch (key_len) {
933         case ICP_QAT_HW_AES_128_KEY_SZ:
934                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
935                 break;
936         case ICP_QAT_HW_AES_192_KEY_SZ:
937                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
938                 break;
939         case ICP_QAT_HW_AES_256_KEY_SZ:
940                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
941                 break;
942         default:
943                 return -EINVAL;
944         }
945         return 0;
946 }
947
948 int qat_alg_validate_aes_docsisbpi_key(int key_len,
949                 enum icp_qat_hw_cipher_algo *alg)
950 {
951         switch (key_len) {
952         case ICP_QAT_HW_AES_128_KEY_SZ:
953                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
954                 break;
955         default:
956                 return -EINVAL;
957         }
958         return 0;
959 }
960
961 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
962 {
963         switch (key_len) {
964         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
965                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
966                 break;
967         default:
968                 return -EINVAL;
969         }
970         return 0;
971 }
972
973 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
974 {
975         switch (key_len) {
976         case ICP_QAT_HW_KASUMI_KEY_SZ:
977                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
978                 break;
979         default:
980                 return -EINVAL;
981         }
982         return 0;
983 }
984
985 int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
986 {
987         switch (key_len) {
988         case ICP_QAT_HW_DES_KEY_SZ:
989                 *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
990                 break;
991         default:
992                 return -EINVAL;
993         }
994         return 0;
995 }
996
997 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
998 {
999         switch (key_len) {
1000         case QAT_3DES_KEY_SZ_OPT1:
1001         case QAT_3DES_KEY_SZ_OPT2:
1002                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
1003                 break;
1004         default:
1005                 return -EINVAL;
1006         }
1007         return 0;
1008 }
1009
1010 int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
1011 {
1012         switch (key_len) {
1013         case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
1014                 *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
1015                 break;
1016         default:
1017                 return -EINVAL;
1018         }
1019         return 0;
1020 }