1 /*
2  *  This file is provided under a dual BSD/GPLv2 license.  When using or
3  *  redistributing this file, you may do so under either license.
4  *
5  *  GPL LICENSE SUMMARY
6  *  Copyright(c) 2015-2016 Intel Corporation.
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of version 2 of the GNU General Public License as
9  *  published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope that it will be useful, but
12  *  WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  General Public License for more details.
15  *
16  *  Contact Information:
17  *  qat-linux@intel.com
18  *
19  *  BSD LICENSE
20  *  Copyright(c) 2015-2016 Intel Corporation.
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *      * Redistributions of source code must retain the above copyright
26  *        notice, this list of conditions and the following disclaimer.
27  *      * Redistributions in binary form must reproduce the above copyright
28  *        notice, this list of conditions and the following disclaimer in
29  *        the documentation and/or other materials provided with the
30  *        distribution.
31  *      * Neither the name of Intel Corporation nor the names of its
32  *        contributors may be used to endorse or promote products derived
33  *        from this software without specific prior written permission.
34  *
35  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46  */
47
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
52 #include <rte_log.h>
53 #include <rte_malloc.h>
54 #include <rte_crypto_sym.h>
55
56 #include "../qat_logs.h"
57 #include "qat_algs.h"
58
59 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
60 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
61 #include <openssl/md5.h>        /* Needed to calculate pre-compute values */
62
63
64 /*
65  * Returns the size in bytes per hash algo for the state1 size field in
66  * cd_ctrl. This is the digest size rounded up to the nearest quadword.
67  */
68 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
69 {
70         switch (qat_hash_alg) {
71         case ICP_QAT_HW_AUTH_ALGO_SHA1:
72                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
73                                                 QAT_HW_DEFAULT_ALIGNMENT);
74         case ICP_QAT_HW_AUTH_ALGO_SHA224:
75                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
76                                                 QAT_HW_DEFAULT_ALIGNMENT);
77         case ICP_QAT_HW_AUTH_ALGO_SHA256:
78                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
79                                                 QAT_HW_DEFAULT_ALIGNMENT);
80         case ICP_QAT_HW_AUTH_ALGO_SHA384:
81                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
82                                                 QAT_HW_DEFAULT_ALIGNMENT);
83         case ICP_QAT_HW_AUTH_ALGO_SHA512:
84                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
85                                                 QAT_HW_DEFAULT_ALIGNMENT);
86         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
87                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
88                                                 QAT_HW_DEFAULT_ALIGNMENT);
89         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
90         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
91                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
92                                                 QAT_HW_DEFAULT_ALIGNMENT);
93         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
94                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
95                                                 QAT_HW_DEFAULT_ALIGNMENT);
96         case ICP_QAT_HW_AUTH_ALGO_MD5:
97                 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
98                                                 QAT_HW_DEFAULT_ALIGNMENT);
99         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
100                 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
101                                                 QAT_HW_DEFAULT_ALIGNMENT);
102         case ICP_QAT_HW_AUTH_ALGO_NULL:
103                 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
104                                                 QAT_HW_DEFAULT_ALIGNMENT);
105         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
106                 /* return maximum state1 size in this case */
107                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
108                                                 QAT_HW_DEFAULT_ALIGNMENT);
109         default:
110                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
111                 return -EFAULT;
112         };
113         return -EFAULT;
114 }
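
/*
 * Worked example (a sketch, assuming QAT_HW_DEFAULT_ALIGNMENT is 8 and
 * QAT_HW_ROUND_UP() is the usual add-then-mask round-up): SHA-1 has a
 * 20-byte state1 (ICP_QAT_HW_SHA1_STATE1_SZ), so the value returned above
 * is QAT_HW_ROUND_UP(20, 8) == 24, i.e. the digest padded out to the next
 * quadword boundary, which is what the cd_ctrl state1 size field expects.
 */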
115
116 /* returns digest size in bytes per hash algo */
117 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
118 {
119         switch (qat_hash_alg) {
120         case ICP_QAT_HW_AUTH_ALGO_SHA1:
121                 return ICP_QAT_HW_SHA1_STATE1_SZ;
122         case ICP_QAT_HW_AUTH_ALGO_SHA224:
123                 return ICP_QAT_HW_SHA224_STATE1_SZ;
124         case ICP_QAT_HW_AUTH_ALGO_SHA256:
125                 return ICP_QAT_HW_SHA256_STATE1_SZ;
126         case ICP_QAT_HW_AUTH_ALGO_SHA384:
127                 return ICP_QAT_HW_SHA384_STATE1_SZ;
128         case ICP_QAT_HW_AUTH_ALGO_SHA512:
129                 return ICP_QAT_HW_SHA512_STATE1_SZ;
130         case ICP_QAT_HW_AUTH_ALGO_MD5:
131                 return ICP_QAT_HW_MD5_STATE1_SZ;
132         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
133                 /* return maximum digest size in this case */
134                 return ICP_QAT_HW_SHA512_STATE1_SZ;
135         default:
136                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
137                 return -EFAULT;
138         };
139         return -EFAULT;
140 }
141
142 /* returns block size in bytes per hash algo */
143 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
144 {
145         switch (qat_hash_alg) {
146         case ICP_QAT_HW_AUTH_ALGO_SHA1:
147                 return SHA_CBLOCK;
148         case ICP_QAT_HW_AUTH_ALGO_SHA224:
149                 return SHA256_CBLOCK;
150         case ICP_QAT_HW_AUTH_ALGO_SHA256:
151                 return SHA256_CBLOCK;
152         case ICP_QAT_HW_AUTH_ALGO_SHA384:
153                 return SHA512_CBLOCK;
154         case ICP_QAT_HW_AUTH_ALGO_SHA512:
155                 return SHA512_CBLOCK;
156         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
157                 return 16;
158         case ICP_QAT_HW_AUTH_ALGO_MD5:
159                 return MD5_CBLOCK;
160         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
161                 /* return maximum block size in this case */
162                 return SHA512_CBLOCK;
163         default:
164                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
165                 return -EFAULT;
166         };
167         return -EFAULT;
168 }
169
170 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
171 {
172         SHA_CTX ctx;
173
174         if (!SHA1_Init(&ctx))
175                 return -EFAULT;
176         SHA1_Transform(&ctx, data_in);
177         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
178         return 0;
179 }
180
181 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
182 {
183         SHA256_CTX ctx;
184
185         if (!SHA224_Init(&ctx))
186                 return -EFAULT;
187         SHA256_Transform(&ctx, data_in);
188         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
189         return 0;
190 }
191
192 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
193 {
194         SHA256_CTX ctx;
195
196         if (!SHA256_Init(&ctx))
197                 return -EFAULT;
198         SHA256_Transform(&ctx, data_in);
199         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
200         return 0;
201 }
202
203 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
204 {
205         SHA512_CTX ctx;
206
207         if (!SHA384_Init(&ctx))
208                 return -EFAULT;
209         SHA512_Transform(&ctx, data_in);
210         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
211         return 0;
212 }
213
214 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
215 {
216         SHA512_CTX ctx;
217
218         if (!SHA512_Init(&ctx))
219                 return -EFAULT;
220         SHA512_Transform(&ctx, data_in);
221         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
222         return 0;
223 }
224
225 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
226 {
227         MD5_CTX ctx;
228
229         if (!MD5_Init(&ctx))
230                 return -EFAULT;
231         MD5_Transform(&ctx, data_in);
232         rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
233
234         return 0;
235 }
236
237 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
238                         uint8_t *data_in,
239                         uint8_t *data_out)
240 {
241         int digest_size;
242         uint8_t digest[qat_hash_get_digest_size(
243                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
244         uint32_t *hash_state_out_be32;
245         uint64_t *hash_state_out_be64;
246         int i;
247
248         PMD_INIT_FUNC_TRACE();
249         digest_size = qat_hash_get_digest_size(hash_alg);
250         if (digest_size <= 0)
251                 return -EFAULT;
252
253         hash_state_out_be32 = (uint32_t *)data_out;
254         hash_state_out_be64 = (uint64_t *)data_out;
255
256         switch (hash_alg) {
257         case ICP_QAT_HW_AUTH_ALGO_SHA1:
258                 if (partial_hash_sha1(data_in, digest))
259                         return -EFAULT;
260                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
261                         *hash_state_out_be32 =
262                                 rte_bswap32(*(((uint32_t *)digest)+i));
263                 break;
264         case ICP_QAT_HW_AUTH_ALGO_SHA224:
265                 if (partial_hash_sha224(data_in, digest))
266                         return -EFAULT;
267                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
268                         *hash_state_out_be32 =
269                                 rte_bswap32(*(((uint32_t *)digest)+i));
270                 break;
271         case ICP_QAT_HW_AUTH_ALGO_SHA256:
272                 if (partial_hash_sha256(data_in, digest))
273                         return -EFAULT;
274                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
275                         *hash_state_out_be32 =
276                                 rte_bswap32(*(((uint32_t *)digest)+i));
277                 break;
278         case ICP_QAT_HW_AUTH_ALGO_SHA384:
279                 if (partial_hash_sha384(data_in, digest))
280                         return -EFAULT;
281                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
282                         *hash_state_out_be64 =
283                                 rte_bswap64(*(((uint64_t *)digest)+i));
284                 break;
285         case ICP_QAT_HW_AUTH_ALGO_SHA512:
286                 if (partial_hash_sha512(data_in, digest))
287                         return -EFAULT;
288                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
289                         *hash_state_out_be64 =
290                                 rte_bswap64(*(((uint64_t *)digest)+i));
291                 break;
292         case ICP_QAT_HW_AUTH_ALGO_MD5:
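                /* MD5 keeps its state little-endian, so no byte swap is applied */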
293                 if (partial_hash_md5(data_in, data_out))
294                         return -EFAULT;
295                 break;
296         default:
297                 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
298                 return -EFAULT;
299         }
300
301         return 0;
302 }
303 #define HMAC_IPAD_VALUE 0x36
304 #define HMAC_OPAD_VALUE 0x5c
305 #define HASH_XCBC_PRECOMP_KEY_NUM 3
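
/*
 * Illustrative sketch (not part of the driver): the partial_hash_* helpers
 * above produce the "partial" HMAC states that the hardware resumes from.
 * For a key already zero-padded to the block size, state1 is the compression
 * state after one block of (key ^ ipad), e.g. for SHA-256:
 *
 *      uint8_t ipad[SHA256_CBLOCK], state1[SHA256_DIGEST_LENGTH];
 *      int i;
 *
 *      for (i = 0; i < SHA256_CBLOCK; i++)
 *              ipad[i] = key[i] ^ HMAC_IPAD_VALUE;
 *      partial_hash_compute(ICP_QAT_HW_AUTH_ALGO_SHA256, ipad, state1);
 *
 * qat_alg_do_precomputes() below does exactly this for both ipad and opad
 * and stores the two partial states consecutively in the state buffer.
 */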
306
307 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
308                                 const uint8_t *auth_key,
309                                 uint16_t auth_keylen,
310                                 uint8_t *p_state_buf,
311                                 uint16_t *p_state_len)
312 {
313         int block_size;
314         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
315         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
316         int i;
317
318         PMD_INIT_FUNC_TRACE();
319         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
320                 static uint8_t qat_aes_xcbc_key_seed[
321                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
322                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
323                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
324                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
325                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
326                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
327                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
328                 };
329
330                 uint8_t *in = NULL;
331                 uint8_t *out = p_state_buf;
332                 int x;
333                 AES_KEY enc_key;
334
335                 in = rte_zmalloc("working mem for key",
336                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
337                 if (in == NULL) {
338                         PMD_DRV_LOG(ERR, "Failed to alloc memory");
339                         return -ENOMEM;
340                 }
341
342                 rte_memcpy(in, qat_aes_xcbc_key_seed,
343                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
344                 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
345                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
346                                 &enc_key) != 0) {
347                                 rte_free(in -
348                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
349                                 memset(out -
350                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
351                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
352                                 return -EFAULT;
353                         }
354                         AES_encrypt(in, out, &enc_key);
355                         in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
356                         out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
357                 }
358                 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
359                 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
360                 return 0;
361         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
362                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
363                 uint8_t *in = NULL;
364                 uint8_t *out = p_state_buf;
365                 AES_KEY enc_key;
366
367                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
368                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
369                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
370                 in = rte_zmalloc("working mem for key",
371                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
372                 if (in == NULL) {
373                         PMD_DRV_LOG(ERR, "Failed to alloc memory");
374                         return -ENOMEM;
375                 }
376
377                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
378                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3, &enc_key) != 0) {
379                         rte_free(in);   /* don't leak the working buffer */
380                         return -EFAULT;
381                 }
382                 AES_encrypt(in, out, &enc_key);
383                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
384                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
385                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
386                 rte_free(in);
387                 return 0;
388         }
389
390         block_size = qat_hash_get_block_size(hash_alg);
391         if (block_size <= 0)
392                 return -EFAULT;
393         /* init ipad and opad from key and xor with fixed values */
394         memset(ipad, 0, block_size);
395         memset(opad, 0, block_size);
396
397         if (auth_keylen > (unsigned int)block_size) {
398                 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
399                 return -EFAULT;
400         }
401         rte_memcpy(ipad, auth_key, auth_keylen);
402         rte_memcpy(opad, auth_key, auth_keylen);
403
404         for (i = 0; i < block_size; i++) {
405                 uint8_t *ipad_ptr = ipad + i;
406                 uint8_t *opad_ptr = opad + i;
407                 *ipad_ptr ^= HMAC_IPAD_VALUE;
408                 *opad_ptr ^= HMAC_OPAD_VALUE;
409         }
410
411         /* do partial hash of ipad and copy to state1 */
412         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
413                 memset(ipad, 0, block_size);
414                 memset(opad, 0, block_size);
415                 PMD_DRV_LOG(ERR, "ipad precompute failed");
416                 return -EFAULT;
417         }
418
419         /*
420          * State len is a multiple of 8, so it may be larger than the digest.
421          * Put the partial hash of opad at offset state_len, right after state1.
422          */
423         *p_state_len = qat_hash_get_state1_size(hash_alg);
424         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
425                 memset(ipad, 0, block_size);
426                 memset(opad, 0, block_size);
427                 PMD_DRV_LOG(ERR, "opad precompute failed");
428                 return -EFAULT;
429         }
430
431         /* don't leave data lying around */
432         memset(ipad, 0, block_size);
433         memset(opad, 0, block_size);
434         return 0;
435 }
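
/*
 * Usage sketch (hypothetical caller, oversized buffer for clarity): for
 * HMAC-SHA1 the ipad partial state is written at offset 0 of the state
 * buffer, *p_state_len is set to the quadword-rounded state1 size (24 for
 * SHA-1) and the opad partial state is written immediately after it:
 *
 *      uint8_t state[64] = { 0 };
 *      uint16_t state1_len = 0;
 *
 *      if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
 *                      key, keylen, state, &state1_len))
 *              return -EFAULT;
 *
 * Afterwards state[0..19] holds the ipad partial state and state[24..43]
 * the opad partial state; these are consumed as state1/state2 by the auth
 * content descriptor builder below.
 */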
436
437 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
438                 uint16_t proto)
439 {
440         PMD_INIT_FUNC_TRACE();
441         header->hdr_flags =
442                 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
443         header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
444         header->comn_req_flags =
445                 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
446                                         QAT_COMN_PTR_TYPE_FLAT);
447         ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
448                                   ICP_QAT_FW_LA_PARTIAL_NONE);
449         ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
450                                            ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
451         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
452                                 proto);
453         ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
454                                            ICP_QAT_FW_LA_NO_UPDATE_STATE);
455 }
456
457 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
458                                                 uint8_t *cipherkey,
459                                                 uint32_t cipherkeylen)
460 {
461         struct icp_qat_hw_cipher_algo_blk *cipher;
462         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
463         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
464         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
465         void *ptr = &req_tmpl->cd_ctrl;
466         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
467         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
468         enum icp_qat_hw_cipher_convert key_convert;
469         uint32_t total_key_size;
470         uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/SNOW 3G */
471         uint16_t cipher_offset, cd_size;
472         uint32_t wordIndex  = 0;
473         uint32_t *temp_key = NULL;
474         PMD_INIT_FUNC_TRACE();
475
476         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
477                 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
478                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
479                                         ICP_QAT_FW_SLICE_CIPHER);
480                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
481                                         ICP_QAT_FW_SLICE_DRAM_WR);
482                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
483                                         ICP_QAT_FW_LA_NO_RET_AUTH_RES);
484                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
485                                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
486                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
487         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
488                 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
489                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
490                                         ICP_QAT_FW_SLICE_CIPHER);
491                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
492                                         ICP_QAT_FW_SLICE_AUTH);
493                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
494                                         ICP_QAT_FW_SLICE_AUTH);
495                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
496                                         ICP_QAT_FW_SLICE_DRAM_WR);
497                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
498         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
499                 PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
500                 return -EFAULT;
501         }
502
503         if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
504                 /*
505                  * CTR streaming ciphers are a special case: decrypt == encrypt.
506                  * Override the default values set previously.
507                  */
508                 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
509                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
510         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
511                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
512         else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
513                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
514         else
515                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
516
517         if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
518                 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
519                         ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
520                 cipher_cd_ctrl->cipher_state_sz =
521                         ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
522                 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
523         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
524                 total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
525                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
526                 cipher_cd_ctrl->cipher_padding_sz =
527                                         (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
528         } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
529                 total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
530                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
531                 proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
532         } else {
533                 total_key_size = cipherkeylen;
534                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
535                 proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
536         }
537         cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
538         cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
539         cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
540
541         header->service_cmd_id = cdesc->qat_cmd;
542         qat_alg_init_common_hdr(header, proto);
543
544         cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
545
546         cipher->cipher_config.val =
547             ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
548                                         cdesc->qat_cipher_alg, key_convert,
549                                         cdesc->qat_dir);
550
551         if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
552                 temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
553                                         sizeof(struct icp_qat_hw_cipher_config)
554                                         + cipherkeylen);
555                 memcpy(cipher->key, cipherkey, cipherkeylen);
556                 memcpy(temp_key, cipherkey, cipherkeylen);
557
558                 /* XOR the key with the KASUMI F8 key modifier, 4 bytes at a time */
559                 for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
560                                                                 wordIndex++)
561                         temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
562
563                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
564                                         cipherkeylen + cipherkeylen;
565         } else {
566                 memcpy(cipher->key, cipherkey, cipherkeylen);
567                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
568                                         cipherkeylen;
569         }
570
571         if (total_key_size > cipherkeylen) {
572                 uint32_t padding_size = total_key_size - cipherkeylen;
573                 if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
574                         && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
575                         /* K3 not provided, so reuse K1 as K3 */
576                         memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
577                 else
578                         memset(cdesc->cd_cur_ptr, 0, padding_size);
579                 cdesc->cd_cur_ptr += padding_size;
580         }
581         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
582         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
583
584         return 0;
585 }
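
/*
 * Layout note (a sketch of what the function above builds): the cipher block
 * written at cd_cur_ptr is a struct icp_qat_hw_cipher_config immediately
 * followed by the cipher key (plus KASUMI's F8-modified key copy, or the
 * zero/K1 padding up to total_key_size where applicable). Every size and
 * offset programmed into cipher_cd_ctrl and cd_pars is expressed in
 * quadwords, hence the ">> 3" shifts: cipher_key_sz = total_key_size / 8,
 * cipher_cfg_offset = (byte offset of the cipher block) / 8 and
 * content_desc_params_sz = ceil(cd_size / 8).
 */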
586
587 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
588                                                 uint8_t *authkey,
589                                                 uint32_t authkeylen,
590                                                 uint32_t add_auth_data_length,
591                                                 uint32_t digestsize,
592                                                 unsigned int operation)
593 {
594         struct icp_qat_hw_auth_setup *hash;
595         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
596         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
597         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
598         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
599         void *ptr = &req_tmpl->cd_ctrl;
600         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
601         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
602         struct icp_qat_fw_la_auth_req_params *auth_param =
603                 (struct icp_qat_fw_la_auth_req_params *)
604                 ((char *)&req_tmpl->serv_specif_rqpars +
605                 sizeof(struct icp_qat_fw_la_cipher_req_params));
606         uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/SNOW 3G */
607         uint16_t state1_size = 0, state2_size = 0;
608         uint16_t hash_offset, cd_size;
609         uint32_t *aad_len = NULL;
610         uint32_t wordIndex  = 0;
611         uint32_t *pTempKey;
612
613         PMD_INIT_FUNC_TRACE();
614
615         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
616                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
617                                         ICP_QAT_FW_SLICE_AUTH);
618                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
619                                         ICP_QAT_FW_SLICE_DRAM_WR);
620                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
621         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
622                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
623                                 ICP_QAT_FW_SLICE_AUTH);
624                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
625                                 ICP_QAT_FW_SLICE_CIPHER);
626                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
627                                 ICP_QAT_FW_SLICE_CIPHER);
628                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
629                                 ICP_QAT_FW_SLICE_DRAM_WR);
630                 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
631         } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
632                 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
633                 return -EFAULT;
634         }
635
636         if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
637                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
638                                 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
639                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
640                                 ICP_QAT_FW_LA_CMP_AUTH_RES);
641                 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
642         } else {
643                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
644                                            ICP_QAT_FW_LA_RET_AUTH_RES);
645                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
646                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
647                 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
648         }
649
650         /*
651          * Setup the inner hash config
652          */
653         hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
654         hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
655         hash->auth_config.reserved = 0;
656         hash->auth_config.config =
657                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
658                                 cdesc->qat_hash_alg, digestsize);
659
660         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
661                 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
662                 hash->auth_counter.counter = 0;
663         else
664                 hash->auth_counter.counter = rte_bswap32(
665                                 qat_hash_get_block_size(cdesc->qat_hash_alg));
666
667         cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
668
669         /*
670          * cd_cur_ptr now points at the state1 information.
671          */
672         switch (cdesc->qat_hash_alg) {
673         case ICP_QAT_HW_AUTH_ALGO_SHA1:
674                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
675                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
676                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
677                         return -EFAULT;
678                 }
679                 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
680                 break;
681         case ICP_QAT_HW_AUTH_ALGO_SHA224:
682                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
683                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
684                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
685                         return -EFAULT;
686                 }
687                 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
688                 break;
689         case ICP_QAT_HW_AUTH_ALGO_SHA256:
690                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
691                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
692                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
693                         return -EFAULT;
694                 }
695                 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
696                 break;
697         case ICP_QAT_HW_AUTH_ALGO_SHA384:
698                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
699                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
700                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
701                         return -EFAULT;
702                 }
703                 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
704                 break;
705         case ICP_QAT_HW_AUTH_ALGO_SHA512:
706                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
707                         authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
708                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
709                         return -EFAULT;
710                 }
711                 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
712                 break;
713         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
714                 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
715                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
716                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
717                         &state2_size)) {
718                         PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
719                         return -EFAULT;
720                 }
721                 break;
722         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
723         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
724                 proto = ICP_QAT_FW_LA_GCM_PROTO;
725                 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
726                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
727                         authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
728                         &state2_size)) {
729                         PMD_DRV_LOG(ERR, "(GCM)precompute failed");
730                         return -EFAULT;
731                 }
732                 /*
733                  * Write the length of the AAD into bytes 16-19 of state2,
734                  * in big-endian format. The field itself is 8 bytes wide.
735                  */
736                 auth_param->u2.aad_sz =
737                                 RTE_ALIGN_CEIL(add_auth_data_length, 16);
738                 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
739
740                 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
741                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
742                                         ICP_QAT_HW_GALOIS_H_SZ);
743                 *aad_len = rte_bswap32(add_auth_data_length);
744                 break;
745         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
746                 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
747                 state1_size = qat_hash_get_state1_size(
748                                 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
749                 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
750                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
751
752                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
753                                 (cdesc->cd_cur_ptr + state1_size + state2_size);
754                 cipherconfig->cipher_config.val =
755                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
756                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
757                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
758                         ICP_QAT_HW_CIPHER_ENCRYPT);
759                 memcpy(cipherconfig->key, authkey, authkeylen);
760                 memset(cipherconfig->key + authkeylen,
761                                 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
762                 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
763                                 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
764                 auth_param->hash_state_sz =
765                                 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
766                 break;
767         case ICP_QAT_HW_AUTH_ALGO_MD5:
768                 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
769                         authkey, authkeylen, cdesc->cd_cur_ptr,
770                         &state1_size)) {
771                         PMD_DRV_LOG(ERR, "(MD5)precompute failed");
772                         return -EFAULT;
773                 }
774                 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
775                 break;
776         case ICP_QAT_HW_AUTH_ALGO_NULL:
777                 state1_size = qat_hash_get_state1_size(
778                                 ICP_QAT_HW_AUTH_ALGO_NULL);
779                 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
780                 break;
781         case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
782                 state1_size = qat_hash_get_state1_size(
783                                 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
784                 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
785                 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
786                 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
787                                                         + authkeylen);
788                 /*
789                 * The Inner Hash Initial State2 block must contain IK
790                 * (Initialisation Key), followed by IK XOR-ed with KM
791                 * (Key Modifier): IK||(IK^KM).
792                 */
793                 /* write the auth key */
794                 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
795                 /* initialise temp key with auth key */
796                 memcpy(pTempKey, authkey, authkeylen);
797                 /* XOR the key with the KASUMI F9 key modifier, 4 bytes at a time */
798                 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
799                         pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
800                 break;
801         default:
802                 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
803                 return -EFAULT;
804         }
805
806         /* Request template setup */
807         qat_alg_init_common_hdr(header, proto);
808         header->service_cmd_id = cdesc->qat_cmd;
809
810         /* Auth CD config setup */
811         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
812         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
813         hash_cd_ctrl->inner_res_sz = digestsize;
814         hash_cd_ctrl->final_sz = digestsize;
815         hash_cd_ctrl->inner_state1_sz = state1_size;
816         auth_param->auth_res_sz = digestsize;
817
818         hash_cd_ctrl->inner_state2_sz  = state2_size;
819         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
820                         ((sizeof(struct icp_qat_hw_auth_setup) +
821                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
822                                         >> 3);
823
824         cdesc->cd_cur_ptr += state1_size + state2_size;
825         cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
826
827         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
828         cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
829
830         return 0;
831 }
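
/*
 * Worked example (a sketch, assuming ICP_QAT_HW_SHA1_STATE2_SZ is 20 and
 * sizeof(struct icp_qat_hw_auth_setup) is 16): for HMAC-SHA1 the precompute
 * gives state1_size = 24 and state2_size = RTE_ALIGN_CEIL(20, 8) = 24, so
 *
 *      inner_state2_offset = hash_cfg_offset + (16 + 24) / 8
 *                          = hash_cfg_offset + 5   (in quadwords)
 *
 * i.e. state2 (the opad partial state) starts five quadwords after the auth
 * setup block, matching the buffer layout that qat_alg_do_precomputes()
 * produced earlier.
 */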
832
833 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
834 {
835         switch (key_len) {
836         case ICP_QAT_HW_AES_128_KEY_SZ:
837                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
838                 break;
839         case ICP_QAT_HW_AES_192_KEY_SZ:
840                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
841                 break;
842         case ICP_QAT_HW_AES_256_KEY_SZ:
843                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
844                 break;
845         default:
846                 return -EINVAL;
847         }
848         return 0;
849 }
850
851 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
852 {
853         switch (key_len) {
854         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
855                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
856                 break;
857         default:
858                 return -EINVAL;
859         }
860         return 0;
861 }
862
863 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
864 {
865         switch (key_len) {
866         case ICP_QAT_HW_KASUMI_KEY_SZ:
867                 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
868                 break;
869         default:
870                 return -EINVAL;
871         }
872         return 0;
873 }
874
875 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
876 {
877         switch (key_len) {
878         case QAT_3DES_KEY_SZ_OPT1:
879         case QAT_3DES_KEY_SZ_OPT2:
880                 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
881                 break;
882         default:
883                 return -EINVAL;
884         }
885         return 0;
886 }
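
/*
 * Usage sketch (hypothetical caller): the session setup path is expected to
 * turn the user-supplied key length into the hardware algorithm enum before
 * the content descriptor is built, e.g.:
 *
 *      enum icp_qat_hw_cipher_algo alg;
 *
 *      if (qat_alg_validate_aes_key(key_len, &alg) != 0)
 *              return -EINVAL;
 *
 * (16, 24 and 32 byte keys map to AES128, AES192 and AES256 respectively,
 * per the ICP_QAT_HW_AES_*_KEY_SZ cases above.)
 */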