/*
 *  This file is provided under a dual BSD/GPLv2 license.  When using or
 *  redistributing this file, you may do so under either license.
 *
 *  GPL LICENSE SUMMARY
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of version 2 of the GNU General Public License as
 *  published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  Contact Information:
 *  qat-linux@intel.com
 *
 *  BSD LICENSE
 *  Copyright(c) 2015-2016 Intel Corporation.
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *      * Redistributions of source code must retain the above copyright
 *        notice, this list of conditions and the following disclaimer.
 *      * Redistributions in binary form must reproduce the above copyright
 *        notice, this list of conditions and the following disclaimer in
 *        the documentation and/or other materials provided with the
 *        distribution.
 *      * Neither the name of Intel Corporation nor the names of its
 *        contributors may be used to endorse or promote products derived
 *        from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */

/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}

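/*
 * Single-block wrappers around the OpenSSL low-level *_Transform
 * routines: each runs one compression-function pass over one input
 * block and copies the raw (host-endian) internal state to data_out.
 */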
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}

static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

	return 0;
}

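/*
 * Computes a single-block partial hash of data_in into data_out,
 * converting the SHA state words to big-endian as it copies them out;
 * the MD5 state is the exception and is copied through unchanged.
 */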
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}

	return 0;
}
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3

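/*
 * Computes the hash state material that is embedded in the content
 * descriptor: the AES-XCBC-MAC K1/K2/K3 subkeys, the GCM hash key H
 * (AES encryption of an all-zero block), or the HMAC ipad/opad partial
 * digests for the plain hash algorithms.
 */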
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	PMD_INIT_FUNC_TRACE();
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
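		/*
		 * AES-XCBC-MAC key-derivation constants (RFC 3566): AES
		 * encryption of these 0x01/0x02/0x03 blocks under the auth
		 * key yields the K1, K2 and K3 subkeys respectively.
		 */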
		static uint8_t qat_aes_xcbc_key_seed[
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		int x;
		AES_KEY enc_key;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
		if (in == NULL)
			return -ENOMEM;
		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
				rte_free(in -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
				memset(out -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				return -EFAULT;
			}
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
		return 0;
	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL)
			return -ENOMEM;
		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
			&enc_key) != 0) {
			rte_free(in);
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size <= 0)
		return -EFAULT;
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}

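/*
 * Fills in the request header fields shared by all lookaside (LA)
 * requests built from this session: flat buffer pointers, 64-bit
 * content descriptor addressing, a 16-byte cipher IV field, the
 * protocol flags supplied by the caller, no partials and no state
 * update.
 */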
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
		uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				proto);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
}

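/*
 * Builds the cipher half of the session content descriptor: slice
 * chaining in the request header, the cipher configuration word, the
 * (optionally hardware-converted) cipher key, plus algorithm-specific
 * extras such as the KASUMI F8 modified key copy and 3DES key padding.
 */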
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;
	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;

	cipher->cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size - cipherkeylen;

		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
			/* K3 not provided so use K1 = K3 */
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		else
			memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}

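/*
 * Builds the auth half of the session content descriptor: slice
 * chaining, the inner hash configuration word, the algorithm-specific
 * state1/state2 precomputes and the auth request parameters (digest
 * size, AAD and hash state sizes).
 */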
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t add_auth_data_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t state1_size = 0, state2_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex = 0;
	uint32_t *pTempKey;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
				ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
	}

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
		hash->auth_counter.counter = 0;
	else
		hash->auth_counter.counter = rte_bswap32(
				qat_hash_get_block_size(cdesc->qat_hash_alg));

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		proto = ICP_QAT_FW_LA_GCM_PROTO;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(add_auth_data_length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_NULL);
		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
							+ authkeylen);
		/*
		 * The Inner Hash Initial State2 block must contain IK
		 * (Initialisation Key), followed by IK XOR-ed with KM
		 * (Key Modifier): IK||(IK^KM).
		 */
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Request template setup */
	qat_alg_init_common_hdr(header, proto);
	header->service_cmd_id = cdesc->qat_cmd;

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size;
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}

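/*
 * Key-size validation helpers: map a key length in bytes onto the
 * matching QAT hardware cipher algorithm, or return -EINVAL if the
 * length is not supported.
 */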
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_KASUMI_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case QAT_3DES_KEY_SZ_OPT1:
	case QAT_3DES_KEY_SZ_OPT2:
		*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}