Imported Upstream version 16.07-rc1
[deb_dpdk.git] / drivers / crypto / qat / qat_adf / qat_algs_build_desc.c
1 /*
2  *  This file is provided under a dual BSD/GPLv2 license.  When using or
3  *  redistributing this file, you may do so under either license.
4  *
5  *  GPL LICENSE SUMMARY
6  *  Copyright(c) 2015-2016 Intel Corporation.
7  *  This program is free software; you can redistribute it and/or modify
8  *  it under the terms of version 2 of the GNU General Public License as
9  *  published by the Free Software Foundation.
10  *
11  *  This program is distributed in the hope that it will be useful, but
12  *  WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  *  General Public License for more details.
15  *
16  *  Contact Information:
17  *  qat-linux@intel.com
18  *
19  *  BSD LICENSE
20  *  Copyright(c) 2015-2016 Intel Corporation.
21  *  Redistribution and use in source and binary forms, with or without
22  *  modification, are permitted provided that the following conditions
23  *  are met:
24  *
25  *      * Redistributions of source code must retain the above copyright
26  *        notice, this list of conditions and the following disclaimer.
27  *      * Redistributions in binary form must reproduce the above copyright
28  *        notice, this list of conditions and the following disclaimer in
29  *        the documentation and/or other materials provided with the
30  *        distribution.
31  *      * Neither the name of Intel Corporation nor the names of its
32  *        contributors may be used to endorse or promote products derived
33  *        from this software without specific prior written permission.
34  *
35  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46  */
47
48 #include <rte_memcpy.h>
49 #include <rte_common.h>
50 #include <rte_spinlock.h>
51 #include <rte_byteorder.h>
52 #include <rte_log.h>
53 #include <rte_malloc.h>
54
55 #include "../qat_logs.h"
56 #include "qat_algs.h"
57
58 #include <openssl/sha.h>        /* Needed to calculate pre-compute values */
59 #include <openssl/aes.h>        /* Needed to calculate pre-compute values */
60
61
62 /*
63  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
64  * This is digest size rounded up to nearest quadword
65  */
66 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
67 {
68         switch (qat_hash_alg) {
69         case ICP_QAT_HW_AUTH_ALGO_SHA1:
70                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
71                                                 QAT_HW_DEFAULT_ALIGNMENT);
72         case ICP_QAT_HW_AUTH_ALGO_SHA256:
73                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
74                                                 QAT_HW_DEFAULT_ALIGNMENT);
75         case ICP_QAT_HW_AUTH_ALGO_SHA512:
76                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
77                                                 QAT_HW_DEFAULT_ALIGNMENT);
78         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
79                 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
80                                                 QAT_HW_DEFAULT_ALIGNMENT);
81         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
82         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
83                 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
84                                                 QAT_HW_DEFAULT_ALIGNMENT);
85         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
86                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
87                                                 QAT_HW_DEFAULT_ALIGNMENT);
88         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
89                 /* return maximum state1 size in this case */
90                 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
91                                                 QAT_HW_DEFAULT_ALIGNMENT);
92         default:
93                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
94                 return -EFAULT;
95         };
96         return -EFAULT;
97 }
98
/* returns digest size in bytes per hash algo */
100 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
101 {
102         switch (qat_hash_alg) {
103         case ICP_QAT_HW_AUTH_ALGO_SHA1:
104                 return ICP_QAT_HW_SHA1_STATE1_SZ;
105         case ICP_QAT_HW_AUTH_ALGO_SHA256:
106                 return ICP_QAT_HW_SHA256_STATE1_SZ;
107         case ICP_QAT_HW_AUTH_ALGO_SHA512:
108                 return ICP_QAT_HW_SHA512_STATE1_SZ;
109         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
110                 /* return maximum digest size in this case */
111                 return ICP_QAT_HW_SHA512_STATE1_SZ;
112         default:
113                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
114                 return -EFAULT;
115         };
116         return -EFAULT;
117 }
118
/* returns block size in bytes per hash algo */
120 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
121 {
122         switch (qat_hash_alg) {
123         case ICP_QAT_HW_AUTH_ALGO_SHA1:
124                 return SHA_CBLOCK;
125         case ICP_QAT_HW_AUTH_ALGO_SHA256:
126                 return SHA256_CBLOCK;
127         case ICP_QAT_HW_AUTH_ALGO_SHA512:
128                 return SHA512_CBLOCK;
129         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
130                 return 16;
131         case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
132                 /* return maximum block size in this case */
133                 return SHA512_CBLOCK;
134         default:
135                 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
136                 return -EFAULT;
137         };
138         return -EFAULT;
139 }
140
141 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
142 {
143         SHA_CTX ctx;
144
145         if (!SHA1_Init(&ctx))
146                 return -EFAULT;
147         SHA1_Transform(&ctx, data_in);
148         rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
149         return 0;
150 }
151
152 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
153 {
154         SHA256_CTX ctx;
155
156         if (!SHA256_Init(&ctx))
157                 return -EFAULT;
158         SHA256_Transform(&ctx, data_in);
159         rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
160         return 0;
161 }
162
163 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
164 {
165         SHA512_CTX ctx;
166
167         if (!SHA512_Init(&ctx))
168                 return -EFAULT;
169         SHA512_Transform(&ctx, data_in);
170         rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
171         return 0;
172 }
173
174 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
175                         uint8_t *data_in,
176                         uint8_t *data_out)
177 {
178         int digest_size;
179         uint8_t digest[qat_hash_get_digest_size(
180                         ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
181         uint32_t *hash_state_out_be32;
182         uint64_t *hash_state_out_be64;
183         int i;
184
185         PMD_INIT_FUNC_TRACE();
186         digest_size = qat_hash_get_digest_size(hash_alg);
187         if (digest_size <= 0)
188                 return -EFAULT;
189
190         hash_state_out_be32 = (uint32_t *)data_out;
191         hash_state_out_be64 = (uint64_t *)data_out;
192
193         switch (hash_alg) {
194         case ICP_QAT_HW_AUTH_ALGO_SHA1:
195                 if (partial_hash_sha1(data_in, digest))
196                         return -EFAULT;
197                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
198                         *hash_state_out_be32 =
199                                 rte_bswap32(*(((uint32_t *)digest)+i));
200                 break;
201         case ICP_QAT_HW_AUTH_ALGO_SHA256:
202                 if (partial_hash_sha256(data_in, digest))
203                         return -EFAULT;
204                 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
205                         *hash_state_out_be32 =
206                                 rte_bswap32(*(((uint32_t *)digest)+i));
207                 break;
208         case ICP_QAT_HW_AUTH_ALGO_SHA512:
209                 if (partial_hash_sha512(data_in, digest))
210                         return -EFAULT;
211                 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
212                         *hash_state_out_be64 =
213                                 rte_bswap64(*(((uint64_t *)digest)+i));
214                 break;
215         default:
216                 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
217                 return -EFAULT;
218         }
219
220         return 0;
221 }
222 #define HMAC_IPAD_VALUE 0x36
223 #define HMAC_OPAD_VALUE 0x5c
224 #define HASH_XCBC_PRECOMP_KEY_NUM 3
225
226 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
227                                 const uint8_t *auth_key,
228                                 uint16_t auth_keylen,
229                                 uint8_t *p_state_buf,
230                                 uint16_t *p_state_len)
231 {
232         int block_size;
233         uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
234         uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
235         int i;
236
237         PMD_INIT_FUNC_TRACE();
238         if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
239                 static uint8_t qat_aes_xcbc_key_seed[
240                                         ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
241                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
242                         0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
243                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
244                         0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
245                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
246                         0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
247                 };
248
249                 uint8_t *in = NULL;
250                 uint8_t *out = p_state_buf;
251                 int x;
252                 AES_KEY enc_key;
253
254                 in = rte_zmalloc("working mem for key",
255                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
256                 rte_memcpy(in, qat_aes_xcbc_key_seed,
257                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
258                 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
259                         if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
260                                 &enc_key) != 0) {
261                                 rte_free(in -
262                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
263                                 memset(out -
264                                         (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
265                                         0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
266                                 return -EFAULT;
267                         }
268                         AES_encrypt(in, out, &enc_key);
269                         in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
270                         out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
271                 }
272                 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
273                 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
274                 return 0;
275         } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
276                 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
277                 uint8_t *in = NULL;
278                 uint8_t *out = p_state_buf;
279                 AES_KEY enc_key;
280
281                 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
282                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
283                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
284                 in = rte_zmalloc("working mem for key",
285                                 ICP_QAT_HW_GALOIS_H_SZ, 16);
286                 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
287                 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
288                         &enc_key) != 0) {
289                         return -EFAULT;
290                 }
291                 AES_encrypt(in, out, &enc_key);
292                 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
293                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
294                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
295                 rte_free(in);
296                 return 0;
297         }
298
299         block_size = qat_hash_get_block_size(hash_alg);
300         if (block_size <= 0)
301                 return -EFAULT;
302         /* init ipad and opad from key and xor with fixed values */
303         memset(ipad, 0, block_size);
304         memset(opad, 0, block_size);
305
306         if (auth_keylen > (unsigned int)block_size) {
307                 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
308                 return -EFAULT;
309         }
310         rte_memcpy(ipad, auth_key, auth_keylen);
311         rte_memcpy(opad, auth_key, auth_keylen);
312
313         for (i = 0; i < block_size; i++) {
314                 uint8_t *ipad_ptr = ipad + i;
315                 uint8_t *opad_ptr = opad + i;
316                 *ipad_ptr ^= HMAC_IPAD_VALUE;
317                 *opad_ptr ^= HMAC_OPAD_VALUE;
318         }
319
320         /* do partial hash of ipad and copy to state1 */
321         if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
322                 memset(ipad, 0, block_size);
323                 memset(opad, 0, block_size);
324                 PMD_DRV_LOG(ERR, "ipad precompute failed");
325                 return -EFAULT;
326         }
327
328         /*
329          * State len is a multiple of 8, so may be larger than the digest.
330          * Put the partial hash of opad state_len bytes after state1
331          */
332         *p_state_len = qat_hash_get_state1_size(hash_alg);
333         if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
334                 memset(ipad, 0, block_size);
335                 memset(opad, 0, block_size);
336                 PMD_DRV_LOG(ERR, "opad precompute failed");
337                 return -EFAULT;
338         }
339
340         /*  don't leave data lying around */
341         memset(ipad, 0, block_size);
342         memset(opad, 0, block_size);
343         return 0;
344 }
345
/*
 * Fill in the request-header defaults shared by every lookaside (LA)
 * request this driver builds: service type, flat-buffer/64-bit CD
 * addressing, no partial processing, 16-byte IV field, no protocol and
 * no state update.  Callers override individual serv_specif_flags
 * afterwards for algorithm-specific behaviour (e.g. GCM/Snow3G proto).
 */
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	PMD_INIT_FUNC_TRACE();
	/* Build base header flags from the "request flags set" value */
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	/* Content descriptor referenced by 64-bit address, flat src/dst */
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	/* Whole-packet operation: no partial-packet processing */
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	/* IV is carried as 16 bytes of data in the request */
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	/* Default: no CCM/GCM/Snow3G protocol acceleration */
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	/* Stateless operation: firmware does not persist session state */
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
364
/*
 * Populate the cipher half of a session's content descriptor (CD) and
 * the request template for cipher-only and chained cipher/hash commands.
 *
 * @cdesc        session being built; fw_req, cd and qat_* fields are
 *               read and the CD/request template written in place
 * @cipherkey    raw cipher key, copied into the CD
 * @cipherkeylen key length in bytes (caller-validated; sizing of the
 *               CD key field is assumed sufficient -- not checked here)
 *
 * Returns 0 on success, -EFAULT for an unsupported qat_cmd.
 */
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	/*
	 * cd_ctrl is viewed through two overlaid structs: the cipher view
	 * and the auth view share the same storage (firmware-defined
	 * layout), so both pointers alias req_tmpl->cd_ctrl deliberately.
	 */
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t cipher_offset = 0;

	PMD_INIT_FUNC_TRACE();

	/*
	 * For HASH_CIPHER (except Snow3G) the auth block comes first in
	 * the CD, so the cipher block lives after it; otherwise the
	 * cipher block is at the start of the CD.
	 */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
		cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		cipher =
		    (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
				sizeof(struct icp_qat_hw_auth_algo_blk));
		cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
	} else {
		cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
		cipher_offset = 0;
	}
	/* CD setup */
	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
		/* Encrypt: firmware returns the auth result, no compare */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	} else {
		/* Decrypt: firmware compares the auth result, no return */
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/* CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* Snow3G hash also forces key conversion in hardware */
	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* For Snow3G, set key convert and other bits */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)  {
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		}
	}

	/* Encode mode/algorithm/key-convert/direction into the CD config */
	cipher->aes.cipher_config.val =
	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);
	memcpy(cipher->aes.key, cipherkey, cipherkeylen);

	proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;

	/* Request template setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = cdesc->qat_cmd;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
	/* Configure the common header protocol flags */
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	/* CD params size is expressed in 8-byte (quadword) units */
	cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;

	/* Cipher CD config setup */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		/* Snow3G key field carries key + IV; sizes in quadwords */
		cipher_cd_ctrl->cipher_key_sz =
			(ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
		if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)  {
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
		}
	} else {
		cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
	}

	/* Chain the firmware processing slices for the chosen command */
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		/* cipher -> DRAM writeback */
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		/* cipher -> auth -> DRAM writeback */
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		/* auth -> cipher -> DRAM writeback */
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else {
		PMD_DRV_LOG(ERR, "invalid param, only authenticated "
			    "encryption supported");
		return -EFAULT;
	}
	return 0;
}
500
501 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
502                                                 uint8_t *authkey,
503                                                 uint32_t authkeylen,
504                                                 uint32_t add_auth_data_length,
505                                                 uint32_t digestsize)
506 {
507         struct icp_qat_hw_cipher_algo_blk *cipher;
508         struct icp_qat_hw_auth_algo_blk *hash;
509         struct icp_qat_hw_cipher_algo_blk *cipherconfig;
510         struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
511         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
512         struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
513         void *ptr = &req_tmpl->cd_ctrl;
514         struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
515         struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
516         struct icp_qat_fw_la_auth_req_params *auth_param =
517                 (struct icp_qat_fw_la_auth_req_params *)
518                 ((char *)&req_tmpl->serv_specif_rqpars +
519                 sizeof(struct icp_qat_fw_la_cipher_req_params));
520         enum icp_qat_hw_cipher_convert key_convert;
521         uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;        /* no CCM/GCM/Snow3G */
522         uint16_t state1_size = 0;
523         uint16_t state2_size = 0;
524         uint16_t cipher_offset = 0, hash_offset = 0;
525
526         PMD_INIT_FUNC_TRACE();
527
528         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
529                 cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
530                 hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
531                 cipher =
532                 (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
533                                 sizeof(struct icp_qat_hw_auth_algo_blk));
534                 hash_offset = 0;
535                 cipher_offset = ((char *)hash - (char *)cipher);
536         } else {
537                 cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
538                 hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
539                                 sizeof(struct icp_qat_hw_cipher_algo_blk));
540                 cipher_offset = 0;
541                 hash_offset = ((char *)hash - (char *)cipher);
542         }
543
544         /* CD setup */
545         if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
546                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
547                                            ICP_QAT_FW_LA_RET_AUTH_RES);
548                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
549                                            ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
550         } else {
551                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
552                                            ICP_QAT_FW_LA_NO_RET_AUTH_RES);
553                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
554                                            ICP_QAT_FW_LA_CMP_AUTH_RES);
555         }
556
557         if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
558                 /* CTR Streaming ciphers are a special case. Decrypt = encrypt
559                  * Overriding default values previously set
560                  */
561                 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
562                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
563         } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
564                 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
565         else
566                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
567
568         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
569                 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
570
571         cipher->aes.cipher_config.val =
572             ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
573                                         cdesc->qat_cipher_alg, key_convert,
574                                         cdesc->qat_dir);
575
576         hash->sha.inner_setup.auth_config.reserved = 0;
577         hash->sha.inner_setup.auth_config.config =
578                         ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
579                                 cdesc->qat_hash_alg, digestsize);
580         hash->sha.inner_setup.auth_counter.counter =
581                 rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
582         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)  {
583                 hash->sha.inner_setup.auth_counter.counter = 0;
584                 hash->sha.outer_setup.auth_config.reserved = 0;
585                 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
586                                 ((char *)&cdesc->cd +
587                                 sizeof(struct icp_qat_hw_auth_algo_blk)
588                                 + 16);
589                 cipherconfig->aes.cipher_config.val =
590                 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
591                         ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
592                         ICP_QAT_HW_CIPHER_KEY_CONVERT,
593                         ICP_QAT_HW_CIPHER_ENCRYPT);
594                 memcpy(cipherconfig->aes.key, authkey, authkeylen);
595                 memset(cipherconfig->aes.key + authkeylen, 0,
596                         ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
597         }
598
599         /* Do precomputes */
600         if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
601                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
602                         authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
603                         ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
604                         PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
605                         return -EFAULT;
606                 }
607         } else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
608                 (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
609                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
610                         authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
611                         ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
612                         PMD_DRV_LOG(ERR, "(GCM)precompute failed");
613                         return -EFAULT;
614                 }
615                 /*
616                  * Write (the length of AAD) into bytes 16-19 of state2
617                  * in big-endian format. This field is 8 bytes
618                  */
619                 uint32_t *aad_len = (uint32_t *)&hash->sha.state1[
620                                         ICP_QAT_HW_GALOIS_128_STATE1_SZ +
621                                         ICP_QAT_HW_GALOIS_H_SZ];
622                 *aad_len = rte_bswap32(add_auth_data_length);
623
624                 proto = ICP_QAT_FW_LA_GCM_PROTO;
625         } else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)  {
626                 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
627                 state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
628         } else {
629                 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
630                         authkey, authkeylen, (uint8_t *)(hash->sha.state1),
631                         &state1_size)) {
632                         PMD_DRV_LOG(ERR, "(SHA)precompute failed");
633                         return -EFAULT;
634                 }
635         }
636
637         /* Request template setup */
638         qat_alg_init_common_hdr(header);
639         header->service_cmd_id = cdesc->qat_cmd;
640         ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
641                                            ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
642         /* Configure the common header protocol flags */
643         ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
644         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
645         cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
646
647         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH)  {
648                 ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
649                         ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
650                 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
651                         ICP_QAT_FW_CIPH_IV_64BIT_PTR);
652                 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
653                         ICP_QAT_FW_LA_RET_AUTH_RES);
654                 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
655                         ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
656         }
657
658         /* Cipher CD config setup */
659         cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
660         cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
661
662         if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_AUTH) {
663                 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
664                 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset>>3;
665                 } else {
666                 cipher_cd_ctrl->cipher_state_sz = 0;
667                 cipher_cd_ctrl->cipher_cfg_offset = 0;
668         }
669
670         /* Auth CD config setup */
671         hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
672         hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
673         hash_cd_ctrl->inner_res_sz = digestsize;
674         hash_cd_ctrl->final_sz = digestsize;
675         hash_cd_ctrl->inner_state1_sz = state1_size;
676
677         switch (cdesc->qat_hash_alg) {
678         case ICP_QAT_HW_AUTH_ALGO_SHA1:
679                 hash_cd_ctrl->inner_state2_sz =
680                         RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
681                 break;
682         case ICP_QAT_HW_AUTH_ALGO_SHA256:
683                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
684                 break;
685         case ICP_QAT_HW_AUTH_ALGO_SHA512:
686                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
687                 break;
688         case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
689                 hash_cd_ctrl->inner_state2_sz =
690                                 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
691                 hash_cd_ctrl->inner_state1_sz =
692                                 ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
693                 memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
694                 break;
695         case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
696         case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
697                 hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
698                                                 ICP_QAT_HW_GALOIS_LEN_A_SZ +
699                                                 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
700                 hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
701                 memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
702                 break;
703         case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
704                 hash_cd_ctrl->inner_state2_sz =
705                                 ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
706                 hash_cd_ctrl->inner_state1_sz =
707                                 ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ;
708                 memset(hash->sha.state1, 0, ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ);
709                 break;
710         default:
711                 PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
712                 return -EFAULT;
713         }
714
715         hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
716                         ((sizeof(struct icp_qat_hw_auth_setup) +
717                          RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
718                                         >> 3);
719         auth_param->auth_res_sz = digestsize;
720
721         if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
722                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
723                                         ICP_QAT_FW_SLICE_AUTH);
724                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
725                                         ICP_QAT_FW_SLICE_DRAM_WR);
726         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
727                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
728                                 ICP_QAT_FW_SLICE_CIPHER);
729                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
730                                 ICP_QAT_FW_SLICE_AUTH);
731                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
732                                 ICP_QAT_FW_SLICE_AUTH);
733                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
734                                 ICP_QAT_FW_SLICE_DRAM_WR);
735         } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
736                 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
737                                 ICP_QAT_FW_SLICE_AUTH);
738                 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
739                                 ICP_QAT_FW_SLICE_CIPHER);
740                 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
741                                 ICP_QAT_FW_SLICE_CIPHER);
742                 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
743                                 ICP_QAT_FW_SLICE_DRAM_WR);
744         } else {
745                 PMD_DRV_LOG(ERR, "invalid param, only authenticated "
746                                 "encryption supported");
747                 return -EFAULT;
748         }
749         return 0;
750 }
751
752 static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
753                                         struct icp_qat_hw_cipher_algo_blk *cd,
754                                         const uint8_t *key, unsigned int keylen)
755 {
756         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
757         struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
758         struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
759
760         PMD_INIT_FUNC_TRACE();
761         rte_memcpy(cd->aes.key, key, keylen);
762         qat_alg_init_common_hdr(header);
763         header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
764         cd_pars->u.s.content_desc_params_sz =
765                                 sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
766         /* Cipher CD config setup */
767         cd_ctrl->cipher_key_sz = keylen >> 3;
768         cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
769         cd_ctrl->cipher_cfg_offset = 0;
770         ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
771         ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
772 }
773
774 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
775                                         int alg, const uint8_t *key,
776                                         unsigned int keylen)
777 {
778         struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
779         struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
780         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
781
782         PMD_INIT_FUNC_TRACE();
783         qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
784         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
785         enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
786 }
787
788 void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
789                                         int alg, const uint8_t *key,
790                                         unsigned int keylen)
791 {
792         struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
793         struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
794         struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
795
796         PMD_INIT_FUNC_TRACE();
797         qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
798         cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
799         dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
800 }
801
802 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
803 {
804         switch (key_len) {
805         case ICP_QAT_HW_AES_128_KEY_SZ:
806                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
807                 break;
808         case ICP_QAT_HW_AES_192_KEY_SZ:
809                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
810                 break;
811         case ICP_QAT_HW_AES_256_KEY_SZ:
812                 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
813                 break;
814         default:
815                 return -EINVAL;
816         }
817         return 0;
818 }
819
820 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
821 {
822         switch (key_len) {
823         case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
824                 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
825                 break;
826         default:
827                 return -EINVAL;
828         }
829         return 0;
830 }