2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
6 * Copyright(c) 2015-2016 Intel Corporation.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * Contact Information:
20 * Copyright(c) 2015-2016 Intel Corporation.
21 * Redistribution and use in source and binary forms, with or without
22 * modification, are permitted provided that the following conditions
25 * * Redistributions of source code must retain the above copyright
26 * notice, this list of conditions and the following disclaimer.
27 * * Redistributions in binary form must reproduce the above copyright
28 * notice, this list of conditions and the following disclaimer in
29 * the documentation and/or other materials provided with the
31 * * Neither the name of Intel Corporation nor the names of its
32 * contributors may be used to endorse or promote products derived
33 * from this software without specific prior written permission.
35 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <errno.h>
#include <string.h>

#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
65 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
66 * This is digest size rounded up to nearest quadword
68 static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
70 switch (qat_hash_alg) {
71 case ICP_QAT_HW_AUTH_ALGO_SHA1:
72 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
73 QAT_HW_DEFAULT_ALIGNMENT);
74 case ICP_QAT_HW_AUTH_ALGO_SHA224:
75 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
76 QAT_HW_DEFAULT_ALIGNMENT);
77 case ICP_QAT_HW_AUTH_ALGO_SHA256:
78 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
79 QAT_HW_DEFAULT_ALIGNMENT);
80 case ICP_QAT_HW_AUTH_ALGO_SHA384:
81 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
82 QAT_HW_DEFAULT_ALIGNMENT);
83 case ICP_QAT_HW_AUTH_ALGO_SHA512:
84 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
85 QAT_HW_DEFAULT_ALIGNMENT);
86 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
87 return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
88 QAT_HW_DEFAULT_ALIGNMENT);
89 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
90 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
91 return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
92 QAT_HW_DEFAULT_ALIGNMENT);
93 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
94 return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
95 QAT_HW_DEFAULT_ALIGNMENT);
96 case ICP_QAT_HW_AUTH_ALGO_MD5:
97 return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
98 QAT_HW_DEFAULT_ALIGNMENT);
99 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
100 return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
101 QAT_HW_DEFAULT_ALIGNMENT);
102 case ICP_QAT_HW_AUTH_ALGO_NULL:
103 return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
104 QAT_HW_DEFAULT_ALIGNMENT);
105 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
106 /* return maximum state1 size in this case */
107 return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
108 QAT_HW_DEFAULT_ALIGNMENT);
110 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
116 /* returns digest size in bytes per hash algo */
117 static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
119 switch (qat_hash_alg) {
120 case ICP_QAT_HW_AUTH_ALGO_SHA1:
121 return ICP_QAT_HW_SHA1_STATE1_SZ;
122 case ICP_QAT_HW_AUTH_ALGO_SHA224:
123 return ICP_QAT_HW_SHA224_STATE1_SZ;
124 case ICP_QAT_HW_AUTH_ALGO_SHA256:
125 return ICP_QAT_HW_SHA256_STATE1_SZ;
126 case ICP_QAT_HW_AUTH_ALGO_SHA384:
127 return ICP_QAT_HW_SHA384_STATE1_SZ;
128 case ICP_QAT_HW_AUTH_ALGO_SHA512:
129 return ICP_QAT_HW_SHA512_STATE1_SZ;
130 case ICP_QAT_HW_AUTH_ALGO_MD5:
131 return ICP_QAT_HW_MD5_STATE1_SZ;
132 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
133 /* return maximum digest size in this case */
134 return ICP_QAT_HW_SHA512_STATE1_SZ;
136 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
142 /* returns block size in byes per hash algo */
143 static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
145 switch (qat_hash_alg) {
146 case ICP_QAT_HW_AUTH_ALGO_SHA1:
148 case ICP_QAT_HW_AUTH_ALGO_SHA224:
149 return SHA256_CBLOCK;
150 case ICP_QAT_HW_AUTH_ALGO_SHA256:
151 return SHA256_CBLOCK;
152 case ICP_QAT_HW_AUTH_ALGO_SHA384:
153 return SHA512_CBLOCK;
154 case ICP_QAT_HW_AUTH_ALGO_SHA512:
155 return SHA512_CBLOCK;
156 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
158 case ICP_QAT_HW_AUTH_ALGO_MD5:
160 case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
161 /* return maximum block size in this case */
162 return SHA512_CBLOCK;
164 PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
170 static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
174 if (!SHA1_Init(&ctx))
176 SHA1_Transform(&ctx, data_in);
177 rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
181 static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
185 if (!SHA224_Init(&ctx))
187 SHA256_Transform(&ctx, data_in);
188 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
192 static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
196 if (!SHA256_Init(&ctx))
198 SHA256_Transform(&ctx, data_in);
199 rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
203 static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
207 if (!SHA384_Init(&ctx))
209 SHA512_Transform(&ctx, data_in);
210 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
214 static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
218 if (!SHA512_Init(&ctx))
220 SHA512_Transform(&ctx, data_in);
221 rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
225 static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
231 MD5_Transform(&ctx, data_in);
232 rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
237 static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
242 uint8_t digest[qat_hash_get_digest_size(
243 ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
244 uint32_t *hash_state_out_be32;
245 uint64_t *hash_state_out_be64;
248 PMD_INIT_FUNC_TRACE();
249 digest_size = qat_hash_get_digest_size(hash_alg);
250 if (digest_size <= 0)
253 hash_state_out_be32 = (uint32_t *)data_out;
254 hash_state_out_be64 = (uint64_t *)data_out;
257 case ICP_QAT_HW_AUTH_ALGO_SHA1:
258 if (partial_hash_sha1(data_in, digest))
260 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
261 *hash_state_out_be32 =
262 rte_bswap32(*(((uint32_t *)digest)+i));
264 case ICP_QAT_HW_AUTH_ALGO_SHA224:
265 if (partial_hash_sha224(data_in, digest))
267 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
268 *hash_state_out_be32 =
269 rte_bswap32(*(((uint32_t *)digest)+i));
271 case ICP_QAT_HW_AUTH_ALGO_SHA256:
272 if (partial_hash_sha256(data_in, digest))
274 for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
275 *hash_state_out_be32 =
276 rte_bswap32(*(((uint32_t *)digest)+i));
278 case ICP_QAT_HW_AUTH_ALGO_SHA384:
279 if (partial_hash_sha384(data_in, digest))
281 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
282 *hash_state_out_be64 =
283 rte_bswap64(*(((uint64_t *)digest)+i));
285 case ICP_QAT_HW_AUTH_ALGO_SHA512:
286 if (partial_hash_sha512(data_in, digest))
288 for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
289 *hash_state_out_be64 =
290 rte_bswap64(*(((uint64_t *)digest)+i));
292 case ICP_QAT_HW_AUTH_ALGO_MD5:
293 if (partial_hash_md5(data_in, data_out))
297 PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
/* HMAC inner/outer pad bytes (RFC 2104) and the number of derived keys
 * (K1..K3) that AES-XCBC-MAC pre-computation must generate.
 */
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
307 static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
308 const uint8_t *auth_key,
309 uint16_t auth_keylen,
310 uint8_t *p_state_buf,
311 uint16_t *p_state_len)
314 uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
315 uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
318 PMD_INIT_FUNC_TRACE();
319 if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
320 static uint8_t qat_aes_xcbc_key_seed[
321 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
322 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
323 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
324 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
325 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
326 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
327 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
331 uint8_t *out = p_state_buf;
335 in = rte_zmalloc("working mem for key",
336 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
337 rte_memcpy(in, qat_aes_xcbc_key_seed,
338 ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
339 for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
340 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
343 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
345 (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
346 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
349 AES_encrypt(in, out, &enc_key);
350 in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
351 out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
353 *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
354 rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
356 } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
357 (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
359 uint8_t *out = p_state_buf;
362 memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
363 ICP_QAT_HW_GALOIS_LEN_A_SZ +
364 ICP_QAT_HW_GALOIS_E_CTR0_SZ);
365 in = rte_zmalloc("working mem for key",
366 ICP_QAT_HW_GALOIS_H_SZ, 16);
367 memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
368 if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
372 AES_encrypt(in, out, &enc_key);
373 *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
374 ICP_QAT_HW_GALOIS_LEN_A_SZ +
375 ICP_QAT_HW_GALOIS_E_CTR0_SZ;
380 block_size = qat_hash_get_block_size(hash_alg);
383 /* init ipad and opad from key and xor with fixed values */
384 memset(ipad, 0, block_size);
385 memset(opad, 0, block_size);
387 if (auth_keylen > (unsigned int)block_size) {
388 PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
391 rte_memcpy(ipad, auth_key, auth_keylen);
392 rte_memcpy(opad, auth_key, auth_keylen);
394 for (i = 0; i < block_size; i++) {
395 uint8_t *ipad_ptr = ipad + i;
396 uint8_t *opad_ptr = opad + i;
397 *ipad_ptr ^= HMAC_IPAD_VALUE;
398 *opad_ptr ^= HMAC_OPAD_VALUE;
401 /* do partial hash of ipad and copy to state1 */
402 if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
403 memset(ipad, 0, block_size);
404 memset(opad, 0, block_size);
405 PMD_DRV_LOG(ERR, "ipad precompute failed");
410 * State len is a multiple of 8, so may be larger than the digest.
411 * Put the partial hash of opad state_len bytes after state1
413 *p_state_len = qat_hash_get_state1_size(hash_alg);
414 if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
415 memset(ipad, 0, block_size);
416 memset(opad, 0, block_size);
417 PMD_DRV_LOG(ERR, "opad precompute failed");
421 /* don't leave data lying around */
422 memset(ipad, 0, block_size);
423 memset(opad, 0, block_size);
427 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
430 PMD_INIT_FUNC_TRACE();
432 ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
433 header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
434 header->comn_req_flags =
435 ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
436 QAT_COMN_PTR_TYPE_FLAT);
437 ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
438 ICP_QAT_FW_LA_PARTIAL_NONE);
439 ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
440 ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
441 ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
443 ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
444 ICP_QAT_FW_LA_NO_UPDATE_STATE);
447 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
449 uint32_t cipherkeylen)
451 struct icp_qat_hw_cipher_algo_blk *cipher;
452 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
453 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
454 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
455 void *ptr = &req_tmpl->cd_ctrl;
456 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
457 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
458 enum icp_qat_hw_cipher_convert key_convert;
459 uint32_t total_key_size;
460 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
461 uint16_t cipher_offset, cd_size;
462 uint32_t wordIndex = 0;
463 uint32_t *temp_key = NULL;
464 PMD_INIT_FUNC_TRACE();
466 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
467 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
468 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
469 ICP_QAT_FW_SLICE_CIPHER);
470 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
471 ICP_QAT_FW_SLICE_DRAM_WR);
472 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
473 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
474 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
475 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
476 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
477 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
478 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
479 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
480 ICP_QAT_FW_SLICE_CIPHER);
481 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
482 ICP_QAT_FW_SLICE_AUTH);
483 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
484 ICP_QAT_FW_SLICE_AUTH);
485 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
486 ICP_QAT_FW_SLICE_DRAM_WR);
487 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
488 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
489 PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
493 if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
495 * CTR Streaming ciphers are a special case. Decrypt = encrypt
496 * Overriding default values previously set
498 cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
499 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
500 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
501 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
502 else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
503 key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
505 key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
507 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
508 total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
509 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
510 cipher_cd_ctrl->cipher_state_sz =
511 ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
512 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
513 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
514 total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
515 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
516 cipher_cd_ctrl->cipher_padding_sz =
517 (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
518 } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
519 total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
520 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
521 proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
523 total_key_size = cipherkeylen;
524 cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
525 proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
527 cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
528 cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
529 cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
531 header->service_cmd_id = cdesc->qat_cmd;
532 qat_alg_init_common_hdr(header, proto);
534 cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
536 cipher->cipher_config.val =
537 ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
538 cdesc->qat_cipher_alg, key_convert,
541 if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
542 temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
543 sizeof(struct icp_qat_hw_cipher_config)
545 memcpy(cipher->key, cipherkey, cipherkeylen);
546 memcpy(temp_key, cipherkey, cipherkeylen);
548 /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
549 for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
551 temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
553 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
554 cipherkeylen + cipherkeylen;
556 memcpy(cipher->key, cipherkey, cipherkeylen);
557 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
561 if (total_key_size > cipherkeylen) {
562 uint32_t padding_size = total_key_size-cipherkeylen;
563 if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
564 && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
565 /* K3 not provided so use K1 = K3*/
566 memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
568 memset(cdesc->cd_cur_ptr, 0, padding_size);
569 cdesc->cd_cur_ptr += padding_size;
571 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
572 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
577 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
580 uint32_t add_auth_data_length,
582 unsigned int operation)
584 struct icp_qat_hw_auth_setup *hash;
585 struct icp_qat_hw_cipher_algo_blk *cipherconfig;
586 struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
587 struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
588 struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
589 void *ptr = &req_tmpl->cd_ctrl;
590 struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
591 struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
592 struct icp_qat_fw_la_auth_req_params *auth_param =
593 (struct icp_qat_fw_la_auth_req_params *)
594 ((char *)&req_tmpl->serv_specif_rqpars +
595 sizeof(struct icp_qat_fw_la_cipher_req_params));
596 uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
597 uint16_t state1_size = 0, state2_size = 0;
598 uint16_t hash_offset, cd_size;
599 uint32_t *aad_len = NULL;
600 uint32_t wordIndex = 0;
603 PMD_INIT_FUNC_TRACE();
605 if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
606 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
607 ICP_QAT_FW_SLICE_AUTH);
608 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
609 ICP_QAT_FW_SLICE_DRAM_WR);
610 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
611 } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
612 ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
613 ICP_QAT_FW_SLICE_AUTH);
614 ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
615 ICP_QAT_FW_SLICE_CIPHER);
616 ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
617 ICP_QAT_FW_SLICE_CIPHER);
618 ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
619 ICP_QAT_FW_SLICE_DRAM_WR);
620 cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
621 } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
622 PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
626 if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
627 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
628 ICP_QAT_FW_LA_NO_RET_AUTH_RES);
629 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
630 ICP_QAT_FW_LA_CMP_AUTH_RES);
631 cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
633 ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
634 ICP_QAT_FW_LA_RET_AUTH_RES);
635 ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
636 ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
637 cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
641 * Setup the inner hash config
643 hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
644 hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
645 hash->auth_config.reserved = 0;
646 hash->auth_config.config =
647 ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
648 cdesc->qat_hash_alg, digestsize);
650 if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
651 || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
652 hash->auth_counter.counter = 0;
654 hash->auth_counter.counter = rte_bswap32(
655 qat_hash_get_block_size(cdesc->qat_hash_alg));
657 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
660 * cd_cur_ptr now points at the state1 information.
662 switch (cdesc->qat_hash_alg) {
663 case ICP_QAT_HW_AUTH_ALGO_SHA1:
664 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
665 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
666 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
669 state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
671 case ICP_QAT_HW_AUTH_ALGO_SHA224:
672 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
673 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
674 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
677 state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
679 case ICP_QAT_HW_AUTH_ALGO_SHA256:
680 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
681 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
682 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
685 state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
687 case ICP_QAT_HW_AUTH_ALGO_SHA384:
688 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
689 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
690 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
693 state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
695 case ICP_QAT_HW_AUTH_ALGO_SHA512:
696 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
697 authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
698 PMD_DRV_LOG(ERR, "(SHA)precompute failed");
701 state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
703 case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
704 state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
705 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
706 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
708 PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
712 case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
713 case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
714 proto = ICP_QAT_FW_LA_GCM_PROTO;
715 state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
716 if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
717 authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
719 PMD_DRV_LOG(ERR, "(GCM)precompute failed");
723 * Write (the length of AAD) into bytes 16-19 of state2
724 * in big-endian format. This field is 8 bytes
726 auth_param->u2.aad_sz =
727 RTE_ALIGN_CEIL(add_auth_data_length, 16);
728 auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
730 aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
731 ICP_QAT_HW_GALOIS_128_STATE1_SZ +
732 ICP_QAT_HW_GALOIS_H_SZ);
733 *aad_len = rte_bswap32(add_auth_data_length);
735 case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
736 proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
737 state1_size = qat_hash_get_state1_size(
738 ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
739 state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
740 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
742 cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
743 (cdesc->cd_cur_ptr + state1_size + state2_size);
744 cipherconfig->cipher_config.val =
745 ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
746 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
747 ICP_QAT_HW_CIPHER_KEY_CONVERT,
748 ICP_QAT_HW_CIPHER_ENCRYPT);
749 memcpy(cipherconfig->key, authkey, authkeylen);
750 memset(cipherconfig->key + authkeylen,
751 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
752 cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
753 authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
754 auth_param->hash_state_sz =
755 RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
757 case ICP_QAT_HW_AUTH_ALGO_MD5:
758 if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
759 authkey, authkeylen, cdesc->cd_cur_ptr,
761 PMD_DRV_LOG(ERR, "(MD5)precompute failed");
764 state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
766 case ICP_QAT_HW_AUTH_ALGO_NULL:
767 state1_size = qat_hash_get_state1_size(
768 ICP_QAT_HW_AUTH_ALGO_NULL);
769 state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
771 case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
772 state1_size = qat_hash_get_state1_size(
773 ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
774 state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
775 memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
776 pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
779 * The Inner Hash Initial State2 block must contain IK
780 * (Initialisation Key), followed by IK XOR-ed with KM
781 * (Key Modifier): IK||(IK^KM).
783 /* write the auth key */
784 memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
785 /* initialise temp key with auth key */
786 memcpy(pTempKey, authkey, authkeylen);
787 /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
788 for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
789 pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
792 PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
796 /* Request template setup */
797 qat_alg_init_common_hdr(header, proto);
798 header->service_cmd_id = cdesc->qat_cmd;
800 /* Auth CD config setup */
801 hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
802 hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
803 hash_cd_ctrl->inner_res_sz = digestsize;
804 hash_cd_ctrl->final_sz = digestsize;
805 hash_cd_ctrl->inner_state1_sz = state1_size;
806 auth_param->auth_res_sz = digestsize;
808 hash_cd_ctrl->inner_state2_sz = state2_size;
809 hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
810 ((sizeof(struct icp_qat_hw_auth_setup) +
811 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
814 cdesc->cd_cur_ptr += state1_size + state2_size;
815 cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
817 cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
818 cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
823 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
826 case ICP_QAT_HW_AES_128_KEY_SZ:
827 *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
829 case ICP_QAT_HW_AES_192_KEY_SZ:
830 *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
832 case ICP_QAT_HW_AES_256_KEY_SZ:
833 *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
841 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
844 case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
845 *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
853 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
856 case ICP_QAT_HW_KASUMI_KEY_SZ:
857 *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
865 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
868 case QAT_3DES_KEY_SZ_OPT1:
869 case QAT_3DES_KEY_SZ_OPT2:
870 *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;