/*
 *   This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   Contact Information:
 *
 *   BSD LICENSE
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_crypto_sym.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
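
/*
 * Note: the OpenSSL calls in this file are only used at session-setup time
 * to derive pre-computed values (partial hash states and AES key schedules)
 * that are written into the QAT content descriptor; no OpenSSL code runs on
 * the data path.
 */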
/*
 * Returns size in bytes per hash algo for state1 size field in cd_ctrl
 * This is digest size rounded up to nearest quadword
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
						QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
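
/*
 * Worked example (assuming the standard sizes from icp_qat_hw.h): a SHA-1
 * state1 of 20 bytes rounds up to 24 bytes with QAT_HW_DEFAULT_ALIGNMENT
 * of 8, i.e. QAT_HW_ROUND_UP(20, 8) == 24.
 */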
/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return ICP_QAT_HW_SHA224_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return ICP_QAT_HW_SHA384_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return ICP_QAT_HW_MD5_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
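
/*
 * Note: the sizes returned above are the STATE1 sizes of the underlying
 * hash state (as defined in icp_qat_hw.h) rather than the truncated wire
 * digest, so SHA-224 and SHA-384 report the full SHA-256/SHA-512 state;
 * partial_hash_compute() below relies on this when it copies the whole
 * intermediate state.
 */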
/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		return MD5_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
	return -EFAULT;
}
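
/*
 * The partial_hash_*() helpers below run a single compression-function
 * iteration (one block, no padding, no finalisation) and copy the raw
 * intermediate state out of the OpenSSL context.  That intermediate state
 * is what the QAT hardware expects as a pre-computed HMAC state.
 */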
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA224_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA384_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
{
	MD5_CTX ctx;

	if (!MD5_Init(&ctx))
		return -EFAULT;
	MD5_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);

	return 0;
}
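
/*
 * OpenSSL keeps the SHA intermediate state as host-endian words, while the
 * QAT hardware expects it big-endian.  partial_hash_compute() therefore
 * byte-swaps the state in 32-bit words for SHA-1/224/256 and in 64-bit
 * words for SHA-384/512; the MD5 state is defined little-endian and is
 * copied through unchanged.
 */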
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (partial_hash_sha224(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (partial_hash_sha384(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (partial_hash_md5(data_in, data_out))
			return -EFAULT;
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}
	return 0;
}
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
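
/*
 * qat_alg_do_precomputes() fills the state buffer that the hardware uses as
 * its initial hash state:
 *  - HMAC (RFC 2104): state1 = compress(key XOR ipad) and
 *    state2 = compress(key XOR opad), written back to back so the engine
 *    can resume the inner and outer hashes without seeing the key.
 *  - AES-XCBC-MAC (RFC 3566): K1/K2/K3 are derived by AES-encrypting the
 *    constant blocks 0x01..01, 0x02..02 and 0x03..03 under the auth key.
 *  - GCM/GMAC: the hash key H = AES_K(0^128) is stored, followed by the
 *    (zeroed) len_a and E(CTR0) fields.
 */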
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	PMD_INIT_FUNC_TRACE();
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		static uint8_t qat_aes_xcbc_key_seed[
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		int x;
		AES_KEY enc_key;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
		if (in == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory");
			return -ENOMEM;
		}

		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
				rte_free(in -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
				memset(out -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				return -EFAULT;
			}
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
		return 0;
	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory");
			return -ENOMEM;
		}

		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
			&enc_key) != 0) {
			rte_free(in);
			return -EFAULT;
		}
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}

	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size <= 0)
		return -EFAULT;
	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
						uint16_t proto)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					proto);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
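
/*
 * Every request built from this template therefore defaults to flat
 * buffer descriptors with a 64-bit content-descriptor address, carries the
 * IV in the 16-byte request field, performs no partial-packet processing
 * and keeps no state between requests; only the protocol field (e.g. GCM
 * or SNOW 3G) varies per session.
 */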
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint32_t total_key_size;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t cipher_offset, cd_size;
	uint32_t wordIndex = 0;
	uint32_t *temp_key = NULL;
	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
		return -EFAULT;
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/*
		 * CTR Streaming ciphers are a special case. Decrypt = encrypt
		 * Overriding default values previously set
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_padding_sz =
					(2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	} else {
		total_key_size = cipherkeylen;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	}
	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
	cipher_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;

	header->service_cmd_id = cdesc->qat_cmd;
	qat_alg_init_common_hdr(header, proto);

	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;

	cipher->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
					cdesc->qat_cipher_alg, key_convert,
					cdesc->qat_dir);

	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
		temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
					sizeof(struct icp_qat_hw_cipher_config)
					+ cipherkeylen);
		memcpy(cipher->key, cipherkey, cipherkeylen);
		memcpy(temp_key, cipherkey, cipherkeylen);

		/* XOR Key with KASUMI F8 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
								wordIndex++)
			temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;

		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen + cipherkeylen;
	} else {
		memcpy(cipher->key, cipherkey, cipherkeylen);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
					cipherkeylen;
	}

	if (total_key_size > cipherkeylen) {
		uint32_t padding_size = total_key_size - cipherkeylen;

		if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
			&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
			/* K3 not provided so use K1 = K3 */
			memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
		else
			memset(cdesc->cd_cur_ptr, 0, padding_size);
		cdesc->cd_cur_ptr += padding_size;
	}
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
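
/*
 * Resulting content descriptor layout for the cipher direction (sizes and
 * offsets are passed to the firmware in quadwords):
 *
 *   cd_paddr -> [ icp_qat_hw_cipher_config | cipher key
 *                 (| KASUMI key XOR F8 modifier, or 3DES/SNOW 3G padding) ]
 *
 * cipher_cfg_offset records where this block starts inside cdesc->cd, so a
 * combined cipher+hash descriptor can place it after the auth setup block.
 */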
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t add_auth_data_length,
						uint32_t digestsize,
						unsigned int operation)
{
	struct icp_qat_hw_auth_setup *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
	uint16_t state1_size = 0, state2_size = 0;
	uint16_t hash_offset, cd_size;
	uint32_t *aad_len = NULL;
	uint32_t wordIndex = 0;
	uint32_t *pTempKey;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
		return -EFAULT;
	}

	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
	}

	/*
	 * Setup the inner hash config
	 */
	hash_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
	hash->auth_config.reserved = 0;
	hash->auth_config.config =
			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
				cdesc->qat_hash_alg, digestsize);

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
		hash->auth_counter.counter = 0;
	else
		hash->auth_counter.counter = rte_bswap32(
				qat_hash_get_block_size(cdesc->qat_hash_alg));

	cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);

	/*
	 * cd_cur_ptr now points at the state1 information.
	 */
	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA224:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA384:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
			authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		proto = ICP_QAT_FW_LA_GCM_PROTO;
		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
			&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write (the length of AAD) into bytes 16-19 of state2
		 * in big-endian format. This field is 8 bytes
		 */
		auth_param->u2.aad_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16);
		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;

		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
					ICP_QAT_HW_GALOIS_H_SZ);
		*aad_len = rte_bswap32(add_auth_data_length);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);

		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				(cdesc->cd_cur_ptr + state1_size + state2_size);
		cipherconfig->cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
			ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->key, authkey, authkeylen);
		memset(cipherconfig->key + authkeylen,
				0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
		auth_param->hash_state_sz =
				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
		break;
	case ICP_QAT_HW_AUTH_ALGO_MD5:
		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
			authkey, authkeylen, cdesc->cd_cur_ptr,
			&state1_size)) {
			PMD_DRV_LOG(ERR, "(MD5)precompute failed");
			return -EFAULT;
		}
		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_NULL:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_NULL);
		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
		state1_size = qat_hash_get_state1_size(
				ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
		state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
		pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
							+ authkeylen);
		/*
		 * The Inner Hash Initial State2 block must contain IK
		 * (Initialisation Key), followed by IK XOR-ed with KM
		 * (Key Modifier): IK||(IK^KM).
		 */
		/* write the auth key */
		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
		/* initialise temp key with auth key */
		memcpy(pTempKey, authkey, authkeylen);
		/* XOR Key with KASUMI F9 key modifier at 4 bytes level */
		for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
			pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	/* Request template setup */
	qat_alg_init_common_hdr(header, proto);
	header->service_cmd_id = cdesc->qat_cmd;

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;
	auth_param->auth_res_sz = digestsize;

	hash_cd_ctrl->inner_state2_sz = state2_size;
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);

	cdesc->cd_cur_ptr += state1_size + state2_size;
	cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;

	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;

	return 0;
}
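
/*
 * Resulting content descriptor layout for the auth direction:
 *
 *   [ icp_qat_hw_auth_setup | state1 (e.g. partial hash of key^ipad) |
 *     state2 (e.g. partial hash of key^opad, GHASH key H, or IK||IK^KM) ]
 *
 * inner_state2_offset is expressed in quadwords from the start of the
 * content descriptor: hash_cfg_offset plus the auth setup block and the
 * 8-byte-aligned state1 size.
 */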
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_KASUMI_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case QAT_3DES_KEY_SZ_OPT1:
	case QAT_3DES_KEY_SZ_OPT2:
		*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;