/*
 *   This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   Contact Information:
 *   qat-linux@intel.com
 *
 *   BSD LICENSE
 *   Copyright(c) 2015-2016 Intel Corporation.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>

#include "../qat_logs.h"
#include "qat_algs.h"

#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
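
/*
 * Builders for the QAT content descriptors: the cipher/auth hardware setup
 * blocks and the firmware request template stored in each session.
 * OpenSSL is used only here, at session-setup time, to derive precomputed
 * key/hash state; no OpenSSL calls happen on the data path.
 */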
/*
 * Returns size in bytes per hash algo for the state1 size field in cd_ctrl.
 * This is the digest size rounded up to the nearest quadword.
 */
static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT);
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum state1 size in this case */
		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT);
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
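
/*
 * For example, assuming QAT_HW_DEFAULT_ALIGNMENT is 8: SHA1's 20-byte
 * state1 rounds up to 24 bytes, while SHA256's 32-byte state1 is already
 * quadword-aligned and is returned unchanged.
 */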
/* returns digest size in bytes per hash algo */
static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum digest size in this case */
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
/* returns block size in bytes per hash algo */
static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return SHA_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return SHA256_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return SHA512_CBLOCK;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
		return 16;
	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
		/* return maximum block size in this case */
		return SHA512_CBLOCK;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
		return -EFAULT;
	}
}
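
/*
 * The partial-hash helpers below run a single SHAx_Transform() over one
 * block of input and copy the raw, non-finalized internal state out of
 * the OpenSSL context; this intermediate state is what the hardware needs
 * to resume an HMAC computation from the ipad/opad boundary.
 */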
static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
{
	SHA_CTX ctx;

	if (!SHA1_Init(&ctx))
		return -EFAULT;
	SHA1_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
	SHA256_CTX ctx;

	if (!SHA256_Init(&ctx))
		return -EFAULT;
	SHA256_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
	SHA512_CTX ctx;

	if (!SHA512_Init(&ctx))
		return -EFAULT;
	SHA512_Transform(&ctx, data_in);
	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
	return 0;
}
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
			uint8_t *data_in,
			uint8_t *data_out)
{
	int digest_size;
	uint8_t digest[qat_hash_get_digest_size(
			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint32_t *hash_state_out_be32;
	uint64_t *hash_state_out_be64;
	int i;

	PMD_INIT_FUNC_TRACE();
	digest_size = qat_hash_get_digest_size(hash_alg);
	if (digest_size <= 0)
		return -EFAULT;

	hash_state_out_be32 = (uint32_t *)data_out;
	hash_state_out_be64 = (uint64_t *)data_out;

	switch (hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (partial_hash_sha1(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (partial_hash_sha256(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
			*hash_state_out_be32 =
				rte_bswap32(*(((uint32_t *)digest)+i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (partial_hash_sha512(data_in, digest))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
			*hash_state_out_be64 =
				rte_bswap64(*(((uint64_t *)digest)+i));
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
		return -EFAULT;
	}
	return 0;
}
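
/*
 * Note on the byte swaps above: OpenSSL holds the intermediate hash state
 * in host-endian words, while QAT expects state1/state2 in big-endian
 * layout, so on the little-endian hosts this PMD targets each 32-bit
 * (SHA1/SHA256) or 64-bit (SHA512) state word is swapped on copy.
 */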
#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
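
/*
 * qat_alg_do_precomputes() derives the per-session authentication state
 * the hardware consumes directly:
 *  - AES-XCBC-MAC: the three RFC 3566 subkeys K1/K2/K3, produced by
 *    AES-encrypting the 0x01../0x02../0x03.. seed blocks under the user key;
 *  - GCM/GMAC: the GHASH key H = AES_K(0^128);
 *  - SHA HMAC: the RFC 2104 ipad/opad partial hashes, i.e. the hash state
 *    after absorbing the (key ^ 0x36..) and (key ^ 0x5c..) one-block pads.
 */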
static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
				const uint8_t *auth_key,
				uint16_t auth_keylen,
				uint8_t *p_state_buf,
				uint16_t *p_state_len)
{
	int block_size;
	uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
	int i;

	PMD_INIT_FUNC_TRACE();
	if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		static uint8_t qat_aes_xcbc_key_seed[
					ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
			0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
		};

		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		int x;
		AES_KEY enc_key;

		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
		if (in == NULL)
			return -ENOMEM;
		rte_memcpy(in, qat_aes_xcbc_key_seed,
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
			if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
					&enc_key) != 0) {
				rte_free(in -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
				memset(out -
					(x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
					0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
				return -EFAULT;
			}
			AES_encrypt(in, out, &enc_key);
			in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
			out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
		}
		*p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		/* rewind to the base of the allocation before freeing */
		rte_free(in - x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
		return 0;
	} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		uint8_t *in = NULL;
		uint8_t *out = p_state_buf;
		AES_KEY enc_key;

		memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
		in = rte_zmalloc("working mem for key",
				ICP_QAT_HW_GALOIS_H_SZ, 16);
		if (in == NULL)
			return -ENOMEM;
		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
				&enc_key) != 0) {
			rte_free(in);
			return -EFAULT;
		}
		/* H = AES_K(0^128), the GHASH key */
		AES_encrypt(in, out, &enc_key);
		*p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		rte_free(in);
		return 0;
	}
	block_size = qat_hash_get_block_size(hash_alg);
	if (block_size <= 0)
		return -EFAULT;

	/* init ipad and opad from key and xor with fixed values */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);

	if (auth_keylen > (unsigned int)block_size) {
		PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
		return -EFAULT;
	}
	rte_memcpy(ipad, auth_key, auth_keylen);
	rte_memcpy(opad, auth_key, auth_keylen);

	for (i = 0; i < block_size; i++) {
		uint8_t *ipad_ptr = ipad + i;
		uint8_t *opad_ptr = opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	/* do partial hash of ipad and copy to state1 */
	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "ipad precompute failed");
		return -EFAULT;
	}

	/*
	 * State len is a multiple of 8, so may be larger than the digest.
	 * Put the partial hash of opad state_len bytes after state1.
	 */
	*p_state_len = qat_hash_get_state1_size(hash_alg);
	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
		memset(ipad, 0, block_size);
		memset(opad, 0, block_size);
		PMD_DRV_LOG(ERR, "opad precompute failed");
		return -EFAULT;
	}

	/* don't leave data lying around */
	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	return 0;
}
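
/*
 * Illustrative call (a sketch, not driver code; key and keylen are
 * hypothetical): an HMAC-SHA256 session setup would do
 *
 *	uint16_t state1_sz = 0;
 *	if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
 *			key, keylen, hash->sha.state1, &state1_sz))
 *		return -EFAULT;
 *
 * leaving the ipad partial hash in state1 and the opad partial hash
 * state1_sz bytes after it, as the descriptor builders below expect.
 */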
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	PMD_INIT_FUNC_TRACE();
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					QAT_COMN_PTR_TYPE_FLAT);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
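
/*
 * The two content-descriptor builders below lay out the hardware setup
 * blocks inside cdesc->cd: for HASH_CIPHER commands (except Snow3G) the
 * auth block comes first and the cipher block follows it; otherwise the
 * cipher block sits at offset 0. The cfg_offset fields handed to firmware
 * are expressed in 8-byte (quadword) units, hence the >> 3 shifts.
 */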
int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
						uint8_t *cipherkey,
						uint32_t cipherkeylen)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	enum icp_qat_hw_cipher_convert key_convert;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t cipher_offset = 0;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
		cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		/* auth block precedes the cipher block in the descriptor */
		cipher = (struct icp_qat_hw_cipher_algo_blk *)
				((char *)&cdesc->cd +
				sizeof(struct icp_qat_hw_auth_algo_blk));
		cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
	} else {
		cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
	}
	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/* CTR streaming ciphers are a special case: decrypt = encrypt.
		 * Override the default values set above.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	/* For Snow3G, set key convert and other bits */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
			ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
			ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
		}
	}
	cipher->aes.cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
			cdesc->qat_cipher_alg, key_convert,
			cdesc->qat_dir);
	memcpy(cipher->aes.key, cipherkey, cipherkeylen);

	proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;

	/* Request template setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = cdesc->qat_cmd;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
	/* Configure the common header protocol flags */
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
	/* Cipher CD config setup */
	if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
		cipher_cd_ctrl->cipher_key_sz =
			(ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
		cipher_cd_ctrl->cipher_state_sz =
			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
		if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
				header->serv_specif_flags,
				ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
		}
	} else {
		cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
	}
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else {
		PMD_DRV_LOG(ERR, "invalid param, only authenticated "
				"encryption supported");
		return -EFAULT;
	}
	return 0;
}
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
						uint8_t *authkey,
						uint32_t authkeylen,
						uint32_t add_auth_data_length,
						uint32_t digestsize)
{
	struct icp_qat_hw_cipher_algo_blk *cipher;
	struct icp_qat_hw_auth_algo_blk *hash;
	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));
	enum icp_qat_hw_cipher_convert key_convert;
	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/Snow3G */
	uint16_t state1_size = 0;
	uint16_t state2_size = 0;
	uint16_t cipher_offset = 0, hash_offset = 0;

	PMD_INIT_FUNC_TRACE();

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
		cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
		cipher = (struct icp_qat_hw_cipher_algo_blk *)
				((char *)&cdesc->cd +
				sizeof(struct icp_qat_hw_auth_algo_blk));
		cipher_offset = ((char *)cipher - (char *)hash);
	} else {
		cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
		hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
				sizeof(struct icp_qat_hw_cipher_algo_blk));
		hash_offset = ((char *)hash - (char *)cipher);
	}
	if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	} else {
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_CMP_AUTH_RES);
	}

	if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		/* CTR streaming ciphers are a special case: decrypt = encrypt.
		 * Override the default values set above.
		 */
		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	} else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
	else
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;

	cipher->aes.cipher_config.val =
		ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
			cdesc->qat_cipher_alg, key_convert,
			cdesc->qat_dir);

	hash->sha.inner_setup.auth_config.reserved = 0;
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
			cdesc->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		hash->sha.inner_setup.auth_counter.counter = 0;
		hash->sha.outer_setup.auth_config.reserved = 0;
		cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
				((char *)&cdesc->cd +
				sizeof(struct icp_qat_hw_auth_algo_blk)
				+ 16);
		cipherconfig->aes.cipher_config.val =
			ICP_QAT_HW_CIPHER_CONFIG_BUILD(
				ICP_QAT_HW_CIPHER_ECB_MODE,
				ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
				ICP_QAT_HW_CIPHER_KEY_CONVERT,
				ICP_QAT_HW_CIPHER_ENCRYPT);
		memcpy(cipherconfig->aes.key, authkey, authkeylen);
		memset(cipherconfig->aes.key + authkeylen, 0,
				ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
	}
	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
				authkey, authkeylen,
				(uint8_t *)(hash->sha.state1 +
				ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ),
				&state2_size)) {
			PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
			return -EFAULT;
		}
	} else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
		(cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
				authkey, authkeylen,
				(uint8_t *)(hash->sha.state1 +
				ICP_QAT_HW_GALOIS_128_STATE1_SZ),
				&state2_size)) {
			PMD_DRV_LOG(ERR, "(GCM)precompute failed");
			return -EFAULT;
		}
		/*
		 * Write the length of the AAD into bytes 16-19 of state2
		 * in big-endian format; the field itself is 8 bytes wide.
		 */
		*(uint32_t *)&(hash->sha.state1[
				ICP_QAT_HW_GALOIS_128_STATE1_SZ +
				ICP_QAT_HW_GALOIS_H_SZ]) =
			rte_bswap32(add_auth_data_length);
		proto = ICP_QAT_FW_LA_GCM_PROTO;
	} else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
		state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
	} else {
		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
				authkey, authkeylen,
				(uint8_t *)(hash->sha.state1),
				&state1_size)) {
			PMD_DRV_LOG(ERR, "(SHA)precompute failed");
			return -EFAULT;
		}
	}
	/* Request template setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = cdesc->qat_cmd;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	/* Configure the common header protocol flags */
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;

	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					ICP_QAT_FW_CIPH_IV_64BIT_PTR);
		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_RET_AUTH_RES);
		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
					ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	}

	/* Cipher CD config setup */
	if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_AUTH) {
		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
	} else {
		/* auth-only: no cipher slice is configured */
		cipher_cd_ctrl->cipher_state_sz = 0;
		cipher_cd_ctrl->cipher_cfg_offset = 0;
	}
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;
	hash_cd_ctrl->inner_state1_sz = state1_size;

	switch (cdesc->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state2_sz =
			RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
		hash_cd_ctrl->inner_state2_sz =
				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
		hash_cd_ctrl->inner_state1_sz =
				ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
		memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
		break;
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
				ICP_QAT_HW_GALOIS_LEN_A_SZ +
				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
		hash_cd_ctrl->inner_state1_sz =
				ICP_QAT_HW_GALOIS_128_STATE1_SZ;
		memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
		hash_cd_ctrl->inner_state2_sz =
				ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
		hash_cd_ctrl->inner_state1_sz =
				ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ;
		memset(hash->sha.state1, 0, ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ);
		break;
	default:
		PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
		return -EFAULT;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
					>> 3);
	auth_param->auth_res_sz = digestsize;
	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_AUTH);
		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_CIPHER);
		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
					ICP_QAT_FW_SLICE_DRAM_WR);
	} else {
		PMD_DRV_LOG(ERR, "invalid param, only authenticated "
				"encryption supported");
		return -EFAULT;
	}
	return 0;
}
static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	PMD_INIT_FUNC_TRACE();
	rte_memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
			sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
	struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	PMD_INIT_FUNC_TRACE();
	qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}
void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
	struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	PMD_INIT_FUNC_TRACE();
	qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}
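
/*
 * Illustrative pairing (a sketch; the arguments are hypothetical): an
 * AES-CBC session builds one descriptor per direction, e.g.
 *
 *	qat_alg_ablkcipher_init_enc(enc_sess, ICP_QAT_HW_CIPHER_ALGO_AES128,
 *			key, ICP_QAT_HW_AES_128_KEY_SZ);
 *	qat_alg_ablkcipher_init_dec(dec_sess, ICP_QAT_HW_CIPHER_ALGO_AES128,
 *			key, ICP_QAT_HW_AES_128_KEY_SZ);
 *
 * The two differ only in cipher_config.val (CBC_ENC vs CBC_DEC); the
 * decrypt variant additionally requests hardware key conversion.
 */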
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_AES_128_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case ICP_QAT_HW_AES_192_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case ICP_QAT_HW_AES_256_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
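
/* For reference: 16-, 24- and 32-byte keys select AES128/AES192/AES256. */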
int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
	switch (key_len) {
	case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
		*alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;