X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fcrypto%2Fqat%2Fqat_adf%2Fqat_algs_build_desc.c;h=26f854c2117765a73091f6c2ce6389c40acd40f7;hb=c3f15def2ebe9cc255cf0e5cf32aa171f5b4326d;hp=8900668dcd73f4341b72c5e0c416ff916d962f6b;hpb=6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9;p=deb_dpdk.git

diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 8900668d..26f854c2 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -17,7 +17,7 @@
  * qat-linux@intel.com
  *
  * BSD LICENSE
- * Copyright(c) 2015-2016 Intel Corporation.
+ * Copyright(c) 2015-2017 Intel Corporation.
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -54,12 +54,31 @@
 #include <rte_crypto_sym.h>
 
 #include "../qat_logs.h"
-#include "qat_algs.h"
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
 #include <openssl/aes.h>	/* Needed to calculate pre-compute values */
 #include <openssl/md5.h>	/* Needed to calculate pre-compute values */
 
+#include "qat_algs.h"
+
+/* returns block size in bytes per cipher algo */
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
+{
+	switch (qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_DES:
+		return ICP_QAT_HW_DES_BLK_SZ;
+	case ICP_QAT_HW_CIPHER_ALGO_3DES:
+		return ICP_QAT_HW_3DES_BLK_SZ;
+	case ICP_QAT_HW_CIPHER_ALGO_AES128:
+	case ICP_QAT_HW_CIPHER_ALGO_AES192:
+	case ICP_QAT_HW_CIPHER_ALGO_AES256:
+		return ICP_QAT_HW_AES_BLK_SZ;
+	default:
+		PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+		return -EFAULT;
+	};
+	return -EFAULT;
+}
 
 /*
  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
@@ -90,6 +109,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
@@ -99,6 +121,12 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_NULL:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
 		/* return maximum state1 size in this case */
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -331,6 +359,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 
 		in = rte_zmalloc("working mem for key",
 				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+		if (in == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory");
+			return -ENOMEM;
+		}
+
 		rte_memcpy(in, qat_aes_xcbc_key_seed,
 				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
 		for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
@@ -361,6 +394,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 				ICP_QAT_HW_GALOIS_E_CTR0_SZ);
 		in = rte_zmalloc("working mem for key",
 				ICP_QAT_HW_GALOIS_H_SZ, 16);
+		if (in == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory");
+			return -ENOMEM;
+		}
+
 		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
 		if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
 			&enc_key) != 0) {
@@ -422,7 +460,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 }
 
 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-		uint16_t proto)
+		enum qat_crypto_proto_flag proto_flags)
 {
 	PMD_INIT_FUNC_TRACE();
 	header->hdr_flags =
@@ -435,10 +473,58 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
 					ICP_QAT_FW_LA_PARTIAL_NONE);
 	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
 					ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				proto);
+
+	switch (proto_flags) {
+	case QAT_CRYPTO_PROTO_FLAG_NONE:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_CCM:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_CCM_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_GCM:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_GCM_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_ZUC:
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	}
+
 	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * Snow3G and ZUC should never use this function
+ * and set its protocol flag in both cipher and auth part of content
+ * descriptor building function
+ */
+static enum qat_crypto_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+	int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+	enum qat_crypto_proto_flag qat_proto_flag =
+			QAT_CRYPTO_PROTO_FLAG_NONE;
+
+	switch (proto) {
+	case ICP_QAT_FW_LA_GCM_PROTO:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+		break;
+	case ICP_QAT_FW_LA_CCM_PROTO:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+		break;
+	}
+
+	return qat_proto_flag;
 }
 
 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
@@ -453,8 +539,9 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
 	enum icp_qat_hw_cipher_convert key_convert;
+	enum qat_crypto_proto_flag qat_proto_flag =
+		QAT_CRYPTO_PROTO_FLAG_NONE;
 	uint32_t total_key_size;
-	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
 	uint16_t cipher_offset, cd_size;
 	uint32_t wordIndex = 0;
 	uint32_t *temp_key = NULL;
@@ -494,7 +581,9 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 		 */
 		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
-	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+		|| cdesc->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
 	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
@@ -506,7 +595,8 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
 		cipher_cd_ctrl->cipher_state_sz =
 			ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
-		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
 		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
@@ -515,21 +605,35 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
 		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
-		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+		qat_proto_flag =
+			qat_get_crypto_proto_flag(header->serv_specif_flags);
+	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
+		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
+		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
+		qat_proto_flag =
+			qat_get_crypto_proto_flag(header->serv_specif_flags);
+	} else if (cdesc->qat_cipher_alg ==
+		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
+			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+		cipher_cd_ctrl->cipher_state_sz =
+			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+		cdesc->min_qat_dev_gen = QAT_GEN2;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
-		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+		qat_proto_flag =
+			qat_get_crypto_proto_flag(header->serv_specif_flags);
 	}
 
 	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
 	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
 	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
 
 	header->service_cmd_id = cdesc->qat_cmd;
-	qat_alg_init_common_hdr(header, proto);
+	qat_alg_init_common_hdr(header, qat_proto_flag);
 	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
-
 	cipher->cipher_config.val =
 	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
 					cdesc->qat_cipher_alg, key_convert,
@@ -574,7 +678,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 
 int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 						uint8_t *authkey,
 						uint32_t authkeylen,
-						uint32_t add_auth_data_length,
+						uint32_t aad_length,
 						uint32_t digestsize,
 						unsigned int operation)
 {
@@ -590,12 +694,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 			(struct icp_qat_fw_la_auth_req_params *)
 			((char *)&req_tmpl->serv_specif_rqpars +
 			sizeof(struct icp_qat_fw_la_cipher_req_params));
-	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
 	uint16_t state1_size = 0, state2_size = 0;
 	uint16_t hash_offset, cd_size;
 	uint32_t *aad_len = NULL;
 	uint32_t wordIndex = 0;
 	uint32_t *pTempKey;
+	enum qat_crypto_proto_flag qat_proto_flag =
+		QAT_CRYPTO_PROTO_FLAG_NONE;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -645,7 +750,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 			cdesc->qat_hash_alg, digestsize);
 
 	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
-		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
 		hash->auth_counter.counter = 0;
 	else
 		hash->auth_counter.counter = rte_bswap32(
@@ -708,7 +814,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		proto = ICP_QAT_FW_LA_GCM_PROTO;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
 		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
 			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
@@ -721,16 +827,17 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		 * in big-endian format. This field is 8 bytes
 		 */
 		auth_param->u2.aad_sz =
-			RTE_ALIGN_CEIL(add_auth_data_length, 16);
+			RTE_ALIGN_CEIL(aad_length, 16);
 		auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
 
 		aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
 					ICP_QAT_HW_GALOIS_128_STATE1_SZ +
 					ICP_QAT_HW_GALOIS_H_SZ);
-		*aad_len = rte_bswap32(add_auth_data_length);
+		*aad_len = rte_bswap32(aad_length);
+		cdesc->aad_len = aad_length;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
 		state1_size = qat_hash_get_state1_size(
 			ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
 		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
@@ -748,8 +855,25 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 			0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
 		cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
 				authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
-		auth_param->hash_state_sz =
-				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+		auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		hash->auth_config.config =
+			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+				cdesc->qat_hash_alg, digestsize);
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+		state1_size = qat_hash_get_state1_size(
+				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
+		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
+		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
+
+		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+		cdesc->cd_cur_ptr += state1_size + state2_size
+			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+		auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+		cdesc->min_qat_dev_gen = QAT_GEN2;
+
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
 		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
@@ -761,6 +885,34 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_NULL:
+		state1_size = qat_hash_get_state1_size(
+				ICP_QAT_HW_AUTH_ALGO_NULL);
+		state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
+		break;
+	case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+		state1_size = qat_hash_get_state1_size(
+				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
+		state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
+				ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
+
+		if (aad_length > 0) {
+			aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
+				ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			auth_param->u2.aad_sz =
+				RTE_ALIGN_CEIL(aad_length,
+					ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+		} else {
+			auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
+		}
+
+		cdesc->aad_len = aad_length;
+		hash->auth_counter.counter = 0;
+
+		hash_cd_ctrl->outer_prefix_sz = digestsize;
+		auth_param->hash_state_sz = digestsize;
+
+		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
 		state1_size = qat_hash_get_state1_size(
@@ -788,7 +940,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 	}
 
 	/* Request template setup */
-	qat_alg_init_common_hdr(header, proto);
+	qat_alg_init_common_hdr(header, qat_proto_flag);
 	header->service_cmd_id = cdesc->qat_cmd;
 
 	/* Auth CD config setup */
@@ -832,6 +984,19 @@ int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	return 0;
 }
 
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+		enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_AES_128_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 {
 	switch (key_len) {
@@ -856,6 +1021,18 @@ int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	return 0;
 }
 
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_DES_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_DES;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 {
 	switch (key_len) {
@@ -868,3 +1045,15 @@ int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	}
 	return 0;
 }
+
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
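
The helpers this patch adds at the end of the file (qat_alg_validate_aes_docsisbpi_key(), qat_alg_validate_des_key(), qat_alg_validate_zuc_key()) and the new qat_cipher_get_block_size() all follow one shape: a switch on a hardware constant that either fills in an ICP_QAT_HW_CIPHER_ALGO_* enum from a key length or maps an algorithm to its block size, returning -EINVAL or -EFAULT for anything unsupported. The standalone sketch below illustrates that calling pattern only; the FAKE_* names and sizes are illustrative stand-ins, not the ICP_QAT_HW_* macros or driver functions from this patch.

/*
 * Minimal sketch of the validate-then-query pattern used by the new
 * helpers.  Assumed stand-in constants; not part of the QAT headers.
 */
#include <errno.h>
#include <stdio.h>

enum fake_cipher_algo {
	FAKE_CIPHER_ALGO_DES,
	FAKE_CIPHER_ALGO_3DES,
	FAKE_CIPHER_ALGO_AES128,
};

/* stand-ins for the *_KEY_SZ / *_BLK_SZ macros, sizes in bytes */
#define FAKE_DES_KEY_SZ		8
#define FAKE_DES_BLK_SZ		8
#define FAKE_3DES_KEY_SZ	24
#define FAKE_3DES_BLK_SZ	8
#define FAKE_AES_128_KEY_SZ	16
#define FAKE_AES_BLK_SZ		16

/* Map a key length onto an algorithm enum, as the validate helpers do. */
static int fake_validate_key(int key_len, enum fake_cipher_algo *alg)
{
	switch (key_len) {
	case FAKE_DES_KEY_SZ:
		*alg = FAKE_CIPHER_ALGO_DES;
		break;
	case FAKE_3DES_KEY_SZ:
		*alg = FAKE_CIPHER_ALGO_3DES;
		break;
	case FAKE_AES_128_KEY_SZ:
		*alg = FAKE_CIPHER_ALGO_AES128;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* Map an algorithm onto its block size, as qat_cipher_get_block_size() does. */
static int fake_get_block_size(enum fake_cipher_algo alg)
{
	switch (alg) {
	case FAKE_CIPHER_ALGO_DES:
		return FAKE_DES_BLK_SZ;
	case FAKE_CIPHER_ALGO_3DES:
		return FAKE_3DES_BLK_SZ;
	case FAKE_CIPHER_ALGO_AES128:
		return FAKE_AES_BLK_SZ;
	default:
		return -EFAULT;
	}
}

int main(void)
{
	enum fake_cipher_algo alg;
	int key_len = 24;	/* e.g. a 3DES key */

	/* validate the key length first, then query properties of the algo */
	if (fake_validate_key(key_len, &alg) != 0) {
		printf("unsupported key length %d\n", key_len);
		return 1;
	}
	printf("key length %d -> algo %d, block size %d bytes\n",
	       key_len, alg, fake_get_block_size(alg));
	return 0;
}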