void *kd;
/** TODO: add linked alg support **/
- if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ if (key->is_link)
return;
if (kop == VNET_CRYPTO_KEY_OP_DEL)
u64 pad[block_qw], key_hash[block_qw];
clib_memset_u8 (key_hash, 0, HMAC_MAX_BLOCK_SIZE);
- if (vec_len (key->data) <= ad->block_size)
- clib_memcpy_fast (key_hash, key->data, vec_len (key->data));
+ if (key->length <= ad->block_size)
+ clib_memcpy_fast (key_hash, key->data, key->length);
else
- ad->hash_fn (key->data, vec_len (key->data), key_hash);
+ ad->hash_fn (key->data, key->length, key_hash);
for (i = 0; i < block_qw; i++)
pad[i] = key_hash[i] ^ 0x3636363636363636;
vec_validate_aligned (ptd->hmac_ctx, idx, CLIB_CACHE_LINE_BYTES);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
ctx = HMAC_CTX_new ();
- HMAC_Init_ex (ctx, key->data, vec_len (key->data), md, NULL);
+ HMAC_Init_ex (ctx, key->data, key->length, md, NULL);
ptd->hmac_ctx[idx] = ctx;
#else
HMAC_CTX_init (&(ptd->_hmac_ctx));
for (ptd = per_thread_data; ptd - per_thread_data < num_threads; ptd++)
{
ctx = ptd->hmac_ctx[idx];
- HMAC_Init_ex (ctx, key->data, vec_len (key->data), md, NULL);
+ HMAC_Init_ex (ctx, key->data, key->length, md, NULL);
}
}
else if (VNET_CRYPTO_KEY_OP_DEL == kop)
crypto_openssl_main_t *cm = &crypto_openssl_main;
/** TODO: add linked alg support **/
- if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ if (key->is_link)
return;
if (cm->ctx_fn[key->alg] == 0)
u32 index;
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
- vnet_crypto_key_t *key;
+ vnet_crypto_key_t *key, **kp;
+ u32 alloc_sz = sizeof (vnet_crypto_key_t) + round_pow2 (length, 16);
u8 need_barrier_sync = 0;
if (need_barrier_sync)
vlib_worker_thread_barrier_sync (vm);
- pool_get_zero (cm->keys, key);
+ pool_get (cm->keys, kp);
if (need_barrier_sync)
vlib_worker_thread_barrier_release (vm);
- index = key - cm->keys;
- key->type = VNET_CRYPTO_KEY_TYPE_DATA;
- key->alg = alg;
- vec_validate_aligned (key->data, length - 1, CLIB_CACHE_LINE_BYTES);
+ key = clib_mem_alloc_aligned (alloc_sz, _Alignof (vnet_crypto_key_t));
+ kp[0] = key;
+ index = kp - cm->keys;
+ *key = (vnet_crypto_key_t){
+ .index = index,
+ .alg = alg,
+ .length = length,
+ };
clib_memcpy (key->data, data, length);
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
- vnet_crypto_key_t *key = pool_elt_at_index (cm->keys, index);
+ vnet_crypto_key_t *key = cm->keys[index];
+ u32 sz = sizeof (vnet_crypto_key_t) + round_pow2 (key->length, 16);
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
engine->key_op_handler (VNET_CRYPTO_KEY_OP_DEL, index);
- if (key->type == VNET_CRYPTO_KEY_TYPE_DATA)
- {
- clib_memset (key->data, 0xfe, vec_len (key->data));
- vec_free (key->data);
- }
- else if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
- {
- key->index_crypto = key->index_integ = ~0;
- }
-
- pool_put (cm->keys, key);
+ clib_memset (key, 0xfe, sz);
+ clib_mem_free (key);
+ pool_put_index (cm->keys, index);
}
void
vnet_crypto_key_index_t index_crypto,
vnet_crypto_key_index_t index_integ)
{
- u32 index;
+ u32 index, need_barrier_sync;
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
- vnet_crypto_key_t *key_crypto, *key_integ, *key;
+ vnet_crypto_key_t *key_crypto, *key_integ, *key, **kp;
vnet_crypto_async_alg_t linked_alg;
- key_crypto = pool_elt_at_index (cm->keys, index_crypto);
- key_integ = pool_elt_at_index (cm->keys, index_integ);
+ key_crypto = cm->keys[index_crypto];
+ key_integ = cm->keys[index_integ];
linked_alg = vnet_crypto_link_algs (key_crypto->alg, key_integ->alg);
if (linked_alg == ~0)
return ~0;
- pool_get_zero (cm->keys, key);
- index = key - cm->keys;
- key->type = VNET_CRYPTO_KEY_TYPE_LINK;
- key->index_crypto = index_crypto;
- key->index_integ = index_integ;
- key->async_alg = linked_alg;
+ need_barrier_sync = pool_get_will_expand (cm->keys);
+ /* Expanding cm->keys reallocates (moves) the pool vector, which would
+ invalidate concurrent readers' pointers — sync the worker barrier first. */
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
+
+ pool_get (cm->keys, kp);
+
+ if (need_barrier_sync)
+ vlib_worker_thread_barrier_release (vm);
+
+ key = clib_mem_alloc_aligned (sizeof (vnet_crypto_key_t),
+ _Alignof (vnet_crypto_key_t));
+ kp[0] = key;
+ index = kp - cm->keys;
+ *key = (vnet_crypto_key_t){
+ .index = index,
+ .is_link = 1,
+ .index_crypto = index_crypto,
+ .index_integ = index_integ,
+ .async_alg = linked_alg,
+ };
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
typedef struct
{
+ u32 index;
+ u16 length;
+ u8 is_link : 1;
union
{
struct
{
- u8 *data;
vnet_crypto_alg_t alg:8;
};
struct
vnet_crypto_async_alg_t async_alg:8;
};
};
-#define VNET_CRYPTO_KEY_TYPE_DATA 0
-#define VNET_CRYPTO_KEY_TYPE_LINK 1
- u8 type;
+ u8 data[];
} vnet_crypto_key_t;
typedef enum
vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
vnet_crypto_async_op_data_t async_opt_data[VNET_CRYPTO_ASYNC_OP_N_IDS];
vnet_crypto_engine_t *engines;
- vnet_crypto_key_t *keys;
+ vnet_crypto_key_t **keys;
uword *engine_index_by_name;
uword *alg_index_by_name;
uword *async_alg_index_by_name;
vnet_crypto_get_key (vnet_crypto_key_index_t index)
{
vnet_crypto_main_t *cm = &crypto_main;
- return vec_elt_at_index (cm->keys, index);
+ return cm->keys[index];
}
static_always_inline int