/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <vnet/vnet.h>
#include <vpp/app/version.h>

#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#undef always_inline
#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_crypto.h>
#include <rte_cryptodev_pmd.h>
#include <rte_config.h>
#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_NB_SESSION    10240
#define CRYPTODEV_DEF_DRIVE     crypto_aesni_mb

#define CRYPTODEV_IV_OFFSET  (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
/* VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN */
#define foreach_vnet_aead_crypto_conversion \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_128_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_192_GCM, AEAD, AES_GCM, 12, 16, 12) \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 8)  \
  _(AES_256_GCM, AEAD, AES_GCM, 12, 16, 12)
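
/* Illustration only: with the list element _(AES_128_GCM, AEAD, AES_GCM,
 * 12, 16, 8), the registration macro in dpdk_cryptodev_init () below expands
 * to registering VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC/_DEC with the
 * cryptodev_enqueue_gcm_aad_8_enc/_dec handlers, i.e. each element names one
 * (algorithm, IV length, tag length, AAD length) tuple.
 */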
/**
 * crypto (alg, cryptodev_alg), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg \
  _ (AES_128_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_192_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_256_CBC, AES_CBC, SHA1, 12)     \
  _ (AES_128_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_192_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_256_CBC, AES_CBC, SHA224, 14)   \
  _ (AES_128_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_192_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_256_CBC, AES_CBC, SHA256, 16)   \
  _ (AES_128_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_192_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_256_CBC, AES_CBC, SHA384, 24)   \
  _ (AES_128_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_192_CBC, AES_CBC, SHA512, 32)   \
  _ (AES_256_CBC, AES_CBC, SHA512, 32)
#define foreach_vnet_crypto_status_conversion \
  _(SUCCESS, COMPLETED)                       \
  _(NOT_PROCESSED, WORK_IN_PROGRESS)          \
  _(AUTH_FAILED, FAIL_BAD_HMAC)               \
  _(INVALID_SESSION, FAIL_ENGINE_ERR)         \
  _(INVALID_ARGS, FAIL_ENGINE_ERR)            \
  _(ERROR, FAIL_ENGINE_ERR)

static const vnet_crypto_op_status_t cryptodev_status_conversion[] = {
#define _(a, b) VNET_CRYPTO_OP_STATUS_##b,
  foreach_vnet_crypto_status_conversion
#undef _
};
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;
  struct rte_crypto_sym_op sop;
  u8 iv[16];
  u8 aad[16];
  vnet_crypto_async_frame_t *frame;
  u32 n_elts;
} cryptodev_op_t;

typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES,
} cryptodev_op_type_t;
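
/* Note on the layout above: iv and aad live inside cryptodev_op_t right
 * after the rte_crypto (sym) op, so CRYPTODEV_IV_OFFSET/CRYPTODEV_AAD_OFFSET
 * are plain offsetof () values. The PMD can then address them as
 * op.phys_addr + offset, which is how sop->aead.aad.phys_addr is filled in
 * cryptodev_frame_gcm_enqueue () below.
 */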
typedef struct
{
  struct rte_cryptodev_sym_session *keys[CRYPTODEV_N_OP_TYPES];
} cryptodev_key_t;

typedef struct
{
  u32 dev_id;
  u32 q_id;
  char *desc;
} cryptodev_inst_t;

typedef struct
{
  struct rte_mempool *cop_pool;
  struct rte_mempool *sess_pool;
  struct rte_mempool *sess_priv_pool;
} cryptodev_numa_data_t;
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u16 cryptodev_id;
  u16 cryptodev_q;
  u32 inflight;
  cryptodev_op_t **cops;
  struct rte_ring *ring;
} cryptodev_engine_thread_t;
typedef struct
{
  cryptodev_numa_data_t *per_numa_data;
  cryptodev_key_t *keys;
  cryptodev_engine_thread_t *per_thread_data;
  enum rte_iova_mode iova_mode;
  cryptodev_inst_t *cryptodev_inst;
  clib_bitmap_t *active_cdev_inst_mask;
  clib_spinlock_t tlock;
} cryptodev_main_t;

cryptodev_main_t cryptodev_main;
static int
prepare_aead_xform (struct rte_crypto_sym_xform *xform,
		    cryptodev_op_type_t op_type,
		    const vnet_crypto_key_t *key, u32 aad_len)
{
  struct rte_crypto_aead_xform *aead_xform = &xform->aead;
  memset (xform, 0, sizeof (*xform));
  xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;

  if (key->alg != VNET_CRYPTO_ALG_AES_128_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_192_GCM &&
      key->alg != VNET_CRYPTO_ALG_AES_256_GCM)
    return -1;

  aead_xform->algo = RTE_CRYPTO_AEAD_AES_GCM;
  aead_xform->op = (op_type == CRYPTODEV_OP_TYPE_ENCRYPT) ?
    RTE_CRYPTO_AEAD_OP_ENCRYPT : RTE_CRYPTO_AEAD_OP_DECRYPT;
  aead_xform->aad_length = aad_len;
  aead_xform->digest_length = 16;
  aead_xform->iv.offset = CRYPTODEV_IV_OFFSET;
  aead_xform->iv.length = 12;
  aead_xform->key.data = key->data;
  aead_xform->key.length = vec_len (key->data);

  return 0;
}
static int
prepare_linked_xform (struct rte_crypto_sym_xform *xforms,
		      cryptodev_op_type_t op_type,
		      const vnet_crypto_key_t *key)
{
  struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
  vnet_crypto_key_t *key_cipher, *key_auth;
  enum rte_crypto_cipher_algorithm cipher_algo = ~0;
  enum rte_crypto_auth_algorithm auth_algo = ~0;
  u32 digest_len = ~0;

  key_cipher = vnet_crypto_get_key (key->index_crypto);
  key_auth = vnet_crypto_get_key (key->index_integ);
  if (!key_cipher || !key_auth)
    return -1;

  if (op_type == CRYPTODEV_OP_TYPE_ENCRYPT)
    {
      xform_cipher = xforms;
      xform_auth = xforms + 1;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
    }
  else
    {
      xform_cipher = xforms + 1;
      xform_auth = xforms;
      xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
      xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
    }

  xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  xforms->next = xforms + 1;

  switch (key->async_alg)
    {
#define _(a, b, c, d)                         \
    case VNET_CRYPTO_ALG_##a##_##c##_TAG##d:  \
      cipher_algo = RTE_CRYPTO_CIPHER_##b;    \
      auth_algo = RTE_CRYPTO_AUTH_##c##_HMAC; \
      digest_len = d;                         \
      break;

      foreach_cryptodev_link_async_alg
#undef _
    default:
      return -1;
    }

  xform_cipher->cipher.algo = cipher_algo;
  xform_cipher->cipher.key.data = key_cipher->data;
  xform_cipher->cipher.key.length = vec_len (key_cipher->data);
  xform_cipher->cipher.iv.length = 16;
  xform_cipher->cipher.iv.offset = CRYPTODEV_IV_OFFSET;

  xform_auth->auth.algo = auth_algo;
  xform_auth->auth.digest_length = digest_len;
  xform_auth->auth.key.data = key_auth->data;
  xform_auth->auth.key.length = vec_len (key_auth->data);

  return 0;
}
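
/* Illustration only: for the element _(AES_128_CBC, AES_CBC, SHA1, 12) the
 * switch case above expands to:
 *
 *   case VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12:
 *     cipher_algo = RTE_CRYPTO_CIPHER_AES_CBC;
 *     auth_algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
 *     digest_len = 12;
 *     break;
 */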
static int
cryptodev_session_create (vnet_crypto_key_t *const key,
			  struct rte_mempool *sess_priv_pool,
			  cryptodev_key_t *session_pair, u32 aad_len)
{
  struct rte_crypto_sym_xform xforms_enc[2] = { {0} };
  struct rte_crypto_sym_xform xforms_dec[2] = { {0} };
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *dev_inst;
  struct rte_cryptodev *cdev;
  int ret;
  uint8_t dev_id = 0;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    ret = prepare_linked_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key);
  else
    ret = prepare_aead_xform (xforms_enc, CRYPTODEV_OP_TYPE_ENCRYPT, key,
			      aad_len);
  if (ret)
    return ret;

  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    prepare_linked_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key);
  else
    prepare_aead_xform (xforms_dec, CRYPTODEV_OP_TYPE_DECRYPT, key, aad_len);

  vec_foreach (dev_inst, cmt->cryptodev_inst)
  {
    dev_id = dev_inst->dev_id;
    cdev = rte_cryptodev_pmd_get_dev (dev_id);

    /* if the session is already configured for this driver type, skip it:
     * configuring it again would only bump the session data's refcnt */
    if (session_pair->keys[0]->sess_data[cdev->driver_id].data &&
	session_pair->keys[1]->sess_data[cdev->driver_id].data)
      continue;

    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[0],
					  xforms_enc, sess_priv_pool);
    ret = rte_cryptodev_sym_session_init (dev_id, session_pair->keys[1],
					  xforms_dec, sess_priv_pool);
    if (ret < 0)
      return ret;
  }
  session_pair->keys[0]->opaque_data = aad_len;
  session_pair->keys[1]->opaque_data = aad_len;

  return 0;
}
static void
cryptodev_session_del (struct rte_cryptodev_sym_session *sess)
{
  u32 n_devs, i;

  if (sess == NULL)
    return;

  n_devs = rte_cryptodev_count ();

  for (i = 0; i < n_devs; i++)
    rte_cryptodev_sym_session_clear (i, sess);

  rte_cryptodev_sym_session_free (sess);
}
static int
cryptodev_check_supported_vnet_alg (vnet_crypto_key_t *key)
{
  vnet_crypto_alg_t alg;
  if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
    return 0;

  alg = key->alg;

#define _(a, b, c, d, e, f)       \
  if (alg == VNET_CRYPTO_ALG_##a) \
    return 0;

  foreach_vnet_aead_crypto_conversion
#undef _
  return -1;
}
static_always_inline void
cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			vnet_crypto_key_index_t idx, u32 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;
  vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
  struct rte_mempool *sess_pool, *sess_priv_pool;
  cryptodev_key_t *ckey = 0;
  int ret = 0;

  if (kop == VNET_CRYPTO_KEY_OP_DEL)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      ckey = pool_elt_at_index (cmt->keys, idx);
      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      ckey->keys[0] = 0;
      ckey->keys[1] = 0;
      pool_put (cmt->keys, ckey);
      return;
    }
  else if (kop == VNET_CRYPTO_KEY_OP_MODIFY)
    {
      if (idx >= vec_len (cmt->keys))
	return;

      ckey = pool_elt_at_index (cmt->keys, idx);

      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      ckey->keys[0] = 0;
      ckey->keys[1] = 0;
    }
  else /* create key */
    pool_get_zero (cmt->keys, ckey);

  /* do not create a session for an unsupported alg */
  if (cryptodev_check_supported_vnet_alg (key))
    return;

  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);
  sess_pool = numa_data->sess_pool;
  sess_priv_pool = numa_data->sess_priv_pool;

  ckey->keys[0] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[0])
    {
      ret = -1;
      goto clear_key;
    }

  ckey->keys[1] = rte_cryptodev_sym_session_create (sess_pool);
  if (!ckey->keys[1])
    {
      ret = -1;
      goto clear_key;
    }

  ret = cryptodev_session_create (key, sess_priv_pool, ckey, aad_len);

clear_key:
  if (ret != 0)
    {
      cryptodev_session_del (ckey->keys[0]);
      cryptodev_session_del (ckey->keys[1]);
      memset (ckey, 0, sizeof (*ckey));
      pool_put (cmt->keys, ckey);
    }
}
static void
cryptodev_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
		       vnet_crypto_key_index_t idx)
{
  cryptodev_sess_handler (vm, kop, idx, 8);
}
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  u32 n_elts = f->n_elts, i;

  for (i = 0; i < n_elts; i++)
    f->elts[i].status = s;
}
static_always_inline rte_iova_t
cryptodev_get_iova (clib_pmalloc_main_t *pm, enum rte_iova_mode mode,
		    void *data)
{
  u64 index;
  if (mode == RTE_IOVA_VA)
    return (rte_iova_t) pointer_to_uword (data);

  index = clib_pmalloc_get_page_index (pm, data);
  return pointer_to_uword (data) - pm->lookup_table[index];
}
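
/* Example with assumed addresses: in RTE_IOVA_VA mode the IOVA of data is
 * simply its virtual address. In RTE_IOVA_PA mode, if the physmem page
 * holding data has a VA-to-PA delta of 0x7f0000000000 recorded in
 * pm->lookup_table, then data = 0x7f0000001000 yields the physical address
 * 0x1000.
 */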
static_always_inline void
cryptodev_validate_mbuf_chain (vlib_main_t *vm, struct rte_mbuf *mb,
			       vlib_buffer_t *b)
{
  struct rte_mbuf *first_mb = mb, *last_mb = mb; /**< last mbuf */
  /* when the input node is not dpdk, the mbuf data len is not initialized.
   * For a single buffer this is not a problem, since the data length is
   * written into the cryptodev operation. For a chained buffer a reference
   * data length has to be computed through the vlib_buffer.
   *
   * even when the input node is dpdk, it is possible that the chained
   * vlib_buffers were updated (a buffer added or removed) while the mbuf
   * fields were not, so we have to re-link every mbuf in the chain.
   */
  u16 data_len = b->current_length + (b->data + b->current_data -
				      rte_pktmbuf_mtod (mb, u8 *));

  first_mb->nb_segs = 1;
  first_mb->pkt_len = first_mb->data_len = data_len;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}
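
/* Worked example with assumed numbers: if the enqueue path rewound the
 * mbuf's data start 12 bytes before the vlib_buffer's current data (as done
 * below for negative start offsets), then
 * (b->data + b->current_data - rte_pktmbuf_mtod (mb, u8 *)) evaluates to 12
 * and data_len = b->current_length + 12, i.e. the first segment's length is
 * measured from the mbuf data start so the op's offsets stay valid.
 */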
static_always_inline int
cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm,
				     vnet_crypto_async_frame_t *frame,
				     cryptodev_op_type_t op_type)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue, n_elts;
  cryptodev_key_t *key;
  u32 last_key_index;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  key = pool_elt_at_index (cmt->keys, fe->key_index);
  last_key_index = fe->key_index;

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      i16 crypto_offset = fe->crypto_start_offset;
      i16 integ_offset = fe->integ_start_offset;
      u32 offset_diff = crypto_offset - integ_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (last_key_index != fe->key_index)
	{
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  last_key_index = fe->key_index;
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);
      sop->m_src->data_off = VLIB_BUFFER_PRE_DATA_SIZE;

      /* the mbuf prepend happens in tx, but the vlib_buffer adjustment
       * happens in the graph nodes, so we have to manually adjust the mbuf
       * data_off here so cryptodev can correctly compute the data pointer.
       * The prepend done here will later be rewritten by tx. */
      if (PREDICT_TRUE (fe->integ_start_offset < 0))
	{
	  sop->m_src->data_off += fe->integ_start_offset;
	  integ_offset = 0;
	  crypto_offset = offset_diff;
	}
      sop->session = key->keys[op_type];
      sop->cipher.data.offset = crypto_offset;
      sop->cipher.data.length = fe->crypto_total_length;
      sop->auth.data.offset = integ_offset;
      sop->auth.data.length = fe->crypto_total_length + fe->integ_length_adj;
      sop->auth.digest.data = fe->digest;
      sop->auth.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
						       fe->digest);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* for input nodes that are not dpdk-input, it is possible the mbuf
	 * was updated before as one of the chained mbufs. Setting nb_segs
	 * to 1 here prevents the cryptodev PMD from accessing potentially
	 * invalid m_src->next pointers.
	 */
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 16);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
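
/* Worked example of the offset fix-up above (assumed numbers): with
 * integ_start_offset = -12 (integrity coverage starts 12 bytes before
 * current_data) and crypto_start_offset = 4, data_off is moved back by 12,
 * integ_offset becomes 0 and crypto_offset becomes offset_diff = 16, so
 * both regions are expressed relative to the rewound mbuf data start.
 */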
static_always_inline int
cryptodev_frame_gcm_enqueue (vlib_main_t *vm,
			     vnet_crypto_async_frame_t *frame,
			     cryptodev_op_type_t op_type, u8 aad_len)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  vnet_crypto_async_frame_elt_t *fe;
  cryptodev_op_t **cop;
  u32 *bi;
  u32 n_enqueue = 0, n_elts;
  cryptodev_key_t *key;
  u32 last_key_index;
  u8 sess_aad_len;

  if (PREDICT_FALSE (frame == 0 || frame->n_elts == 0))
    return -1;
  n_elts = frame->n_elts;

  if (PREDICT_FALSE (CRYPTODEV_NB_CRYPTO_OPS - cet->inflight < n_elts))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  if (PREDICT_FALSE (rte_mempool_get_bulk (numa->cop_pool,
					   (void **) cet->cops, n_elts) < 0))
    {
      cryptodev_mark_frame_err_status (frame,
				       VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
      return -1;
    }

  cop = cet->cops;
  fe = frame->elts;
  bi = frame->buffer_indices;
  cop[0]->frame = frame;
  cop[0]->n_elts = n_elts;

  key = pool_elt_at_index (cmt->keys, fe->key_index);
  last_key_index = fe->key_index;
  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
  if (PREDICT_FALSE (sess_aad_len != aad_len))
    cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
			    fe->key_index, aad_len);

  while (n_elts)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
      struct rte_crypto_sym_op *sop = &cop[0]->sop;
      u16 crypto_offset = fe->crypto_start_offset;

      if (n_elts > 2)
	{
	  CLIB_PREFETCH (cop[1], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (cop[2], CLIB_CACHE_LINE_BYTES * 3, STORE);
	  CLIB_PREFETCH (&fe[1], CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&fe[2], CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (last_key_index != fe->key_index)
	{
	  key = pool_elt_at_index (cmt->keys, fe->key_index);
	  sess_aad_len = (u8) key->keys[op_type]->opaque_data;
	  if (PREDICT_FALSE (sess_aad_len != aad_len))
	    {
	      cryptodev_sess_handler (vm, VNET_CRYPTO_KEY_OP_MODIFY,
				      fe->key_index, aad_len);
	    }
	  last_key_index = fe->key_index;
	}

      sop->m_src = rte_mbuf_from_vlib_buffer (b);

      /* the mbuf prepend happens in tx, but the vlib_buffer adjustment
       * happens in the graph nodes, so we have to manually adjust the mbuf
       * data_off here so cryptodev can correctly compute the data pointer.
       * The prepend done here will later be rewritten by tx. */
      if (PREDICT_FALSE (fe->crypto_start_offset < 0))
	{
	  rte_pktmbuf_prepend (sop->m_src, -fe->crypto_start_offset);
	  crypto_offset = 0;
	}

      sop->session = key->keys[op_type];
      sop->aead.aad.data = cop[0]->aad;
      sop->aead.aad.phys_addr = cop[0]->op.phys_addr + CRYPTODEV_AAD_OFFSET;
      sop->aead.data.length = fe->crypto_total_length;
      sop->aead.data.offset = crypto_offset;
      sop->aead.digest.data = fe->tag;
      sop->aead.digest.phys_addr = cryptodev_get_iova (pm, cmt->iova_mode,
						       fe->tag);
      if (PREDICT_FALSE (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS))
	cryptodev_validate_mbuf_chain (vm, sop->m_src, b);
      else
	/* for input nodes that are not dpdk-input, it is possible the mbuf
	 * was updated before as one of the chained mbufs. Setting nb_segs
	 * to 1 here prevents the cryptodev PMD from accessing potentially
	 * invalid m_src->next pointers.
	 */
	sop->m_src->nb_segs = 1;
      clib_memcpy_fast (cop[0]->iv, fe->iv, 12);
      clib_memcpy_fast (cop[0]->aad, fe->aad, aad_len);
      cop++;
      bi++;
      fe++;
      n_elts--;
    }

  n_enqueue = rte_cryptodev_enqueue_burst (cet->cryptodev_id,
					   cet->cryptodev_q,
					   (struct rte_crypto_op **)
					   cet->cops, frame->n_elts);
  ASSERT (n_enqueue == frame->n_elts);
  cet->inflight += n_enqueue;

  return 0;
}
static_always_inline cryptodev_op_t *
cryptodev_get_ring_head (struct rte_ring *ring)
{
  cryptodev_op_t **r = (void *) &ring[1];
  return r[ring->cons.head & ring->mask];
}
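
/* Layout note: in DPDK the object table of an rte_ring immediately follows
 * the ring header in memory, so &ring[1] is the start of the object array.
 * Indexing it with cons.head & mask peeks at the next object to be dequeued
 * without moving the consumer head.
 */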
static_always_inline vnet_crypto_async_frame_t *
cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
			 u32 *enqueue_thread_idx)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
  cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
  cryptodev_op_t *cop0, **cop = cet->cops;
  vnet_crypto_async_frame_elt_t *fe;
  vnet_crypto_async_frame_t *frame;
  u32 n_elts, n_completed_ops = rte_ring_count (cet->ring);
  u32 ss0 = 0, ss1 = 0, ss2 = 0, ss3 = 0; /* sum of status */

  if (cet->inflight)
    {
      n_elts = clib_min (CRYPTODEV_NB_CRYPTO_OPS - n_completed_ops,
			 VNET_CRYPTO_FRAME_SIZE);
      n_elts = rte_cryptodev_dequeue_burst
	(cet->cryptodev_id, cet->cryptodev_q,
	 (struct rte_crypto_op **) cet->cops, n_elts);
      cet->inflight -= n_elts;
      n_completed_ops += n_elts;

      rte_ring_sp_enqueue_burst (cet->ring, (void *) cet->cops, n_elts, NULL);
    }

  if (PREDICT_FALSE (n_completed_ops == 0))
    return 0;

  cop0 = cryptodev_get_ring_head (cet->ring);
  /* not a single frame is finished */
  if (PREDICT_FALSE (cop0->n_elts > rte_ring_count (cet->ring)))
    return 0;

  frame = cop0->frame;
  n_elts = cop0->n_elts;
  n_elts = rte_ring_sc_dequeue_bulk (cet->ring, (void **) cet->cops,
				     n_elts, 0);
  fe = frame->elts;

  while (n_elts > 4)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      ss1 |= fe[1].status = cryptodev_status_conversion[cop[1]->op.status];
      ss2 |= fe[2].status = cryptodev_status_conversion[cop[2]->op.status];
      ss3 |= fe[3].status = cryptodev_status_conversion[cop[3]->op.status];

      cop += 4;
      fe += 4;
      n_elts -= 4;
    }

  while (n_elts)
    {
      ss0 |= fe[0].status = cryptodev_status_conversion[cop[0]->op.status];
      fe++;
      cop++;
      n_elts--;
    }

  frame->state = (ss0 | ss1 | ss2 | ss3) == VNET_CRYPTO_OP_STATUS_COMPLETED ?
    VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;

  rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);
  *nb_elts_processed = frame->n_elts;
  *enqueue_thread_idx = frame->enqueue_thread_index;

  return frame;
}
static_always_inline int
cryptodev_enqueue_gcm_aad_8_enc (vlib_main_t *vm,
				 vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_ENCRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_8_dec (vlib_main_t *vm,
				 vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 8);
}

static_always_inline int
cryptodev_enqueue_gcm_aad_12_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_gcm_enqueue (vm, frame,
				      CRYPTODEV_OP_TYPE_DECRYPT, 12);
}

static_always_inline int
cryptodev_enqueue_linked_alg_enc (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_ENCRYPT);
}

static_always_inline int
cryptodev_enqueue_linked_alg_dec (vlib_main_t *vm,
				  vnet_crypto_async_frame_t *frame)
{
  return cryptodev_frame_linked_algs_enqueue (vm, frame,
					      CRYPTODEV_OP_TYPE_DECRYPT);
}
typedef enum
{
  CRYPTODEV_RESOURCE_ASSIGN_AUTO = 0,
  CRYPTODEV_RESOURCE_ASSIGN_UPDATE,
} cryptodev_resource_assign_op_t;
/**
 * assign a cryptodev resource to a worker.
 * @param cet: the worker thread data
 * @param cryptodev_inst_index: if op is "ASSIGN_AUTO" this param is ignored.
 * @param op: the assignment method.
 * @return: 0 if successful, a negative number otherwise.
 **/
static_always_inline int
cryptodev_assign_resource (cryptodev_engine_thread_t *cet,
			   u32 cryptodev_inst_index,
			   cryptodev_resource_assign_op_t op)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_inst_t *cinst = 0;
  uword idx;

  /* assigning a resource is only allowed when no inflight op is in the queue */
  if (cet->inflight)
    return -EBUSY;

  switch (op)
    {
    case CRYPTODEV_RESOURCE_ASSIGN_AUTO:
      if (clib_bitmap_count_set_bits (cmt->active_cdev_inst_mask) >=
	  vec_len (cmt->cryptodev_inst))
	return -1;

      clib_spinlock_lock (&cmt->tlock);
      idx = clib_bitmap_first_clear (cmt->active_cdev_inst_mask);
      clib_bitmap_set (cmt->active_cdev_inst_mask, idx, 1);
      cinst = vec_elt_at_index (cmt->cryptodev_inst, idx);
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    case CRYPTODEV_RESOURCE_ASSIGN_UPDATE:
      /* assigning a used cryptodev resource is not allowed */
      if (clib_bitmap_get (cmt->active_cdev_inst_mask, cryptodev_inst_index)
	  == 1)
	return -EBUSY;

      vec_foreach_index (idx, cmt->cryptodev_inst)
      {
	cinst = cmt->cryptodev_inst + idx;
	if (cinst->dev_id == cet->cryptodev_id &&
	    cinst->q_id == cet->cryptodev_q)
	  break;
      }
      /* invalid existing worker resource assignment */
      if (idx == vec_len (cmt->cryptodev_inst))
	return -EINVAL;

      clib_spinlock_lock (&cmt->tlock);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask, idx, 0);
      clib_bitmap_set_no_check (cmt->active_cdev_inst_mask,
				cryptodev_inst_index, 1);
      cinst = cmt->cryptodev_inst + cryptodev_inst_index;
      cet->cryptodev_id = cinst->dev_id;
      cet->cryptodev_q = cinst->q_id;
      clib_spinlock_unlock (&cmt->tlock);
      break;
    default:
      return -EINVAL;
    }
  return 0;
}
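
/* Usage sketch: at init each worker is given a free (device, queue) pair
 * automatically, as done in dpdk_cryptodev_init () below:
 *
 *   cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
 *
 * while the "set cryptodev assignment" CLI re-binds a worker with
 * CRYPTODEV_RESOURCE_ASSIGN_UPDATE and an explicit resource index.
 */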
static u8 *
format_cryptodev_inst (u8 *s, va_list *args)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst = va_arg (*args, u32);
  cryptodev_inst_t *cit = cmt->cryptodev_inst + inst;
  u32 thread_index = 0;
  struct rte_cryptodev_info info;

  rte_cryptodev_info_get (cit->dev_id, &info);
  s = format (s, "%-25s%-10u", info.device->name, cit->q_id);

  vec_foreach_index (thread_index, cmt->per_thread_data)
  {
    cryptodev_engine_thread_t *cet = cmt->per_thread_data + thread_index;
    if (vlib_num_workers () > 0 && thread_index == 0)
      continue;

    if (cet->cryptodev_id == cit->dev_id && cet->cryptodev_q == cit->q_id)
      {
	s = format (s, "%u (%v)\n", thread_index,
		    vlib_worker_threads[thread_index].name);
	break;
      }
  }

  if (thread_index == vec_len (cmt->per_thread_data))
    s = format (s, "%s\n", "free");

  return s;
}
static clib_error_t *
cryptodev_show_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			      vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 inst;

  vlib_cli_output (vm, "%-5s%-25s%-10s%s\n", "No.", "Name", "Queue-id",
		   "Assigned-to");
  if (vec_len (cmt->cryptodev_inst) == 0)
    {
      vlib_cli_output (vm, "(nil)\n");
      return 0;
    }

  vec_foreach_index (inst, cmt->cryptodev_inst)
    vlib_cli_output (vm, "%-5u%U", inst, format_cryptodev_inst, inst);

  return 0;
}
VLIB_CLI_COMMAND (show_cryptodev_assignment, static) = {
    .path = "show cryptodev assignment",
    .short_help = "show cryptodev assignment",
    .function = cryptodev_show_assignment_fn,
};
static clib_error_t *
cryptodev_set_assignment_fn (vlib_main_t *vm, unformat_input_t *input,
			     vlib_cli_command_t *cmd)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_engine_thread_t *cet;
  unformat_input_t _line_input, *line_input = &_line_input;
  u32 thread_index, inst_index;
  u32 thread_present = 0, inst_present = 0;
  clib_error_t *error = 0;
  int ret;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "thread %u", &thread_index))
	thread_present = 1;
      else if (unformat (line_input, "resource %u", &inst_index))
	inst_present = 1;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  return error;
	}
    }

  if (!thread_present || !inst_present)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      return error;
    }

  if (thread_index == 0 && vlib_num_workers () > 0)
    {
      error =
	clib_error_return (0, "cannot assign crypto resource for master thread");
      return error;
    }

  if (thread_index > vec_len (cmt->per_thread_data) ||
      inst_index > vec_len (cmt->cryptodev_inst))
    {
      error = clib_error_return (0, "wrong thread id or resource id");
      return error;
    }

  cet = cmt->per_thread_data + thread_index;
  ret = cryptodev_assign_resource (cet, inst_index,
				   CRYPTODEV_RESOURCE_ASSIGN_UPDATE);
  if (ret)
    {
      error = clib_error_return (0, "cryptodev_assign_resource returned %i",
				 ret);
      return error;
    }

  return 0;
}
VLIB_CLI_COMMAND (set_cryptodev_assignment, static) = {
    .path = "set cryptodev assignment",
    .short_help = "set cryptodev assignment thread <thread_index> "
	"resource <inst_index>",
    .function = cryptodev_set_assignment_fn,
};
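
/* Example session (hypothetical device names and ids):
 *   vpp# show cryptodev assignment
 *   No.  Name                     Queue-id  Assigned-to
 *   0    crypto_aesni_mb0         0         1 (vpp_wk_0)
 *   vpp# set cryptodev assignment thread 2 resource 0
 */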
static int
check_cryptodev_alg_support (u32 dev_id)
{
  const struct rte_cryptodev_symmetric_capability *cap;
  struct rte_cryptodev_sym_capability_idx cap_idx;

#define _(a, b, c, d, e, f)                                               \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_##b;                                \
  cap_idx.algo.aead = RTE_CRYPTO_##b##_##c;                               \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);              \
  if (!cap)                                                               \
    return -RTE_CRYPTO_##b##_##c;                                         \
  else                                                                    \
    {                                                                     \
      if (cap->aead.digest_size.min > e || cap->aead.digest_size.max < e) \
	return -RTE_CRYPTO_##b##_##c;                                     \
      if (cap->aead.aad_size.min > f || cap->aead.aad_size.max < f)       \
	return -RTE_CRYPTO_##b##_##c;                                     \
      if (cap->aead.iv_size.min > d || cap->aead.iv_size.max < d)         \
	return -RTE_CRYPTO_##b##_##c;                                     \
    }

  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                          \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_CIPHER;                  \
  cap_idx.algo.cipher = RTE_CRYPTO_CIPHER_##b;                 \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);   \
  if (!cap)                                                    \
    return -RTE_CRYPTO_CIPHER_##b;                             \
  cap_idx.type = RTE_CRYPTO_SYM_XFORM_AUTH;                    \
  cap_idx.algo.auth = RTE_CRYPTO_AUTH_##c##_HMAC;              \
  cap = rte_cryptodev_sym_capability_get (dev_id, &cap_idx);   \
  if (!cap)                                                    \
    return -RTE_CRYPTO_AUTH_##c;

  foreach_cryptodev_link_async_alg
#undef _

  return 0;
}
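
/* Illustration only: for the AEAD element _(AES_128_GCM, AEAD, AES_GCM, 12,
 * 16, 8) the first macro above expands to querying
 * cap_idx.type = RTE_CRYPTO_SYM_XFORM_AEAD and
 * cap_idx.algo.aead = RTE_CRYPTO_AEAD_AES_GCM, then checking that a 16-byte
 * digest, 8-byte AAD and 12-byte IV all fall inside the device's advertised
 * min/max ranges.
 */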
static u32
cryptodev_count_queue (u32 numa)
{
  struct rte_cryptodev_info info;
  u32 n_cryptodev = rte_cryptodev_count ();
  u32 i, q_count = 0;

  for (i = 0; i < n_cryptodev; i++)
    {
      rte_cryptodev_info_get (i, &info);
      if (rte_cryptodev_socket_id (i) != numa)
	{
	  clib_warning ("DPDK crypto resource %s is on a different numa node "
			"than %u, ignored", info.device->name, numa);
	  continue;
	}
      /* only devices that support symmetric crypto are used */
      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
	continue;
      q_count += info.max_nb_queue_pairs;
    }

  return q_count;
}
static int
cryptodev_configure (vlib_main_t *vm, uint32_t cryptodev_id)
{
  struct rte_cryptodev_info info;
  struct rte_cryptodev *cdev;
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data = vec_elt_at_index (cmt->per_numa_data,
						       vm->numa_node);
  u32 i;
  int ret;

  rte_cryptodev_info_get (cryptodev_id, &info);

  /* do not configure a device that does not support symmetric crypto */
  if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO))
    return 0;

  ret = check_cryptodev_alg_support (cryptodev_id);
  if (ret != 0)
    return ret;

  cdev = rte_cryptodev_pmd_get_dev (cryptodev_id);
  /** If the device is already started, we reuse it, otherwise configure
   *  both the device and queue pairs.
   **/
  if (!cdev->data->dev_started)
    {
      struct rte_cryptodev_config cfg;

      cfg.socket_id = vm->numa_node;
      cfg.nb_queue_pairs = info.max_nb_queue_pairs;

      rte_cryptodev_configure (cryptodev_id, &cfg);

      for (i = 0; i < info.max_nb_queue_pairs; i++)
	{
	  struct rte_cryptodev_qp_conf qp_cfg;

	  qp_cfg.mp_session = numa_data->sess_pool;
	  qp_cfg.mp_session_private = numa_data->sess_priv_pool;
	  qp_cfg.nb_descriptors = CRYPTODEV_NB_CRYPTO_OPS;

	  ret = rte_cryptodev_queue_pair_setup (cryptodev_id, i, &qp_cfg,
						vm->numa_node);
	  if (ret)
	    break;
	}
      if (i != info.max_nb_queue_pairs)
	return -1;
      /* start the device */
      rte_cryptodev_start (cryptodev_id);
    }

  for (i = 0; i < cdev->data->nb_queue_pairs; i++)
    {
      cryptodev_inst_t *cdev_inst;
      vec_add2 (cmt->cryptodev_inst, cdev_inst, 1);
      cdev_inst->desc = vec_new (char, strlen (info.device->name) + 10);
      cdev_inst->dev_id = cryptodev_id;
      cdev_inst->q_id = i;

      snprintf (cdev_inst->desc, strlen (info.device->name) + 9,
		"%s_q%u", info.device->name, i);
    }

  return 0;
}
static int
cryptodev_cmp (void *v1, void *v2)
{
  cryptodev_inst_t *a1 = v1;
  cryptodev_inst_t *a2 = v2;

  if (a1->q_id > a2->q_id)
    return 1;
  if (a1->q_id < a2->q_id)
    return -1;

  return 0;
}
static int
cryptodev_probe (vlib_main_t *vm, u32 n_workers)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  u32 n_queues = cryptodev_count_queue (vm->numa_node);
  u32 i;
  int ret;

  /* If there are not enough queues, exit */
  if (n_queues < n_workers)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      ret = cryptodev_configure (vm, i);
      if (ret)
	return ret;
    }

  vec_sort_with_function (cmt->cryptodev_inst, cryptodev_cmp);

  return 0;
}
static i32
cryptodev_get_session_sz (vlib_main_t *vm, uint32_t n_workers)
{
  u32 sess_data_sz = 0, i;

  if (rte_cryptodev_count () == 0)
    return -1;

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      u32 dev_sess_sz = rte_cryptodev_sym_get_private_session_size (i);

      sess_data_sz = dev_sess_sz > sess_data_sz ? dev_sess_sz : sess_data_sz;
    }

  return sess_data_sz;
}
static void
dpdk_disable_cryptodev_engine (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  cryptodev_numa_data_t *numa_data;

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, vm->numa_node);

  if (numa_data->sess_pool)
    rte_mempool_free (numa_data->sess_pool);
  if (numa_data->sess_priv_pool)
    rte_mempool_free (numa_data->sess_priv_pool);
  if (numa_data->cop_pool)
    rte_mempool_free (numa_data->cop_pool);
}
static void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)),
		void *_obj, unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
clib_error_t *
dpdk_cryptodev_init (vlib_main_t *vm)
{
  cryptodev_main_t *cmt = &cryptodev_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  cryptodev_engine_thread_t *ptd;
  cryptodev_numa_data_t *numa_data;
  struct rte_mempool *mp;
  u32 skip_master = vlib_num_workers () > 0;
  u32 n_workers = tm->n_vlib_mains - skip_master;
  u32 numa = vm->numa_node;
  i32 sess_sz;
  u64 n_cop_elts;
  u32 eidx;
  u32 i;
  u8 *name = 0;
  clib_error_t *error;
  struct rte_crypto_op_pool_private *priv;

  cmt->iova_mode = rte_eal_iova_mode ();

  sess_sz = cryptodev_get_session_sz (vm, n_workers);
  if (sess_sz < 0)
    {
      error = clib_error_return (0, "Not enough cryptodevs");
      return error;
    }

  /* n_workers * CRYPTODEV_NB_CRYPTO_OPS crypto ops, rounded up to the next
   * power of two */
  n_cop_elts = max_pow2 ((u64) n_workers * CRYPTODEV_NB_CRYPTO_OPS);

  vec_validate (cmt->per_numa_data, vm->numa_node);
  numa_data = vec_elt_at_index (cmt->per_numa_data, numa);

  /* create session pool for the numa node */
  name = format (0, "vcryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_cryptodev_sym_session_pool_create ((char *) name,
					      CRYPTODEV_NB_SESSION,
					      0, 0, 0, numa);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto err_handling;
    }
  vec_free (name);

  numa_data->sess_pool = mp;

  /* create session private pool for the numa node */
  name = format (0, "cryptodev_sess_pool_%u%c", numa, 0);
  mp = rte_mempool_create ((char *) name, CRYPTODEV_NB_SESSION, sess_sz, 0,
			   0, NULL, NULL, NULL, NULL, numa, 0);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto err_handling;
    }
  vec_free (name);

  numa_data->sess_priv_pool = mp;

  /* create cryptodev op pool */
  name = format (0, "cryptodev_op_pool_%u%c", numa, 0);

  mp = rte_mempool_create ((char *) name, n_cop_elts,
			   sizeof (cryptodev_op_t), VLIB_FRAME_SIZE * 2,
			   sizeof (struct rte_crypto_op_pool_private), NULL,
			   NULL, crypto_op_init, NULL, numa, 0);
  if (!mp)
    {
      error = clib_error_return (0, "Not enough memory for mp %s", name);
      goto err_handling;
    }
  vec_free (name);

  priv = rte_mempool_get_priv (mp);
  priv->priv_size = sizeof (struct rte_crypto_op_pool_private);
  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  numa_data->cop_pool = mp;

  /* probe all cryptodev devices and get queue info */
  if (cryptodev_probe (vm, n_workers) < 0)
    {
      error = clib_error_return (0, "Failed to configure cryptodev");
      goto err_handling;
    }

  clib_bitmap_vec_validate (cmt->active_cdev_inst_mask, tm->n_vlib_mains);
  clib_spinlock_init (&cmt->tlock);

  vec_validate_aligned (cmt->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  for (i = skip_master; i < tm->n_vlib_mains; i++)
    {
      ptd = cmt->per_thread_data + i;
      cryptodev_assign_resource (ptd, 0, CRYPTODEV_RESOURCE_ASSIGN_AUTO);
      name = format (0, "frames_ring_%u%c", i, 0);
      ptd->ring = rte_ring_create ((char *) name, CRYPTODEV_NB_CRYPTO_OPS,
				   vm->numa_node,
				   RING_F_SP_ENQ | RING_F_SC_DEQ);
      if (!ptd->ring)
	{
	  error = clib_error_return (0, "Not enough memory for mp %s", name);
	  goto err_handling;
	}
      vec_validate (ptd->cops, VNET_CRYPTO_FRAME_SIZE - 1);
      vec_free (name);
    }

  /* register handler */
  eidx = vnet_crypto_register_engine (vm, "dpdk_cryptodev", 79,
				      "DPDK Cryptodev Engine");

#define _(a, b, c, d, e, f)                                    \
  vnet_crypto_register_async_handler                           \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_ENC,     \
     cryptodev_enqueue_gcm_aad_##f##_enc,                      \
     cryptodev_frame_dequeue);                                 \
  vnet_crypto_register_async_handler                           \
    (vm, eidx, VNET_CRYPTO_OP_##a##_TAG##e##_AAD##f##_DEC,     \
     cryptodev_enqueue_gcm_aad_##f##_dec,                      \
     cryptodev_frame_dequeue);
  foreach_vnet_aead_crypto_conversion
#undef _

#define _(a, b, c, d)                                          \
  vnet_crypto_register_async_handler                           \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_ENC,        \
     cryptodev_enqueue_linked_alg_enc,                         \
     cryptodev_frame_dequeue);                                 \
  vnet_crypto_register_async_handler                           \
    (vm, eidx, VNET_CRYPTO_OP_##a##_##c##_TAG##d##_DEC,        \
     cryptodev_enqueue_linked_alg_dec,                         \
     cryptodev_frame_dequeue);

  foreach_cryptodev_link_async_alg
#undef _
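
  /* Illustration only: for _(AES_128_CBC, AES_CBC, SHA1, 12) the linked-alg
   * macro above registers VNET_CRYPTO_OP_AES_128_CBC_SHA1_TAG12_ENC/_DEC
   * with cryptodev_enqueue_linked_alg_enc/_dec, both draining through
   * cryptodev_frame_dequeue.
   */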
  vnet_crypto_register_key_handler (vm, eidx, cryptodev_key_handler);

  return 0;

err_handling:
  dpdk_disable_cryptodev_engine (vm);

  return error;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */