2 * Copyright (c) 2017 Intel and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vnet/vnet.h>
16 #include <vnet/ip/ip.h>
17 #include <vnet/api_errno.h>
18 #include <vnet/ipsec/ipsec.h>
19 #include <vlib/node_funcs.h>
21 #include <dpdk/device/dpdk.h>
22 #include <dpdk/ipsec/ipsec.h>
24 dpdk_crypto_main_t dpdk_crypto_main;
26 #define EMPTY_STRUCT {0}
/* Populate dpdk_crypto_main's cipher and auth algorithm tables.
 * Every algorithm starts with disabled == n_mains (disabled on all threads);
 * crypto_auto_placement() later decrements the counter per thread that gets
 * a resource for the algorithm.
 * NOTE(review): several structural lines (braces, local declarations, macro
 * headers) are not visible in this chunk — do not edit logic here blindly. */
29 algos_init (u32 n_mains)
31 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
34 vec_validate_aligned (dcm->cipher_algs, IPSEC_CRYPTO_N_ALG - 1, 8);
38 dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].name = str; \
39 dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].disabled = n_mains;
40 foreach_ipsec_crypto_alg
44 /* Minimum boundary for ciphers is 4B, required by ESP */
45 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_NONE];
46 a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
47 a->alg = RTE_CRYPTO_CIPHER_NULL;
48 a->boundary = 4; /* 1 */
52 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_128];
53 a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
54 a->alg = RTE_CRYPTO_CIPHER_AES_CBC;
59 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_192];
60 a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
61 a->alg = RTE_CRYPTO_CIPHER_AES_CBC;
66 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_256];
67 a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
68 a->alg = RTE_CRYPTO_CIPHER_AES_CBC;
73 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_128];
74 a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
75 a->alg = RTE_CRYPTO_CIPHER_AES_CTR;
76 a->boundary = 4; /* 1 */
80 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_192];
81 a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
82 a->alg = RTE_CRYPTO_CIPHER_AES_CTR;
83 a->boundary = 4; /* 1 */
87 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_256];
88 a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
89 a->alg = RTE_CRYPTO_CIPHER_AES_CTR;
90 a->boundary = 4; /* 1 */
/* AES-GCM is an AEAD transform in DPDK (combined cipher + auth). */
94 #define AES_GCM_TYPE RTE_CRYPTO_SYM_XFORM_AEAD
95 #define AES_GCM_ALG RTE_CRYPTO_AEAD_AES_GCM
97 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_128];
98 a->type = AES_GCM_TYPE;
100 a->boundary = 4; /* 1 */
105 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_192];
106 a->type = AES_GCM_TYPE;
107 a->alg = AES_GCM_ALG;
108 a->boundary = 4; /* 1 */
113 a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_256];
114 a->type = AES_GCM_TYPE;
115 a->alg = AES_GCM_ALG;
116 a->boundary = 4; /* 1 */
/* Auth (integrity) algorithm table, same naming/disable pattern. */
121 vec_validate (dcm->auth_algs, IPSEC_INTEG_N_ALG - 1);
125 dcm->auth_algs[IPSEC_INTEG_ALG_##f].name = str; \
126 dcm->auth_algs[IPSEC_INTEG_ALG_##f].disabled = n_mains;
127 foreach_ipsec_integ_alg
131 a = &dcm->auth_algs[IPSEC_INTEG_ALG_NONE];
132 a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
133 a->alg = RTE_CRYPTO_AUTH_NULL;
137 a = &dcm->auth_algs[IPSEC_INTEG_ALG_MD5_96];
138 a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
139 a->alg = RTE_CRYPTO_AUTH_MD5_HMAC;
143 a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA1_96];
144 a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
145 a->alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
149 a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_96];
150 a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
151 a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
155 a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_128];
156 a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
157 a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
161 a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_384_192];
162 a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
163 a->alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
167 a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_512_256];
168 a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
169 a->alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
/* Index of 'alg' within dcm->cipher_algs, via pointer arithmetic.
 * 'alg' must point into that vector. */
175 cipher_alg_index (const crypto_alg_t * alg)
177 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
179 return (alg - dcm->cipher_algs);
/* Index of 'alg' within dcm->auth_algs, via pointer arithmetic.
 * 'alg' must point into that vector. */
183 auth_alg_index (const crypto_alg_t * alg)
185 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
187 return (alg - dcm->auth_algs);
/* Map a DPDK device capability entry plus a concrete key length to our
 * crypto_alg_t table entry.  Matches both plain CIPHER and AEAD xform
 * capabilities.  NOTE(review): the return statements for the match/no-match
 * cases are on lines not visible in this chunk. */
190 static crypto_alg_t *
191 cipher_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 key_len)
193 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
196 if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
200 vec_foreach (alg, dcm->cipher_algs)
202 if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
203 (alg->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
204 (cap->sym.cipher.algo == alg->alg) &&
205 (alg->key_len == key_len))
207 if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
208 (alg->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
209 (cap->sym.aead.algo == alg->alg) &&
210 (alg->key_len == key_len))
/* Map a DPDK AUTH capability entry plus a digest truncation size to our
 * auth_algs table entry.  Non-symmetric or non-AUTH capabilities are
 * rejected up front. */
218 static crypto_alg_t *
219 auth_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 trunc_size)
221 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
224 if ((cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) ||
225 (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH))
229 vec_foreach (alg, dcm->auth_algs)
231 if ((cap->sym.auth.algo == alg->alg) &&
232 (alg->trunc_size == trunc_size))
/* Fill an rte_crypto_sym_xform for an AEAD (AES-GCM) SA.
 * is_outbound selects ENCRYPT vs DECRYPT.  The IV lives in the per-op
 * private area (dpdk_op_priv_t.cb) at a fixed offset from the op. */
241 crypto_set_aead_xform (struct rte_crypto_sym_xform *xform,
242 ipsec_sa_t * sa, u8 is_outbound)
244 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
247 c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg);
249 ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_AEAD);
251 xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
252 xform->aead.algo = c->alg;
253 xform->aead.key.data = sa->crypto_key;
254 xform->aead.key.length = c->key_len;
255 xform->aead.iv.offset =
256 crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb);
257 xform->aead.iv.length = 12;
258 xform->aead.digest_length = c->trunc_size;
/* ESP AAD: SPI + 64-bit ESN (12B) when extended sequence numbers are on,
 * else SPI + 32-bit SN (8B). */
259 xform->aead.aad_length = sa->use_esn ? 12 : 8;
263 xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
265 xform->aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
/* Fill an rte_crypto_sym_xform for a plain cipher SA (CBC/CTR/NULL).
 * is_outbound selects ENCRYPT vs DECRYPT; IV offset points into the
 * per-op private area, same layout as the AEAD case. */
269 crypto_set_cipher_xform (struct rte_crypto_sym_xform *xform,
270 ipsec_sa_t * sa, u8 is_outbound)
272 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
275 c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg);
277 ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_CIPHER);
279 xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
280 xform->cipher.algo = c->alg;
281 xform->cipher.key.data = sa->crypto_key;
282 xform->cipher.key.length = c->key_len;
283 xform->cipher.iv.offset =
284 crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb);
285 xform->cipher.iv.length = c->iv_len;
289 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
291 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
/* Fill an rte_crypto_sym_xform for the SA's integrity (HMAC) algorithm.
 * Outbound generates the digest, inbound verifies it. */
295 crypto_set_auth_xform (struct rte_crypto_sym_xform *xform,
296 ipsec_sa_t * sa, u8 is_outbound)
298 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
301 a = vec_elt_at_index (dcm->auth_algs, sa->integ_alg);
303 ASSERT (a->type == RTE_CRYPTO_SYM_XFORM_AUTH);
305 xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
306 xform->auth.algo = a->alg;
307 xform->auth.key.data = sa->integ_key;
308 xform->auth.key.length = a->key_len;
309 xform->auth.digest_length = a->trunc_size;
313 xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
315 xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
/* Create (or reuse) the cryptodev symmetric session for an SA on a given
 * resource/driver, and cache it in two maps:
 *   - per-NUMA data->session_by_sa_index (one session header per SA)
 *   - per-worker cwm->session_by_drv_id_and_sa_index (keyed by drv_id+sa_idx)
 * AES-GCM SAs use a single AEAD xform; other SAs chain cipher+auth xforms
 * (order depends on direction — see lines around the chain setup).
 * NOTE(review): the error-check conditionals for session create/init are on
 * lines not visible in this chunk. */
319 create_sym_session (struct rte_cryptodev_sym_session **session,
321 crypto_resource_t * res,
322 crypto_worker_main_t * cwm, u8 is_outbound)
324 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
325 ipsec_main_t *im = &ipsec_main;
328 struct rte_crypto_sym_xform cipher_xform = { 0 };
329 struct rte_crypto_sym_xform auth_xform = { 0 };
330 struct rte_crypto_sym_xform *xfs;
331 crypto_session_key_t key = { 0 };
333 key.drv_id = res->drv_id;
336 sa = pool_elt_at_index (im->sad, sa_idx);
/* GCM is AEAD: single xform covers both encryption and integrity. */
338 if ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) |
339 (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) |
340 (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256))
342 crypto_set_aead_xform (&cipher_xform, sa, is_outbound);
347 crypto_set_cipher_xform (&cipher_xform, sa, is_outbound);
348 crypto_set_auth_xform (&auth_xform, sa, is_outbound);
352 cipher_xform.next = &auth_xform;
357 auth_xform.next = &cipher_xform;
362 data = vec_elt_at_index (dcm->data, res->numa);
366 * Multiple worker/threads share the session for an SA
367 * Single session per SA, initialized for each device driver
369 session[0] = (void *) hash_get (data->session_by_sa_index, sa_idx);
373 session[0] = rte_cryptodev_sym_session_create (data->session_h);
376 data->session_h_failed += 1;
377 return clib_error_return (0, "failed to create session header");
379 hash_set (data->session_by_sa_index, sa_idx, session[0]);
382 struct rte_mempool **mp;
383 mp = vec_elt_at_index (data->session_drv, res->drv_id);
384 ASSERT (mp[0] != NULL);
387 rte_cryptodev_sym_session_init (res->dev_id, session[0], xfs, mp[0]);
390 data->session_drv_failed[res->drv_id] += 1;
391 return clib_error_return (0, "failed to init session for drv %u",
/* Cache per-worker so the data path can look up by (drv_id, sa_idx). */
395 hash_set (cwm->session_by_drv_id_and_sa_index, key.val, session[0]);
/* Zero a mempool object and return it to its pool.  Plain memset is used;
 * NOTE(review): for key material a non-elidable wipe (e.g. explicit_bzero)
 * would be safer — confirm whether session objects can hold secrets. */
400 static void __attribute__ ((unused)) clear_and_free_obj (void *obj)
402 struct rte_mempool *mp = rte_mempool_from_obj (obj);
404 memset (obj, 0, mp->elt_size);
406 rte_mempool_put (mp, obj);
/* This is from rte_cryptodev_pmd.h */
/* Local copy of the PMD-internal accessor: fetch the per-driver private
 * session data stored inside a session header. */
411 get_session_private_data (const struct rte_cryptodev_sym_session *sess,
414 return sess->sess_private_data[driver_id];
/* This is from rte_cryptodev_pmd.h */
/* Local copy of the PMD-internal mutator: store per-driver private session
 * data into a session header slot. */
419 set_session_private_data (struct rte_cryptodev_sym_session *sess,
420 uint8_t driver_id, void *private_data)
422 sess->sess_private_data[driver_id] = private_data;
/* ipsec add/del SA-session callback.
 * Add: derive/choose the salt (from the tail of the key for GCM, random
 * otherwise).  Del: drop cached sessions from every worker's map and from
 * the per-NUMA map, free per-driver private data, then free the header.
 * NOTE(review): several control-flow lines (braces, continue/return paths)
 * are not visible in this chunk. */
425 static clib_error_t *
426 add_del_sa_session (u32 sa_index, u8 is_add)
428 ipsec_main_t *im = &ipsec_main;
429 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
430 crypto_worker_main_t *cwm;
431 struct rte_cryptodev_sym_session *s;
432 crypto_session_key_t key = { 0 };
437 key.sa_idx = sa_index;
442 ipsec_sa_t *sa = pool_elt_at_index (im->sad, sa_index);
444 switch (sa->crypto_alg)
446 case IPSEC_CRYPTO_ALG_AES_GCM_128:
447 case IPSEC_CRYPTO_ALG_AES_GCM_192:
448 case IPSEC_CRYPTO_ALG_AES_GCM_256:
/* GCM: salt is the last 4 bytes of the configured key. */
449 clib_memcpy (&sa->salt, &sa->crypto_key[sa->crypto_key_len - 4], 4);
452 seed = (u32) clib_cpu_time_now ();
453 sa->salt = random_u32 (&seed);
459 /* XXX Wait N cycles to be sure session is not in use OR
460 * keep refcnt at SA level per worker/thread ? */
/* Remove the session from every worker's per-driver lookup table. */
464 vec_foreach (cwm, dcm->workers_main)
466 for (drv_id = 0; drv_id < dcm->max_drv_id; drv_id++)
469 val = hash_get (cwm->session_by_drv_id_and_sa_index, key.val);
470 s = (struct rte_cryptodev_sym_session *) val;
475 hash_unset (cwm->session_by_drv_id_and_sa_index, key.val);
/* Remove from the per-NUMA SA->session map and free driver data. */
482 vec_foreach (data, dcm->data)
484 val = hash_get (data->session_by_sa_index, sa_index);
485 s = (struct rte_cryptodev_sym_session *) val;
490 hash_unset (data->session_by_sa_index, sa_index);
493 vec_foreach_index (drv_id, dcm->drv)
495 drv_session = get_session_private_data (s, drv_id);
500 * Custom clear to avoid finding a dev_id for drv_id:
501 * ret = rte_cryptodev_sym_session_clear (dev_id, drv_session);
504 clear_and_free_obj (drv_session);
506 set_session_private_data (s, drv_id, NULL);
509 ret = rte_cryptodev_sym_session_free(s);
/* ipsec check-support callback: reject SA configurations this backend
 * cannot handle.  integ-alg NONE is only allowed with AES-GCM (AEAD
 * provides integrity); algorithms still marked disabled (no crypto
 * resource assigned) are rejected with an error. */
517 static clib_error_t *
518 dpdk_ipsec_check_support (ipsec_sa_t * sa)
520 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
522 if (sa->integ_alg == IPSEC_INTEG_ALG_NONE)
523 switch (sa->crypto_alg)
525 case IPSEC_CRYPTO_ALG_AES_GCM_128:
526 case IPSEC_CRYPTO_ALG_AES_GCM_192:
527 case IPSEC_CRYPTO_ALG_AES_GCM_256:
530 return clib_error_return (0, "unsupported integ-alg %U crypto-alg %U",
531 format_ipsec_integ_alg, sa->integ_alg,
532 format_ipsec_crypto_alg, sa->crypto_alg);
535 /* XXX do we need the NONE check? */
536 if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE &&
537 dcm->cipher_algs[sa->crypto_alg].disabled)
538 return clib_error_return (0, "disabled crypto-alg %U",
539 format_ipsec_crypto_alg, sa->crypto_alg);
541 /* XXX do we need the NONE check? */
542 if (sa->integ_alg != IPSEC_INTEG_ALG_NONE &&
543 dcm->auth_algs[sa->integ_alg].disabled)
544 return clib_error_return (0, "disabled integ-alg %U",
545 format_ipsec_integ_alg, sa->integ_alg);
/* Walk a device's capability array and mark which of our cipher/auth
 * algorithms it supports.  Each capability entry expands over its key-size
 * (cipher/AEAD) or digest-size (auth) range in 'increment' steps.  An
 * algorithm counts toward dcm->enabled once it has at least n_mains
 * resources across all devices.
 * NOTE(review): assumes increment > 0 per visible loop bounds; a zero
 * increment with min < max would loop forever — confirm DPDK guarantees. */
550 crypto_parse_capabilities (crypto_dev_t * dev,
551 const struct rte_cryptodev_capabilities *cap,
554 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
558 for (; cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++)
560 /* A single capability maps to multiple cipher/auth algorithms */
561 switch (cap->sym.xform_type)
563 case RTE_CRYPTO_SYM_XFORM_AEAD:
564 case RTE_CRYPTO_SYM_XFORM_CIPHER:
565 inc = cap->sym.cipher.key_size.increment;
567 for (len = cap->sym.cipher.key_size.min;
568 len <= cap->sym.cipher.key_size.max; len += inc)
570 alg = cipher_cap_to_alg (cap, len);
573 dev->cipher_support[cipher_alg_index (alg)] = 1;
574 alg->resources += vec_len (dev->free_resources);
575 /* At least enough resources to support one algo */
576 dcm->enabled |= (alg->resources >= n_mains);
579 case RTE_CRYPTO_SYM_XFORM_AUTH:
580 inc = cap->sym.auth.digest_size.increment;
582 for (len = cap->sym.auth.digest_size.min;
583 len <= cap->sym.auth.digest_size.max; len += inc)
585 alg = auth_cap_to_alg (cap, len);
588 dev->auth_support[auth_alg_index (alg)] = 1;
589 alg->resources += vec_len (dev->free_resources);
590 /* At least enough resources to support one algo */
591 dcm->enabled |= (alg->resources >= n_mains);
600 #define DPDK_CRYPTO_N_QUEUE_DESC 2048
601 #define DPDK_CRYPTO_NB_SESS_OBJS 20000
/* Configure a cryptodev on its NUMA socket and set up n_qp queue pairs,
 * each with DPDK_CRYPTO_N_QUEUE_DESC descriptors.  Returns a clib error
 * (with the device/qp baked into the message) on any DPDK failure. */
603 static clib_error_t *
604 crypto_dev_conf (u8 dev, u16 n_qp, u8 numa)
606 struct rte_cryptodev_config dev_conf;
607 struct rte_cryptodev_qp_conf qp_conf;
612 dev_conf.socket_id = numa;
613 dev_conf.nb_queue_pairs = n_qp;
615 error_str = "failed to configure crypto device %u";
616 ret = rte_cryptodev_configure (dev, &dev_conf);
618 return clib_error_return (0, error_str, dev);
620 error_str = "failed to setup crypto device %u queue pair %u";
621 qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC;
622 for (qp = 0; qp < n_qp; qp++)
624 ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa, NULL);
626 return clib_error_return (0, error_str, dev, qp);
/* Enumerate all DPDK cryptodevs: record per-device metadata (name, NUMA,
 * features, queue pairs, driver), group devices by driver, configure each
 * device, and carve its queue pairs into crypto resources (one resource
 * per 2 queue pairs — one encrypt + one decrypt direction).  Devices
 * without symmetric op chaining support are skipped.
 * NOTE(review): the skip/continue paths around the feature check and the
 * config-error report are on lines not visible in this chunk. */
633 crypto_scan_devs (u32 n_mains)
635 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
636 struct rte_cryptodev *cryptodev;
637 struct rte_cryptodev_info info;
639 crypto_resource_t *res;
642 u16 max_res_idx, res_idx, j;
645 vec_validate_init_empty (dcm->dev, rte_cryptodev_count () - 1,
646 (crypto_dev_t) EMPTY_STRUCT);
648 for (i = 0; i < rte_cryptodev_count (); i++)
650 dev = vec_elt_at_index (dcm->dev, i);
652 cryptodev = &rte_cryptodevs[i];
653 rte_cryptodev_info_get (i, &info);
656 dev->name = cryptodev->data->name;
657 dev->numa = rte_cryptodev_socket_id (i);
658 dev->features = info.feature_flags;
659 dev->max_qp = info.max_nb_queue_pairs;
660 drv_id = info.driver_id;
661 if (drv_id >= vec_len (dcm->drv))
662 vec_validate_init_empty (dcm->drv, drv_id,
663 (crypto_drv_t) EMPTY_STRUCT);
664 vec_elt_at_index (dcm->drv, drv_id)->name = info.driver_name;
665 dev->drv_id = drv_id;
666 vec_add1 (vec_elt_at_index (dcm->drv, drv_id)->devs, i);
668 if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
671 if ((error = crypto_dev_conf (i, dev->max_qp, dev->numa)))
673 clib_error_report (error);
/* One resource per pair of queue pairs (inbound + outbound). */
677 max_res_idx = (dev->max_qp / 2) - 1;
679 vec_validate (dev->free_resources, max_res_idx);
681 res_idx = vec_len (dcm->resource);
682 vec_validate_init_empty_aligned (dcm->resource, res_idx + max_res_idx,
683 (crypto_resource_t) EMPTY_STRUCT,
684 CLIB_CACHE_LINE_BYTES);
/* Fill free_resources in reverse so vec_pop hands them out in order. */
686 for (j = 0; j <= max_res_idx; j++, res_idx++)
688 vec_elt (dev->free_resources, max_res_idx - j) = res_idx;
689 res = &dcm->resource[res_idx];
691 res->drv_id = drv_id;
693 res->numa = dev->numa;
694 res->thread_idx = (u16) ~ 0;
697 crypto_parse_capabilities (dev, info.capabilities, n_mains);
/* Assign device resources to worker threads.  For each device, each thread
 * (skipping the main thread when workers exist) that does not already use
 * the device pops one free resource and claims it for every algorithm the
 * device supports that the thread has no resource for yet, decrementing
 * the algorithm's disabled counter.  A resource that enables nothing new
 * is pushed back onto the free list.
 * NOTE(review): loop braces and the "nothing enabled" bookkeeping between
 * the visible lines are not in this chunk. */
702 crypto_auto_placement (void)
704 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
705 crypto_resource_t *res;
706 crypto_worker_main_t *cwm;
708 u32 thread_idx, skip_master;
713 skip_master = vlib_num_workers () > 0;
716 vec_foreach (dev, dcm->dev)
718 vec_foreach_index (thread_idx, dcm->workers_main)
720 if (vec_len (dev->free_resources) == 0)
723 if (thread_idx < skip_master)
726 /* Check thread is not already using the device */
727 vec_foreach (idx, dev->used_resources)
728 if (dcm->resource[idx[0]].thread_idx == thread_idx)
731 cwm = vec_elt_at_index (dcm->workers_main, thread_idx);
734 res_idx = vec_pop (dev->free_resources);
736 /* Set device only for supported algos */
737 for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++)
738 if (dev->cipher_support[i] &&
739 cwm->cipher_resource_idx[i] == (u16) ~0)
741 dcm->cipher_algs[i].disabled--;
742 cwm->cipher_resource_idx[i] = res_idx;
746 for (i = 0; i < IPSEC_INTEG_N_ALG; i++)
747 if (dev->auth_support[i] &&
748 cwm->auth_resource_idx[i] == (u16) ~0)
750 dcm->auth_algs[i].disabled--;
751 cwm->auth_resource_idx[i] = res_idx;
/* Resource enabled nothing new for this thread: return it. */
757 vec_add1 (dev->free_resources, res_idx);
761 vec_add1 (dev->used_resources, res_idx);
763 res = vec_elt_at_index (dcm->resource, res_idx);
765 ASSERT (res->thread_idx == (u16) ~0);
766 res->thread_idx = thread_idx;
768 /* Add device to vector of polling resources */
769 vec_add1 (cwm->resource_idx, res_idx);
/* rte_mempool object constructor for crypto ops: set the constant fields
 * (session-based symmetric op, not-processed status, physical address,
 * owning mempool) once at pool-creation time so the data path need not. */
776 crypto_op_init (struct rte_mempool *mempool,
777 void *_arg __attribute__ ((unused)),
778 void *_obj, unsigned i __attribute__ ((unused)))
780 struct rte_crypto_op *op = _obj;
782 op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
783 op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
784 op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
785 op->phys_addr = rte_mem_virt2phy (_obj);
786 op->mempool = mempool;
/* Create the per-NUMA crypto-op mempool (idempotent: returns early if the
 * pool already exists).  Pool private data carries the op-pool metadata
 * DPDK expects; each object is pre-initialized by crypto_op_init. */
789 static clib_error_t *
790 crypto_create_crypto_op_pool (vlib_main_t * vm, u8 numa)
792 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
793 dpdk_config_main_t *conf = &dpdk_config_main;
796 u32 pool_priv_size = sizeof (struct rte_crypto_op_pool_private);
797 struct rte_crypto_op_pool_private *priv;
798 struct rte_mempool *mp;
799 clib_error_t *error = NULL;
800 vlib_physmem_region_index_t pri;
802 data = vec_elt_at_index (dcm->data, numa);
804 /* Already allocated */
808 pool_name = format (0, "crypto_pool_numa%u%c", numa, 0);
811 dpdk_pool_create (vm, pool_name, crypto_op_len (), conf->num_mbufs,
812 pool_priv_size, 512, numa, &mp, &pri);
814 vec_free (pool_name);
819 /* Initialize mempool private data */
820 priv = rte_mempool_get_priv (mp);
821 priv->priv_size = pool_priv_size;
822 priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
824 /* call the object initializers */
825 rte_mempool_obj_iter (mp, crypto_op_init, 0);
827 data->crypto_op = mp;
/* Create the per-NUMA session-header mempool (idempotent).  Element size
 * comes from rte_cryptodev_get_header_session_size(), capacity is
 * DPDK_CRYPTO_NB_SESS_OBJS. */
832 static clib_error_t *
833 crypto_create_session_h_pool (vlib_main_t * vm, u8 numa)
835 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
838 struct rte_mempool *mp;
839 clib_error_t *error = NULL;
840 vlib_physmem_region_index_t pri;
843 data = vec_elt_at_index (dcm->data, numa);
848 pool_name = format (0, "session_h_pool_numa%u%c", numa, 0);
850 elt_size = rte_cryptodev_get_header_session_size ();
853 dpdk_pool_create (vm, pool_name, elt_size, DPDK_CRYPTO_NB_SESS_OBJS,
854 0, 512, numa, &mp, &pri);
856 vec_free (pool_name);
861 data->session_h = mp;
/* Create the per-(NUMA, driver) private-session-data mempool for a device
 * (idempotent per drv_id).  Element size is the driver's private session
 * size as reported by rte_cryptodev_get_private_session_size(). */
866 static clib_error_t *
867 crypto_create_session_drv_pool (vlib_main_t * vm, crypto_dev_t * dev)
869 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
872 struct rte_mempool *mp;
873 clib_error_t *error = NULL;
874 vlib_physmem_region_index_t pri;
878 data = vec_elt_at_index (dcm->data, numa);
880 vec_validate (data->session_drv, dev->drv_id);
881 vec_validate (data->session_drv_failed, dev->drv_id);
883 if (data->session_drv[dev->drv_id])
886 pool_name = format (0, "session_drv%u_pool_numa%u%c", dev->drv_id, numa, 0);
887 elt_size = rte_cryptodev_get_private_session_size (dev->id);
890 dpdk_pool_create (vm, pool_name, elt_size, DPDK_CRYPTO_NB_SESS_OBJS,
891 0, 512, numa, &mp, &pri);
893 vec_free (pool_name);
898 data->session_drv[dev->drv_id] = mp;
/* For every discovered device, ensure the per-NUMA data slot exists and
 * create the crypto-op, session-header, and per-driver session pools.
 * NOTE(review): the early-return-on-error lines between the calls are not
 * visible in this chunk. */
903 static clib_error_t *
904 crypto_create_pools (vlib_main_t * vm)
906 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
907 clib_error_t *error = NULL;
911 vec_foreach (dev, dcm->dev)
913 vec_validate_aligned (dcm->data, dev->numa, CLIB_CACHE_LINE_BYTES);
915 error = crypto_create_crypto_op_pool (vm, dev->numa);
919 error = crypto_create_session_h_pool (vm, dev->numa);
923 error = crypto_create_session_drv_pool (vm, dev);
/* Tear down all DPDK crypto state (used when there are not enough crypto
 * resources and the plugin falls back to the software backend): free every
 * mempool and all top-level vectors in dpdk_crypto_main. */
933 crypto_disable (void)
935 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
942 vec_foreach (data, dcm->data)
944 rte_mempool_free (data->crypto_op);
945 rte_mempool_free (data->session_h);
947 vec_foreach_index (i, data->session_drv)
948 rte_mempool_free (data->session_drv[i]);
950 vec_free (data->session_drv);
954 vec_free (dcm->data);
955 vec_free (dcm->workers_main);
956 vec_free (dcm->sa_session);
958 vec_free (dcm->resource);
959 vec_free (dcm->cipher_algs);
960 vec_free (dcm->auth_algs);
/* One-shot process node: discover crypto devices, place resources on
 * workers, create mempools, splice the DPDK ESP encrypt/decrypt nodes into
 * the ipsec graph, register the SA callbacks, and start the polling input
 * node on each worker.  Falls back (via crypto_disable, on lines not
 * visible here) to the default backend if no usable resources exist. */
964 dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
967 ipsec_main_t *im = &ipsec_main;
968 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
969 vlib_thread_main_t *tm = vlib_get_thread_main ();
970 crypto_worker_main_t *cwm;
971 clib_error_t *error = NULL;
972 u32 i, skip_master, n_mains;
974 n_mains = tm->n_vlib_mains;
975 skip_master = vlib_num_workers () > 0;
977 algos_init (n_mains - skip_master);
979 crypto_scan_devs (n_mains - skip_master);
983 clib_warning ("not enough DPDK crypto resources, default to OpenSSL");
988 vec_validate_init_empty_aligned (dcm->workers_main, n_mains - 1,
989 (crypto_worker_main_t) EMPTY_STRUCT,
990 CLIB_CACHE_LINE_BYTES);
/* Per-worker op vector plus resource-index tables initialized to ~0
 * ("no resource assigned yet"). */
993 vec_foreach (cwm, dcm->workers_main)
995 vec_validate_init_empty_aligned (cwm->ops, VLIB_FRAME_SIZE - 1, 0,
996 CLIB_CACHE_LINE_BYTES);
997 memset (cwm->cipher_resource_idx, ~0,
998 IPSEC_CRYPTO_N_ALG * sizeof(*cwm->cipher_resource_idx));
999 memset (cwm->auth_resource_idx, ~0,
1000 IPSEC_INTEG_N_ALG * sizeof(*cwm->auth_resource_idx));
1004 crypto_auto_placement ();
1006 error = crypto_create_pools (vm);
1009 clib_error_report (error);
1014 /* Add new next node and set it as default */
1015 vlib_node_t *node, *next_node;
1017 next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-encrypt");
1019 node = vlib_get_node_by_name (vm, (u8 *) "ipsec-output-ip4");
1021 im->esp_encrypt_node_index = next_node->index;
1022 im->esp_encrypt_next_index =
1023 vlib_node_add_next (vm, node->index, next_node->index);
1025 next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-decrypt");
1027 node = vlib_get_node_by_name (vm, (u8 *) "ipsec-input-ip4");
1029 im->esp_decrypt_node_index = next_node->index;
1030 im->esp_decrypt_next_index =
1031 vlib_node_add_next (vm, node->index, next_node->index);
1033 im->cb.check_support_cb = dpdk_ipsec_check_support;
1034 im->cb.add_del_sa_sess_cb = add_del_sa_session;
/* Start polling the crypto device completion queues on every worker
 * (and on main too when there are no workers). */
1036 node = vlib_get_node_by_name (vm, (u8 *) "dpdk-crypto-input");
1038 for (i = skip_master; i < n_mains; i++)
1039 vlib_node_set_state (vlib_mains[i], node->index, VLIB_NODE_STATE_POLLING);
/* Register dpdk_ipsec_process as a VPP process node with a 128KB stack
 * (2^17 bytes) — it runs once at startup to wire up the crypto backend. */
1044 VLIB_REGISTER_NODE (dpdk_ipsec_process_node,static) = {
1045 .function = dpdk_ipsec_process,
1046 .type = VLIB_NODE_TYPE_PROCESS,
1047 .name = "dpdk-ipsec-process",
1048 .process_log2_n_stack_bytes = 17,
1053 * fd.io coding-style-patch-verification: ON
1056 * eval: (c-set-style "gnu")