2 * Copyright (c) 2016 Intel and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vnet/vnet.h>
16 #include <vnet/ip/ip.h>
17 #include <vnet/api_errno.h>
18 #include <vnet/ipsec/ipsec.h>
19 #include <vlib/node_funcs.h>
21 #include <vnet/devices/dpdk/dpdk.h>
22 #include <vnet/devices/dpdk/ipsec/ipsec.h>
23 #include <vnet/devices/dpdk/ipsec/esp.h>
/*
 * Cryptodev resource sizing constants.
 * NOTE(review): this listing is a partial extract; each code line is
 * prefixed with its original source line number.
 */
25 #define DPDK_CRYPTO_NB_SESS_OBJS 20000   /* session-mempool objects per device (used for dev_conf.session_mp.nb_objs below) */
26 #define DPDK_CRYPTO_CACHE_SIZE 512       /* per-lcore mempool cache size (sessions and crypto ops) */
27 #define DPDK_CRYPTO_PRIV_SIZE 128        /* private data bytes per crypto op in the op pool */
28 #define DPDK_CRYPTO_N_QUEUE_DESC 1024    /* descriptors per cryptodev queue pair */
29 #define DPDK_CRYPTO_NB_COPS (1024 * 4)   /* base crypto-op pool size, scaled by (1 + n_workers) below */
/*
 * add_del_sa_sess - SA session add/delete callback (registered later as
 * im->cb.add_del_sa_sess_cb).  Walks every worker's crypto state and, for
 * both traffic directions, either reserves a session slot (is_add) or frees
 * the cryptodev session and scrubs the slot (!is_add) for @sa_index.
 *
 * NOTE(review): several original lines are missing from this extract
 * (return type, braces, the is_add branch structure, the error return after
 * the failed free) -- verify control flow against the full source.
 */
32 add_del_sa_sess (u32 sa_index, u8 is_add)
34 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
35 crypto_worker_main_t *cwm;
/* With worker threads present, the main thread owns no crypto queues, so
   the first element of workers_main is skipped inside the loop. */
36 u8 skip_master = vlib_num_workers () > 0;
39 vec_foreach (cwm, dcm->workers_main)
41 crypto_sa_session_t *sa_sess;
/* Iterate both directions; sa_sess_d[] is indexed by is_outbound. */
50 for (is_outbound = 0; is_outbound < 2; is_outbound++)
/* Add path: reserve a slot in the per-direction session pool. */
54 pool_get (cwm->sa_sess_d[is_outbound], sa_sess);
/* Delete path: locate the SA's session and the device it was created on. */
60 sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index);
61 dev_id = cwm->qp_data[sa_sess->qp_index].dev_id;
66 if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess))
68 clib_warning("failed to free session");
/* Zero the slot so a later pool_get can reuse it cleanly. */
71 memset(sa_sess, 0, sizeof(sa_sess[0]));
/*
 * update_qp_data - find-or-create the crypto_qp_data_t entry matching
 * (cdev_id, qp_id, is_outbound) in cwm->qp_data; the entry's vector index
 * is returned through @idx.
 *
 * NOTE(review): lines are missing from this extract -- the "found" early
 * return inside the search loop and, apparently, the qpd->qp_id assignment
 * after vec_add2.  Confirm against the full source.
 */
81 update_qp_data (crypto_worker_main_t * cwm,
82 u8 cdev_id, u16 qp_id, u8 is_outbound, u16 * idx)
84 crypto_qp_data_t *qpd;
/* Linear scan of existing entries; *idx tracks the current position. */
87 vec_foreach_index (*idx, cwm->qp_data)
89 qpd = vec_elt_at_index(cwm->qp_data, *idx);
91 if (qpd->dev_id == cdev_id && qpd->qp_id == qp_id &&
92 qpd->is_outbound == is_outbound)
/* Not found: append a fresh entry and fill it in. */
97 vec_add2 (cwm->qp_data, qpd, 1);
99 qpd->dev_id = cdev_id;
101 qpd->is_outbound = is_outbound;
/*
 * add_mapping - record that the (cipher algo, auth algo, direction) triple
 * is served by queue pair @qp on device @cdev_id, via the worker's
 * algo_qp_map hash (key packed into a crypto_worker_qp_key_t).
 *
 * NOTE(review): the early return when the key is already mapped (and the
 * function's return statements, presumably 0/1 for skipped/added, given
 * the "qp += add_cdev_mapping" usage below) are missing from this extract.
 */
110 add_mapping (crypto_worker_main_t * cwm,
111 u8 cdev_id, u16 qp, u8 is_outbound,
112 const struct rte_cryptodev_capabilities *cipher_cap,
113 const struct rte_cryptodev_capabilities *auth_cap)
/* Build the hash key by overlaying the packed key struct on a uword. */
116 uword key = 0, data, *ret;
117 crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key;
119 p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo;
120 p_key->auth_algo = (u8) auth_cap->sym.auth.algo;
121 p_key->is_outbound = is_outbound;
/* Check whether this algo/direction combination is already mapped. */
123 ret = hash_get (cwm->algo_qp_map, key);
/* Resolve (or create) the qp_data entry and store its index in the map. */
127 update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index);
129 data = (uword) qp_index;
130 hash_set (cwm->algo_qp_map, key, data);
/*
 * add_cdev_mapping - for device @cdev_id, map every supported
 * cipher x auth capability pair onto queue pair @qp for the given
 * direction.  Cross-product loop: outer scans cipher capabilities,
 * inner scans auth capabilities; unsupported algos are skipped via
 * check_algo_is_supported().
 *
 * NOTE(review): the `continue` statements after each filter test and the
 * final `return mapped` are missing from this extract; `mapped`
 * accumulates whether any mapping was added (callers do
 * `qp += add_cdev_mapping (...)`).
 */
141 add_cdev_mapping (crypto_worker_main_t * cwm,
142 struct rte_cryptodev_info *dev_info, u8 cdev_id,
143 u16 qp, u8 is_outbound)
145 const struct rte_cryptodev_capabilities *i, *j;
/* Capability arrays are terminated by RTE_CRYPTO_OP_TYPE_UNDEFINED. */
148 for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++)
150 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
153 if (check_algo_is_supported (i, NULL) != 0)
156 for (j = dev_info->capabilities; j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
159 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
162 if (check_algo_is_supported (j, NULL) != 0)
165 mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, j);
/*
 * check_cryptodev_queues - verify that the cryptodevs supporting symmetric
 * operation chaining collectively expose enough queue pairs: two per worker
 * thread (one per direction).
 *
 * NOTE(review): the return statements (presumably 0 on success, negative
 * otherwise -- the caller tests `< 0`) are missing from this extract.
 */
173 check_cryptodev_queues ()
179 if (vlib_num_workers () > 0)
180 n_req_qs = vlib_num_workers () * 2;
182 for (cdev_id = 0; cdev_id < rte_cryptodev_count (); cdev_id++)
184 struct rte_cryptodev_info cdev_info;
186 rte_cryptodev_info_get (cdev_id, &cdev_info);
/* Only devices with sym-operation-chaining count toward the total. */
189 (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
192 n_qs += cdev_info.max_nb_queue_pairs;
195 if (n_qs >= n_req_qs)
/*
 * dpdk_ipsec_check_support - SA validation callback (registered below as
 * im->cb.check_support_cb).  Enforces the DPDK backend's algorithm rules:
 *  - aes-gcm-128 crypto must not be paired with a separate integ alg; the
 *    integ alg is then forced to IPSEC_INTEG_ALG_AES_GCM_128 (AEAD).
 *  - any other crypto alg requires an integ alg that is neither NONE nor
 *    AES_GCM_128.
 * Returns a clib_error_t describing the rejection, or (presumably) 0 on
 * success -- the `return 0` line is missing from this extract, as are the
 * braces/else structure.
 */
201 static clib_error_t *
202 dpdk_ipsec_check_support (ipsec_sa_t * sa)
204 if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
206 if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
207 return clib_error_return (0, "unsupported integ-alg %U with "
208 "crypto-algo aes-gcm-128",
209 format_ipsec_integ_alg, sa->integ_alg);
/* AEAD: authentication comes from GCM itself. */
210 sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128;
214 if (sa->integ_alg == IPSEC_INTEG_ALG_NONE ||
215 sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
216 return clib_error_return (0, "unsupported integ-alg %U",
217 format_ipsec_integ_alg, sa->integ_alg);
/*
 * dpdk_ipsec_process - one-shot VLIB process that initializes the DPDK
 * cryptodev IPsec backend:
 *   1. bail out (falling back to the OpenSSL backend) if cryptodev support
 *      is disabled in config or there are too few queue pairs;
 *   2. for each cryptodev (highest dev_id first -- HW devices first), build
 *      each worker's algo->queue-pair hash map, configure the device and
 *      its queue pairs, and create a per-NUMA-socket crypto-op mempool;
 *   3. splice dpdk-esp-encrypt/dpdk-esp-decrypt into the ipsec-output-ip4 /
 *      ipsec-input-ip4 graph nodes and register the backend callbacks;
 *   4. set dpdk-crypto-input to POLLING on each thread.
 * The trailing statements form the error/cleanup path (free hash maps,
 * mempools, and vectors).
 *
 * NOTE(review): many original lines are missing from this extract (the
 * function's parameter list tail, loop braces, error gotos, the device
 * feature-flag test's first operand, etc.); code is left untouched and
 * control flow must be confirmed against the full source.
 */
224 dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
227 dpdk_config_main_t *conf = &dpdk_config_main;
228 ipsec_main_t *im = &ipsec_main;
229 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
230 vlib_thread_main_t *tm = vlib_get_thread_main ();
231 struct rte_cryptodev_config dev_conf;
232 struct rte_cryptodev_qp_conf qp_conf;
233 struct rte_cryptodev_info cdev_info;
234 struct rte_mempool *rmp;
/* Guard 1: cryptodev support not enabled in startup config. */
238 if (!conf->cryptodev)
240 clib_warning ("DPDK Cryptodev support is disabled, "
241 "default to OpenSSL IPsec");
/* Guard 2: not enough queue pairs for 2-per-worker operation. */
245 if (check_cryptodev_queues () < 0)
248 clib_warning ("not enough Cryptodevs, default to OpenSSL IPsec");
/* Per-thread worker state, sized to all vlib mains (incl. main thread). */
252 vec_alloc (dcm->workers_main, tm->n_vlib_mains);
253 _vec_len (dcm->workers_main) = tm->n_vlib_mains;
255 fprintf (stdout, "DPDK Cryptodevs info:\n");
256 fprintf (stdout, "dev_id\tn_qp\tnb_obj\tcache_size\n");
257 /* HW cryptodevs have higher dev_id, use HW first */
258 for (dev_id = rte_cryptodev_count () - 1; dev_id >= 0; dev_id--)
260 u16 max_nb_qp, qp = 0;
261 skip_master = vlib_num_workers () > 0;
263 rte_cryptodev_info_get (dev_id, &cdev_info);
/* Skip devices lacking symmetric operation chaining support. */
266 (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
269 max_nb_qp = cdev_info.max_nb_queue_pairs;
/* Build (or reuse) each worker's algo/direction -> qp index hash map. */
271 for (i = 0; i < tm->n_vlib_mains; i++)
274 crypto_worker_main_t *cwm;
283 cwm = vec_elt_at_index (dcm->workers_main, i);
284 map = cwm->algo_qp_map;
288 map = hash_create (0, sizeof (crypto_worker_qp_key_t));
291 clib_warning ("unable to create hash table for worker %u",
292 vlib_mains[i]->cpu_index);
295 cwm->algo_qp_map = map;
/* Consume queue pairs alternating direction until the device runs out. */
298 for (is_outbound = 0; is_outbound < 2 && qp < max_nb_qp;
300 qp += add_cdev_mapping (cwm, &cdev_info, dev_id, qp, is_outbound);
/* Configure the device with its session mempool parameters. */
306 dev_conf.socket_id = rte_cryptodev_socket_id (dev_id);
307 dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs;
308 dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS;
309 dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE;
311 ret = rte_cryptodev_configure (dev_id, &dev_conf);
314 clib_warning ("cryptodev %u config error", dev_id);
/* Set up every queue pair with a fixed descriptor count. */
318 qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC;
319 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
321 ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
325 clib_warning ("cryptodev %u qp %u setup error", dev_id, qp);
/* One crypto-op mempool per NUMA socket, created on first use. */
329 vec_validate_aligned (dcm->cop_pools, dev_conf.socket_id,
330 CLIB_CACHE_LINE_BYTES);
332 if (!vec_elt (dcm->cop_pools, dev_conf.socket_id))
334 u8 *pool_name = format (0, "crypto_op_pool_socket%u%c",
335 dev_conf.socket_id, 0);
337 rmp = rte_crypto_op_pool_create ((char *) pool_name,
338 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
339 DPDK_CRYPTO_NB_COPS *
340 (1 + vlib_num_workers ()),
341 DPDK_CRYPTO_CACHE_SIZE,
342 DPDK_CRYPTO_PRIV_SIZE,
344 vec_free (pool_name);
348 clib_warning ("failed to allocate mempool on socket %u",
352 vec_elt (dcm->cop_pools, dev_conf.socket_id) = rmp;
355 fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs,
356 DPDK_CRYPTO_NB_SESS_OBJS, DPDK_CRYPTO_CACHE_SIZE);
361 /* Add new next node and set as default */
362 vlib_node_t *node, *next_node;
/* Route esp traffic through the DPDK crypto nodes instead of SW esp. */
364 next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-encrypt");
366 node = vlib_get_node_by_name (vm, (u8 *) "ipsec-output-ip4");
368 im->esp_encrypt_node_index = next_node->index;
369 im->esp_encrypt_next_index =
370 vlib_node_add_next (vm, node->index, next_node->index);
372 next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-decrypt");
374 node = vlib_get_node_by_name (vm, (u8 *) "ipsec-input-ip4");
376 im->esp_decrypt_node_index = next_node->index;
377 im->esp_decrypt_next_index =
378 vlib_node_add_next (vm, node->index, next_node->index);
/* Register this backend's SA validation and session callbacks. */
380 im->cb.check_support_cb = dpdk_ipsec_check_support;
381 im->cb.add_del_sa_sess_cb = add_del_sa_sess;
/* Single-threaded: poll the crypto input node on the global main... */
383 if (vec_len (vlib_mains) == 0)
384 vlib_node_set_state (&vlib_global_main, dpdk_crypto_input_node.index,
385 VLIB_NODE_STATE_POLLING);
/* ...otherwise on every worker thread (index 0 is the main thread). */
387 for (i = 1; i < tm->n_vlib_mains; i++)
388 vlib_node_set_state (vlib_mains[i], dpdk_crypto_input_node.index,
389 VLIB_NODE_STATE_POLLING);
391 /* TODO cryptodev counters */
/* Error/cleanup path: release everything allocated above. */
397 crypto_worker_main_t *cwm;
398 struct rte_mempool **mp;
400 vec_foreach (cwm, dcm->workers_main)
401 hash_free (cwm->algo_qp_map);
403 vec_foreach (mp, dcm->cop_pools)
406 rte_mempool_free (mp[0]);
409 vec_free (dcm->workers_main);
410 vec_free (dcm->cop_pools);
/*
 * Register dpdk_ipsec_process as a VLIB process node so the cryptodev
 * initialization above runs in the main-loop process context.
 * NOTE(review): the closing brace/semicolon of this registration is
 * missing from this extract.
 */
416 VLIB_REGISTER_NODE (dpdk_ipsec_process_node,static) = {
417 .function = dpdk_ipsec_process,
418 .type = VLIB_NODE_TYPE_PROCESS,
419 .name = "dpdk-ipsec-process",
420 .process_log2_n_stack_bytes = 17,   /* 128 KiB process stack */
425 * fd.io coding-style-patch-verification: ON
428 * eval: (c-set-style "gnu")