/*
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/api_errno.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/devices/dpdk/dpdk.h>
#include <vnet/devices/dpdk/ipsec/ipsec.h>
#include <vnet/devices/dpdk/ipsec/esp.h>
/* Sizing constants for the DPDK cryptodev resources created at init time. */
#define DPDK_CRYPTO_NB_OBJS 2048	/* session mempool: number of objects */
#define DPDK_CRYPTO_CACHE_SIZE 512	/* per-lcore mempool cache size */
#define DPDK_CRYPTO_PRIV_SIZE 128	/* private data bytes per crypto op */
#define DPDK_CRYPTO_N_QUEUE_DESC 512	/* descriptors per cryptodev queue pair */
#define DPDK_CRYPTO_NB_COPS (1024 * 4)	/* crypto ops allocated per worker */
36 update_qp_data (crypto_worker_main_t * cwm,
37 u8 cdev_id, u16 qp_id, u8 is_outbound, u16 * idx)
39 crypto_qp_data_t *qpd;
42 vec_foreach_index (*idx, cwm->qp_data)
44 qpd = vec_elt_at_index(cwm->qp_data, *idx);
46 if (qpd->dev_id == cdev_id && qpd->qp_id == qp_id &&
47 qpd->is_outbound == is_outbound)
52 vec_add2 (cwm->qp_data, qpd, 1);
54 qpd->dev_id = cdev_id;
56 qpd->is_outbound = is_outbound;
68 add_mapping (crypto_worker_main_t * cwm,
69 u8 cdev_id, u16 qp, u8 is_outbound,
70 const struct rte_cryptodev_capabilities *cipher_cap,
71 const struct rte_cryptodev_capabilities *auth_cap)
75 uword key = 0, data, *ret;
76 crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key;
78 p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo;
79 p_key->auth_algo = (u8) auth_cap->sym.auth.algo;
80 p_key->is_outbound = is_outbound;
82 ret = hash_get (cwm->algo_qp_map, key);
86 mapped = update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index);
90 data = (uword) qp_index;
92 ret = hash_set (cwm->algo_qp_map, key, data);
94 rte_panic ("Failed to insert hash table\n");
105 add_cdev_mapping (crypto_worker_main_t * cwm,
106 struct rte_cryptodev_info *dev_info, u8 cdev_id,
107 u16 qp, u8 is_outbound)
109 const struct rte_cryptodev_capabilities *i, *j;
112 for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++)
114 if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
117 if (check_algo_is_supported (i, NULL) != 0)
120 for (j = dev_info->capabilities; j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
125 if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
128 if (check_algo_is_supported (j, NULL) != 0)
131 status = add_mapping (cwm, cdev_id, qp, is_outbound, i, j);
143 check_cryptodev_queues ()
149 if (vlib_num_workers () > 0)
150 n_req_qs = vlib_num_workers () * 2;
152 for (cdev_id = 0; cdev_id < rte_cryptodev_count (); cdev_id++)
154 struct rte_cryptodev_info cdev_info;
156 rte_cryptodev_info_get (cdev_id, &cdev_info);
159 (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
162 n_qs += cdev_info.max_nb_queue_pairs;
165 if (n_qs >= n_req_qs)
171 static clib_error_t *
172 dpdk_ipsec_init (vlib_main_t * vm)
174 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
175 vlib_thread_main_t *tm = vlib_get_thread_main ();
176 struct rte_cryptodev_config dev_conf;
177 struct rte_cryptodev_qp_conf qp_conf;
178 struct rte_cryptodev_info cdev_info;
179 struct rte_mempool *rmp;
183 if (check_cryptodev_queues () < 0)
184 return clib_error_return (0, "not enough cryptodevs for ipsec");
186 vec_alloc (dcm->workers_main, tm->n_vlib_mains);
187 _vec_len (dcm->workers_main) = tm->n_vlib_mains;
189 fprintf (stdout, "DPDK Cryptodevs info:\n");
190 fprintf (stdout, "dev_id\tn_qp\tnb_obj\tcache_size\n");
191 /* HW cryptodevs have higher dev_id, use HW first */
192 for (dev_id = rte_cryptodev_count () - 1; dev_id >= 0; dev_id--)
194 u16 max_nb_qp, qp = 0;
195 skip_master = vlib_num_workers () > 0;
197 rte_cryptodev_info_get (dev_id, &cdev_info);
200 (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
203 max_nb_qp = cdev_info.max_nb_queue_pairs;
205 for (i = 0; i < tm->n_vlib_mains; i++)
208 crypto_worker_main_t *cwm;
217 cwm = vec_elt_at_index (dcm->workers_main, i);
218 map = cwm->algo_qp_map;
222 map = hash_create (0, sizeof (crypto_worker_qp_key_t));
224 return clib_error_return (0, "unable to create hash table "
226 vlib_mains[i]->cpu_index);
227 cwm->algo_qp_map = map;
230 for (is_outbound = 0; is_outbound < 2 && qp < max_nb_qp;
233 int mapped = add_cdev_mapping (cwm, &cdev_info,
234 dev_id, qp, is_outbound);
239 return clib_error_return (0,
240 "too many queues for one worker");
247 dev_conf.socket_id = rte_cryptodev_socket_id (dev_id);
248 dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs;
249 dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_OBJS;
250 dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE;
252 ret = rte_cryptodev_configure (dev_id, &dev_conf);
254 return clib_error_return (0, "cryptodev %u config error", dev_id);
256 qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC;
257 for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
259 ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
262 return clib_error_return (0, "cryptodev %u qp %u setup error",
265 fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs,
266 DPDK_CRYPTO_NB_OBJS, DPDK_CRYPTO_CACHE_SIZE);
269 u32 socket_id = rte_socket_id ();
271 vec_validate_aligned (dcm->cop_pools, socket_id, CLIB_CACHE_LINE_BYTES);
273 /* pool already exists, nothing to do */
274 if (dcm->cop_pools[socket_id])
277 u8 *pool_name = format (0, "crypto_op_pool_socket%u%c", socket_id, 0);
279 rmp = rte_crypto_op_pool_create ((char *) pool_name,
280 RTE_CRYPTO_OP_TYPE_SYMMETRIC,
281 DPDK_CRYPTO_NB_COPS *
282 (1 + vlib_num_workers ()),
283 DPDK_CRYPTO_CACHE_SIZE,
284 DPDK_CRYPTO_PRIV_SIZE, socket_id);
285 vec_free (pool_name);
288 return clib_error_return (0, "failed to allocate mempool on socket %u",
290 dcm->cop_pools[socket_id] = rmp;
294 if (vec_len (vlib_mains) == 0)
295 vlib_node_set_state (&vlib_global_main, dpdk_crypto_input_node.index,
296 VLIB_NODE_STATE_POLLING);
298 for (i = 1; i < tm->n_vlib_mains; i++)
299 vlib_node_set_state (vlib_mains[i], dpdk_crypto_input_node.index,
300 VLIB_NODE_STATE_POLLING);
/* Run dpdk_ipsec_init once, just before the main loop starts (after DPDK
   EAL initialization, so cryptodevs are already probed). */
VLIB_MAIN_LOOP_ENTER_FUNCTION (dpdk_ipsec_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */