Use thread local storage for thread index
[vpp.git] / src / plugins / dpdk / ipsec / ipsec.c
/*
 * Copyright (c) 2016 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/api_errno.h>
#include <vnet/ipsec/ipsec.h>
#include <vlib/node_funcs.h>

#include <dpdk/device/dpdk.h>
#include <dpdk/ipsec/ipsec.h>
#include <dpdk/ipsec/esp.h>

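/*
 * Sizing for the DPDK crypto resources set up below: the per-device
 * session mempool (number of objects and per-lcore cache), the private
 * area reserved in each crypto op, the number of descriptors per
 * cryptodev queue pair, and the base number of crypto ops allocated
 * per worker in the per-socket crypto op pool.
 */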
#define DPDK_CRYPTO_NB_SESS_OBJS  20000
#define DPDK_CRYPTO_CACHE_SIZE    512
#define DPDK_CRYPTO_PRIV_SIZE     128
#define DPDK_CRYPTO_N_QUEUE_DESC  1024
#define DPDK_CRYPTO_NB_COPS       (1024 * 4)

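/*
 * ipsec add/del SA session callback (im->cb.add_del_sa_sess_cb).
 *
 * On add, reserve a crypto_sa_session_t slot in every worker's inbound
 * and outbound session pools. On delete, free the cryptodev session (if
 * one was created) and clear the slot.
 *
 * return: 0 on success, -1 if a cryptodev session could not be freed.
 */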
static int
add_del_sa_sess (u32 sa_index, u8 is_add)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_worker_main_t *cwm;
  u8 skip_master = vlib_num_workers () > 0;

  /* *INDENT-OFF* */
  vec_foreach (cwm, dcm->workers_main)
    {
      crypto_sa_session_t *sa_sess;
      u8 is_outbound;

      if (skip_master)
        {
          skip_master = 0;
          continue;
        }

      for (is_outbound = 0; is_outbound < 2; is_outbound++)
        {
          if (is_add)
            {
              pool_get (cwm->sa_sess_d[is_outbound], sa_sess);
            }
          else
            {
              u8 dev_id;

              sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index);
              dev_id = cwm->qp_data[sa_sess->qp_index].dev_id;

              if (!sa_sess->sess)
                continue;

              if (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess))
                {
                  clib_warning("failed to free session");
                  return -1;
                }
              memset(sa_sess, 0, sizeof(sa_sess[0]));
            }
        }
    }
  /* *INDENT-ON* */

  return 0;
}

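/*
 * Return (through *idx) the index into cwm->qp_data of the entry matching
 * (cdev_id, qp_id, is_outbound), adding a new entry if none exists yet.
 */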
static void
update_qp_data (crypto_worker_main_t * cwm,
                u8 cdev_id, u16 qp_id, u8 is_outbound, u16 * idx)
{
  crypto_qp_data_t *qpd;

  /* *INDENT-OFF* */
  vec_foreach_index (*idx, cwm->qp_data)
    {
      qpd = vec_elt_at_index(cwm->qp_data, *idx);

      if (qpd->dev_id == cdev_id && qpd->qp_id == qp_id &&
          qpd->is_outbound == is_outbound)
          return;
    }
  /* *INDENT-ON* */

  vec_add2 (cwm->qp_data, qpd, 1);

  qpd->dev_id = cdev_id;
  qpd->qp_id = qp_id;
  qpd->is_outbound = is_outbound;
}

/*
 * Map (cipher algo, auth algo, direction) to a queue pair on cdev_id.
 *
 * return:
 *      0: mapping already exists
 *      1: new mapping added
 */
static int
add_mapping (crypto_worker_main_t * cwm,
             u8 cdev_id, u16 qp, u8 is_outbound,
             const struct rte_cryptodev_capabilities *cipher_cap,
             const struct rte_cryptodev_capabilities *auth_cap)
{
  u16 qp_index;
  uword key = 0, data, *ret;
  crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key;

  p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo;
  p_key->auth_algo = (u8) auth_cap->sym.auth.algo;
  p_key->is_outbound = is_outbound;

  ret = hash_get (cwm->algo_qp_map, key);
  if (ret)
    return 0;

  update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index);

  data = (uword) qp_index;
  hash_set (cwm->algo_qp_map, key, data);

  return 1;
}

/*
 * Walk the device capabilities and add a mapping for every supported
 * cipher/auth combination.
 *
 * return:
 *      0: no new mapping added
 *      1: at least one new mapping added
 */
static int
add_cdev_mapping (crypto_worker_main_t * cwm,
                  struct rte_cryptodev_info *dev_info, u8 cdev_id,
                  u16 qp, u8 is_outbound)
{
  const struct rte_cryptodev_capabilities *i, *j;
  u32 mapped = 0;

  for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++)
    {
      if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
        continue;

      if (check_algo_is_supported (i, NULL) != 0)
        continue;

      for (j = dev_info->capabilities; j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
           j++)
        {
          if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
            continue;

          if (check_algo_is_supported (j, NULL) != 0)
            continue;

          mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, j);
        }
    }

  return mapped;
}

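/*
 * Check that the chaining-capable cryptodevs together provide at least
 * two queue pairs (inbound + outbound) per worker thread, or two in
 * total when no workers are configured.
 *
 * return: 0 if enough queue pairs are available, -1 otherwise.
 */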
static int
check_cryptodev_queues ()
{
  u32 n_qs = 0;
  u8 cdev_id;
  u32 n_req_qs = 2;

  if (vlib_num_workers () > 0)
    n_req_qs = vlib_num_workers () * 2;

  for (cdev_id = 0; cdev_id < rte_cryptodev_count (); cdev_id++)
    {
      struct rte_cryptodev_info cdev_info;

      rte_cryptodev_info_get (cdev_id, &cdev_info);

      if (!
          (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
        continue;

      n_qs += cdev_info.max_nb_queue_pairs;
    }

  if (n_qs >= n_req_qs)
    return 0;
  else
    return -1;
}

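/*
 * ipsec SA validation callback (im->cb.check_support_cb): aes-gcm-128
 * must be paired with integ-alg none (the integrity algorithm is set
 * internally), while every other crypto algorithm requires an explicit,
 * non-GCM integrity algorithm.
 */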
static clib_error_t *
dpdk_ipsec_check_support (ipsec_sa_t * sa)
{
  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128)
    {
      if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
        return clib_error_return (0, "unsupported integ-alg %U with "
                                  "crypto-algo aes-gcm-128",
                                  format_ipsec_integ_alg, sa->integ_alg);
      sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128;
    }
  else
    {
      if (sa->integ_alg == IPSEC_INTEG_ALG_NONE ||
          sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128)
        return clib_error_return (0, "unsupported integ-alg %U",
                                  format_ipsec_integ_alg, sa->integ_alg);
    }

  return 0;
}

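/*
 * One-shot process node body, run once at startup. If cryptodev support
 * is enabled and enough queue pairs are available, build the per-worker
 * (cipher, auth, direction) -> queue pair maps, configure each
 * chaining-capable cryptodev and its queue pairs, create a crypto op
 * mempool per NUMA socket, splice the dpdk-esp-encrypt/decrypt nodes
 * into the ipsec-output-ip4/ipsec-input-ip4 paths, register the ipsec
 * callbacks and start polling dpdk-crypto-input on the workers.
 * Otherwise fall back to the OpenSSL IPsec implementation.
 */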
static uword
dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
                    vlib_frame_t * f)
{
  dpdk_config_main_t *conf = &dpdk_config_main;
  ipsec_main_t *im = &ipsec_main;
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  struct rte_cryptodev_config dev_conf;
  struct rte_cryptodev_qp_conf qp_conf;
  struct rte_cryptodev_info cdev_info;
  struct rte_mempool *rmp;
  i32 dev_id, ret;
  u32 i, skip_master;

  if (!conf->cryptodev)
    {
      clib_warning ("DPDK Cryptodev support is disabled, "
                    "default to OpenSSL IPsec");
      return 0;
    }

  if (check_cryptodev_queues () < 0)
    {
      conf->cryptodev = 0;
      clib_warning ("not enough Cryptodevs, default to OpenSSL IPsec");
      return 0;
    }

  vec_alloc (dcm->workers_main, tm->n_vlib_mains);
  _vec_len (dcm->workers_main) = tm->n_vlib_mains;

  fprintf (stdout, "DPDK Cryptodevs info:\n");
  fprintf (stdout, "dev_id\tn_qp\tnb_obj\tcache_size\n");
  /* HW cryptodevs have higher dev_id, use HW first */
  for (dev_id = rte_cryptodev_count () - 1; dev_id >= 0; dev_id--)
    {
      u16 max_nb_qp, qp = 0;
      skip_master = vlib_num_workers () > 0;

      rte_cryptodev_info_get (dev_id, &cdev_info);

      if (!
          (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
        continue;

      max_nb_qp = cdev_info.max_nb_queue_pairs;

      for (i = 0; i < tm->n_vlib_mains; i++)
        {
          u8 is_outbound;
          crypto_worker_main_t *cwm;
          uword *map;

          if (skip_master)
            {
              skip_master = 0;
              continue;
            }

          cwm = vec_elt_at_index (dcm->workers_main, i);
          map = cwm->algo_qp_map;

          if (!map)
            {
              map = hash_create (0, sizeof (crypto_worker_qp_key_t));
              if (!map)
                {
                  clib_warning ("unable to create hash table for worker %u",
                                vlib_mains[i]->thread_index);
                  goto error;
                }
              cwm->algo_qp_map = map;
            }

          for (is_outbound = 0; is_outbound < 2 && qp < max_nb_qp;
               is_outbound++)
            qp += add_cdev_mapping (cwm, &cdev_info, dev_id, qp, is_outbound);
        }

      if (qp == 0)
        continue;

      dev_conf.socket_id = rte_cryptodev_socket_id (dev_id);
      dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs;
      dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS;
      dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE;

      ret = rte_cryptodev_configure (dev_id, &dev_conf);
      if (ret < 0)
        {
          clib_warning ("cryptodev %u config error", dev_id);
          goto error;
        }

      qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC;
      for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++)
        {
          ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf,
                                                dev_conf.socket_id);
          if (ret < 0)
            {
              clib_warning ("cryptodev %u qp %u setup error", dev_id, qp);
              goto error;
            }
        }
      vec_validate_aligned (dcm->cop_pools, dev_conf.socket_id,
                            CLIB_CACHE_LINE_BYTES);

      if (!vec_elt (dcm->cop_pools, dev_conf.socket_id))
        {
          u8 *pool_name = format (0, "crypto_op_pool_socket%u%c",
                                  dev_conf.socket_id, 0);

          rmp = rte_crypto_op_pool_create ((char *) pool_name,
                                           RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                                           DPDK_CRYPTO_NB_COPS *
                                           (1 + vlib_num_workers ()),
                                           DPDK_CRYPTO_CACHE_SIZE,
                                           DPDK_CRYPTO_PRIV_SIZE,
                                           dev_conf.socket_id);
          vec_free (pool_name);

          if (!rmp)
            {
              clib_warning ("failed to allocate mempool on socket %u",
                            dev_conf.socket_id);
              goto error;
            }
          vec_elt (dcm->cop_pools, dev_conf.socket_id) = rmp;
        }

      fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs,
               DPDK_CRYPTO_NB_SESS_OBJS, DPDK_CRYPTO_CACHE_SIZE);
    }

  dpdk_esp_init ();

  /* Add new next node and set as default */
  vlib_node_t *node, *next_node;

  next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-encrypt");
  ASSERT (next_node);
  node = vlib_get_node_by_name (vm, (u8 *) "ipsec-output-ip4");
  ASSERT (node);
  im->esp_encrypt_node_index = next_node->index;
  im->esp_encrypt_next_index =
    vlib_node_add_next (vm, node->index, next_node->index);

  next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-decrypt");
  ASSERT (next_node);
  node = vlib_get_node_by_name (vm, (u8 *) "ipsec-input-ip4");
  ASSERT (node);
  im->esp_decrypt_node_index = next_node->index;
  im->esp_decrypt_next_index =
    vlib_node_add_next (vm, node->index, next_node->index);

  im->cb.check_support_cb = dpdk_ipsec_check_support;
  im->cb.add_del_sa_sess_cb = add_del_sa_sess;

  for (i = 1; i < tm->n_vlib_mains; i++)
    vlib_node_set_state (vlib_mains[i], dpdk_crypto_input_node.index,
                         VLIB_NODE_STATE_POLLING);

  /* TODO cryptodev counters */

  return 0;

error:
  ;
  /* roll back: free the per-worker maps and any crypto op pools created above */
  crypto_worker_main_t *cwm;
  struct rte_mempool **mp;
  /* *INDENT-OFF* */
  vec_foreach (cwm, dcm->workers_main)
    hash_free (cwm->algo_qp_map);

  vec_foreach (mp, dcm->cop_pools)
    {
      if (mp[0])
        rte_mempool_free (mp[0]);
    }
  /* *INDENT-ON* */
  vec_free (dcm->workers_main);
  vec_free (dcm->cop_pools);

  return 0;
}

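/* Process node that runs dpdk_ipsec_process once to perform the cryptodev
 * setup above. */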
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_ipsec_process_node, static) = {
    .function = dpdk_ipsec_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "dpdk-ipsec-process",
    .process_log2_n_stack_bytes = 17,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */