/*
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __DPDK_IPSEC_H__
#define __DPDK_IPSEC_H__
#include <vnet/vnet.h>
#include <vppinfra/cache.h>
#include <vnet/ipsec/ipsec.h>

#undef always_inline		/* dpdk and clib use conflicting always_inline macros */
#include <rte_config.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#if CLIB_DEBUG > 0
#define always_inline static inline
#else
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
#define foreach_dpdk_crypto_input_next		\
  _(DROP, "error-drop")				\
  _(IP4_LOOKUP, "ip4-lookup")			\
  _(IP6_LOOKUP, "ip6-lookup")			\
  _(INTERFACE_OUTPUT, "interface-output")	\
  _(DECRYPT4_POST, "dpdk-esp4-decrypt-post")	\
  _(DECRYPT6_POST, "dpdk-esp6-decrypt-post")
typedef enum
{
#define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f,
  foreach_dpdk_crypto_input_next
#undef _
    DPDK_CRYPTO_INPUT_N_NEXT,
} dpdk_crypto_input_next_t;
#define MAX_QP_PER_LCORE 16
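/*
 * Each rte_crypto_op allocated from the crypto_op mempool carries a private
 * area appended past the symmetric op: the GCM initial counter block, the
 * AAD and scratch space for the ICV. See crypto_op_len () and
 * crypto_op_get_priv () below for the exact layout.
 */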
typedef struct
{
  u32 salt;
  u32 iv[2];
  u32 cnt;
} dpdk_gcm_cnt_blk;

typedef struct
{
  u32 next;
  dpdk_gcm_cnt_blk cb __attribute__ ((aligned (16)));
  u8 aad[16];
  u8 icv[32];
} dpdk_op_priv_t;
typedef struct
{
  u16 *resource_idx;
  struct rte_crypto_op **ops;
  u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
  u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
  CLIB_CACHE_LINE_ALIGN_MARK (pad);
} crypto_worker_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
typedef struct
{
  enum rte_crypto_sym_xform_type type;
  union
  {
    enum rte_crypto_cipher_algorithm cipher;
    enum rte_crypto_auth_algorithm auth;
    enum rte_crypto_aead_algorithm aead;
  } alg;
  u8 key_len;
  u8 iv_len;
  u8 trunc_size;
  u8 boundary;
  u8 disabled;
} crypto_alg_t __attribute__ ((aligned (8)));
typedef struct
{
  u16 *free_resources;
  u16 *used_resources;
  u8 cipher_support[IPSEC_CRYPTO_N_ALG];
  u8 auth_support[IPSEC_INTEG_N_ALG];
  u8 drv_id;
  u8 numa;
  u16 id;
  const char *name;
  u32 max_qp;
  u64 features;
} crypto_dev_t;
typedef struct
{
  u16 thread_idx;
  u8 remove;
  u8 drv_id;
  u8 dev_id;
  u8 numa;
  u16 qp_id;
  u16 inflights[2];
  u16 n_ops;
  struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
  u32 bi[VLIB_FRAME_SIZE];
} crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
typedef struct
{
  u64 ts;
  struct rte_cryptodev_sym_session *session;
} crypto_session_disposal_t;
typedef struct
{
  struct rte_cryptodev_sym_session *session;
  u64 dev_mask;
} crypto_session_by_drv_t;
typedef struct
{
  /* Required for vec_validate_aligned */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_mempool *crypto_op;
  struct rte_mempool *session_h;
  struct rte_mempool **session_drv;
  crypto_session_disposal_t *session_disposal;
  uword *session_by_sa_index;
  u64 crypto_op_get_failed;
  u64 session_h_failed;
  u64 *session_drv_failed;
  crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
  clib_spinlock_t lockp;
} crypto_data_t;
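/*
 * Top-level state of the DPDK crypto backend: per-worker state, the
 * cryptodev resource table, per-algorithm capability tables and one
 * crypto_data_t entry per NUMA node (see crypto_alloc_ops ()).
 */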
typedef struct
{
  crypto_worker_main_t *workers_main;
  crypto_dev_t *dev;
  crypto_resource_t *resource;
  crypto_alg_t *cipher_algs;
  crypto_alg_t *auth_algs;
  crypto_data_t *data;
  u64 session_timeout;		/* nsec */
  u8 enabled;
} dpdk_crypto_main_t;
extern dpdk_crypto_main_t dpdk_crypto_main;
static const u8 pad_data[] =
  { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 };
void crypto_auto_placement (void);
clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session,
				  u32 sa_idx, crypto_resource_t * res,
				  crypto_worker_main_t * cwm, u8 is_outbound);
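/*
 * Per-op element layout in the crypto_op mempool:
 *
 *   [rte_crypto_op][rte_crypto_sym_op][pad to 16B][dpdk_op_priv_t]
 *
 * crypto_op_len () gives the element size to reserve at pool creation; the
 * same rounding in crypto_op_get_priv_offset () locates the private area at
 * run time.
 */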
static_always_inline u32
crypto_op_len (void)
{
  const u32 align = 16;
  u32 op_size =
    sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);

  return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t);
}
static_always_inline u32
crypto_op_get_priv_offset (void)
{
  const u32 align = 16;
  u32 offset;

  offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
  offset = (offset + align - 1) & ~(align - 1);

  return offset;
}
static_always_inline dpdk_op_priv_t *
crypto_op_get_priv (struct rte_crypto_op * op)
{
  return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ());
}
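/*
 * Sessions are cached per NUMA node in a vector indexed by SA index; each
 * entry keeps a driver bitmask recording which cryptodev drivers already
 * have the session initialized.
 */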
static_always_inline void
add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session,
			       crypto_data_t * data, u32 drv_id, u32 sa_idx)
{
  crypto_session_by_drv_t *sbd;
  vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx,
			CLIB_CACHE_LINE_BYTES);
  sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
  sbd->dev_mask |= 1L << drv_id;
  sbd->session = session;
}
static_always_inline struct rte_cryptodev_sym_session *
get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx)
{
  crypto_session_by_drv_t *sess_by_sa;
  if (_vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx)
    return NULL;
  sess_by_sa =
    vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
  return (sess_by_sa->dev_mask & (1L << drv_id)) ? sess_by_sa->session : NULL;
}
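/*
 * Fast path: look up the cached session for this SA on the resource's NUMA
 * node and driver. On a miss, fall back to creating the session.
 */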
static_always_inline clib_error_t *
crypto_get_session (struct rte_cryptodev_sym_session ** session,
		    u32 sa_idx,
		    crypto_resource_t * res,
		    crypto_worker_main_t * cwm, u8 is_outbound)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data;
  struct rte_cryptodev_sym_session *sess;

  data = vec_elt_at_index (dcm->data, res->numa);
  sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx);

  if (PREDICT_FALSE (!sess))
    return create_sym_session (session, sa_idx, res, cwm, is_outbound);

  session[0] = sess;

  return NULL;
}
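/*
 * Pick the cryptodev resource serving both algorithms of the SA: AEAD SAs
 * use the cipher resource, NULL-cipher SAs the auth resource; otherwise
 * both indices must point at the same resource.
 */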
static_always_inline u16
get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa)
{
  u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg];
  u16 auth_res = cwm->auth_resource_idx[sa->integ_alg];
  u8 is_aead;

  /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */

  is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) ||
	     (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) ||
	     (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256));

  if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE)
    return auth_res;

  if (cipher_res == auth_res)
    return cipher_res;

  if (is_aead)
    return cipher_res;

  return (u16) ~ 0;
}
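/*
 * Bulk-allocate/free rte_crypto_ops from the per-NUMA mempool; allocation
 * failures are counted rather than asserted.
 */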
static_always_inline i32
crypto_alloc_ops (u8 numa, struct rte_crypto_op ** ops, u32 n)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
  i32 ret;

  ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n);

  data->crypto_op_get_failed += ! !ret;

  return ret;
}
static_always_inline void
crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data = vec_elt_at_index (dcm->data, numa);

  if (!n)
    return;

  rte_mempool_put_bulk (data->crypto_op, (void **) ops, n);
}
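/*
 * Flush every pending op on this worker's resources to its cryptodev queue
 * pair; qp_id + outbound selects the inbound (0) or outbound (1) queue pair
 * of the resource. Ops the device refuses are freed, their buffers dropped
 * and the failure counted against the calling node.
 */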
static_always_inline void
crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm, u8 outbound,
		    u32 node_index, u32 error, u8 numa)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_resource_t *res;
  u16 *res_idx;

  vec_foreach (res_idx, cwm->resource_idx)
  {
    u16 enq;
    res = vec_elt_at_index (dcm->resource, res_idx[0]);

    if (!res->n_ops)
      continue;

    enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id + outbound,
				       res->ops, res->n_ops);
    res->inflights[outbound] += enq;

    if (PREDICT_FALSE (enq < res->n_ops))
      {
	crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq);
	vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq);

	vlib_node_increment_counter (vm, node_index, error,
				     res->n_ops - enq);
      }
    res->n_ops = 0;
  }
}
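/*
 * Fill the GCM initial counter block: the 4-byte salt followed by the
 * 8-byte per-packet IV (the ESP sequence number pair) forms the 12-byte
 * nonce the device expands into the initial counter.
 */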
static_always_inline void
crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi)
{
  icb->salt = salt;
  icb->iv[0] = seq;
  icb->iv[1] = seq_hi;
}
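/*
 * Populate the symmetric op that immediately follows the rte_crypto_op.
 * AEAD ops use the aead offset/AAD/digest fields; cipher+auth chains use
 * separate cipher and auth ranges. The AAD physical address is derived
 * from the op's own IOVA, since the AAD lives in the op's private area.
 */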
static_always_inline void
crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
		 struct rte_crypto_op *op, void *session,
		 u32 cipher_off, u32 cipher_len,
		 u32 auth_off, u32 auth_len,
		 u8 * aad, u8 * digest, u64 digest_paddr)
{
  struct rte_crypto_sym_op *sym_op;

  sym_op = (struct rte_crypto_sym_op *) (op + 1);

  sym_op->m_src = mb0;
  sym_op->session = session;

  if (is_aead)
    {
      sym_op->aead.data.offset = cipher_off;
      sym_op->aead.data.length = cipher_len;

      sym_op->aead.aad.data = aad;
      sym_op->aead.aad.phys_addr =
	op->phys_addr + (uintptr_t) aad - (uintptr_t) op;

      sym_op->aead.digest.data = digest;
      sym_op->aead.digest.phys_addr = digest_paddr;
    }
  else
    {
      sym_op->cipher.data.offset = cipher_off;
      sym_op->cipher.data.length = cipher_len;

      sym_op->auth.data.offset = auth_off;
      sym_op->auth.data.length = auth_len;

      sym_op->auth.digest.data = digest;
      sym_op->auth.digest.phys_addr = digest_paddr;
    }
}
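/*
 * Minimal outbound AES-GCM usage sketch, for orientation only; "sa", "mb0",
 * "bi0", "res" and the offset/length values are placeholders, not names
 * defined by this header:
 *
 *   struct rte_crypto_op *op;
 *   if (crypto_alloc_ops (numa, &op, 1))
 *     return;
 *   dpdk_op_priv_t *priv = crypto_op_get_priv (op);
 *   crypto_set_icb (&priv->cb, sa->salt, sa->seq, sa->seq_hi);
 *   crypto_op_setup (1, mb0, op, session, cipher_off, cipher_len,
 *                    0, 0, priv->aad, digest, digest_paddr);
 *   res->ops[res->n_ops] = op;
 *   res->bi[res->n_ops] = bi0;
 *   res->n_ops += 1;
 *   crypto_enqueue_ops (vm, cwm, 1, node_index, error, numa);
 */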
#endif /* __DPDK_IPSEC_H__ */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */