2 * Copyright (c) 2017 Intel and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #ifndef __DPDK_IPSEC_H__
16 #define __DPDK_IPSEC_H__
18 #include <vnet/vnet.h>
19 #include <vppinfra/cache.h>
20 #include <vnet/ipsec/ipsec.h>
23 #include <rte_config.h>
24 #include <rte_crypto.h>
25 #include <rte_cryptodev.h>
28 #define always_inline static inline
/* NOTE(review): two back-to-back definitions of always_inline; presumably
 * they are alternatives selected by a preprocessor conditional (debug vs.
 * optimized build) elided from this view -- confirm against full source. */
30 #define always_inline static inline __attribute__ ((__always_inline__))
/* Cryptodev queue-pair depth; half of it bounds per-direction in-flight
 * ops (see crypto_enqueue_ops below). */
33 #define DPDK_CRYPTO_N_QUEUE_DESC 2048
/* Number of pre-allocated session objects per session mempool. */
34 #define DPDK_CRYPTO_NB_SESS_OBJS 20000
/* Next-node dispatch list for the dpdk-crypto-input graph node: each
 * _(SYM, "node-name") pair becomes an enum member below and names the
 * vlib node that completed crypto ops are handed to. */
36 #define foreach_dpdk_crypto_input_next \
37 _(DROP, "error-drop") \
38 _(IP4_LOOKUP, "ip4-lookup") \
39 _(IP6_LOOKUP, "ip6-lookup") \
40 _(INTERFACE_OUTPUT, "interface-output") \
41 _(MIDCHAIN, "adj-midchain-tx") \
42 _(DECRYPT4_POST, "dpdk-esp4-decrypt-post") \
43 _(DECRYPT6_POST, "dpdk-esp6-decrypt-post")
/* Expands to DPDK_CRYPTO_INPUT_NEXT_<f> members; the enum's opening
 * "typedef enum {" (and the matching #undef _) are not visible in this
 * sampled view. */
47 #define _(f,s) DPDK_CRYPTO_INPUT_NEXT_##f,
48 foreach_dpdk_crypto_input_next
50 DPDK_CRYPTO_INPUT_N_NEXT,
51 } dpdk_crypto_input_next_t;
53 #define MAX_QP_PER_LCORE 16
/* Fragments of several typedefs follow; their "typedef struct {" openings
 * are elided in this view, so field-to-struct attribution is tentative.
 * The icv scratch buffer presumably belongs to the per-op private data
 * struct (dpdk_op_priv_t), not to crypto_worker_main_t -- confirm. */
67 CLIB_ALIGN_MARK (mark0, 16);
70 u8 icv[32]; /* XXX last 16B in next cache line */
/* Per-worker state: staging array of crypto op pointers, plus per-
 * algorithm indices of the crypto resource (device/queue-pair) this
 * worker uses (consumed by get_resource() below). */
76 struct rte_crypto_op **ops;
77 u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
78 u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
79 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
80 } crypto_worker_main_t;
/* Fragment of an algorithm-descriptor typedef: the DPDK symmetric xform
 * type (cipher vs. auth vs. AEAD) for one supported algorithm. Struct
 * opening/closing lines are elided in this view. */
84 CLIB_ALIGN_MARK (pad, 8); /* align up to 8 bytes for 32bit builds */
86 enum rte_crypto_sym_xform_type type;
/* Capability flags, indexed by VPP ipsec crypto/integ algorithm enum --
 * presumably per crypto device/driver; confirm enclosing struct. */
100 u8 cipher_support[IPSEC_CRYPTO_N_ALG];
101 u8 auth_support[IPSEC_INTEG_N_ALG];
/* Per-resource (device/queue-pair) staging arrays, one frame's worth:
 * crypto ops awaiting enqueue and the matching vlib buffer indices.
 * Drained by crypto_enqueue_ops() below, which frees the unsent tail of
 * both arrays on a short enqueue. Struct opening is elided here. */
127 struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
128 u32 bi[VLIB_FRAME_SIZE];
129 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Entry of the deferred session-teardown list (crypto_data_t::
 * session_disposal): the session pointer; a timestamp field is
 * presumably elided from this view -- confirm. */
135 struct rte_cryptodev_sym_session *session;
136 } crypto_session_disposal_t;
/* Per-SA session cache entry, indexed by SA index; the dev_mask member
 * referenced by add/get_session_by_drv_and_sa_idx() below (one bit per
 * driver id) is elided from this view. */
140 struct rte_cryptodev_sym_session *session;
142 CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */
143 } crypto_session_by_drv_t;
/* Per-NUMA-node crypto state (struct opening/closing elided in this
 * view; closed as crypto_data_t -- confirm). */
/* Mempools: crypto ops, session headers, and per-driver session
 * private data. */
147 struct rte_mempool *crypto_op;
148 struct rte_mempool *session_h;
149 struct rte_mempool **session_drv;
/* Vector of sessions queued for deferred teardown. */
150 crypto_session_disposal_t *session_disposal;
151 uword *session_by_sa_index;
/* Failure counters: crypto_op_get_failed is bumped by
 * crypto_alloc_ops() on mempool exhaustion. */
152 u64 crypto_op_get_failed;
153 u64 session_h_failed;
154 u64 *session_drv_failed;
/* Session cache, a vector indexed by SA index (see
 * add_session_by_drv_and_sa_idx() below). */
155 crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
156 clib_spinlock_t lockp;
157 /* Required for vec_validate_aligned */
158 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Global crypto plugin state (struct opening elided; a per-NUMA "data"
 * vector used by the inlines below is also not visible here). */
163 crypto_worker_main_t *workers_main;
165 crypto_resource_t *resource;
166 crypto_alg_t *cipher_algs;
167 crypto_alg_t *auth_algs;
/* Grace period before disposed sessions are actually freed. */
170 u64 session_timeout; /* nsec */
172 } dpdk_crypto_main_t;
/* Single global instance, defined in the plugin's .c file. */
174 extern dpdk_crypto_main_t dpdk_crypto_main;
/* Monotonic pad bytes 1..15 -- matches the self-describing ESP trailer
 * padding pattern (RFC 4303); trailing 0 is presumably a placeholder
 * slot, not copied as pad -- confirm at the usage site. */
176 static const u8 pad_data[] =
177 { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0 };
/* Assign crypto resources (device/queue pairs) to workers. */
179 void crypto_auto_placement (void);
/* Create and cache a cryptodev symmetric session for SA sa_idx on
 * resource res; returns a clib error on failure. */
181 clib_error_t *create_sym_session (struct rte_cryptodev_sym_session **session,
182 u32 sa_idx, crypto_resource_t * res,
183 crypto_worker_main_t * cwm, u8 is_outbound);
/* Total per-op allocation size: rte_crypto_op + rte_crypto_sym_op,
 * rounded up to "align" bytes, plus the VPP private area
 * (dpdk_op_priv_t) appended after it. Function name, braces and the
 * op_size/align declarations are elided in this sampled view. */
185 static_always_inline u32
190 sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
192 return ((op_size + align - 1) & ~(align - 1)) + sizeof (dpdk_op_priv_t);
/* Byte offset of the dpdk_op_priv_t private area within an op
 * allocation: size of rte_crypto_op + rte_crypto_sym_op rounded up to a
 * 16-byte boundary (must stay in sync with crypto_op_len above).
 * Braces and the trailing "return offset;" are elided in this view. */
195 static_always_inline u32
196 crypto_op_get_priv_offset (void)
198 const u32 align = 16;
201 offset = sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op);
202 offset = (offset + align - 1) & ~(align - 1);
/* Return a pointer to the VPP per-op private area that lives at a fixed
 * offset past the DPDK op header (see crypto_op_get_priv_offset). */
207 static_always_inline dpdk_op_priv_t *
208 crypto_op_get_priv (struct rte_crypto_op * op)
210 return (dpdk_op_priv_t *) (((u8 *) op) + crypto_op_get_priv_offset ());
/* Cache "session" for SA sa_idx: grow the per-NUMA cache vector to
 * cover sa_idx, mark driver drv_id as having a session for this SA, and
 * store the session pointer. Function braces are elided in this view. */
214 static_always_inline void
215 add_session_by_drv_and_sa_idx (struct rte_cryptodev_sym_session *session,
216 crypto_data_t * data, u32 drv_id, u32 sa_idx)
218 crypto_session_by_drv_t *sbd;
219 vec_validate_aligned (data->session_by_drv_id_and_sa_index, sa_idx,
220 CLIB_CACHE_LINE_BYTES);
221 sbd = vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
/* NOTE(review): 1L is only 32 bits on 32-bit builds (which this file
 * otherwise supports, per the alignment comments above); drv_id >= 31
 * would shift out of range -- consider 1ULL. Confirm drv_id bound. */
222 sbd->dev_mask |= 1L << drv_id;
223 sbd->session = session;
/* Look up the cached session for (drv_id, sa_idx); returns NULL when the
 * cache vector is too short or the driver's dev_mask bit is clear.
 * Braces, the early "return NULL;" and the sess_by_sa assignment's
 * left-hand side are elided in this sampled view. */
226 static_always_inline struct rte_cryptodev_sym_session *
227 get_session_by_drv_and_sa_idx (crypto_data_t * data, u32 drv_id, u32 sa_idx)
229 crypto_session_by_drv_t *sess_by_sa;
230 if (_vec_len (data->session_by_drv_id_and_sa_index) <= sa_idx)
233 vec_elt_at_index (data->session_by_drv_id_and_sa_index, sa_idx);
/* Same 1L-shift width caveat as add_session_by_drv_and_sa_idx. */
234 return (sess_by_sa->dev_mask & (1L << drv_id)) ? sess_by_sa->session : NULL;
/* Fetch (or lazily create) the cryptodev session for SA sa_idx on the
 * NUMA node / driver of resource "res". On cache miss, defers to
 * create_sym_session(); the success path that stores "sess" into
 * *session and returns NULL is elided from this view, as are the
 * function braces and the sa_idx parameter line. */
237 static_always_inline clib_error_t *
238 crypto_get_session (struct rte_cryptodev_sym_session ** session,
240 crypto_resource_t * res,
241 crypto_worker_main_t * cwm, u8 is_outbound)
243 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
245 struct rte_cryptodev_sym_session *sess;
247 data = vec_elt_at_index (dcm->data, res->numa);
248 sess = get_session_by_drv_and_sa_idx (data, res->drv_id, sa_idx);
250 if (PREDICT_FALSE (!sess))
251 return create_sym_session (session, sa_idx, res, cwm, is_outbound);
/* Pick the worker's crypto resource index for SA "sa": look up the
 * per-algorithm resource indices and validate the cipher/auth
 * combination. The return statements (including the sentinel returned
 * for invalid combinations) and the branch bodies are elided from this
 * sampled view. */
258 static_always_inline u16
259 get_resource (crypto_worker_main_t * cwm, ipsec_sa_t * sa)
261 u16 cipher_res = cwm->cipher_resource_idx[sa->crypto_alg];
262 u16 auth_res = cwm->auth_resource_idx[sa->integ_alg];
265 /* Not allowed to setup SA with no-aead-cipher/NULL or NULL/NULL */
/* AEAD (GCM) needs no separate auth resource, hence the special case. */
267 is_aead = ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) ||
268 (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) ||
269 (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256));
271 if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE)
/* Cipher and auth must land on the same device/queue pair to be
 * usable as a single chained session. */
274 if (cipher_res == auth_res)
/* Bulk-allocate n crypto ops from the per-NUMA op mempool. On failure
 * (non-zero ret from rte_mempool_get_bulk, all-or-nothing) bump the
 * failure counter; "!!ret" collapses the error code to 0/1. The
 * trailing "return ret;" and braces are elided from this view. */
283 static_always_inline i32
284 crypto_alloc_ops (u8 numa, struct rte_crypto_op ** ops, u32 n)
286 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
287 crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
290 ret = rte_mempool_get_bulk (data->crypto_op, (void **) ops, n);
293 data->crypto_op_get_failed += ! !ret;
/* Return n crypto ops to the per-NUMA op mempool (inverse of
 * crypto_alloc_ops). An elided guard presumably early-returns when
 * n == 0 -- confirm; braces are also elided from this view. */
299 static_always_inline void
300 crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
302 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
303 crypto_data_t *data = vec_elt_at_index (dcm->data, numa);
308 rte_mempool_put_bulk (data->crypto_op, (void **) ops, n);
/* Drain each of this worker's resources: burst-enqueue the staged ops
 * to the cryptodev queue pair, capped so per-direction in-flight ops
 * never exceed half the queue depth. On a short enqueue, free the
 * unsent ops and their vlib buffers and count them against "error" on
 * node "node_index". Loop braces, several declarations (res_idx, n_ops,
 * enq) and the enqueue-burst argument tail are elided from this view. */
311 static_always_inline void
312 crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
313 u32 node_index, u32 error, u8 numa, u8 encrypt)
315 dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
316 crypto_resource_t *res;
320 vec_foreach (res_idx, cwm->resource_idx)
323 res = vec_elt_at_index (dcm->resource, res_idx[0]);
/* Budget = half the queue descriptors minus what is already queued
 * for this direction (encrypt is used as a 0/1 index). */
328 n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt];
329 n_ops = res->n_ops < n_ops ? res->n_ops : n_ops;
330 enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id,
332 ASSERT (n_ops == enq);
333 res->inflights[encrypt] += enq;
335 if (PREDICT_FALSE (enq < res->n_ops))
/* Unsent tail: ops go back to the mempool, buffers back to vlib. */
337 crypto_free_ops (numa, &res->ops[enq], res->n_ops - enq);
338 vlib_buffer_free (vm, &res->bi[enq], res->n_ops - enq);
340 vlib_node_increment_counter (vm, node_index, error,
/* Initialize a GCM initial counter block from the SA salt and the ESP
 * sequence number (low/high halves). Body is entirely elided from this
 * sampled view -- presumably fills icb->salt/iv/cnt; confirm against
 * the full source. */
348 static_always_inline void
349 crypto_set_icb (dpdk_gcm_cnt_blk * icb, u32 salt, u32 seq, u32 seq_hi)
/* Populate the symmetric-op descriptor that DPDK lays out immediately
 * after the rte_crypto_op header ("op + 1"). Two elided branches select
 * the layout: AEAD ops carry cipher range + AAD + digest; non-AEAD ops
 * carry separate cipher and auth ranges + digest. The if/else braces,
 * the mbuf attachment (mb0) and a trailing ";" are not visible in this
 * sampled view. */
356 static_always_inline void
357 crypto_op_setup (u8 is_aead, struct rte_mbuf *mb0,
358 struct rte_crypto_op *op, void *session,
359 u32 cipher_off, u32 cipher_len,
360 u32 auth_off, u32 auth_len,
361 u8 * aad, u8 * digest, u64 digest_paddr)
363 struct rte_crypto_sym_op *sym_op;
365 sym_op = (struct rte_crypto_sym_op *) (op + 1);
368 sym_op->session = session;
/* AEAD branch (elided "if (is_aead)" presumably guards this). */
372 sym_op->aead.data.offset = cipher_off;
373 sym_op->aead.data.length = cipher_len;
375 sym_op->aead.aad.data = aad;
/* AAD lives inside the op allocation, so its physical address is the
 * op's phys_addr plus the same in-allocation offset as the virtual
 * pointer. */
376 sym_op->aead.aad.phys_addr =
377 op->phys_addr + (uintptr_t) aad - (uintptr_t) op;
379 sym_op->aead.digest.data = digest;
380 sym_op->aead.digest.phys_addr = digest_paddr;
/* Non-AEAD branch: independent cipher and auth regions. */
384 sym_op->cipher.data.offset = cipher_off;
385 sym_op->cipher.data.length = cipher_len;
387 sym_op->auth.data.offset = auth_off;
388 sym_op->auth.data.length = auth_len;
390 sym_op->auth.digest.data = digest;
391 sym_op->auth.digest.phys_addr = digest_paddr;
395 #endif /* __DPDK_IPSEC_H__ */
398 * fd.io coding-style-patch-verification: ON
401 * eval: (c-set-style "gnu")