From 865c50fdf46eaaf9efd6e0a897a836201b0ec5a1 Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang@intel.com>
Date: Mon, 27 Jul 2020 14:14:24 +0100
Subject: [PATCH] cryptodev: add symmetric crypto data-path APIs

This patch adds data-path APIs for enqueue and dequeue operations to
cryptodev. The APIs support flexible user-defined enqueue and dequeue
behaviors and operation modes. The QAT PMD is also updated to support
these APIs.

Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
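---

Notes: a minimal sketch of the intended call flow with the new APIs
(illustrative only; device, queue pair and session setup as well as
error handling are omitted, and all variable names are hypothetical):

	/* query the service context size and configure the service */
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_dp_service_ctx *ctx = rte_zmalloc(NULL,
		rte_cryptodev_get_dp_service_ctx_data_size(dev_id), 0);

	rte_cryptodev_dp_configure_service(dev_id, qp_id,
		RTE_CRYPTO_DP_SYM_AEAD, RTE_CRYPTO_OP_WITH_SESSION,
		sess_ctx, ctx, 0);

	/* enqueue one job, then kick the queue to start processing */
	rte_cryptodev_dp_submit_single_job(ctx, data, n_data_vecs, ofs,
		&iv, &digest, &aad, opaque);
	rte_cryptodev_dp_submit_done(ctx, 1);

	/* dequeue the job and advance the queue head */
	ret = rte_cryptodev_dp_sym_dequeue_single_job(ctx, &out_opaque);
	rte_cryptodev_dp_dequeue_done(ctx, 1);
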
 drivers/common/qat/Makefile              |   1 +
 drivers/crypto/qat/meson.build           |   1 +
 drivers/crypto/qat/qat_sym.h             |  13 +
 drivers/crypto/qat/qat_sym_hw_dp.c       | 926 ++++++++++++++++++
 drivers/crypto/qat/qat_sym_pmd.c         |   9 +-
 lib/librte_cryptodev/rte_crypto.h        |   9 +
 lib/librte_cryptodev/rte_crypto_sym.h    |  44 +-
 lib/librte_cryptodev/rte_cryptodev.c     |  45 +
 lib/librte_cryptodev/rte_cryptodev.h     | 336 ++++++-
 lib/librte_cryptodev/rte_cryptodev_pmd.h |  36 +-
 .../rte_cryptodev_version.map            |   8 +
 11 files changed, 1417 insertions(+), 11 deletions(-)
 create mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c

diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
index 85d420709..1b71bbbab 100644
--- a/drivers/common/qat/Makefile
+++ b/drivers/common/qat/Makefile
@@ -42,6 +42,7 @@ endif
 SRCS-y += qat_sym_session.c
 SRCS-y += qat_sym_pmd.c
+SRCS-y += qat_sym_hw_dp.c
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index a225f374a..bc90ec44c 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -15,6 +15,7 @@ if dep.found()
 qat_sources += files('qat_sym_pmd.c',
+ 'qat_sym_hw_dp.c',
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index 1a9748849..2d6316130 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -264,6 +264,18 @@ qat_sym_process_response(void **op, uint8_t *resp)
+int
+qat_sym_dp_configure_service_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+ struct rte_crypto_dp_service_ctx *service_ctx,
+ enum rte_crypto_dp_service service_type,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx,
+ uint8_t is_update);
+
+int
+qat_sym_get_service_ctx_size(struct rte_cryptodev *dev);
@@ -276,5 +288,6 @@ static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
 #endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
new file mode 100644
index 000000000..ce75212ba
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_hw_dp.c
@@ -0,0 +1,926 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Intel Corporation
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+#include "adf_transport_access_macros.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#include "qat_sym_pmd.h"
+#include "qat_sym_session.h"
+
+struct qat_sym_dp_service_ctx {
+ struct qat_sym_session *session;
+ uint32_t tail;
+ uint32_t head;
+};
+
+static __rte_always_inline int32_t
+qat_sym_dp_get_data(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs)
+{
+ struct qat_queue *tx_queue;
+ struct qat_sym_op_cookie *cookie;
+ struct qat_sgl *list;
+ uint32_t i;
+ uint32_t total_len = 0;
+
+ if (likely(n_data_vecs == 1)) {
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ data[0].iova;
+ req->comn_mid.src_length = req->comn_mid.dst_length =
+ data[0].len;
+ return data[0].len;
+ }
+
+ if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
+ return -1;
+
+ tx_queue = &qp->tx_q;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
+ list = (struct qat_sgl *)&cookie->qat_sgl_src;
+
+ for (i = 0; i < n_data_vecs; i++) {
+ list->buffers[i].len = data[i].len;
+ list->buffers[i].resrvd = 0;
+ list->buffers[i].addr = data[i].iova;
+ if (data[i].len > UINT32_MAX - total_len) {
+ QAT_DP_LOG(ERR, "Message too long");
+ return -1;
+ }
+ total_len += data[i].len;
+ }
+
+ list->num_bufs = i;
+ req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ req->comn_mid.src_length = req->comn_mid.dst_length = 0;
+
+ return total_len;
+}
+
+static __rte_always_inline void
+set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_data *iv, uint32_t iv_len,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
+ rte_memcpy(cipher_param->u.cipher_IV_array, iv->base, iv_len);
+ else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr = iv->iova;
+ }
+}
+
+#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
+ (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
+
+static __rte_always_inline void
+qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
+{
+ uint32_t i;
+
+ for (i = 0; i < n; i++)
+ sta[i] = status;
+}
+
+static __rte_always_inline void
+submit_one_aead_job(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_data *iv_vec,
+ struct rte_crypto_data *digest_vec, struct rte_crypto_data *aad_vec,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param =
+ (void *)&req->serv_specif_rqpars;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (void *)((uint8_t *)&req->serv_specif_rqpars +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+ uint8_t *aad_data;
+ uint8_t aad_ccm_real_len;
+ uint8_t aad_len_field_sz;
+ uint32_t msg_len_be;
+ rte_iova_t aad_iova = 0;
+ uint8_t q;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ iv_vec->base, ctx->cipher_iv.length);
+ aad_iova = aad_vec->iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ aad_data = aad_vec->base;
+ aad_iova = aad_vec->iova;
+ aad_ccm_real_len = 0;
+ aad_len_field_sz = 0;
+ msg_len_be = rte_bswap32((uint32_t)data_len -
+ ofs.ofs.cipher.head);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ aad_data = iv_vec->base;
+ aad_iova = iv_vec->iova;
+ }
+
+ q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz, ctx->digest_length, q);
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be +
+ (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
+ rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len +
+ aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len +
+ aad_len_field_sz;
+ memset(&aad_data[pad_idx], 0, pad_len);
+ }
+ }
+
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
+ + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)iv_vec->base +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ rte_memcpy((uint8_t *)aad_vec->base +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)iv_vec->base +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ ctx->cipher_iv.length);
+ break;
+ default:
+ break;
+ }
+
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = data_len - ofs.ofs.cipher.head
+ - ofs.ofs.cipher.tail;
+ auth_param->auth_off = ofs.ofs.cipher.head;
+ auth_param->auth_len = data_len - ofs.ofs.cipher.head
+ - ofs.ofs.cipher.tail;
+ auth_param->auth_res_addr = digest_vec->iova;
+ auth_param->u1.aad_adr = aad_iova;
+
+ if (ctx->is_single_pass) {
+ cipher_param->spc_aad_addr = aad_iova;
+ cipher_param->spc_auth_res_addr = digest_vec->iova;
+ }
+}
+
+static __rte_always_inline int
+qat_sym_dp_submit_single_aead(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
+ struct rte_crypto_data *digest_vec, struct rte_crypto_data *aad_vec,
+ void *opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = service_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
+ if (unlikely(data_len < 0))
+ return -1;
+ req->comn_mid.opaque_data = (uint64_t)opaque;
+
+ submit_one_aead_job(ctx, req, iv_vec, digest_vec, aad_vec, ofs,
+ (uint32_t)data_len);
+
+ service_ctx->tail = tail;
+
+ return 0;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_submit_aead_jobs(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void **opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ uint32_t i;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
+ qp->max_inflights)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ tail = service_ctx->tail;
+
+ for (i = 0; i < vec->num; i++) {
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
+ vec->sgl[i].num) - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ if (unlikely(data_len < 0))
+ break;
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+ submit_one_aead_job(ctx, req, vec->iv_vec + i,
+ vec->digest_vec + i, vec->aad_vec + i, ofs,
+ (uint32_t)data_len);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ service_ctx->tail = tail;
+ return i;
+}
+
+static __rte_always_inline void
+submit_one_cipher_job(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_data *iv_vec,
+ union rte_crypto_sym_ofs ofs, uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+
+ /* cipher IV */
+ set_cipher_iv(cipher_param, iv_vec, ctx->cipher_iv.length, req);
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = data_len - ofs.ofs.cipher.head
+ - ofs.ofs.cipher.tail;
+}
+
+static __rte_always_inline int
+qat_sym_dp_submit_single_cipher(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
+ __rte_unused struct rte_crypto_data *digest_vec,
+ __rte_unused struct rte_crypto_data *aad_vec,
+ void *opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = service_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
+ if (unlikely(data_len < 0))
+ return -1;
+ req->comn_mid.opaque_data = (uint64_t)opaque;
+
+ submit_one_cipher_job(ctx, req, iv_vec, ofs, (uint32_t)data_len);
+
+ service_ctx->tail = tail;
+
+ return 0;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_submit_cipher_jobs(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void **opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ uint32_t i;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
+ qp->max_inflights)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ tail = service_ctx->tail;
+
+ for (i = 0; i < vec->num; i++) {
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
+ vec->sgl[i].num) - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ if (unlikely(data_len < 0))
+ break;
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+ submit_one_cipher_job(ctx, req, vec->iv_vec + i, ofs,
+ (uint32_t)data_len);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ service_ctx->tail = tail;
+ return i;
+}
+
+static __rte_always_inline void
+submit_one_auth_job(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_data *iv_vec,
+ struct rte_crypto_data *digest_vec, union rte_crypto_sym_ofs ofs,
+ uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ auth_param->auth_off = ofs.ofs.auth.head;
+ auth_param->auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+ auth_param->auth_res_addr = digest_vec->iova;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = iv_vec->iova;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ iv_vec->base, ctx->cipher_iv.length);
+ break;
+ default:
+ break;
+ }
+}
+
+static __rte_always_inline int
+qat_sym_dp_submit_single_auth(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
+ struct rte_crypto_data *digest_vec,
+ __rte_unused struct rte_crypto_data *aad_vec,
+ void *opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = service_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
+ if (unlikely(data_len < 0))
+ return -1;
+ req->comn_mid.opaque_data = (uint64_t)opaque;
+
+ submit_one_auth_job(ctx, req, iv_vec, digest_vec, ofs,
+ (uint32_t)data_len);
+
+ service_ctx->tail = tail;
+
+ return 0;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_submit_auth_jobs(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void **opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ uint32_t i;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
+ qp->max_inflights)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ tail = service_ctx->tail;
+
+ for (i = 0; i < vec->num; i++) {
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
+ vec->sgl[i].num) - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ if (unlikely(data_len < 0))
+ break;
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+ submit_one_auth_job(ctx, req, vec->iv_vec + i,
+ vec->digest_vec + i, ofs, (uint32_t)data_len);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ service_ctx->tail = tail;
+ return i;
+}
+
+static __rte_always_inline void
+submit_one_chain_job(struct qat_sym_session *ctx,
+ struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_vec *data,
+ uint16_t n_data_vecs, struct rte_crypto_data *iv_vec,
+ struct rte_crypto_data *digest_vec, union rte_crypto_sym_ofs ofs,
+ uint32_t data_len)
+{
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ rte_iova_t auth_iova_end;
+ int32_t cipher_len, auth_len;
+
+ cipher_param = (void *)&req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param +
+ ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
+
+ cipher_len = data_len - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
+
+ assert(cipher_len > 0 && auth_len > 0);
+
+ cipher_param->cipher_offset = ofs.ofs.cipher.head;
+ cipher_param->cipher_length = cipher_len;
+ set_cipher_iv(cipher_param, iv_vec, ctx->cipher_iv.length, req);
+
+ auth_param->auth_off = ofs.ofs.auth.head;
+ auth_param->auth_len = auth_len;
+ auth_param->auth_res_addr = digest_vec->iova;
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ auth_param->u1.aad_adr = iv_vec->iova;
+
+ if (unlikely(n_data_vecs > 1)) {
+ int auth_end_get = 0, i = n_data_vecs - 1;
+ struct rte_crypto_vec *cvec = &data[i];
+ uint32_t len;
+
+ len = data_len - ofs.ofs.auth.tail;
+
+ while (i >= 0 && len > 0) {
+ if (cvec->len >= len) {
+ auth_iova_end = cvec->iova +
+ (cvec->len - len);
+ len = 0;
+ auth_end_get = 1;
+ break;
+ }
+ len -= cvec->len;
+ i--;
+ cvec--;
+ }
+
+ assert(auth_end_get != 0);
+ } else
+ auth_iova_end = digest_vec->iova +
+ ctx->digest_length;
+
+ /* Then check if digest-encrypted conditions are met */
+ if ((auth_param->auth_off + auth_param->auth_len <
+ cipher_param->cipher_offset +
+ cipher_param->cipher_length) &&
+ (digest_vec->iova == auth_iova_end)) {
+ /* Handle partial digest encryption */
+ if (cipher_param->cipher_offset +
+ cipher_param->cipher_length <
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length)
+ req->comn_mid.dst_length =
+ req->comn_mid.src_length =
+ auth_param->auth_off +
+ auth_param->auth_len +
+ ctx->digest_length;
+ struct icp_qat_fw_comn_req_hdr *header =
+ &req->comn_hdr;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ break;
+ default:
+ break;
+ }
+}
+
+static __rte_always_inline int
+qat_sym_dp_submit_single_chain(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
+ struct rte_crypto_data *digest_vec,
+ __rte_unused struct rte_crypto_data *aad_vec,
+ void *opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+ uint32_t tail = service_ctx->tail;
+
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+ rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
+ data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
+ if (unlikely(data_len < 0))
+ return -1;
+ req->comn_mid.opaque_data = (uint64_t)opaque;
+
+ submit_one_chain_job(ctx, req, data, n_data_vecs, iv_vec, digest_vec,
+ ofs, (uint32_t)data_len);
+
+ service_ctx->tail = tail;
+
+ return 0;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_submit_chain_jobs(void *qp_data, uint8_t *service_data,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void **opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_session *ctx = service_ctx->session;
+ uint32_t i;
+ uint32_t tail;
+ struct icp_qat_fw_la_bulk_req *req;
+ int32_t data_len;
+
+ if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
+ qp->max_inflights)) {
+ qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
+ return 0;
+ }
+
+ tail = service_ctx->tail;
+
+ for (i = 0; i < vec->num; i++) {
+ req = (struct icp_qat_fw_la_bulk_req *)(
+ (uint8_t *)tx_queue->base_addr + tail);
+ rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
+
+ data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
+ vec->sgl[i].num) - ofs.ofs.cipher.head -
+ ofs.ofs.cipher.tail;
+ if (unlikely(data_len < 0))
+ break;
+ req->comn_mid.opaque_data = (uint64_t)opaque[i];
+ submit_one_chain_job(ctx, req, vec->sgl[i].vec, vec->sgl[i].num,
+ vec->iv_vec + i, vec->digest_vec + i, ofs,
+ (uint32_t)data_len);
+ tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
+ }
+
+ if (unlikely(i < vec->num))
+ qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
+
+ service_ctx->tail = tail;
+ return i;
+}
+
+static __rte_always_inline uint32_t
+qat_sym_dp_dequeue(void *qp_data, uint8_t *service_data,
+ rte_cryptodev_get_dequeue_count_t get_dequeue_count,
+ rte_cryptodev_post_dequeue_t post_dequeue,
+ void **out_opaque, uint8_t is_opaque_array,
+ uint32_t *n_success_jobs)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct icp_qat_fw_comn_resp *resp;
+ void *resp_opaque;
+ uint32_t i, n, inflight;
+ uint32_t head;
+ uint8_t status;
+
+ *n_success_jobs = 0;
+ head = service_ctx->head;
+
+ inflight = qp->enqueued - qp->dequeued;
+ if (unlikely(inflight == 0))
+ return 0;
+
+ resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+ head);
+ /* no operation ready */
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ return 0;
+
+ resp_opaque = (void *)(uintptr_t)resp->opaque_data;
+ /* get the dequeue count */
+ n = get_dequeue_count(resp_opaque);
+ if (unlikely(n == 0))
+ return 0;
+
+ out_opaque[0] = resp_opaque;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ post_dequeue(resp_opaque, 0, status);
+ *n_success_jobs += status;
+
+ head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
+
+ /* we already finished dequeue when n == 1 */
+ if (unlikely(n == 1)) {
+ i = 1;
+ goto end_deq;
+ }
+
+ if (is_opaque_array) {
+ for (i = 1; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ if (unlikely(*(uint32_t *)resp ==
+ ADF_RING_EMPTY_SIG))
+ goto end_deq;
+ out_opaque[i] = (void *)(uintptr_t)
+ resp->opaque_data;
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ *n_success_jobs += status;
+ post_dequeue(out_opaque[i], i, status);
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ }
+
+ goto end_deq;
+ }
+
+ /* opaque is not array */
+ for (i = 1; i < n; i++) {
+ resp = (struct icp_qat_fw_comn_resp *)(
+ (uint8_t *)rx_queue->base_addr + head);
+ status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ goto end_deq;
+ head = (head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+ post_dequeue(resp_opaque, i, status);
+ *n_success_jobs += status;
+ }
+
+end_deq:
+ service_ctx->head = head;
+ return i;
+}
+
+static __rte_always_inline int
+qat_sym_dp_dequeue_single_job(void *qp_data, uint8_t *service_data,
+ void **out_opaque)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+ struct qat_queue *rx_queue = &qp->rx_q;
+
+ register struct icp_qat_fw_comn_resp *resp;
+
+ resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
+ service_ctx->head);
+
+ if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
+ return -1;
+
+ *out_opaque = (void *)(uintptr_t)resp->opaque_data;
+
+ service_ctx->head = (service_ctx->head + rx_queue->msg_size) &
+ rx_queue->modulo_mask;
+
+ return QAT_SYM_DP_IS_RESP_SUCCESS(resp);
+}
+
+static __rte_always_inline void
+qat_sym_dp_kick_tail(void *qp_data, uint8_t *service_data, uint32_t n)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_queue *tx_queue = &qp->tx_q;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+
+ qp->enqueued += n;
+ qp->stats.enqueued_count += n;
+
+ assert(service_ctx->tail == ((tx_queue->tail + tx_queue->msg_size * n) &
+ tx_queue->modulo_mask));
+
+ tx_queue->tail = service_ctx->tail;
+
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
+ tx_queue->hw_bundle_number,
+ tx_queue->hw_queue_number, tx_queue->tail);
+ tx_queue->csr_tail = tx_queue->tail;
+}
+
+static __rte_always_inline void
+qat_sym_dp_update_head(void *qp_data, uint8_t *service_data, uint32_t n)
+{
+ struct qat_qp *qp = qp_data;
+ struct qat_queue *rx_queue = &qp->rx_q;
+ struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
+
+ assert(service_ctx->head == ((rx_queue->head + rx_queue->msg_size * n) &
+ rx_queue->modulo_mask));
+
+ rx_queue->head = service_ctx->head;
+ rx_queue->nb_processed_responses += n;
+
+ qp->stats.dequeued_count += n;
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = rx_queue->csr_head;
+ new_head = rx_queue->head;
+ max_head = qp->nb_descriptors * rx_queue->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
+ max_head - old_head);
+ memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
+ new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
+ old_head);
+ }
+ rx_queue->nb_processed_responses = 0;
+ rx_queue->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
+ rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
+ new_head);
+ }
+}
+
+int
+qat_sym_dp_configure_service_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
+ struct rte_crypto_dp_service_ctx *service_ctx,
+ enum rte_crypto_dp_service service_type,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx,
+ uint8_t is_update)
+{
+ struct qat_qp *qp;
+ struct qat_sym_session *ctx;
+ struct qat_sym_dp_service_ctx *dp_ctx;
+
+ if (service_ctx == NULL || session_ctx.crypto_sess == NULL ||
+ sess_type != RTE_CRYPTO_OP_WITH_SESSION)
+ return -1;
+
+ qp = dev->data->queue_pairs[qp_id];
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ session_ctx.crypto_sess, qat_sym_driver_id);
+ dp_ctx = (struct qat_sym_dp_service_ctx *)
+ service_ctx->drv_service_data;
+
+ if (!is_update) {
+ memset(service_ctx, 0, sizeof(*service_ctx) +
+ sizeof(struct qat_sym_dp_service_ctx));
+ service_ctx->qp_data = dev->data->queue_pairs[qp_id];
+ dp_ctx->tail = qp->tx_q.tail;
+ dp_ctx->head = qp->rx_q.head;
+ }
+
+ dp_ctx->session = ctx;
+
+ service_ctx->submit_done = qat_sym_dp_kick_tail;
+ service_ctx->dequeue_opaque = qat_sym_dp_dequeue;
+ service_ctx->dequeue_single = qat_sym_dp_dequeue_single_job;
+ service_ctx->dequeue_done = qat_sym_dp_update_head;
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ if (service_type != RTE_CRYPTO_DP_SYM_AEAD)
+ return -1;
+ service_ctx->submit_vec = qat_sym_dp_submit_aead_jobs;
+ service_ctx->submit_single_job =
+ qat_sym_dp_submit_single_aead;
+ } else {
+ if (service_type != RTE_CRYPTO_DP_SYM_CHAIN)
+ return -1;
+ service_ctx->submit_vec = qat_sym_dp_submit_chain_jobs;
+ service_ctx->submit_single_job =
+ qat_sym_dp_submit_single_chain;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ if (service_type != RTE_CRYPTO_DP_SYM_AUTH_ONLY)
+ return -1;
+ service_ctx->submit_vec = qat_sym_dp_submit_auth_jobs;
+ service_ctx->submit_single_job = qat_sym_dp_submit_single_auth;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ if (service_type != RTE_CRYPTO_DP_SYM_CIPHER_ONLY)
+ return -1;
+ service_ctx->submit_vec = qat_sym_dp_submit_cipher_jobs;
+ service_ctx->submit_single_job =
+ qat_sym_dp_submit_single_cipher;
+ } else
+ return -1;
+
+ return 0;
+}
+
+int
+qat_sym_get_service_ctx_size(__rte_unused struct rte_cryptodev *dev)
+{
+ return sizeof(struct qat_sym_dp_service_ctx);
+}
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index 314742f53..bef08c3bc 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -258,7 +258,11 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
 /* Crypto related operations */
 .sym_session_get_size = qat_sym_session_get_private_size,
 .sym_session_configure = qat_sym_session_configure,
- .sym_session_clear = qat_sym_session_clear
+ .sym_session_clear = qat_sym_session_clear,
+
+ /* Data plane service related operations */
+ .get_drv_ctx_size = qat_sym_get_service_ctx_size,
+ .configure_service = qat_sym_dp_configure_service_ctx,
 };

 #ifdef RTE_LIBRTE_SECURITY
@@ -376,7 +380,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
- RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
+ RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
+ RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE;

 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
index fd5ef3a87..f009be9af 100644
--- a/lib/librte_cryptodev/rte_crypto.h
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -438,6 +438,15 @@ rte_crypto_op_attach_asym_session(struct rte_crypto_op *op,
+/** Crypto data-path service types */
+enum rte_crypto_dp_service {
+ RTE_CRYPTO_DP_SYM_CIPHER_ONLY = 0,
+ RTE_CRYPTO_DP_SYM_AUTH_ONLY,
+ RTE_CRYPTO_DP_SYM_CHAIN,
+ RTE_CRYPTO_DP_SYM_AEAD,
+ RTE_CRYPTO_DP_N_SERVICE
+};
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
index f29c98051..518e4111b 100644
--- a/lib/librte_cryptodev/rte_crypto_sym.h
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -50,6 +50,18 @@ struct rte_crypto_sgl {
+/**
+ * Crypto IO Data without length info.
+ * Supposed to be used to pass input/output data buffers with lengths
+ * defined when creating crypto session.
+ */
+struct rte_crypto_data {
+ /** virtual address of the data buffer */
+ void *base;
+ /** IOVA of the data buffer */
+ rte_iova_t iova;
+};
+
 * Synchronous operation descriptor.
 * Supposed to be used with CPU crypto API call.
@@ -57,12 +69,32 @@ struct rte_crypto_sgl {
 struct rte_crypto_sym_vec {
 /** array of SGL vectors */
 struct rte_crypto_sgl *sgl;
- /** array of pointers to IV */
- void **iv;
- /** array of pointers to AAD */
- void **aad;
- /** array of pointers to digest */
- void **digest;
+
+ union {
+ struct {
+ /* Supposed to be used with CPU crypto API call. */
+ /** array of pointers to IV */
+ void **iv;
+ /** array of pointers to AAD */
+ void **aad;
+ /** array of pointers to digest */
+ void **digest;
+ };
+
+ struct {
+ /* Supposed to be used with rte_cryptodev_dp_sym_submit_vec()
+ * call.
+ */
+ /** vector to IV */
+ struct rte_crypto_data *iv_vec;
+ /** vector to AAD */
+ struct rte_crypto_data *aad_vec;
+ /** vector to Digest */
+ struct rte_crypto_data *digest_vec;
+ };
+ };
+
 * array of statuses for each operation:
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 1dd795bcb..06c01cfaa 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -1914,6 +1914,51 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
 return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
 }
+
+int32_t
+rte_cryptodev_get_dp_service_ctx_data_size(uint8_t dev_id)
+{
+ struct rte_cryptodev *dev;
+ int32_t size = sizeof(struct rte_crypto_dp_service_ctx);
+ int32_t priv_size;
+
+ if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
+ return -1;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+
+ if (*dev->dev_ops->get_drv_ctx_size == NULL ||
+ !(dev->feature_flags & RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE)) {
+ return -1;
+ }
+
+ priv_size = (*dev->dev_ops->get_drv_ctx_size)(dev);
+ if (priv_size < 0)
+ return -1;
+
+ return RTE_ALIGN_CEIL((size + priv_size), 8);
+}
+
+int
+rte_cryptodev_dp_configure_service(uint8_t dev_id, uint16_t qp_id,
+ enum rte_crypto_dp_service service_type,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx,
+ struct rte_crypto_dp_service_ctx *ctx, uint8_t is_update)
+{
+ struct rte_cryptodev *dev;
+
+ if (rte_cryptodev_get_qp_status(dev_id, qp_id) != 1)
+ return -1;
+
+ dev = rte_cryptodev_pmd_get_dev(dev_id);
+ if (!(dev->feature_flags & RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE)
+ || dev->dev_ops->configure_service == NULL)
+ return -1;
+
+ return (*dev->dev_ops->configure_service)(dev, qp_id, ctx,
+ service_type, sess_type, session_ctx, is_update);
+}
+
 /** Initialise rte_crypto_op mempool element */
 static void
 rte_crypto_op_init(struct rte_mempool *mempool,
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 7b3ebc20f..6eb8ad9f9 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -466,7 +466,8 @@ rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
 /**< Support symmetric session-less operations */
 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
 /**< Support operations on data which is not byte aligned */
+#define RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE (1ULL << 24)
+/**< Support the data-path service APIs that work on raw data */

 * Get the name of a crypto device feature flag
@@ -1351,6 +1352,339 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
 struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
 struct rte_crypto_sym_vec *vec);
+
+/**
+ * Get the size of the data-path service context of a device, including
+ * the driver-specific private part.
+ *
+ * @param dev_id The device identifier.
+ *
+ * @return
+ * - If the device supports the data-path service, return the context size.
+ * - If the device does not support the data-path service, return -1.
+ */
+__rte_experimental
+int32_t
+rte_cryptodev_get_dp_service_ctx_data_size(uint8_t dev_id);
+
+/**
+ * Union of different crypto session types, including a session-less
+ * xform pointer.
+ */
+union rte_cryptodev_session_ctx {
+ struct rte_cryptodev_sym_session *crypto_sess;
+ struct rte_crypto_sym_xform *xform;
+ struct rte_security_session *sec_sess;
+};
+
+/**
+ * Submit a data vector into the device queue; the driver will not start
+ * processing until rte_cryptodev_dp_submit_done() is called.
+ *
+ * @param qp Driver specific queue pair data.
+ * @param service_data Driver specific service data.
+ * @param vec The array of job vectors.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param opaque The array of opaque data for dequeue.
+ * @return
+ * - The number of jobs successfully submitted.
+ */
+typedef uint32_t (*cryptodev_dp_sym_submit_vec_t)(
+ void *qp, uint8_t *service_data, struct rte_crypto_sym_vec *vec,
+ union rte_crypto_sym_ofs ofs, void **opaque);
+
+/**
+ * Submit a single job into the device queue; the driver will not start
+ * processing until rte_cryptodev_dp_submit_done() is called.
+ *
+ * @param qp Driver specific queue pair data.
+ * @param service_data Driver specific service data.
+ * @param data The buffer vector.
+ * @param n_data_vecs Number of buffer vectors.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param iv IV data.
+ * @param digest Digest data.
+ * @param aad AAD data.
+ * @param opaque The opaque data for dequeue.
+ * @return
+ * - On success return 0.
+ * - On failure return negative integer.
+ */
+typedef int (*cryptodev_dp_submit_single_job_t)(
+ void *qp_data, uint8_t *service_data, struct rte_crypto_vec *data,
+ uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_data *iv, struct rte_crypto_data *digest,
+ struct rte_crypto_data *aad, void *opaque);
+
+/**
+ * Inform the queue pair to start processing, or to finish dequeuing, all
+ * submitted/dequeued jobs.
+ *
+ * @param qp Driver specific queue pair data.
+ * @param service_data Driver specific service data.
+ * @param n The total number of submitted/dequeued jobs.
+ */
+typedef void (*cryptodev_dp_sym_operation_done_t)(void *qp,
+ uint8_t *service_data, uint32_t n);
+
+/**
+ * Function prototype of a user-provided callback to obtain the dequeue
+ * count. The callback may return a fixed number or a number parsed from
+ * the opaque data stored in the first processed job.
+ *
+ * @param opaque Dequeued opaque data.
+ * @return
+ * - The number of jobs to dequeue.
+ */
+typedef uint32_t (*rte_cryptodev_get_dequeue_count_t)(void *opaque);
+
+/**
+ * Function prototype of a user-provided callback to perform post-dequeue
+ * operations, such as filling status.
+ *
+ * @param opaque Dequeued opaque data. If *is_opaque_array*
+ * is set when dequeueing, this value is the
+ * opaque data stored in the specific processed
+ * job referenced by *index*, otherwise it is
+ * the opaque data stored in the first processed
+ * job in the burst.
+ * @param index Index number of the processed job.
+ * @param is_op_success Driver filled operation status.
+ */
+typedef void (*rte_cryptodev_post_dequeue_t)(void *opaque, uint32_t index,
+ uint8_t is_op_success);
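+
+/*
+ * Example (illustrative, not part of the API): a pair of user callbacks.
+ * The struct and names below are hypothetical; the first dequeued opaque
+ * data is assumed to point to a user context carrying the burst size and
+ * a status array.
+ *
+ *	static uint32_t
+ *	get_deq_count(void *opaque)
+ *	{
+ *		return ((struct user_burst *)opaque)->n;
+ *	}
+ *
+ *	static void
+ *	on_dequeue(void *opaque, uint32_t index, uint8_t is_op_success)
+ *	{
+ *		((struct user_burst *)opaque)->status[index] =
+ *			is_op_success ? 0 : -1;
+ *	}
+ */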
+
+/**
+ * Dequeue symmetric crypto processing of user provided data.
+ *
+ * @param qp Driver specific queue pair data.
+ * @param service_data Driver specific service data.
+ * @param get_dequeue_count User provided callback function to
+ * obtain dequeue count.
+ * @param post_dequeue User provided callback function to
+ * post-process a dequeued operation.
+ * @param out_opaque Opaque pointer array to be retrieved from
+ * device queue. If *is_opaque_array* is
+ * set there should be enough room to store
+ * all opaque data.
+ * @param is_opaque_array Set to 1 if the opaque data of every
+ * dequeued job should be written into the
+ * *out_opaque* array.
+ * @param n_success_jobs Driver written value to specify the
+ * total number of successful operations.
+ *
+ * @return
+ * - Returns the number of dequeued operations.
+ */
+typedef uint32_t (*cryptodev_dp_sym_dequeue_t)(void *qp, uint8_t *service_data,
+ rte_cryptodev_get_dequeue_count_t get_dequeue_count,
+ rte_cryptodev_post_dequeue_t post_dequeue,
+ void **out_opaque, uint8_t is_opaque_array,
+ uint32_t *n_success_jobs);
+
+/**
+ * Dequeue a single symmetric crypto processing of user provided data.
+ *
+ * @param qp Driver specific queue pair data.
+ * @param service_data Driver specific service data.
+ * @param out_opaque Opaque pointer to be retrieved from
+ * device queue. The driver shall support
+ * NULL as the input.
+ *
+ * @return
+ * - 1 if the job is dequeued and the operation is a success.
+ * - 0 if the job is dequeued but the operation failed.
+ * - -1 if no job is dequeued.
+ */
+typedef int (*cryptodev_dp_sym_dequeue_single_job_t)(
+ void *qp, uint8_t *service_data, void **out_opaque);
+
+/**
+ * Context data for the asynchronous crypto data-path service.
+ */
+struct rte_crypto_dp_service_ctx {
+ void *qp_data;
+
+ union {
+ /* Supposed to be used for symmetric crypto service */
+ struct {
+ cryptodev_dp_submit_single_job_t submit_single_job;
+ cryptodev_dp_sym_submit_vec_t submit_vec;
+ cryptodev_dp_sym_operation_done_t submit_done;
+ cryptodev_dp_sym_dequeue_t dequeue_opaque;
+ cryptodev_dp_sym_dequeue_single_job_t dequeue_single;
+ cryptodev_dp_sym_operation_done_t dequeue_done;
+ };
+ };
+
+ /* Driver specific service data */
+ uint8_t drv_service_data[];
+};
+
+/**
+ * Initialize one data-path service context; should be called before
+ * submitting job(s). When calling this function for the first time the
+ * user should unset the is_update parameter and the driver will fill the
+ * necessary operation data into the ctx buffer. The queue data stored in
+ * the ctx buffer does not take effect in the driver until
+ * rte_cryptodev_dp_submit_done() is called.
+ *
+ * @param dev_id The device identifier.
+ * @param qp_id The index of the queue pair from which to
+ * retrieve processed packets. The value must be
+ * in the range [0, nb_queue_pair - 1] previously
+ * supplied to rte_cryptodev_configure().
+ * @param service_type Type of the service requested.
+ * @param sess_type Session type.
+ * @param session_ctx Session context data.
+ * @param ctx The data-path service context data.
+ * @param is_update Set to 1 if ctx is pre-initialized and only
+ * needs an update to a different service type or
+ * session, with the rest of the driver data
+ * remaining the same.
+ * @return
+ * - On success return 0.
+ * - On failure return negative integer.
+ */
+__rte_experimental
+int
+rte_cryptodev_dp_configure_service(uint8_t dev_id, uint16_t qp_id,
+ enum rte_crypto_dp_service service_type,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx,
+ struct rte_crypto_dp_service_ctx *ctx, uint8_t is_update);
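+
+/*
+ * Illustrative usage (hypothetical variable names; allocation and error
+ * handling omitted). A minimal sketch assuming a pre-created symmetric
+ * session which is later switched to another session on the same queue
+ * pair by setting is_update:
+ *
+ *	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
+ *	struct rte_crypto_dp_service_ctx *ctx = rte_zmalloc(NULL,
+ *		rte_cryptodev_get_dp_service_ctx_data_size(dev_id), 0);
+ *
+ *	rte_cryptodev_dp_configure_service(dev_id, qp_id,
+ *		RTE_CRYPTO_DP_SYM_CIPHER_ONLY, RTE_CRYPTO_OP_WITH_SESSION,
+ *		sess_ctx, ctx, 0);
+ *
+ *	... submit and dequeue jobs with ctx ...
+ *
+ *	sess_ctx.crypto_sess = another_sess;
+ *	rte_cryptodev_dp_configure_service(dev_id, qp_id,
+ *		RTE_CRYPTO_DP_SYM_CIPHER_ONLY, RTE_CRYPTO_OP_WITH_SESSION,
+ *		sess_ctx, ctx, 1);
+ */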
+
+/**
+ * Submit a single job into the device queue; the driver will not start
+ * processing until rte_cryptodev_dp_submit_done() is called.
+ *
+ * @param ctx The initialized data-path service context data.
+ * @param data The buffer vector.
+ * @param n_data_vecs Number of buffer vectors.
+ * @param ofs Start and stop offsets for auth and cipher
+ * operations.
+ * @param iv IV data.
+ * @param digest Digest data.
+ * @param aad AAD data.
+ * @param opaque The opaque data for dequeue.
+ * @return
+ * - On success return 0.
+ * - On failure return negative integer.
+ */
+static __rte_always_inline int
+rte_cryptodev_dp_submit_single_job(struct rte_crypto_dp_service_ctx *ctx,
+ struct rte_crypto_vec *data, uint16_t n_data_vecs,
+ union rte_crypto_sym_ofs ofs,
+ struct rte_crypto_data *iv, struct rte_crypto_data *digest,
+ struct rte_crypto_data *aad, void *opaque)
+{
+ return (*ctx->submit_single_job)(ctx->qp_data, ctx->drv_service_data,
+ data, n_data_vecs, ofs, iv, digest, aad, opaque);
+}
+
+/**
+ * Submit a data vector into the device queue; the driver will not start
+ * processing until rte_cryptodev_dp_submit_done() is called.
+ *
+ * @param ctx The initialized data-path service context data.
+ * @param vec The array of job vectors.
+ * @param ofs Start and stop offsets for auth and cipher operations.
+ * @param opaque The array of opaque data for dequeue.
+ * @return
+ * - The number of jobs successfully submitted.
+ */
+static __rte_always_inline uint32_t
+rte_cryptodev_dp_sym_submit_vec(struct rte_crypto_dp_service_ctx *ctx,
+ struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
+ void **opaque)
+{
+ return (*ctx->submit_vec)(ctx->qp_data, ctx->drv_service_data, vec,
+ ofs, opaque);
+}
+
+/**
+ * Kick the queue pair to start processing all jobs submitted since the
+ * last rte_cryptodev_dp_submit_done() call.
+ *
+ * @param ctx The initialized data-path service context data.
+ * @param n The total number of submitted jobs.
+ */
+static __rte_always_inline void
+rte_cryptodev_dp_submit_done(struct rte_crypto_dp_service_ctx *ctx, uint32_t n)
+{
+ (*ctx->submit_done)(ctx->qp_data, ctx->drv_service_data, n);
+}
+
+/**
+ * Dequeue symmetric crypto processing of user provided data.
+ *
+ * @param ctx The initialized data-path service
+ * context data.
+ * @param get_dequeue_count User provided callback function to
+ * obtain dequeue count.
+ * @param post_dequeue User provided callback function to
+ * post-process a dequeued operation.
+ * @param out_opaque Opaque pointer array to be retrieved from
+ * device queue. If *is_opaque_array* is
+ * set there should be enough room to store
+ * all opaque data.
+ * @param is_opaque_array Set to 1 if the opaque data of every
+ * dequeued job should be written into the
+ * *out_opaque* array.
+ * @param n_success_jobs Driver written value to specify the
+ * total number of successful operations.
+ *
+ * @return
+ * - Returns the number of dequeued operations.
+ */
+static __rte_always_inline uint32_t
+rte_cryptodev_dp_sym_dequeue(struct rte_crypto_dp_service_ctx *ctx,
+ rte_cryptodev_get_dequeue_count_t get_dequeue_count,
+ rte_cryptodev_post_dequeue_t post_dequeue,
+ void **out_opaque, uint8_t is_opaque_array,
+ uint32_t *n_success_jobs)
+{
+ return (*ctx->dequeue_opaque)(ctx->qp_data, ctx->drv_service_data,
+ get_dequeue_count, post_dequeue, out_opaque, is_opaque_array,
+ n_success_jobs);
+}
+
+/**
+ * Dequeue a single symmetric crypto processing of user provided data.
+ *
+ * @param ctx The initialized data-path service
+ * context data.
+ * @param out_opaque Opaque pointer to be retrieved from
+ * device queue. The driver shall support
+ * NULL as the input.
+ *
+ * @return
+ * - 1 if the job is dequeued and the operation is a success.
+ * - 0 if the job is dequeued but the operation failed.
+ * - -1 if no job is dequeued.
+ */
+static __rte_always_inline int
+rte_cryptodev_dp_sym_dequeue_single_job(struct rte_crypto_dp_service_ctx *ctx,
+ void **out_opaque)
+{
+ return (*ctx->dequeue_single)(ctx->qp_data, ctx->drv_service_data,
+ out_opaque);
+}
+
+/**
+ * Inform the queue pair that the dequeued jobs are finished, so the
+ * driver can advance the queue head.
+ *
+ * @param ctx The initialized data-path service context data.
+ * @param n The total number of dequeued jobs.
+ */
+static __rte_always_inline void
+rte_cryptodev_dp_dequeue_done(struct rte_crypto_dp_service_ctx *ctx, uint32_t n)
+{
+ (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_service_data, n);
+}
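+
+/*
+ * Illustrative dequeue sequence (hypothetical names; get_deq_count and
+ * on_dequeue are user callbacks such as the ones sketched above):
+ *
+ *	uint32_t n, n_success;
+ *
+ *	n = rte_cryptodev_dp_sym_dequeue(ctx, get_deq_count, on_dequeue,
+ *		out_opaque, 1, &n_success);
+ *	if (n > 0)
+ *		rte_cryptodev_dp_dequeue_done(ctx, n);
+ */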
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
index 81975d72b..9904267d7 100644
--- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -316,6 +316,30 @@ typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)
 (struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
 union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
+
+typedef int (*cryptodev_dp_get_service_ctx_size_t)(
+ struct rte_cryptodev *dev);
+
+/**
+ * Function prototype the driver provides to configure or update a
+ * data-path service.
+ *
+ * @param dev Crypto device pointer.
+ * @param qp_id Crypto device queue pair index.
+ * @param ctx The data-path service context data.
+ * @param service_type Type of the service requested.
+ * @param sess_type Session type.
+ * @param session_ctx Session context data.
+ * @param is_update Set to 1 if ctx is pre-initialized and only
+ * needs an update to a different service type or
+ * session, with the rest of the driver data
+ * remaining the same.
+ * @return
+ * - On success return 0.
+ * - On failure return negative integer.
+ */
+typedef int (*cryptodev_dp_configure_service_t)(
+ struct rte_cryptodev *dev, uint16_t qp_id,
+ struct rte_crypto_dp_service_ctx *ctx,
+ enum rte_crypto_dp_service service_type,
+ enum rte_crypto_op_sess_type sess_type,
+ union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);

 /** Crypto device operations function pointer table */
 struct rte_cryptodev_ops {
@@ -348,8 +372,16 @@ struct rte_cryptodev_ops {
 /**< Clear a Crypto sessions private data. */
 cryptodev_asym_free_session_t asym_session_clear;
 /**< Clear a Crypto sessions private data. */
- cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
- /**< process input data synchronously (cpu-crypto). */
+
+ cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
+ /**< process input data synchronously (cpu-crypto). */
+
+ cryptodev_dp_get_service_ctx_size_t get_drv_ctx_size;
+ /**< Get data path service context data size. */
+ cryptodev_dp_configure_service_t configure_service;
+ /**< Initialize crypto service ctx data. */
 };

diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
index a7a78dc41..6c5e78144 100644
--- a/lib/librte_cryptodev/rte_cryptodev_version.map
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -106,4 +106,12 @@ EXPERIMENTAL {
 rte_cryptodev_get_qp_status;
+ rte_cryptodev_dp_configure_service;
+ rte_cryptodev_get_dp_service_ctx_data_size;
+ rte_cryptodev_dp_submit_single_job;
+ rte_cryptodev_dp_sym_submit_vec;
+ rte_cryptodev_dp_submit_done;
+ rte_cryptodev_dp_sym_dequeue;
+ rte_cryptodev_dp_sym_dequeue_single_job;
+ rte_cryptodev_dp_dequeue_done;