crypto: change cryptodev with new cryptodev API
[vpp.git] / build/external/patches/dpdk_20.08/0001-cryptodev-add-symmetric-crypto-data-path-APIs.patch
1 From 865c50fdf46eaaf9efd6e0a897a836201b0ec5a1 Mon Sep 17 00:00:00 2001
2 From: Fan Zhang <roy.fan.zhang@intel.com>
3 Date: Mon, 27 Jul 2020 14:14:24 +0100
4 Subject: [PATCH] cryptodev: add symmetric crypto data-path APIs
5
6 This patch adds data-path APIs for enqueue and dequeue operations to
7 cryptodev. The APIs support flexible user-defined enqueue and dequeue
8 behaviors and operation modes. The QAT PMD is also updated to
9 support this feature.
10
11 Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
12 Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
13 ---
14  drivers/common/qat/Makefile                   |   1 +
15  drivers/crypto/qat/meson.build                |   1 +
16  drivers/crypto/qat/qat_sym.h                  |  13 +
17  drivers/crypto/qat/qat_sym_hw_dp.c            | 926 ++++++++++++++++++
18  drivers/crypto/qat/qat_sym_pmd.c              |   9 +-
19  lib/librte_cryptodev/rte_crypto.h             |   9 +
20  lib/librte_cryptodev/rte_crypto_sym.h         |  44 +-
21  lib/librte_cryptodev/rte_cryptodev.c          |  45 +
22  lib/librte_cryptodev/rte_cryptodev.h          | 336 ++++++-
23  lib/librte_cryptodev/rte_cryptodev_pmd.h      |  36 +-
24  .../rte_cryptodev_version.map                 |   8 +
25  11 files changed, 1417 insertions(+), 11 deletions(-)
26  create mode 100644 drivers/crypto/qat/qat_sym_hw_dp.c
27
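Editorial note (not part of the patch): the data-path service added below is driven through the callbacks stored in struct rte_crypto_dp_service_ctx rather than through rte_crypto_op objects. The following C sketch illustrates the intended call flow using only the declarations introduced by this patch: query the context size, configure the service on a queue pair, submit a job, kick the queue, then poll and acknowledge the completion. It is a minimal sketch, assuming the session and data buffers are prepared elsewhere; the library's inline wrappers (such as rte_cryptodev_dp_sym_submit_vec(), mentioned in comments later in the patch) are not part of this excerpt, so the context callbacks are invoked directly.

#include <rte_cryptodev.h>
#include <rte_malloc.h>

/* User callback: how many jobs to dequeue in this call (fixed to 1 here). */
static uint32_t
get_count(void *opaque)
{
	(void)opaque;
	return 1;
}

/* User callback: record the per-job status after dequeue. */
static void
post_deq(void *opaque, uint32_t index, uint8_t is_op_success)
{
	(void)opaque;
	(void)index;
	(void)is_op_success;
}

static int
dp_service_example(uint8_t dev_id, uint16_t qp_id,
	struct rte_cryptodev_sym_session *sess,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv,
	struct rte_crypto_data *digest, struct rte_crypto_data *aad,
	void *job_cookie)
{
	union rte_cryptodev_session_ctx sess_ctx = { .crypto_sess = sess };
	struct rte_crypto_dp_service_ctx *ctx;
	void *out_opaque = NULL;
	uint32_t n_success = 0;
	int32_t ctx_size;

	/* size of the generic context plus the driver private data */
	ctx_size = rte_cryptodev_get_dp_service_ctx_data_size(dev_id);
	if (ctx_size < 0)
		return -1;
	ctx = rte_zmalloc(NULL, ctx_size, 0);
	if (ctx == NULL)
		return -1;

	/* bind the context to a queue pair, a service type and a session */
	if (rte_cryptodev_dp_configure_service(dev_id, qp_id,
			RTE_CRYPTO_DP_SYM_AEAD, RTE_CRYPTO_OP_WITH_SESSION,
			sess_ctx, ctx, 0) < 0)
		goto err;

	/* enqueue one job, then kick the queue to start processing */
	if (ctx->submit_single_job(ctx->qp_data, ctx->drv_service_data,
			data, n_data_vecs, ofs, iv, digest, aad,
			job_cookie) < 0)
		goto err;
	ctx->submit_done(ctx->qp_data, ctx->drv_service_data, 1);

	/* poll for the completion, then acknowledge the dequeued job */
	while (ctx->dequeue_opaque(ctx->qp_data, ctx->drv_service_data,
			get_count, post_deq, &out_opaque, 0,
			&n_success) == 0)
		;
	ctx->dequeue_done(ctx->qp_data, ctx->drv_service_data, 1);

	rte_free(ctx);
	return n_success == 1 ? 0 : -1;

err:
	rte_free(ctx);
	return -1;
}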
28 diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
29 index 85d420709..1b71bbbab 100644
30 --- a/drivers/common/qat/Makefile
31 +++ b/drivers/common/qat/Makefile
32 @@ -42,6 +42,7 @@ endif
33         SRCS-y += qat_sym.c
34         SRCS-y += qat_sym_session.c
35         SRCS-y += qat_sym_pmd.c
36 +       SRCS-y += qat_sym_hw_dp.c
37         build_qat = yes
38  endif
39  endif
40 diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
41 index a225f374a..bc90ec44c 100644
42 --- a/drivers/crypto/qat/meson.build
43 +++ b/drivers/crypto/qat/meson.build
44 @@ -15,6 +15,7 @@ if dep.found()
45         qat_sources += files('qat_sym_pmd.c',
46                              'qat_sym.c',
47                              'qat_sym_session.c',
48 +                            'qat_sym_hw_dp.c',
49                              'qat_asym_pmd.c',
50                              'qat_asym.c')
51         qat_ext_deps += dep
52 diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
53 index 1a9748849..2d6316130 100644
54 --- a/drivers/crypto/qat/qat_sym.h
55 +++ b/drivers/crypto/qat/qat_sym.h
56 @@ -264,6 +264,18 @@ qat_sym_process_response(void **op, uint8_t *resp)
57         }
58         *op = (void *)rx_op;
59  }
60 +
61 +int
62 +qat_sym_dp_configure_service_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
63 +       struct rte_crypto_dp_service_ctx *service_ctx,
64 +       enum rte_crypto_dp_service service_type,
65 +       enum rte_crypto_op_sess_type sess_type,
66 +       union rte_cryptodev_session_ctx session_ctx,
67 +       uint8_t is_update);
68 +
69 +int
70 +qat_sym_get_service_ctx_size(struct rte_cryptodev *dev);
71 +
72  #else
73  
74  static inline void
75 @@ -276,5 +288,6 @@ static inline void
76  qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
77  {
78  }
79 +
80  #endif
81  #endif /* _QAT_SYM_H_ */
82 diff --git a/drivers/crypto/qat/qat_sym_hw_dp.c b/drivers/crypto/qat/qat_sym_hw_dp.c
83 new file mode 100644
84 index 000000000..ce75212ba
85 --- /dev/null
86 +++ b/drivers/crypto/qat/qat_sym_hw_dp.c
87 @@ -0,0 +1,926 @@
88 +/* SPDX-License-Identifier: BSD-3-Clause
89 + * Copyright(c) 2020 Intel Corporation
90 + */
91 +
92 +#include <rte_cryptodev_pmd.h>
93 +
94 +#include "adf_transport_access_macros.h"
95 +#include "icp_qat_fw.h"
96 +#include "icp_qat_fw_la.h"
97 +
98 +#include "qat_sym.h"
99 +#include "qat_sym_pmd.h"
100 +#include "qat_sym_session.h"
101 +#include "qat_qp.h"
102 +
103 +struct qat_sym_dp_service_ctx {
104 +       struct qat_sym_session *session;
105 +       uint32_t tail;
106 +       uint32_t head;
107 +};
108 +
109 +static __rte_always_inline int32_t
110 +qat_sym_dp_get_data(struct qat_qp *qp, struct icp_qat_fw_la_bulk_req *req,
111 +               struct rte_crypto_vec *data, uint16_t n_data_vecs)
112 +{
113 +       struct qat_queue *tx_queue;
114 +       struct qat_sym_op_cookie *cookie;
115 +       struct qat_sgl *list;
116 +       uint32_t i;
117 +       uint32_t total_len;
118 +
119 +       if (likely(n_data_vecs == 1)) {
120 +               req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
121 +                       data[0].iova;
122 +               req->comn_mid.src_length = req->comn_mid.dst_length =
123 +                       data[0].len;
124 +               return data[0].len;
125 +       }
126 +
127 +       if (n_data_vecs == 0 || n_data_vecs > QAT_SYM_SGL_MAX_NUMBER)
128 +               return -1;
129 +
130 +       total_len = 0;
131 +       tx_queue = &qp->tx_q;
132 +
133 +       ICP_QAT_FW_COMN_PTR_TYPE_SET(req->comn_hdr.comn_req_flags,
134 +                       QAT_COMN_PTR_TYPE_SGL);
135 +       cookie = qp->op_cookies[tx_queue->tail >> tx_queue->trailz];
136 +       list = (struct qat_sgl *)&cookie->qat_sgl_src;
137 +
138 +       for (i = 0; i < n_data_vecs; i++) {
139 +               list->buffers[i].len = data[i].len;
140 +               list->buffers[i].resrvd = 0;
141 +               list->buffers[i].addr = data[i].iova;
142 +               if (total_len + data[i].len > UINT32_MAX) {
143 +                       QAT_DP_LOG(ERR, "Message too long");
144 +                       return -1;
145 +               }
146 +               total_len += data[i].len;
147 +       }
148 +
149 +       list->num_bufs = i;
150 +       req->comn_mid.src_data_addr = req->comn_mid.dest_data_addr =
151 +                       cookie->qat_sgl_src_phys_addr;
152 +       req->comn_mid.src_length = req->comn_mid.dst_length = 0;
153 +       return total_len;
154 +}
155 +
156 +static __rte_always_inline void
157 +set_cipher_iv(struct icp_qat_fw_la_cipher_req_params *cipher_param,
158 +               struct rte_crypto_data *iv, uint32_t iv_len,
159 +               struct icp_qat_fw_la_bulk_req *qat_req)
160 +{
161 +       /* copy IV into request if it fits */
162 +       if (iv_len <= sizeof(cipher_param->u.cipher_IV_array))
163 +               rte_memcpy(cipher_param->u.cipher_IV_array, iv->base, iv_len);
164 +       else {
165 +               ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
166 +                               qat_req->comn_hdr.serv_specif_flags,
167 +                               ICP_QAT_FW_CIPH_IV_64BIT_PTR);
168 +               cipher_param->u.s.cipher_IV_ptr = iv->iova;
169 +       }
170 +}
171 +
172 +#define QAT_SYM_DP_IS_RESP_SUCCESS(resp) \
173 +       (ICP_QAT_FW_COMN_STATUS_FLAG_OK == \
174 +       ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status))
175 +
176 +static __rte_always_inline void
177 +qat_sym_dp_fill_vec_status(int32_t *sta, int status, uint32_t n)
178 +{
179 +       uint32_t i;
180 +
181 +       for (i = 0; i < n; i++)
182 +               sta[i] = status;
183 +}
184 +
185 +static __rte_always_inline void
186 +submit_one_aead_job(struct qat_sym_session *ctx,
187 +       struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_data *iv_vec,
188 +       struct rte_crypto_data *digest_vec, struct rte_crypto_data *aad_vec,
189 +       union rte_crypto_sym_ofs ofs, uint32_t data_len)
190 +{
191 +       struct icp_qat_fw_la_cipher_req_params *cipher_param =
192 +               (void *)&req->serv_specif_rqpars;
193 +       struct icp_qat_fw_la_auth_req_params *auth_param =
194 +               (void *)((uint8_t *)&req->serv_specif_rqpars +
195 +               ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
196 +       uint8_t *aad_data;
197 +       uint8_t aad_ccm_real_len;
198 +       uint8_t aad_len_field_sz;
199 +       uint32_t msg_len_be;
200 +       rte_iova_t aad_iova = 0;
201 +       uint8_t q;
202 +
203 +       switch (ctx->qat_hash_alg) {
204 +       case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
205 +       case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
206 +               ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
207 +                       req->comn_hdr.serv_specif_flags,
208 +                               ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
209 +               rte_memcpy(cipher_param->u.cipher_IV_array,
210 +                               iv_vec->base, ctx->cipher_iv.length);
211 +               aad_iova = aad_vec->iova;
212 +               break;
213 +       case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
214 +               aad_data = aad_vec->base;
215 +               aad_iova = aad_vec->iova;
216 +               aad_ccm_real_len = 0;
217 +               aad_len_field_sz = 0;
218 +               msg_len_be = rte_bswap32((uint32_t)data_len -
219 +                               ofs.ofs.cipher.head);
220 +
221 +               if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
222 +                       aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
223 +                       aad_ccm_real_len = ctx->aad_len -
224 +                               ICP_QAT_HW_CCM_AAD_B0_LEN -
225 +                               ICP_QAT_HW_CCM_AAD_LEN_INFO;
226 +               } else {
227 +                       aad_data = iv_vec->base;
228 +                       aad_iova = iv_vec->iova;
229 +               }
230 +
231 +               q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
232 +               aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
233 +                       aad_len_field_sz, ctx->digest_length, q);
234 +               if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
235 +                       memcpy(aad_data + ctx->cipher_iv.length +
236 +                               ICP_QAT_HW_CCM_NONCE_OFFSET + (q -
237 +                               ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
238 +                               (uint8_t *)&msg_len_be,
239 +                               ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
240 +               } else {
241 +                       memcpy(aad_data + ctx->cipher_iv.length +
242 +                               ICP_QAT_HW_CCM_NONCE_OFFSET,
243 +                               (uint8_t *)&msg_len_be +
244 +                               (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
245 +                               - q), q);
246 +               }
247 +
248 +               if (aad_len_field_sz > 0) {
249 +                       *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] =
250 +                               rte_bswap16(aad_ccm_real_len);
251 +
252 +                       if ((aad_ccm_real_len + aad_len_field_sz)
253 +                               % ICP_QAT_HW_CCM_AAD_B0_LEN) {
254 +                               uint8_t pad_len = 0;
255 +                               uint8_t pad_idx = 0;
256 +
257 +                               pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
258 +                                       ((aad_ccm_real_len +
259 +                                       aad_len_field_sz) %
260 +                                       ICP_QAT_HW_CCM_AAD_B0_LEN);
261 +                               pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
262 +                                       aad_ccm_real_len +
263 +                                       aad_len_field_sz;
264 +                               memset(&aad_data[pad_idx], 0, pad_len);
265 +                       }
266 +
267 +                       rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array)
268 +                               + ICP_QAT_HW_CCM_NONCE_OFFSET,
269 +                               (uint8_t *)iv_vec->base +
270 +                               ICP_QAT_HW_CCM_NONCE_OFFSET,
271 +                               ctx->cipher_iv.length);
272 +                       *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
273 +                               q - ICP_QAT_HW_CCM_NONCE_OFFSET;
274 +
275 +                       rte_memcpy((uint8_t *)aad_vec->base +
276 +                               ICP_QAT_HW_CCM_NONCE_OFFSET,
277 +                               (uint8_t *)iv_vec->base +
278 +                               ICP_QAT_HW_CCM_NONCE_OFFSET,
279 +                               ctx->cipher_iv.length);
280 +               }
281 +               break;
282 +       default:
283 +               break;
284 +       }
285 +
286 +       cipher_param->cipher_offset = ofs.ofs.cipher.head;
287 +       cipher_param->cipher_length = data_len - ofs.ofs.cipher.head
288 +           - ofs.ofs.cipher.tail;
289 +       auth_param->auth_off = ofs.ofs.cipher.head;
290 +       auth_param->auth_len = data_len - ofs.ofs.cipher.head
291 +           - ofs.ofs.cipher.tail;
292 +       auth_param->auth_res_addr = digest_vec->iova;
293 +       auth_param->u1.aad_adr = aad_iova;
294 +
295 +       if (ctx->is_single_pass) {
296 +               cipher_param->spc_aad_addr = aad_iova;
297 +               cipher_param->spc_auth_res_addr = digest_vec->iova;
298 +       }
299 +}
300 +
301 +static __rte_always_inline int
302 +qat_sym_dp_submit_single_aead(void *qp_data, uint8_t *service_data,
303 +       struct rte_crypto_vec *data, uint16_t n_data_vecs,
304 +       union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
305 +       struct rte_crypto_data *digest_vec, struct rte_crypto_data *aad_vec,
306 +       void *opaque)
307 +{
308 +       struct qat_qp *qp = qp_data;
309 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
310 +       struct qat_queue *tx_queue = &qp->tx_q;
311 +       struct qat_sym_session *ctx = service_ctx->session;
312 +       struct icp_qat_fw_la_bulk_req *req;
313 +       int32_t data_len;
314 +       uint32_t tail = service_ctx->tail;
315 +
316 +       req = (struct icp_qat_fw_la_bulk_req *)(
317 +               (uint8_t *)tx_queue->base_addr + tail);
318 +       tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
319 +       rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
320 +       rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
321 +       data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
322 +       if (unlikely(data_len < 0))
323 +               return -1;
324 +       req->comn_mid.opaque_data = (uint64_t)opaque;
325 +
326 +       submit_one_aead_job(ctx, req, iv_vec, digest_vec, aad_vec, ofs,
327 +               (uint32_t)data_len);
328 +
329 +       service_ctx->tail = tail;
330 +
331 +       return 0;
332 +}
333 +
334 +static __rte_always_inline uint32_t
335 +qat_sym_dp_submit_aead_jobs(void *qp_data, uint8_t *service_data,
336 +       struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
337 +       void **opaque)
338 +{
339 +       struct qat_qp *qp = qp_data;
340 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
341 +       struct qat_queue *tx_queue = &qp->tx_q;
342 +       struct qat_sym_session *ctx = service_ctx->session;
343 +       uint32_t i;
344 +       uint32_t tail;
345 +       struct icp_qat_fw_la_bulk_req *req;
346 +       int32_t data_len;
347 +
348 +       if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
349 +                       qp->max_inflights)) {
350 +               qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
351 +               return 0;
352 +       }
353 +
354 +       tail = service_ctx->tail;
355 +
356 +       for (i = 0; i < vec->num; i++) {
357 +               req  = (struct icp_qat_fw_la_bulk_req *)(
358 +                       (uint8_t *)tx_queue->base_addr + tail);
359 +               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
360 +
361 +               data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
362 +                       vec->sgl[i].num) - ofs.ofs.cipher.head -
363 +                       ofs.ofs.cipher.tail;
364 +               if (unlikely(data_len < 0))
365 +                       break;
366 +               req->comn_mid.opaque_data = (uint64_t)opaque[i];
367 +               submit_one_aead_job(ctx, req, vec->iv_vec + i,
368 +                       vec->digest_vec + i, vec->aad_vec + i, ofs,
369 +                       (uint32_t)data_len);
370 +               tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
371 +       }
372 +
373 +       if (unlikely(i < vec->num))
374 +               qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
375 +
376 +       service_ctx->tail = tail;
377 +       return i;
378 +}
379 +
380 +static __rte_always_inline void
381 +submit_one_cipher_job(struct qat_sym_session *ctx,
382 +       struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_data *iv_vec,
383 +       union rte_crypto_sym_ofs ofs, uint32_t data_len)
384 +{
385 +       struct icp_qat_fw_la_cipher_req_params *cipher_param;
386 +
387 +       cipher_param = (void *)&req->serv_specif_rqpars;
388 +
389 +       /* cipher IV */
390 +       set_cipher_iv(cipher_param, iv_vec, ctx->cipher_iv.length, req);
391 +       cipher_param->cipher_offset = ofs.ofs.cipher.head;
392 +       cipher_param->cipher_length = data_len - ofs.ofs.cipher.head
393 +           - ofs.ofs.cipher.tail;
394 +}
395 +
396 +static __rte_always_inline int
397 +qat_sym_dp_submit_single_cipher(void *qp_data, uint8_t *service_data,
398 +       struct rte_crypto_vec *data, uint16_t n_data_vecs,
399 +       union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
400 +       __rte_unused struct rte_crypto_data *digest_vec,
401 +       __rte_unused struct rte_crypto_data *aad_vec,
402 +       void *opaque)
403 +{
404 +       struct qat_qp *qp = qp_data;
405 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
406 +       struct qat_queue *tx_queue = &qp->tx_q;
407 +       struct qat_sym_session *ctx = service_ctx->session;
408 +       struct icp_qat_fw_la_bulk_req *req;
409 +       int32_t data_len;
410 +       uint32_t tail = service_ctx->tail;
411 +
412 +       req = (struct icp_qat_fw_la_bulk_req *)(
413 +               (uint8_t *)tx_queue->base_addr + tail);
414 +       tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
415 +       rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
416 +       rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
417 +       data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
418 +       if (unlikely(data_len < 0))
419 +               return -1;
420 +       req->comn_mid.opaque_data = (uint64_t)opaque;
421 +
422 +       submit_one_cipher_job(ctx, req, iv_vec, ofs, (uint32_t)data_len);
423 +
424 +       service_ctx->tail = tail;
425 +
426 +       return 0;
427 +}
428 +
429 +static __rte_always_inline uint32_t
430 +qat_sym_dp_submit_cipher_jobs(void *qp_data, uint8_t *service_data,
431 +       struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
432 +       void **opaque)
433 +{
434 +       struct qat_qp *qp = qp_data;
435 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
436 +       struct qat_queue *tx_queue = &qp->tx_q;
437 +       struct qat_sym_session *ctx = service_ctx->session;
438 +       uint32_t i;
439 +       uint32_t tail;
440 +       struct icp_qat_fw_la_bulk_req *req;
441 +       int32_t data_len;
442 +
443 +       if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
444 +                       qp->max_inflights)) {
445 +               qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
446 +               return 0;
447 +       }
448 +
449 +       tail = service_ctx->tail;
450 +
451 +       for (i = 0; i < vec->num; i++) {
452 +               req  = (struct icp_qat_fw_la_bulk_req *)(
453 +                       (uint8_t *)tx_queue->base_addr + tail);
454 +               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
455 +
456 +               data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
457 +                       vec->sgl[i].num) - ofs.ofs.cipher.head -
458 +                       ofs.ofs.cipher.tail;
459 +               if (unlikely(data_len < 0))
460 +                       break;
461 +               req->comn_mid.opaque_data = (uint64_t)opaque[i];
462 +               submit_one_cipher_job(ctx, req, vec->iv_vec + i, ofs,
463 +                       (uint32_t)data_len);
464 +               tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
465 +       }
466 +
467 +       if (unlikely(i < vec->num))
468 +               qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
469 +
470 +       service_ctx->tail = tail;
471 +       return i;
472 +}
473 +
474 +static __rte_always_inline void
475 +submit_one_auth_job(struct qat_sym_session *ctx,
476 +       struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_data *iv_vec,
477 +       struct rte_crypto_data *digest_vec, union rte_crypto_sym_ofs ofs,
478 +       uint32_t data_len)
479 +{
480 +       struct icp_qat_fw_la_cipher_req_params *cipher_param;
481 +       struct icp_qat_fw_la_auth_req_params *auth_param;
482 +
483 +       cipher_param = (void *)&req->serv_specif_rqpars;
484 +       auth_param = (void *)((uint8_t *)cipher_param +
485 +                       ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
486 +
487 +       auth_param->auth_off = ofs.ofs.auth.head;
488 +       auth_param->auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
489 +       auth_param->auth_res_addr = digest_vec->iova;
490 +
491 +       switch (ctx->qat_hash_alg) {
492 +       case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
493 +       case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
494 +       case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
495 +               auth_param->u1.aad_adr = iv_vec->iova;
496 +               break;
497 +       case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
498 +       case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
499 +               ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
500 +                       req->comn_hdr.serv_specif_flags,
501 +                               ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
502 +               rte_memcpy(cipher_param->u.cipher_IV_array,
503 +                               iv_vec->base, ctx->cipher_iv.length);
504 +               break;
505 +       default:
506 +               break;
507 +       }
508 +}
509 +
510 +static __rte_always_inline int
511 +qat_sym_dp_submit_single_auth(void *qp_data, uint8_t *service_data,
512 +       struct rte_crypto_vec *data, uint16_t n_data_vecs,
513 +       union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
514 +       struct rte_crypto_data *digest_vec,
515 +       __rte_unused struct rte_crypto_data *aad_vec,
516 +       void *opaque)
517 +{
518 +       struct qat_qp *qp = qp_data;
519 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
520 +       struct qat_queue *tx_queue = &qp->tx_q;
521 +       struct qat_sym_session *ctx = service_ctx->session;
522 +       struct icp_qat_fw_la_bulk_req *req;
523 +       int32_t data_len;
524 +       uint32_t tail = service_ctx->tail;
525 +
526 +       req = (struct icp_qat_fw_la_bulk_req *)(
527 +               (uint8_t *)tx_queue->base_addr + tail);
528 +       tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
529 +       rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
530 +       rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
531 +       data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
532 +       if (unlikely(data_len < 0))
533 +               return -1;
534 +       req->comn_mid.opaque_data = (uint64_t)opaque;
535 +
536 +       submit_one_auth_job(ctx, req, iv_vec, digest_vec, ofs,
537 +                       (uint32_t)data_len);
538 +
539 +       service_ctx->tail = tail;
540 +
541 +       return 0;
542 +}
543 +
544 +static __rte_always_inline uint32_t
545 +qat_sym_dp_submit_auth_jobs(void *qp_data, uint8_t *service_data,
546 +       struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
547 +       void **opaque)
548 +{
549 +       struct qat_qp *qp = qp_data;
550 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
551 +       struct qat_queue *tx_queue = &qp->tx_q;
552 +       struct qat_sym_session *ctx = service_ctx->session;
553 +       uint32_t i;
554 +       uint32_t tail;
555 +       struct icp_qat_fw_la_bulk_req *req;
556 +       int32_t data_len;
557 +
558 +       if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
559 +                       qp->max_inflights)) {
560 +               qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
561 +               return 0;
562 +       }
563 +
564 +       tail = service_ctx->tail;
565 +
566 +       for (i = 0; i < vec->num; i++) {
567 +               req  = (struct icp_qat_fw_la_bulk_req *)(
568 +                       (uint8_t *)tx_queue->base_addr + tail);
569 +               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
570 +
571 +               data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
572 +                       vec->sgl[i].num) - ofs.ofs.cipher.head -
573 +                       ofs.ofs.cipher.tail;
574 +               if (unlikely(data_len < 0))
575 +                       break;
576 +               req->comn_mid.opaque_data = (uint64_t)opaque[i];
577 +               submit_one_auth_job(ctx, req, vec->iv_vec + i,
578 +                       vec->digest_vec + i, ofs, (uint32_t)data_len);
579 +               tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
580 +       }
581 +
582 +       if (unlikely(i < vec->num))
583 +               qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
584 +
585 +       service_ctx->tail = tail;
586 +       return i;
587 +}
588 +
589 +static __rte_always_inline void
590 +submit_one_chain_job(struct qat_sym_session *ctx,
591 +       struct icp_qat_fw_la_bulk_req *req, struct rte_crypto_vec *data,
592 +       uint16_t n_data_vecs, struct rte_crypto_data *iv_vec,
593 +       struct rte_crypto_data *digest_vec, union rte_crypto_sym_ofs ofs,
594 +       uint32_t data_len)
595 +{
596 +       struct icp_qat_fw_la_cipher_req_params *cipher_param;
597 +       struct icp_qat_fw_la_auth_req_params *auth_param;
598 +       rte_iova_t auth_iova_end;
599 +       int32_t cipher_len, auth_len;
600 +
601 +       cipher_param = (void *)&req->serv_specif_rqpars;
602 +       auth_param = (void *)((uint8_t *)cipher_param +
603 +                       ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET);
604 +
605 +       cipher_len = data_len - ofs.ofs.cipher.head -
606 +                       ofs.ofs.cipher.tail;
607 +       auth_len = data_len - ofs.ofs.auth.head - ofs.ofs.auth.tail;
608 +
609 +       assert(cipher_len > 0 && auth_len > 0);
610 +
611 +       cipher_param->cipher_offset = ofs.ofs.cipher.head;
612 +       cipher_param->cipher_length = cipher_len;
613 +       set_cipher_iv(cipher_param, iv_vec, ctx->cipher_iv.length, req);
614 +
615 +       auth_param->auth_off = ofs.ofs.auth.head;
616 +       auth_param->auth_len = auth_len;
617 +       auth_param->auth_res_addr = digest_vec->iova;
618 +
619 +       switch (ctx->qat_hash_alg) {
620 +       case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
621 +       case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
622 +       case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
623 +               auth_param->u1.aad_adr = iv_vec->iova;
624 +
625 +               if (unlikely(n_data_vecs > 1)) {
626 +                       int auth_end_get = 0, i = n_data_vecs - 1;
627 +                       struct rte_crypto_vec *cvec = &data[i];
628 +                       uint32_t len;
629 +
630 +                       len = data_len - ofs.ofs.auth.tail;
631 +
632 +                       while (i >= 0 && len > 0) {
633 +                               if (cvec->len >= len) {
634 +                                       auth_iova_end = cvec->iova +
635 +                                               (cvec->len - len);
636 +                                       len = 0;
637 +                                       auth_end_get = 1;
638 +                                       break;
639 +                               }
640 +                               len -= cvec->len;
641 +                               i--;
642 +                               cvec--;
643 +                       }
644 +
645 +                       assert(auth_end_get != 0);
646 +               } else
647 +                       auth_iova_end = digest_vec->iova +
648 +                               ctx->digest_length;
649 +
650 +               /* Then check if digest-encrypted conditions are met */
651 +               if ((auth_param->auth_off + auth_param->auth_len <
652 +                               cipher_param->cipher_offset +
653 +                               cipher_param->cipher_length) &&
654 +                               (digest_vec->iova == auth_iova_end)) {
655 +                       /* Handle partial digest encryption */
656 +                       if (cipher_param->cipher_offset +
657 +                                       cipher_param->cipher_length <
658 +                                       auth_param->auth_off +
659 +                                       auth_param->auth_len +
660 +                                       ctx->digest_length)
661 +                               req->comn_mid.dst_length =
662 +                                       req->comn_mid.src_length =
663 +                                       auth_param->auth_off +
664 +                                       auth_param->auth_len +
665 +                                       ctx->digest_length;
666 +                       struct icp_qat_fw_comn_req_hdr *header =
667 +                               &req->comn_hdr;
668 +                       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
669 +                               header->serv_specif_flags,
670 +                               ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
671 +               }
672 +               break;
673 +       case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
674 +       case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
675 +               break;
676 +       default:
677 +               break;
678 +       }
679 +}
680 +
681 +static __rte_always_inline int
682 +qat_sym_dp_submit_single_chain(void *qp_data, uint8_t *service_data,
683 +       struct rte_crypto_vec *data, uint16_t n_data_vecs,
684 +       union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv_vec,
685 +       struct rte_crypto_data *digest_vec,
686 +       __rte_unused struct rte_crypto_data *aad_vec,
687 +       void *opaque)
688 +{
689 +       struct qat_qp *qp = qp_data;
690 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
691 +       struct qat_queue *tx_queue = &qp->tx_q;
692 +       struct qat_sym_session *ctx = service_ctx->session;
693 +       struct icp_qat_fw_la_bulk_req *req;
694 +       int32_t data_len;
695 +       uint32_t tail = service_ctx->tail;
696 +
697 +       req = (struct icp_qat_fw_la_bulk_req *)(
698 +               (uint8_t *)tx_queue->base_addr + tail);
699 +       tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
700 +       rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
701 +       rte_prefetch0((uint8_t *)tx_queue->base_addr + tail);
702 +       data_len = qat_sym_dp_get_data(qp, req, data, n_data_vecs);
703 +       if (unlikely(data_len < 0))
704 +               return -1;
705 +       req->comn_mid.opaque_data = (uint64_t)opaque;
706 +
707 +       submit_one_chain_job(ctx, req, data, n_data_vecs, iv_vec, digest_vec,
708 +               ofs, (uint32_t)data_len);
709 +
710 +       service_ctx->tail = tail;
711 +
712 +       return 0;
713 +}
714 +
715 +static __rte_always_inline uint32_t
716 +qat_sym_dp_submit_chain_jobs(void *qp_data, uint8_t *service_data,
717 +       struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
718 +       void **opaque)
719 +{
720 +       struct qat_qp *qp = qp_data;
721 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
722 +       struct qat_queue *tx_queue = &qp->tx_q;
723 +       struct qat_sym_session *ctx = service_ctx->session;
724 +       uint32_t i;
725 +       uint32_t tail;
726 +       struct icp_qat_fw_la_bulk_req *req;
727 +       int32_t data_len;
728 +
729 +       if (unlikely(qp->enqueued - qp->dequeued + vec->num >=
730 +                       qp->max_inflights)) {
731 +               qat_sym_dp_fill_vec_status(vec->status, -1, vec->num);
732 +               return 0;
733 +       }
734 +
735 +       tail = service_ctx->tail;
736 +
737 +       for (i = 0; i < vec->num; i++) {
738 +               req  = (struct icp_qat_fw_la_bulk_req *)(
739 +                       (uint8_t *)tx_queue->base_addr + tail);
740 +               rte_mov128((uint8_t *)req, (const uint8_t *)&(ctx->fw_req));
741 +
742 +               data_len = qat_sym_dp_get_data(qp, req, vec->sgl[i].vec,
743 +                       vec->sgl[i].num) - ofs.ofs.cipher.head -
744 +                       ofs.ofs.cipher.tail;
745 +               if (unlikely(data_len < 0))
746 +                       break;
747 +               req->comn_mid.opaque_data = (uint64_t)opaque[i];
748 +               submit_one_chain_job(ctx, req, vec->sgl[i].vec, vec->sgl[i].num,
749 +                       vec->iv_vec + i, vec->digest_vec + i, ofs,
750 +                       (uint32_t)data_len);
751 +               tail = (tail + tx_queue->msg_size) & tx_queue->modulo_mask;
752 +       }
753 +
754 +       if (unlikely(i < vec->num))
755 +               qat_sym_dp_fill_vec_status(vec->status + i, -1, vec->num - i);
756 +
757 +       service_ctx->tail = tail;
758 +       return i;
759 +}
760 +
761 +static __rte_always_inline uint32_t
762 +qat_sym_dp_dequeue(void *qp_data, uint8_t *service_data,
763 +       rte_cryptodev_get_dequeue_count_t get_dequeue_count,
764 +       rte_cryptodev_post_dequeue_t post_dequeue,
765 +       void **out_opaque, uint8_t is_opaque_array,
766 +       uint32_t *n_success_jobs)
767 +{
768 +       struct qat_qp *qp = qp_data;
769 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
770 +       struct qat_queue *rx_queue = &qp->rx_q;
771 +       struct icp_qat_fw_comn_resp *resp;
772 +       void *resp_opaque;
773 +       uint32_t i, n, inflight;
774 +       uint32_t head;
775 +       uint8_t status;
776 +
777 +       *n_success_jobs = 0;
778 +       head = service_ctx->head;
779 +
780 +       inflight = qp->enqueued - qp->dequeued;
781 +       if (unlikely(inflight == 0))
782 +               return 0;
783 +
784 +       resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
785 +                       head);
786 +       /* no operation ready */
787 +       if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
788 +               return 0;
789 +
790 +       resp_opaque = (void *)(uintptr_t)resp->opaque_data;
791 +       /* get the dequeue count */
792 +       n = get_dequeue_count(resp_opaque);
793 +       if (unlikely(n == 0))
794 +               return 0;
795 +
796 +       out_opaque[0] = resp_opaque;
797 +       status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
798 +       post_dequeue(resp_opaque, 0, status);
799 +       *n_success_jobs += status;
800 +
801 +       head = (head + rx_queue->msg_size) & rx_queue->modulo_mask;
802 +
803 +       /* we already finished dequeue when n == 1 */
804 +       if (unlikely(n == 1)) {
805 +               i = 1;
806 +               goto end_deq;
807 +       }
808 +
809 +       if (is_opaque_array) {
810 +               for (i = 1; i < n; i++) {
811 +                       resp = (struct icp_qat_fw_comn_resp *)(
812 +                               (uint8_t *)rx_queue->base_addr + head);
813 +                       if (unlikely(*(uint32_t *)resp ==
814 +                                       ADF_RING_EMPTY_SIG))
815 +                               goto end_deq;
816 +                       out_opaque[i] = (void *)(uintptr_t)
817 +                                       resp->opaque_data;
818 +                       status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
819 +                       *n_success_jobs += status;
820 +                       post_dequeue(out_opaque[i], i, status);
821 +                       head = (head + rx_queue->msg_size) &
822 +                                       rx_queue->modulo_mask;
823 +               }
824 +
825 +               goto end_deq;
826 +       }
827 +
828 +       /* opaque is not array */
829 +       for (i = 1; i < n; i++) {
830 +               resp = (struct icp_qat_fw_comn_resp *)(
831 +                       (uint8_t *)rx_queue->base_addr + head);
832 +               status = QAT_SYM_DP_IS_RESP_SUCCESS(resp);
833 +               if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
834 +                       goto end_deq;
835 +               head = (head + rx_queue->msg_size) &
836 +                               rx_queue->modulo_mask;
837 +               post_dequeue(resp_opaque, i, status);
838 +               *n_success_jobs += status;
839 +       }
840 +
841 +end_deq:
842 +       service_ctx->head = head;
843 +       return i;
844 +}
845 +
846 +static __rte_always_inline int
847 +qat_sym_dp_dequeue_single_job(void *qp_data, uint8_t *service_data,
848 +               void **out_opaque)
849 +{
850 +       struct qat_qp *qp = qp_data;
851 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
852 +       struct qat_queue *rx_queue = &qp->rx_q;
853 +
854 +       register struct icp_qat_fw_comn_resp *resp;
855 +
856 +       resp = (struct icp_qat_fw_comn_resp *)((uint8_t *)rx_queue->base_addr +
857 +                       service_ctx->head);
858 +
859 +       if (unlikely(*(uint32_t *)resp == ADF_RING_EMPTY_SIG))
860 +               return -1;
861 +
862 +       *out_opaque = (void *)(uintptr_t)resp->opaque_data;
863 +
864 +       service_ctx->head = (service_ctx->head + rx_queue->msg_size) &
865 +                       rx_queue->modulo_mask;
866 +
867 +       return QAT_SYM_DP_IS_RESP_SUCCESS(resp);
868 +}
869 +
870 +static __rte_always_inline void
871 +qat_sym_dp_kick_tail(void *qp_data, uint8_t *service_data, uint32_t n)
872 +{
873 +       struct qat_qp *qp = qp_data;
874 +       struct qat_queue *tx_queue = &qp->tx_q;
875 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
876 +
877 +       qp->enqueued += n;
878 +       qp->stats.enqueued_count += n;
879 +
880 +       assert(service_ctx->tail == ((tx_queue->tail + tx_queue->msg_size * n) &
881 +                       tx_queue->modulo_mask));
882 +
883 +       tx_queue->tail = service_ctx->tail;
884 +
885 +       WRITE_CSR_RING_TAIL(qp->mmap_bar_addr,
886 +                       tx_queue->hw_bundle_number,
887 +                       tx_queue->hw_queue_number, tx_queue->tail);
888 +       tx_queue->csr_tail = tx_queue->tail;
889 +}
890 +
891 +static __rte_always_inline void
892 +qat_sym_dp_update_head(void *qp_data, uint8_t *service_data, uint32_t n)
893 +{
894 +       struct qat_qp *qp = qp_data;
895 +       struct qat_queue *rx_queue = &qp->rx_q;
896 +       struct qat_sym_dp_service_ctx *service_ctx = (void *)service_data;
897 +
898 +       assert(service_ctx->head == ((rx_queue->head + rx_queue->msg_size * n) &
899 +                       rx_queue->modulo_mask));
900 +
901 +       rx_queue->head = service_ctx->head;
902 +       rx_queue->nb_processed_responses += n;
903 +       qp->dequeued += n;
904 +       qp->stats.dequeued_count += n;
905 +       if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) {
906 +               uint32_t old_head, new_head;
907 +               uint32_t max_head;
908 +
909 +               old_head = rx_queue->csr_head;
910 +               new_head = rx_queue->head;
911 +               max_head = qp->nb_descriptors * rx_queue->msg_size;
912 +
913 +               /* write out free descriptors */
914 +               void *cur_desc = (uint8_t *)rx_queue->base_addr + old_head;
915 +
916 +               if (new_head < old_head) {
917 +                       memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE,
918 +                                       max_head - old_head);
919 +                       memset(rx_queue->base_addr, ADF_RING_EMPTY_SIG_BYTE,
920 +                                       new_head);
921 +               } else {
922 +                       memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head -
923 +                                       old_head);
924 +               }
925 +               rx_queue->nb_processed_responses = 0;
926 +               rx_queue->csr_head = new_head;
927 +
928 +               /* write current head to CSR */
929 +               WRITE_CSR_RING_HEAD(qp->mmap_bar_addr,
930 +                       rx_queue->hw_bundle_number, rx_queue->hw_queue_number,
931 +                       new_head);
932 +       }
933 +}
934 +
935 +int
936 +qat_sym_dp_configure_service_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
937 +       struct rte_crypto_dp_service_ctx *service_ctx,
938 +       enum rte_crypto_dp_service service_type,
939 +       enum rte_crypto_op_sess_type sess_type,
940 +       union rte_cryptodev_session_ctx session_ctx,
941 +       uint8_t is_update)
942 +{
943 +       struct qat_qp *qp;
944 +       struct qat_sym_session *ctx;
945 +       struct qat_sym_dp_service_ctx *dp_ctx;
946 +
947 +       if (service_ctx == NULL || session_ctx.crypto_sess == NULL ||
948 +                       sess_type != RTE_CRYPTO_OP_WITH_SESSION)
949 +               return -EINVAL;
950 +
951 +       qp = dev->data->queue_pairs[qp_id];
952 +       ctx = (struct qat_sym_session *)get_sym_session_private_data(
953 +                       session_ctx.crypto_sess, qat_sym_driver_id);
954 +       dp_ctx = (struct qat_sym_dp_service_ctx *)
955 +                       service_ctx->drv_service_data;
956 +
957 +       if (!is_update) {
958 +               memset(service_ctx, 0, sizeof(*service_ctx) +
959 +                               sizeof(struct qat_sym_dp_service_ctx));
960 +               service_ctx->qp_data = dev->data->queue_pairs[qp_id];
961 +               dp_ctx->tail = qp->tx_q.tail;
962 +               dp_ctx->head = qp->rx_q.head;
963 +       }
964 +
965 +       dp_ctx->session = ctx;
966 +
967 +       service_ctx->submit_done = qat_sym_dp_kick_tail;
968 +       service_ctx->dequeue_opaque = qat_sym_dp_dequeue;
969 +       service_ctx->dequeue_single = qat_sym_dp_dequeue_single_job;
970 +       service_ctx->dequeue_done = qat_sym_dp_update_head;
971 +
972 +       if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
973 +                       ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
974 +               /* AES-GCM or AES-CCM */
975 +               if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
976 +                       ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
977 +                       (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
978 +                       && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
979 +                       && ctx->qat_hash_alg ==
980 +                                       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
981 +                       if (service_type != RTE_CRYPTO_DP_SYM_AEAD)
982 +                               return -1;
983 +                       service_ctx->submit_vec = qat_sym_dp_submit_aead_jobs;
984 +                       service_ctx->submit_single_job =
985 +                                       qat_sym_dp_submit_single_aead;
986 +               } else {
987 +                       if (service_type != RTE_CRYPTO_DP_SYM_CHAIN)
988 +                               return -1;
989 +                       service_ctx->submit_vec = qat_sym_dp_submit_chain_jobs;
990 +                       service_ctx->submit_single_job =
991 +                                       qat_sym_dp_submit_single_chain;
992 +               }
993 +       } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
994 +               if (service_type != RTE_CRYPTO_DP_SYM_AUTH_ONLY)
995 +                       return -1;
996 +               service_ctx->submit_vec = qat_sym_dp_submit_auth_jobs;
997 +               service_ctx->submit_single_job = qat_sym_dp_submit_single_auth;
998 +       } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
999 +               if (service_type != RTE_CRYPTO_DP_SYM_CIPHER_ONLY)
1000 +                       return -1;
1001 +               service_ctx->submit_vec = qat_sym_dp_submit_cipher_jobs;
1002 +               service_ctx->submit_single_job =
1003 +                       qat_sym_dp_submit_single_cipher;
1004 +       }
1005 +
1006 +       return 0;
1007 +}
1008 +
1009 +int
1010 +qat_sym_get_service_ctx_size(__rte_unused struct rte_cryptodev *dev)
1011 +{
1012 +       return sizeof(struct qat_sym_dp_service_ctx);
1013 +}
1014 diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
1015 index 314742f53..bef08c3bc 100644
1016 --- a/drivers/crypto/qat/qat_sym_pmd.c
1017 +++ b/drivers/crypto/qat/qat_sym_pmd.c
1018 @@ -258,7 +258,11 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
1019                 /* Crypto related operations */
1020                 .sym_session_get_size   = qat_sym_session_get_private_size,
1021                 .sym_session_configure  = qat_sym_session_configure,
1022 -               .sym_session_clear      = qat_sym_session_clear
1023 +               .sym_session_clear      = qat_sym_session_clear,
1024 +
1025 +               /* Data plane service related operations */
1026 +               .get_drv_ctx_size = qat_sym_get_service_ctx_size,
1027 +               .configure_service = qat_sym_dp_configure_service_ctx,
1028  };
1029  
1030  #ifdef RTE_LIBRTE_SECURITY
1031 @@ -376,7 +380,8 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
1032                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
1033                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
1034                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
1035 -                       RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
1036 +                       RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED |
1037 +                       RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE;
1038  
1039         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1040                 return 0;
1041 diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
1042 index fd5ef3a87..f009be9af 100644
1043 --- a/lib/librte_cryptodev/rte_crypto.h
1044 +++ b/lib/librte_cryptodev/rte_crypto.h
1045 @@ -438,6 +438,15 @@ rte_crypto_op_attach_asym_session(struct rte_crypto_op *op,
1046         return 0;
1047  }
1048  
1049 +/** Crypto data-path service types */
1050 +enum rte_crypto_dp_service {
1051 +       RTE_CRYPTO_DP_SYM_CIPHER_ONLY = 0,
1052 +       RTE_CRYPTO_DP_SYM_AUTH_ONLY,
1053 +       RTE_CRYPTO_DP_SYM_CHAIN,
1054 +       RTE_CRYPTO_DP_SYM_AEAD,
1055 +       RTE_CRYPTO_DP_N_SERVICE
1056 +};
1057 +
1058  #ifdef __cplusplus
1059  }
1060  #endif
1061 diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
1062 index f29c98051..518e4111b 100644
1063 --- a/lib/librte_cryptodev/rte_crypto_sym.h
1064 +++ b/lib/librte_cryptodev/rte_crypto_sym.h
1065 @@ -50,6 +50,18 @@ struct rte_crypto_sgl {
1066         uint32_t num;
1067  };
1068  
1069 +/**
1070 + * Crypto IO Data without length info.
1071 + * Supposed to be used to pass input/output data buffers with lengths
1072 + * defined when creating crypto session.
1073 + */
1074 +struct rte_crypto_data {
1075 +       /** virtual address of the data buffer */
1076 +       void *base;
1077 +       /** IOVA of the data buffer */
1078 +       rte_iova_t iova;
1079 +};
1080 +
1081  /**
1082   * Synchronous operation descriptor.
1083   * Supposed to be used with CPU crypto API call.
1084 @@ -57,12 +69,32 @@ struct rte_crypto_sgl {
1085  struct rte_crypto_sym_vec {
1086         /** array of SGL vectors */
1087         struct rte_crypto_sgl *sgl;
1088 -       /** array of pointers to IV */
1089 -       void **iv;
1090 -       /** array of pointers to AAD */
1091 -       void **aad;
1092 -       /** array of pointers to digest */
1093 -       void **digest;
1094 +
1095 +       union {
1096 +
1097 +               /* Supposed to be used with CPU crypto API call. */
1098 +               struct {
1099 +                       /** array of pointers to IV */
1100 +                       void **iv;
1101 +                       /** array of pointers to AAD */
1102 +                       void **aad;
1103 +                       /** array of pointers to digest */
1104 +                       void **digest;
1105 +               };
1106 +
1107 +               /* Supposed to be used with rte_cryptodev_dp_sym_submit_vec()
1108 +                * call.
1109 +                */
1110 +               struct {
1111 +                       /** vector to IV */
1112 +                       struct rte_crypto_data *iv_vec;
1113 +                       /** vector to AAD */
1114 +                       struct rte_crypto_data *aad_vec;
1115 +                       /** vector to Digest */
1116 +                       struct rte_crypto_data *digest_vec;
1117 +               };
1118 +       };
1119 +
1120         /**
1121          * array of statuses for each operation:
1122          *  - 0 on success
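Editorial note (not part of the patch): with the union above, rte_crypto_sym_vec can carry rte_crypto_data descriptors (virtual address plus IOVA) for the data-path service instead of the raw pointers used by the CPU crypto path. Below is a minimal sketch of assembling such a vector for a burst submission through an already configured service context; JOB_NUM and the helper name are illustrative, and buffer preparation is assumed to happen elsewhere.

#include <rte_cryptodev.h>

#define JOB_NUM 8 /* illustrative burst size */

static uint32_t
submit_burst_example(struct rte_crypto_dp_service_ctx *ctx,
	struct rte_crypto_sgl sgl[JOB_NUM],
	struct rte_crypto_data iv[JOB_NUM],
	struct rte_crypto_data digest[JOB_NUM],
	struct rte_crypto_data aad[JOB_NUM],
	union rte_crypto_sym_ofs ofs, void *opaque[JOB_NUM])
{
	int32_t status[JOB_NUM];
	uint32_t n;

	struct rte_crypto_sym_vec vec = {
		.sgl = sgl,		/* one scatter-gather list per job */
		.iv_vec = iv,		/* per-job IV buffers */
		.digest_vec = digest,	/* per-job digest buffers */
		.aad_vec = aad,		/* per-job AAD buffers */
		.status = status,	/* filled for jobs that fail to enqueue */
		.num = JOB_NUM,
	};

	/* enqueue the burst, then kick the queue to start processing */
	n = ctx->submit_vec(ctx->qp_data, ctx->drv_service_data, &vec, ofs,
			opaque);
	if (n > 0)
		ctx->submit_done(ctx->qp_data, ctx->drv_service_data, n);
	return n;
}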
1123 diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
1124 index 1dd795bcb..06c01cfaa 100644
1125 --- a/lib/librte_cryptodev/rte_cryptodev.c
1126 +++ b/lib/librte_cryptodev/rte_cryptodev.c
1127 @@ -1914,6 +1914,51 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1128         return dev->dev_ops->sym_cpu_process(dev, sess, ofs, vec);
1129  }
1130  
1131 +int32_t
1132 +rte_cryptodev_get_dp_service_ctx_data_size(uint8_t dev_id)
1133 +{
1134 +       struct rte_cryptodev *dev;
1135 +       int32_t size = sizeof(struct rte_crypto_dp_service_ctx);
1136 +       int32_t priv_size;
1137 +
1138 +       if (!rte_cryptodev_pmd_is_valid_dev(dev_id))
1139 +               return -1;
1140 +
1141 +       dev = rte_cryptodev_pmd_get_dev(dev_id);
1142 +
1143 +       if (*dev->dev_ops->get_drv_ctx_size == NULL ||
1144 +               !(dev->feature_flags & RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE)) {
1145 +               return -1;
1146 +       }
1147 +
1148 +       priv_size = (*dev->dev_ops->get_drv_ctx_size)(dev);
1149 +       if (priv_size < 0)
1150 +               return -1;
1151 +
1152 +       return RTE_ALIGN_CEIL((size + priv_size), 8);
1153 +}
1154 +
1155 +int
1156 +rte_cryptodev_dp_configure_service(uint8_t dev_id, uint16_t qp_id,
1157 +       enum rte_crypto_dp_service service_type,
1158 +       enum rte_crypto_op_sess_type sess_type,
1159 +       union rte_cryptodev_session_ctx session_ctx,
1160 +       struct rte_crypto_dp_service_ctx *ctx, uint8_t is_update)
1161 +{
1162 +       struct rte_cryptodev *dev;
1163 +
1164 +       if (rte_cryptodev_get_qp_status(dev_id, qp_id) != 1)
1165 +               return -1;
1166 +
1167 +       dev = rte_cryptodev_pmd_get_dev(dev_id);
1168 +       if (!(dev->feature_flags & RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE)
1169 +               || dev->dev_ops->configure_service == NULL)
1170 +               return -1;
1171 +
1172 +       return (*dev->dev_ops->configure_service)(dev, qp_id, ctx,
1173 +               service_type, sess_type, session_ctx, is_update);
1174 +}
1175 +
1176  /** Initialise rte_crypto_op mempool element */
1177  static void
1178  rte_crypto_op_init(struct rte_mempool *mempool,
1179 diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
1180 index 7b3ebc20f..6eb8ad9f9 100644
1181 --- a/lib/librte_cryptodev/rte_cryptodev.h
1182 +++ b/lib/librte_cryptodev/rte_cryptodev.h
1183 @@ -466,7 +466,8 @@ rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum,
1184  /**< Support symmetric session-less operations */
1185  #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA         (1ULL << 23)
1186  /**< Support operations on data which is not byte aligned */
1187 -
1188 +#define RTE_CRYPTODEV_FF_DATA_PLANE_SERVICE            (1ULL << 24)
1189 +/**< Support data-path service with raw data as input */
1190  
1191  /**
1192   * Get the name of a crypto device feature flag
1193 @@ -1351,6 +1352,339 @@ rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id,
1194         struct rte_cryptodev_sym_session *sess, union rte_crypto_sym_ofs ofs,
1195         struct rte_crypto_sym_vec *vec);
1196  
1197 +/**
1198 + * Get the size of the data-path service context for all registered drivers.
1199 + *
1200 + * @param      dev_id          The device identifier.
1201 + *
1202 + * @return
1203 + *   - If the device supports data-path service, return the context size.
1204 + *   - If the device does not support the data-path service, return -1.
1205 + */
1206 +__rte_experimental
1207 +int32_t
1208 +rte_cryptodev_get_dp_service_ctx_data_size(uint8_t dev_id);
1209 +
1210 +/**
1211 + * Union of different crypto session types, including sessionless
1212 + */
1213 +union rte_cryptodev_session_ctx {
1214 +       struct rte_cryptodev_sym_session *crypto_sess;
1215 +       struct rte_crypto_sym_xform *xform;
1216 +       struct rte_security_session *sec_sess;
1217 +};
1218 +
1219 +/**
1220 + * Submit a data vector into the device queue. The driver will not start
1221 + * processing until the submit-done operation (*submit_done*) is called.
1222 + *
1223 + * @param      qp              Driver specific queue pair data.
1224 + * @param      service_data    Driver specific service data.
1225 + * @param      vec             The array of job vectors.
1226 + * @param      ofs             Start and stop offsets for auth and cipher
1227 + *                             operations.
1228 + * @param      opaque          The array of opaque data for dequeue.
1229 + * @return
1230 + *   - The number of jobs successfully submitted.
1231 + */
1232 +typedef uint32_t (*cryptodev_dp_sym_submit_vec_t)(
1233 +       void *qp, uint8_t *service_data, struct rte_crypto_sym_vec *vec,
1234 +       union rte_crypto_sym_ofs ofs, void **opaque);
1235 +
1236 +/**
1237 + * Submit a single job into the device queue. The driver will not start
1238 + * processing until the submit-done operation (*submit_done*) is called.
1239 + *
1240 + * @param      qp              Driver specific queue pair data.
1241 + * @param      service_data    Driver specific service data.
1242 + * @param      data            The buffer vector.
1243 + * @param      n_data_vecs     Number of buffer vectors.
1244 + * @param      ofs             Start and stop offsets for auth and cipher
1245 + *                             operations.
1246 + * @param      iv              IV data.
1247 + * @param      digest          Digest data.
1248 + * @param      aad             AAD data.
1249 + * @param      opaque          The array of opaque data for dequeue.
1250 + * @return
1251 + *   - On success return 0.
1252 + *   - On failure return negative integer.
1253 + */
1254 +typedef int (*cryptodev_dp_submit_single_job_t)(
1255 +       void *qp_data, uint8_t *service_data, struct rte_crypto_vec *data,
1256 +       uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1257 +       struct rte_crypto_data *iv, struct rte_crypto_data *digest,
1258 +       struct rte_crypto_data *aad, void *opaque);
1259 +
1260 +/**
1261 + * Inform the queue pair to start processing or finish dequeuing all
1262 + * submitted/dequeued jobs.
1263 + *
1264 + * @param      qp              Driver specific queue pair data.
1265 + * @param      service_data    Driver specific service data.
1266 + * @param      n               The total number of submitted jobs.
1267 + */
1268 +typedef void (*cryptodev_dp_sym_opeartion_done_t)(void *qp,
1269 +               uint8_t *service_data, uint32_t n);
1270 +
1271 +/**
1272 + * Typedef of a user-provided callback to get the dequeue count. The user may
1273 + * use it to return a fixed number or a number parsed from the opaque data
1274 + * stored in the first processed job.
1275 + *
1276 + * @param      opaque          Dequeued opaque data.
1277 + **/
1278 +typedef uint32_t (*rte_cryptodev_get_dequeue_count_t)(void *opaque);
1279 +
1280 +/**
1281 + * Typedef of a user-provided callback to handle post-dequeue operations,
1282 + * such as filling the status.
1283 + *
1284 + * @param      opaque          Dequeued opaque data. In case the
1285 + *                             *is_opaque_array* flag of the dequeue call is
1286 + *                             set, this value will be the opaque data stored
1287 + *                             in the specific processed job referenced by
1288 + *                             index, otherwise it will be the opaque data
1289 + *                             stored in the first processed job in the burst.
1290 + * @param      index           Index number of the processed job.
1291 + * @param      is_op_success   Driver filled operation status.
1292 + **/
1293 +typedef void (*rte_cryptodev_post_dequeue_t)(void *opaque, uint32_t index,
1294 +               uint8_t is_op_success);
1295 +
1296 +/**
1297 + * Dequeue symmetric crypto processing of user provided data.
1298 + *
1299 + * @param      qp                      Driver specific queue pair data.
1300 + * @param      service_data            Driver specific service data.
1301 + * @param      get_dequeue_count       User provided callback function to
1302 + *                                     obtain dequeue count.
1303 + * @param      post_dequeue            User provided callback function to
1304 + *                                     post-process a dequeued operation.
1305 + * @param      out_opaque              Opaque pointer array to be retrieved
1306 + *                                     from the device queue. If
1307 + *                                     *is_opaque_array* is set there should
1308 + *                                     be enough room to store all opaque data.
1309 + * @param      is_opaque_array         Set to 1 if the opaque data of every
1310 + *                                     dequeued job should be written into the
1311 + *                                     *out_opaque* array.
1312 + * @param      n_success_jobs          Driver-written value specifying the
1313 + *                                     total number of successful operations.
1314 + *
1315 + * @return
1316 + *  - Returns number of dequeued packets.
1317 + */
1318 +typedef uint32_t (*cryptodev_dp_sym_dequeue_t)(void *qp, uint8_t *service_data,
1319 +       rte_cryptodev_get_dequeue_count_t get_dequeue_count,
1320 +       rte_cryptodev_post_dequeue_t post_dequeue,
1321 +       void **out_opaque, uint8_t is_opaque_array,
1322 +       uint32_t *n_success_jobs);
1323 +
1324 +/**
1325 + * Dequeue symmetric crypto processing of user-provided data.
1326 + *
1327 + * @param      qp                      Driver specific queue pair data.
1328 + * @param      service_data            Driver specific service data.
1329 + * @param      out_opaque              Opaque pointer to be retrieved from
1330 + *                                     device queue. The driver shall support
1331 + *                                     NULL input of this parameter.
1332 + *
1333 + * @return
1334 + *   - 1 if the job is dequeued and the operation is a success.
1335 + *   - 0 if the job is dequeued but the operation failed.
1336 + *   - -1 if no job is dequeued.
1337 + */
1338 +typedef int (*cryptodev_dp_sym_dequeue_single_job_t)(
1339 +               void *qp, uint8_t *service_data, void **out_opaque);
1340 +
1341 +/**
1342 + * Context data for asynchronous crypto process.
1343 + */
1344 +struct rte_crypto_dp_service_ctx {
1345 +       void *qp_data;
1346 +
1347 +       union {
1348 +               /* Supposed to be used for symmetric crypto service */
1349 +               struct {
1350 +                       cryptodev_dp_submit_single_job_t submit_single_job;
1351 +                       cryptodev_dp_sym_submit_vec_t submit_vec;
1352 +                       cryptodev_dp_sym_opeartion_done_t submit_done;
1353 +                       cryptodev_dp_sym_dequeue_t dequeue_opaque;
1354 +                       cryptodev_dp_sym_dequeue_single_job_t dequeue_single;
1355 +                       cryptodev_dp_sym_opeartion_done_t dequeue_done;
1356 +               };
1357 +       };
1358 +
1359 +       /* Driver specific service data */
1360 +       uint8_t drv_service_data[];
1361 +};
1362 +
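/*
 * A minimal sketch of how a PMD might lay its private per-queue data over the
 * drv_service_data[] flexible array above. The structure name and fields are
 * hypothetical; the only assumption is that the driver reports
 * sizeof(struct hypothetical_pmd_dp_data) through its get_drv_ctx_size hook
 * so that enough room is allocated behind the public fields.
 */
struct hypothetical_pmd_dp_data {
	uint16_t tail;          /* shadow of the ring tail for enqueued jobs */
	uint16_t head;          /* shadow of the ring head for dequeued jobs */
	void *session_priv;     /* cached session private data */
};

static inline struct hypothetical_pmd_dp_data *
hypothetical_pmd_dp_data_get(struct rte_crypto_dp_service_ctx *ctx)
{
	/* The driver private area starts right after the public fields. */
	return (struct hypothetical_pmd_dp_data *)ctx->drv_service_data;
}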
1363 +/**
1364 + * Initialize one data-path service; this should be called before submitting
1365 + * any job. When calling this function for the first time the user should
1366 + * clear the is_update parameter and the driver will fill the necessary
1367 + * operation data into the ctx buffer. The data stored in the ctx buffer will
1368 + * remain effective until rte_cryptodev_dp_submit_done() is called.
1369 + *
1370 + * @param      dev_id          The device identifier.
1371 + * @param      qp_id           The index of the queue pair from which to
1372 + *                             retrieve processed packets. The value must be
1373 + *                             in the range [0, nb_queue_pair - 1] previously
1374 + *                             supplied to rte_cryptodev_configure().
1375 + * @param      service_type    Type of the service requested.
1376 + * @param      sess_type       session type.
1377 + * @param      session_ctx     Session context data.
1378 + * @param      ctx             The data-path service context data.
1379 + * @param      is_update       Set to 1 if ctx is pre-initialized but needs
1380 + *                             to be updated to a different service type or
1381 + *                             session, while the rest of the driver data
1382 + *                             remains the same.
1383 + * @return
1384 + *   - On success return 0.
1385 + *   - On failure return negative integer.
1386 + */
1387 +__rte_experimental
1388 +int
1389 +rte_cryptodev_dp_configure_service(uint8_t dev_id, uint16_t qp_id,
1390 +       enum rte_crypto_dp_service service_type,
1391 +       enum rte_crypto_op_sess_type sess_type,
1392 +       union rte_cryptodev_session_ctx session_ctx,
1393 +       struct rte_crypto_dp_service_ctx *ctx, uint8_t is_update);
1394 +
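/*
 * A minimal setup sketch for the data-path service, assuming a symmetric
 * crypto session has already been created and wrapped into the
 * union rte_cryptodev_session_ctx defined earlier in this patch. The helper
 * name and its error handling are illustrative only, and the allocation
 * assumes rte_cryptodev_get_dp_service_ctx_data_size() (declared earlier in
 * this patch) returns the extra driver context size, or a negative number on
 * error.
 */
#include <rte_malloc.h>

static struct rte_crypto_dp_service_ctx *
example_dp_service_setup(uint8_t dev_id, uint16_t qp_id,
	enum rte_crypto_dp_service service_type,
	union rte_cryptodev_session_ctx session_ctx)
{
	struct rte_crypto_dp_service_ctx *ctx;
	int size;

	size = rte_cryptodev_get_dp_service_ctx_data_size(dev_id);
	if (size < 0)
		return NULL;

	/* Public context plus the driver specific trailing data. */
	ctx = rte_zmalloc(NULL, sizeof(*ctx) + size, 0);
	if (ctx == NULL)
		return NULL;

	/* First call: is_update is 0, so the driver fills the ctx buffer. */
	if (rte_cryptodev_dp_configure_service(dev_id, qp_id, service_type,
			RTE_CRYPTO_OP_WITH_SESSION, session_ctx, ctx, 0) < 0) {
		rte_free(ctx);
		return NULL;
	}

	return ctx;
}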
1395 +/**
1396 + * Submit a single job into the device queue. The driver will not start
1397 + * processing until rte_cryptodev_dp_submit_done() is called.
1398 + *
1399 + * @param      ctx             The initialized data-path service context data.
1400 + * @param      data            The buffer vector.
1401 + * @param      n_data_vecs     Number of buffer vectors.
1402 + * @param      ofs             Start and stop offsets for auth and cipher
1403 + *                             operations.
1404 + * @param      iv              IV data.
1405 + * @param      digest          Digest data.
1406 + * @param      aad             AAD data.
1407 + * @param      opaque          The opaque data used for dequeue.
1408 + * @return
1409 + *   - On success return 0.
1410 + *   - On failure return negative integer.
1411 + */
1412 +__rte_experimental
1413 +static __rte_always_inline int
1414 +rte_cryptodev_dp_submit_single_job(struct rte_crypto_dp_service_ctx *ctx,
1415 +               struct rte_crypto_vec *data, uint16_t n_data_vecs,
1416 +               union rte_crypto_sym_ofs ofs,
1417 +               struct rte_crypto_data *iv, struct rte_crypto_data *digest,
1418 +               struct rte_crypto_data *aad, void *opaque)
1419 +{
1420 +       return (*ctx->submit_single_job)(ctx->qp_data, ctx->drv_service_data,
1421 +               data, n_data_vecs, ofs, iv, digest, aad, opaque);
1422 +}
1423 +
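/*
 * A minimal single-job enqueue sketch, assuming ctx was configured as in the
 * setup sketch above and that the caller has already prepared the data
 * vectors and the per-job IV, digest and AAD descriptors (struct
 * rte_crypto_data, defined earlier in this patch). The helper name and the
 * user_cookie parameter are illustrative; the cookie is returned as the
 * opaque data when the job is dequeued.
 */
static inline int
example_dp_submit_one(struct rte_crypto_dp_service_ctx *ctx,
	struct rte_crypto_vec *data, uint16_t n_data_vecs,
	union rte_crypto_sym_ofs ofs, struct rte_crypto_data *iv,
	struct rte_crypto_data *digest, struct rte_crypto_data *aad,
	void *user_cookie)
{
	int ret;

	ret = rte_cryptodev_dp_submit_single_job(ctx, data, n_data_vecs, ofs,
			iv, digest, aad, user_cookie);
	if (ret < 0)
		return ret;

	/* Kick the queue pair so the driver starts processing this job. */
	rte_cryptodev_dp_submit_done(ctx, 1);
	return 0;
}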
1424 +/**
1425 + * Submit a data vector into the device queue. The driver will not start
1426 + * processing until rte_cryptodev_dp_submit_done() is called.
1427 + *
1428 + * @param      ctx     The initialized data-path service context data.
1429 + * @param      vec     The array of job vectors.
1430 + * @param      ofs     Start and stop offsets for auth and cipher operations.
1431 + * @param      opaque  The array of opaque data for dequeue.
1432 + * @return
1433 + *   - The number of jobs successfully submitted.
1434 + */
1435 +__rte_experimental
1436 +static __rte_always_inline uint32_t
1437 +rte_cryptodev_dp_sym_submit_vec(struct rte_crypto_dp_service_ctx *ctx,
1438 +       struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1439 +       void **opaque)
1440 +{
1441 +       return (*ctx->submit_vec)(ctx->qp_data, ctx->drv_service_data, vec,
1442 +                       ofs, opaque);
1443 +}
1444 +
1445 +/**
1446 + * Kick the queue pair to start processing all jobs submitted since the last
1447 + * rte_cryptodev_dp_configure_service() call.
1448 + *
1449 + * @param      ctx     The initialized data-path service context data.
1450 + * @param      n               The total number of submitted jobs.
1451 + */
1452 +__rte_experimental
1453 +static __rte_always_inline void
1454 +rte_cryptodev_dp_submit_done(struct rte_crypto_dp_service_ctx *ctx, uint32_t n)
1455 +{
1456 +       (*ctx->submit_done)(ctx->qp_data, ctx->drv_service_data, n);
1457 +}
1458 +
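/*
 * A minimal burst-enqueue sketch, assuming the caller has filled the
 * rte_crypto_sym_vec descriptor and one opaque pointer per job; the helper
 * name is illustrative. The jobs only start to be processed once
 * rte_cryptodev_dp_submit_done() is called with the number of successfully
 * submitted jobs.
 */
static inline uint32_t
example_dp_submit_burst(struct rte_crypto_dp_service_ctx *ctx,
	struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
	void **opaque)
{
	uint32_t n;

	n = rte_cryptodev_dp_sym_submit_vec(ctx, vec, ofs, opaque);
	if (n != 0)
		rte_cryptodev_dp_submit_done(ctx, n);
	return n;
}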
1459 +/**
1460 + * Dequeue symmetric crypto processing of user-provided data.
1461 + *
1462 + * @param      ctx                     The initialized data-path service
1463 + *                                     context data.
1464 + * @param      get_dequeue_count       User provided callback function to
1465 + *                                     obtain dequeue count.
1466 + * @param      post_dequeue            User provided callback function to
1467 + *                                     post-process a dequeued operation.
1468 + * @param      out_opaque              Opaque pointer array to be retrieved
1469 + *                                     from the device queue. If
1470 + *                                     *is_opaque_array* is set there should
1471 + *                                     be enough room to store all opaque data.
1472 + * @param      is_opaque_array         Set to 1 if the opaque data of every
1473 + *                                     dequeued job should be written into the
1474 + *                                     *out_opaque* array.
1475 + * @param      n_success_jobs          Driver-written value specifying the
1476 + *                                     total number of successful operations.
1477 + *
1478 + * @return
1479 + *   - Returns number of dequeued packets.
1480 + */
1481 +__rte_experimental
1482 +static __rte_always_inline uint32_t
1483 +rte_cryptodev_dp_sym_dequeue(struct rte_crypto_dp_service_ctx *ctx,
1484 +       rte_cryptodev_get_dequeue_count_t get_dequeue_count,
1485 +       rte_cryptodev_post_dequeue_t post_dequeue,
1486 +       void **out_opaque, uint8_t is_opaque_array,
1487 +       uint32_t *n_success_jobs)
1488 +{
1489 +       return (*ctx->dequeue_opaque)(ctx->qp_data, ctx->drv_service_data,
1490 +               get_dequeue_count, post_dequeue, out_opaque, is_opaque_array,
1491 +               n_success_jobs);
1492 +}
1493 +
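/*
 * A minimal burst-dequeue sketch showing the two user callbacks. It assumes
 * the opaque data submitted with the first job of a burst points to a
 * caller-owned cookie carrying the burst size and a per-job status array;
 * the structure and helper names are hypothetical. is_opaque_array is left
 * unset, so every callback receives the first job's opaque data.
 */
struct example_burst_cookie {
	uint32_t burst_size;    /* number of jobs submitted in this burst */
	uint8_t *status;        /* caller-owned per-job status array */
};

static uint32_t
example_get_dequeue_count(void *opaque)
{
	/* Called with the first dequeued job's opaque data. */
	return ((struct example_burst_cookie *)opaque)->burst_size;
}

static void
example_post_dequeue(void *opaque, uint32_t index, uint8_t is_op_success)
{
	/* Record the driver-reported status of each processed job. */
	((struct example_burst_cookie *)opaque)->status[index] = is_op_success;
}

static inline uint32_t
example_dp_dequeue_burst(struct rte_crypto_dp_service_ctx *ctx)
{
	void *first_opaque = NULL;
	uint32_t n_success = 0;
	uint32_t n;

	n = rte_cryptodev_dp_sym_dequeue(ctx, example_get_dequeue_count,
			example_post_dequeue, &first_opaque, 0, &n_success);
	if (n != 0)
		rte_cryptodev_dp_dequeue_done(ctx, n);
	return n_success;
}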
1494 +/**
1495 + * Dequeue a single symmetric crypto job of user-provided data.
1496 + *
1497 + * @param      ctx                     The initialized data-path service
1498 + *                                     context data.
1499 + * @param      out_opaque              Opaque pointer to be retrieved from
1500 + *                                     device queue. The driver shall support
1501 + *                                     NULL input of this parameter.
1502 + *
1503 + * @return
1504 + *   - 1 if the job is dequeued and the operation is a success.
1505 + *   - 0 if the job is dequeued but the operation failed.
1506 + *   - -1 if no job is dequeued.
1507 + */
1508 +__rte_experimental
1509 +static __rte_always_inline int
1510 +rte_cryptodev_dp_sym_dequeue_single_job(struct rte_crypto_dp_service_ctx *ctx,
1511 +               void **out_opaque)
1512 +{
1513 +       return (*ctx->dequeue_single)(ctx->qp_data, ctx->drv_service_data,
1514 +               out_opaque);
1515 +}
1516 +
1517 +/**
1518 + * Inform the queue pair that dequeuing of the jobs has finished.
1519 + *
1520 + * @param      ctx     The initialized data-path service context data.
1521 + * @param      n               The total number of dequeued jobs.
1522 + */
1523 +__rte_experimental
1524 +static __rte_always_inline void
1525 +rte_cryptodev_dp_dequeue_done(struct rte_crypto_dp_service_ctx *ctx, uint32_t n)
1526 +{
1527 +       (*ctx->dequeue_done)(ctx->qp_data, ctx->drv_service_data, n);
1528 +}
1529 +
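/*
 * A minimal single-job dequeue sketch matching the single-job submit sketch
 * above: busy-poll until one job is dequeued, then inform the driver that
 * dequeuing is done. The helper name is illustrative and a real application
 * would bound the polling loop; the returned pointer is the opaque cookie
 * passed at submit time.
 */
static inline void *
example_dp_dequeue_one(struct rte_crypto_dp_service_ctx *ctx, int *op_status)
{
	void *cookie = NULL;
	int ret;

	do {
		ret = rte_cryptodev_dp_sym_dequeue_single_job(ctx, &cookie);
	} while (ret == -1);    /* -1 means no job has been dequeued yet */

	*op_status = ret;       /* 1 on success, 0 on failure */
	rte_cryptodev_dp_dequeue_done(ctx, 1);
	return cookie;
}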
1530  #ifdef __cplusplus
1531  }
1532  #endif
1533 diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
1534 index 81975d72b..9904267d7 100644
1535 --- a/lib/librte_cryptodev/rte_cryptodev_pmd.h
1536 +++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
1537 @@ -316,6 +316,30 @@ typedef uint32_t (*cryptodev_sym_cpu_crypto_process_t)
1538         (struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess,
1539         union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec);
1540  
1541 +typedef int (*cryptodev_dp_get_service_ctx_size_t)(
1542 +       struct rte_cryptodev *dev);
1543 +
1544 +/**
1545 + * Typedef of a driver-provided function to configure the data-path service.
1546 + *
1547 + * @param      ctx             The data-path service context data.
1548 + * @param      service_type    Type of the service requested.
1549 + * @param      sess_type       session type.
1550 + * @param      session_ctx     Session context data.
1551 + * @param      is_update       Set to 1 if ctx is pre-initialized but needs
1552 + *                             to be updated to a different service type or
1553 + *                             session, while the rest of the driver data
1554 + *                             remains the same.
1555 + * @return
1556 + *   - On success return 0.
1557 + *   - On failure return negative integer.
1558 + */
1559 +typedef int (*cryptodev_dp_configure_service_t)(
1560 +       struct rte_cryptodev *dev, uint16_t qp_id,
1561 +       struct rte_crypto_dp_service_ctx *ctx,
1562 +       enum rte_crypto_dp_service service_type,
1563 +       enum rte_crypto_op_sess_type sess_type,
1564 +       union rte_cryptodev_session_ctx session_ctx, uint8_t is_update);
1565  
1566  /** Crypto device operations function pointer table */
1567  struct rte_cryptodev_ops {
1568 @@ -348,8 +372,16 @@ struct rte_cryptodev_ops {
1569         /**< Clear a Crypto sessions private data. */
1570         cryptodev_asym_free_session_t asym_session_clear;
1571         /**< Clear a Crypto sessions private data. */
1572 -       cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
1573 -       /**< process input data synchronously (cpu-crypto). */
1574 +       union {
1575 +               cryptodev_sym_cpu_crypto_process_t sym_cpu_process;
1576 +               /**< process input data synchronously (cpu-crypto). */
1577 +               struct {
1578 +                       cryptodev_dp_get_service_ctx_size_t get_drv_ctx_size;
1579 +                       /**< Get data path service context data size. */
1580 +                       cryptodev_dp_configure_service_t configure_service;
1581 +                       /**< Initialize crypto service ctx data. */
1582 +               };
1583 +       };
1584  };
1585  
1586  
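/*
 * A minimal sketch of how a PMD might wire the new data-path hooks into its
 * operations table. The hypothetical_pmd_* names are placeholders matching
 * cryptodev_dp_get_service_ctx_size_t and cryptodev_dp_configure_service_t
 * above; the remaining (unchanged) callbacks of struct rte_cryptodev_ops are
 * omitted and the return values are placeholders.
 */
static int
hypothetical_pmd_get_dp_ctx_size(struct rte_cryptodev *dev __rte_unused)
{
	/* A real driver returns the size of its private data-path context. */
	return 0;
}

static int
hypothetical_pmd_configure_dp_service(struct rte_cryptodev *dev __rte_unused,
	uint16_t qp_id __rte_unused,
	struct rte_crypto_dp_service_ctx *ctx __rte_unused,
	enum rte_crypto_dp_service service_type __rte_unused,
	enum rte_crypto_op_sess_type sess_type __rte_unused,
	union rte_cryptodev_session_ctx session_ctx __rte_unused,
	uint8_t is_update __rte_unused)
{
	/* A real driver validates the request and, when is_update is 0, fills
	 * ctx->qp_data, ctx->drv_service_data and the submit/dequeue handlers.
	 */
	return -1;
}

static struct rte_cryptodev_ops hypothetical_pmd_ops = {
	/* ... dev/queue-pair/session callbacks of a real PMD omitted ... */
	.get_drv_ctx_size = hypothetical_pmd_get_dp_ctx_size,
	.configure_service = hypothetical_pmd_configure_dp_service,
};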
1587 diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
1588 index a7a78dc41..6c5e78144 100644
1589 --- a/lib/librte_cryptodev/rte_cryptodev_version.map
1590 +++ b/lib/librte_cryptodev/rte_cryptodev_version.map
1591 @@ -106,4 +106,12 @@ EXPERIMENTAL {
1592  
1593         # added in 20.08
1594         rte_cryptodev_get_qp_status;
1595 +       rte_cryptodev_dp_configure_service;
1596 +       rte_cryptodev_get_dp_service_ctx_data_size;
1597 +       rte_cryptodev_dp_submit_single_job;
1598 +       rte_cryptodev_dp_sym_submit_vec;
1599 +       rte_cryptodev_dp_submit_done;
1600 +       rte_cryptodev_dp_sym_dequeue;
1601 +       rte_cryptodev_dp_sym_dequeue_single_job;
1602 +       rte_cryptodev_dp_dequeue_done;
1603  };
1604 -- 
1605 2.20.1
1606