/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Marvell International Ltd.
 * Copyright(c) 2017 Semihalf.
 * All rights reserved.
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_mrvl_pmd_private.h"

#define MRVL_MUSDK_DMA_MEMSIZE 41943040

#define MRVL_PMD_MAX_NB_SESS_ARG                ("max_nb_sessions")
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS        2048

static uint8_t cryptodev_driver_id;

struct mrvl_pmd_init_params {
        struct rte_cryptodev_pmd_init_params common;
        uint32_t max_nb_sessions;
};

const char *mrvl_pmd_valid_params[] = {
        RTE_CRYPTODEV_PMD_NAME_ARG,
        RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
        RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
        MRVL_PMD_MAX_NB_SESS_ARG
};

/**
 * Flags whether a particular crypto algorithm is supported by the PMD/MUSDK.
 *
 * "Not supported" is deliberately the default value (0): only the map sizes
 * need to be defined, and any entry left uninitialized is treated as not
 * supported.
 */
enum algo_supported {
        ALGO_NOT_SUPPORTED = 0,
        ALGO_SUPPORTED = 1,
};

/** Map elements for cipher mapping.*/
struct cipher_params_mapping {
        enum algo_supported  supported;   /**< On/Off switch */
        enum sam_cipher_alg  cipher_alg;  /**< Cipher algorithm */
        enum sam_cipher_mode cipher_mode; /**< Cipher mode */
        unsigned int max_key_len;         /**< Maximum key length (in bytes)*/
}
/* We want to squeeze multiple maps into a single cache line. */
__rte_aligned(32);

/** Map elements for auth mapping.*/
struct auth_params_mapping {
        enum algo_supported supported;  /**< On/off switch */
        enum sam_auth_alg   auth_alg;   /**< Auth algorithm */
}
/* We want to squeeze multiple maps into a single cache line. */
__rte_aligned(32);

/**
 * Map of supported cipher algorithms.
 */
static const
struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
        [RTE_CRYPTO_CIPHER_3DES_CBC] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_3DES,
                .cipher_mode = SAM_CIPHER_CBC,
                .max_key_len = BITS2BYTES(192) },
        [RTE_CRYPTO_CIPHER_3DES_CTR] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_3DES,
                .cipher_mode = SAM_CIPHER_CTR,
                .max_key_len = BITS2BYTES(192) },
        [RTE_CRYPTO_CIPHER_3DES_ECB] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_3DES,
                .cipher_mode = SAM_CIPHER_ECB,
                .max_key_len = BITS2BYTES(192) },
        [RTE_CRYPTO_CIPHER_AES_CBC] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_AES,
                .cipher_mode = SAM_CIPHER_CBC,
                .max_key_len = BITS2BYTES(256) },
        [RTE_CRYPTO_CIPHER_AES_CTR] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_AES,
                .cipher_mode = SAM_CIPHER_CTR,
                .max_key_len = BITS2BYTES(256) },
};

/**
 * Map of supported auth algorithms.
 */
static const
struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
        [RTE_CRYPTO_AUTH_MD5_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_MD5 },
        [RTE_CRYPTO_AUTH_MD5] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_MD5 },
        [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA1 },
        [RTE_CRYPTO_AUTH_SHA1] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA1 },
        [RTE_CRYPTO_AUTH_SHA224] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_224 },
        [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
        [RTE_CRYPTO_AUTH_SHA256] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_256 },
        [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
        [RTE_CRYPTO_AUTH_SHA384] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_384 },
        [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
        [RTE_CRYPTO_AUTH_SHA512] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_HASH_SHA2_512 },
        [RTE_CRYPTO_AUTH_AES_GMAC] = {
                .supported = ALGO_SUPPORTED,
                .auth_alg = SAM_AUTH_AES_GMAC },
};

/**
 * Map of supported aead algorithms.
 */
static const
struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
        [RTE_CRYPTO_AEAD_AES_GCM] = {
                .supported = ALGO_SUPPORTED,
                .cipher_alg = SAM_CIPHER_AES,
                .cipher_mode = SAM_CIPHER_GCM,
                .max_key_len = BITS2BYTES(256) },
};

/*
 *-----------------------------------------------------------------------------
 * Forward declarations.
 *-----------------------------------------------------------------------------
 */
static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);

/*
 *-----------------------------------------------------------------------------
 * Session Preparation.
 *-----------------------------------------------------------------------------
 */

/**
 * Get xform chain order.
 *
 * @param xform Pointer to configuration structure chain for crypto operations.
 * @returns Order of crypto operations.
 */
static enum mrvl_crypto_chain_order
mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
        /* Currently, Marvell supports max 2 operations in chain */
        if (xform->next != NULL && xform->next->next != NULL)
                return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;

        if (xform->next != NULL) {
                if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
                        (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
                        return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;

                if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
                        (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
                        return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
        } else {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
                        return MRVL_CRYPTO_CHAIN_AUTH_ONLY;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
                        return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
                        return MRVL_CRYPTO_CHAIN_COMBINED;
        }
        return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
}

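/*
 * Illustration (not part of the driver): a cipher-then-auth chain, as an
 * application would typically build it with the generic rte_crypto_sym_xform
 * API, is classified by the function above as MRVL_CRYPTO_CHAIN_CIPHER_AUTH.
 * The concrete algorithm choices below are only an example.
 *
 *   struct rte_crypto_sym_xform auth_xf = {
 *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *           .next = NULL,
 *           .auth = {
 *                   .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *                   .op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *           },
 *   };
 *   struct rte_crypto_sym_xform cipher_xf = {
 *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *           .next = &auth_xf,
 *           .cipher = {
 *                   .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *                   .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *           },
 *   };
 */
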
/**
 * Set session parameters for cipher part.
 *
 * @param sess Crypto session pointer.
 * @param cipher_xform Pointer to configuration structure for cipher operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *cipher_xform)
{
        /* Make sure we've got proper struct */
        if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
                MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
                return -EINVAL;
        }

        /* See if map data is present and valid */
        if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
                (cipher_map[cipher_xform->cipher.algo].supported
                        != ALGO_SUPPORTED)) {
                MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
                return -EINVAL;
        }

        sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;

        sess->sam_sess_params.dir =
                (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
        sess->sam_sess_params.cipher_alg =
                cipher_map[cipher_xform->cipher.algo].cipher_alg;
        sess->sam_sess_params.cipher_mode =
                cipher_map[cipher_xform->cipher.algo].cipher_mode;

        /* Assume IV will be passed together with data. */
        sess->sam_sess_params.cipher_iv = NULL;

        /* Check against the maximum key length. */
        if (cipher_xform->cipher.key.length >
                cipher_map[cipher_xform->cipher.algo].max_key_len) {
                MRVL_CRYPTO_LOG_ERR("Wrong key length!");
                return -EINVAL;
        }

        sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
        sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;

        return 0;
}

/**
 * Set session parameters for authentication part.
 *
 * @param sess Crypto session pointer.
 * @param auth_xform Pointer to configuration structure for auth operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *auth_xform)
{
        /* Make sure we've got proper struct */
        if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
                MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
                return -EINVAL;
        }

        /* See if map data is present and valid */
        if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
                (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
                MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
                return -EINVAL;
        }

        sess->sam_sess_params.dir =
                (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
                SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
        sess->sam_sess_params.auth_alg =
                auth_map[auth_xform->auth.algo].auth_alg;
        sess->sam_sess_params.u.basic.auth_icv_len =
                auth_xform->auth.digest_length;
        /* auth_key must be NULL if the auth algorithm does not use HMAC */
        sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
                                         auth_xform->auth.key.data : NULL;
        sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;

        return 0;
}

/**
 * Set session parameters for AEAD part.
 *
 * @param sess Crypto session pointer.
 * @param aead_xform Pointer to configuration structure for AEAD operations.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *aead_xform)
{
        /* Make sure we've got proper struct */
        if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
                MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
                return -EINVAL;
        }

        /* See if map data is present and valid */
        if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
                (aead_map[aead_xform->aead.algo].supported
                        != ALGO_SUPPORTED)) {
                MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
                return -EINVAL;
        }

        /* For AEAD the IV offset comes from the AEAD xform. */
        sess->cipher_iv_offset = aead_xform->aead.iv.offset;

        sess->sam_sess_params.dir =
                (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
                SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
        sess->sam_sess_params.cipher_alg =
                aead_map[aead_xform->aead.algo].cipher_alg;
        sess->sam_sess_params.cipher_mode =
                aead_map[aead_xform->aead.algo].cipher_mode;

        /* Assume IV will be passed together with data. */
        sess->sam_sess_params.cipher_iv = NULL;

        /* Check against the maximum key length. */
        if (aead_xform->aead.key.length >
                aead_map[aead_xform->aead.algo].max_key_len) {
                MRVL_CRYPTO_LOG_ERR("Wrong key length!");
                return -EINVAL;
        }

        sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
        sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;

        if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
                sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;

        sess->sam_sess_params.u.basic.auth_icv_len =
                aead_xform->aead.digest_length;

        sess->sam_sess_params.u.basic.auth_aad_len =
                aead_xform->aead.aad_length;

        return 0;
}

/**
 * Parse crypto transform chain and set up session parameters.
 *
 * @param sess Pointer to crypto session.
 * @param xform Pointer to configuration structure chain for crypto operations.
 * @returns 0 in case of success, negative value otherwise.
 */
int
mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *cipher_xform = NULL;
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *aead_xform = NULL;

        /* Filter out spurious/broken requests */
        if (xform == NULL)
                return -EINVAL;

        sess->chain_order = mrvl_crypto_get_chain_order(xform);
        switch (sess->chain_order) {
        case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
                cipher_xform = xform;
                auth_xform = xform->next;
                break;
        case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
                cipher_xform = xform;
                break;
        case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
                auth_xform = xform;
                break;
        case MRVL_CRYPTO_CHAIN_COMBINED:
                aead_xform = xform;
                break;
        default:
                return -EINVAL;
        }

        if ((cipher_xform != NULL) &&
                (mrvl_crypto_set_cipher_session_parameters(
                        sess, cipher_xform) < 0)) {
                MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
                return -EINVAL;
        }

        if ((auth_xform != NULL) &&
                (mrvl_crypto_set_auth_session_parameters(
                        sess, auth_xform) < 0)) {
                MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
                return -EINVAL;
        }

        if ((aead_xform != NULL) &&
                (mrvl_crypto_set_aead_session_parameters(
                        sess, aead_xform) < 0)) {
                MRVL_CRYPTO_LOG_ERR("Invalid/unsupported AEAD parameters");
                return -EINVAL;
        }

        return 0;
}

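/*
 * Sketch (illustrative only, not part of the driver): the function above is
 * reached through the generic cryptodev session API. An application would
 * typically do something along these lines:
 *
 *   struct rte_cryptodev_sym_session *s =
 *           rte_cryptodev_sym_session_create(session_mempool);
 *   rte_cryptodev_sym_session_init(dev_id, s, &cipher_xf, session_priv_pool);
 *
 * session_mempool, session_priv_pool, dev_id and cipher_xf are placeholders
 * for the application's own objects; the exact session API differs between
 * DPDK versions.
 */
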
/*
 *-----------------------------------------------------------------------------
 * Process Operations
 *-----------------------------------------------------------------------------
 */

/**
 * Prepare a single request.
 *
 * This function basically translates a DPDK crypto request into one
 * understandable by MUSDK's SAM. If this is the first request in a session,
 * it also starts the session.
 *
 * @param request Pointer to pre-allocated and reset request buffer [Out].
 * @param src_bd Pointer to pre-allocated source descriptor [Out].
 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
 * @param op Pointer to DPDK crypto operation struct [In].
 */
static inline int
mrvl_request_prepare(struct sam_cio_op_params *request,
                struct sam_buf_info *src_bd,
                struct sam_buf_info *dst_bd,
                struct rte_crypto_op *op)
{
        struct mrvl_crypto_session *sess;
        struct rte_mbuf *dst_mbuf;
        uint8_t *digest;

        if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
                                "oriented requests, op (%p) is sessionless.",
                                op);
                return -EINVAL;
        }

        sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
                        op->sym->session, cryptodev_driver_id);
        if (unlikely(sess == NULL)) {
                MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
                return -EINVAL;
        }

        /*
         * If the application delivered a NULL dst buffer, it expects
         * the result to be placed in the src buffer.
         */
        dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

        request->sa = sess->sam_sess;
        request->cookie = op;

        /* Single buffers only, sorry. */
        request->num_bufs = 1;
        request->src = src_bd;
        src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
        src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
        src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);

        /* Empty source. */
        if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
                /* EIP does not support 0 length buffers. */
                MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
                return -1;
        }

        /* Empty destination. */
        if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
                /* Make the dst buffer fit at least the source data. */
                if (rte_pktmbuf_append(dst_mbuf,
                        rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
                        MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
                        return -1;
                }
        }

        request->dst = dst_bd;
        dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
        dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);

        /*
         * We can use all available space in dst_mbuf,
         * not only what's used currently.
         */
        dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);

        if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
                request->cipher_len = op->sym->aead.data.length;
                request->cipher_offset = op->sym->aead.data.offset;
                request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
                        sess->cipher_iv_offset);

                request->auth_aad = op->sym->aead.aad.data;
                request->auth_offset = request->cipher_offset;
                request->auth_len = request->cipher_len;
        } else {
                request->cipher_len = op->sym->cipher.data.length;
                request->cipher_offset = op->sym->cipher.data.offset;
                request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
                                sess->cipher_iv_offset);

                request->auth_offset = op->sym->auth.data.offset;
                request->auth_len = op->sym->auth.data.length;
        }

        digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
                op->sym->aead.digest.data : op->sym->auth.digest.data;
        if (digest == NULL) {
                /* No auth - no worry. */
                return 0;
        }

        request->auth_icv_offset = request->auth_offset + request->auth_len;

        /*
         * EIP supports only scenarios where the ICV (digest buffer) is placed
         * at auth_icv_offset. Any other placement means risking errors.
         */
        if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
                /*
                 * This should be the most common case anyway,
                 * EIP will overwrite the DST buffer at auth_icv_offset.
                 */
                if (rte_pktmbuf_mtod_offset(
                                dst_mbuf, uint8_t *,
                                request->auth_icv_offset) == digest) {
                        return 0;
                }
        } else { /* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
                /*
                 * EIP will look for the digest at auth_icv_offset
                 * in the SRC buffer.
                 */
                if (rte_pktmbuf_mtod_offset(
                                op->sym->m_src, uint8_t *,
                                request->auth_icv_offset) == digest) {
                        return 0;
                }
        }

        /*
         * If we landed here, the digest pointer is
         * at a different place than expected.
         */
        return -1;
}

/*
 *-----------------------------------------------------------------------------
 * PMD Framework handlers
 *-----------------------------------------------------------------------------
 */

/**
 * Enqueue burst.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements consumed from ops.
 */
static uint16_t
mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        uint16_t iter_ops = 0;
        uint16_t to_enq = 0;
        uint16_t consumed = 0;
        int ret;
        struct sam_cio_op_params requests[nb_ops];
        /*
         * DPDK uses single fragment buffers, so we can KISS descriptors.
         * SAM does not store bd pointers, so on-stack scope will be enough.
         */
        struct sam_buf_info src_bd[nb_ops];
        struct sam_buf_info dst_bd[nb_ops];
        struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;

        if (nb_ops == 0)
                return 0;

        /* Prepare the burst. */
        memset(&requests, 0, sizeof(requests));

        /* Iterate through */
        for (; iter_ops < nb_ops; ++iter_ops) {
                if (mrvl_request_prepare(&requests[iter_ops],
                                        &src_bd[iter_ops],
                                        &dst_bd[iter_ops],
                                        ops[iter_ops]) < 0) {
                        MRVL_CRYPTO_LOG_ERR(
                                "Error while preparing request parameters!");
                        qp->stats.enqueue_err_count++;
                        ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        /*
                         * The number of handled ops is increased even if
                         * handling this op resulted in an error.
                         */
                        ++consumed;
                        break;
                }

                ops[iter_ops]->status =
                        RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

                /* Increase the number of ops to enqueue. */
                ++to_enq;
        } /* for (; iter_ops < nb_ops;... */

        if (to_enq > 0) {
                /* Send the burst */
                ret = sam_cio_enq(qp->cio, requests, &to_enq);
                consumed += to_enq;
                if (ret < 0) {
                        /*
                         * Trust SAM that in this case the returned value will
                         * at some point be correct (for now it is returned
                         * unmodified).
                         */
                        qp->stats.enqueue_err_count += to_enq;
                        for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
                                ops[iter_ops]->status =
                                        RTE_CRYPTO_OP_STATUS_ERROR;
                }
        }

        qp->stats.enqueued_count += to_enq;
        return consumed;
}

/**
 * Dequeue burst.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements dequeued.
 */
static uint16_t
mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
                struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        int ret;
        struct mrvl_crypto_qp *qp = queue_pair;
        struct sam_cio *cio = qp->cio;
        struct sam_cio_op_result results[nb_ops];
        uint16_t i;

        ret = sam_cio_deq(cio, results, &nb_ops);
        if (ret < 0) {
                /* Count all dequeued ops as errors. */
                qp->stats.dequeue_err_count += nb_ops;

                /* But also count them as dequeued anyway. */
                qp->stats.dequeued_count += nb_ops;

                return 0;
        }

        /* Unpack and check results. */
        for (i = 0; i < nb_ops; ++i) {
                ops[i] = results[i].cookie;

                switch (results[i].status) {
                case SAM_CIO_OK:
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        break;
                case SAM_CIO_ERR_ICV:
                        MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        break;
                default:
                        MRVL_CRYPTO_LOG_DBG(
                                "CIO returned Error: %d", results[i].status);
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
                        break;
                }
        }

        qp->stats.dequeued_count += nb_ops;
        return nb_ops;
}

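/*
 * Sketch (illustrative only, not part of the driver): applications do not
 * call the two burst handlers above directly; they go through the generic
 * cryptodev API, which dispatches to the function pointers registered in
 * cryptodev_mrvl_crypto_create() below, e.g.:
 *
 *   uint16_t enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *   ...
 *   uint16_t deq = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
 *
 * dev_id, qp_id, ops and nb_ops stand for the application's own device id,
 * queue pair id and crypto operation array.
 */
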
/**
 * Create a new crypto device.
 *
 * @param name Driver name.
 * @param vdev Pointer to device structure.
 * @param init_params Pointer to initialization parameters.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_create(const char *name,
                struct rte_vdev_device *vdev,
                struct mrvl_pmd_init_params *init_params)
{
        struct rte_cryptodev *dev;
        struct mrvl_crypto_private *internals;
        struct sam_init_params  sam_params;
        int ret;

        dev = rte_cryptodev_pmd_create(name, &vdev->device,
                        &init_params->common);
        if (dev == NULL) {
                MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
                goto init_error;
        }

        dev->driver_id = cryptodev_driver_id;
        dev->dev_ops = rte_mrvl_crypto_pmd_ops;

        /* Register rx/tx burst functions for data path. */
        dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
        dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;

        dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED;

        /* Store device limits in the private data. */
        internals = dev->data->dev_private;

        internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
        internals->max_nb_sessions = init_params->max_nb_sessions;

        /*
         * ret == -EEXIST is correct, it means the DMA
         * memory has already been initialized.
         */
        ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
        if (ret < 0) {
                if (ret != -EEXIST)
                        return ret;

                MRVL_CRYPTO_LOG_INFO(
                        "DMA memory has already been initialized by a different driver.");
        }

        sam_params.max_num_sessions = internals->max_nb_sessions;

        return sam_init(&sam_params);

init_error:
        MRVL_CRYPTO_LOG_ERR(
                "driver %s: %s failed", init_params->common.name, __func__);

        cryptodev_mrvl_crypto_uninit(vdev);
        return -EFAULT;
}

/** Parse an integer argument. */
static int
parse_integer_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        int *i = (int *) extra_args;

        *i = atoi(value);
        if (*i < 0) {
                MRVL_CRYPTO_LOG_ERR("Argument has to be non-negative.\n");
                return -EINVAL;
        }

        return 0;
}

/** Parse name */
static int
parse_name_arg(const char *key __rte_unused,
                const char *value, void *extra_args)
{
        struct rte_cryptodev_pmd_init_params *params = extra_args;

        if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
                MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
                                "%u bytes.\n", value,
                                RTE_CRYPTODEV_NAME_MAX_LEN - 1);
                return -EINVAL;
        }

        strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);

        return 0;
}

static int
mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
                         const char *input_args)
{
        struct rte_kvargs *kvlist = NULL;
        int ret = 0;

        if (params == NULL)
                return -EINVAL;

        if (input_args) {
                kvlist = rte_kvargs_parse(input_args,
                                          mrvl_pmd_valid_params);
                if (kvlist == NULL)
                        return -1;

                /* Common VDEV parameters */
                ret = rte_kvargs_process(kvlist,
                                         RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
                                         &parse_integer_arg,
                                         &params->common.max_nb_queue_pairs);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                         RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
                                         &parse_integer_arg,
                                         &params->common.socket_id);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                         RTE_CRYPTODEV_PMD_NAME_ARG,
                                         &parse_name_arg,
                                         &params->common);
                if (ret < 0)
                        goto free_kvlist;

                ret = rte_kvargs_process(kvlist,
                                         MRVL_PMD_MAX_NB_SESS_ARG,
                                         &parse_integer_arg,
                                         &params->max_nb_sessions);
                if (ret < 0)
                        goto free_kvlist;

        }

free_kvlist:
        rte_kvargs_free(kvlist);
        return ret;
}

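/*
 * Example (illustrative): an input_args string accepted by the parser above
 * could look like
 *
 *   "max_nb_queue_pairs=2,socket_id=0,max_nb_sessions=1024"
 *
 * Every key is optional; rte_kvargs_parse() rejects keys that are not listed
 * in mrvl_pmd_valid_params.
 */
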
/**
 * Initialize the crypto device.
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
{
        struct mrvl_pmd_init_params init_params = {
                .common = {
                        .name = "",
                        .private_data_size =
                                sizeof(struct mrvl_crypto_private),
                        .max_nb_queue_pairs =
                                sam_get_num_inst() * SAM_HW_RING_NUM,
                        .socket_id = rte_socket_id()
                },
                .max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
        };

        const char *name, *args;
        int ret;

        name = rte_vdev_device_name(vdev);
        if (name == NULL)
                return -EINVAL;
        args = rte_vdev_device_args(vdev);

        ret = mrvl_pmd_parse_input_args(&init_params, args);
        if (ret) {
                RTE_LOG(ERR, PMD,
                        "Failed to parse initialisation arguments[%s]\n",
                        args);
                return -EINVAL;
        }

        return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
}

/**
 * Uninitialize the crypto device
 *
 * @param vdev Pointer to device structure.
 * @returns 0 in case of success, negative value otherwise.
 */
static int
cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
{
        struct rte_cryptodev *cryptodev;
        const char *name = rte_vdev_device_name(vdev);

        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD,
                "Closing Marvell crypto device %s on numa socket %u\n",
                name, rte_socket_id());

        sam_deinit();

        cryptodev = rte_cryptodev_pmd_get_named_dev(name);
        if (cryptodev == NULL)
                return -ENODEV;

        return rte_cryptodev_pmd_destroy(cryptodev);
}

/**
 * Basic driver handlers for use in the constructor.
 */
static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
        .probe = cryptodev_mrvl_crypto_init,
        .remove = cryptodev_mrvl_crypto_uninit
};

static struct cryptodev_driver mrvl_crypto_drv;

/* Register the driver in the constructor. */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
        "max_nb_queue_pairs=<int> "
        "max_nb_sessions=<int> "
        "socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
                cryptodev_driver_id);
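
/*
 * Usage sketch (illustrative): the PMD is instantiated as a virtual device,
 * e.g. via an EAL argument along the lines of
 *
 *   --vdev "<name>,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0"
 *
 * where <name> is whatever CRYPTODEV_NAME_MRVL_PMD expands to in this DPDK
 * version.
 */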