New upstream version 18.02
[deb_dpdk.git] / drivers / crypto / mrvl / rte_mrvl_pmd.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2017 Marvell International Ltd.
5  *   Copyright(c) 2017 Semihalf.
6  *   All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of the copyright holder nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <rte_common.h>
36 #include <rte_hexdump.h>
37 #include <rte_cryptodev.h>
38 #include <rte_cryptodev_pmd.h>
39 #include <rte_bus_vdev.h>
40 #include <rte_malloc.h>
41 #include <rte_cpuflags.h>
42
43 #include "rte_mrvl_pmd_private.h"
44
45 #define MRVL_MUSDK_DMA_MEMSIZE 41943040
46
47 static uint8_t cryptodev_driver_id;
48
/**
 * Flag if particular crypto algorithm is supported by PMD/MUSDK.
 *
 * The idea is to have Not Supported value as default (0).
 * This way we need only to define proper map sizes,
 * non-initialized entries will be by default not supported.
 */
enum algo_supported {
	ALGO_NOT_SUPPORTED = 0,	/**< Default for zero-initialized map slots */
	ALGO_SUPPORTED = 1,	/**< Explicitly enabled in a capability map */
};
60
/** Map elements for cipher mapping.
 *
 * One entry per rte_crypto_cipher_algorithm value; translates a DPDK
 * cipher algorithm into the MUSDK SAM algorithm/mode pair and records
 * the largest key size the hardware accepts for it.
 */
struct cipher_params_mapping {
	enum algo_supported  supported;   /**< On/Off switch */
	enum sam_cipher_alg  cipher_alg;  /**< Cipher algorithm */
	enum sam_cipher_mode cipher_mode; /**< Cipher mode */
	unsigned int max_key_len;         /**< Maximum key length (in bytes)*/
}
/* We want to squeeze in multiple maps into the cache line. */
__rte_aligned(32);
70
/** Map elements for auth mapping.
 *
 * One entry per rte_crypto_auth_algorithm value; translates a DPDK
 * auth algorithm into the corresponding MUSDK SAM auth algorithm.
 */
struct auth_params_mapping {
	enum algo_supported supported;  /**< On/off switch */
	enum sam_auth_alg   auth_alg;   /**< Auth algorithm */
}
/* We want to squeeze in multiple maps into the cache line. */
__rte_aligned(32);
78
/**
 * Map of supported cipher algorithms.
 *
 * Indexed by enum rte_crypto_cipher_algorithm. Algorithms absent from the
 * designated-initializer list are zero-initialized and thus report
 * ALGO_NOT_SUPPORTED (see enum algo_supported).
 */
static const
struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
	[RTE_CRYPTO_CIPHER_3DES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_3DES_ECB] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_3DES,
		.cipher_mode = SAM_CIPHER_ECB,
		.max_key_len = BITS2BYTES(192) },
	[RTE_CRYPTO_CIPHER_AES_CBC] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CBC,
		.max_key_len = BITS2BYTES(256) },
	[RTE_CRYPTO_CIPHER_AES_CTR] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_CTR,
		.max_key_len = BITS2BYTES(256) },
};
110
/**
 * Map of supported auth algorithms.
 *
 * Indexed by enum rte_crypto_auth_algorithm; entries not listed are
 * zero-initialized and therefore not supported.
 * NOTE(review): plain SHA224 is listed but SHA224-HMAC is not — presumably
 * intentional (no MUSDK support at this version); confirm before extending.
 */
static const
struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
	[RTE_CRYPTO_AUTH_MD5_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_MD5 },
	[RTE_CRYPTO_AUTH_MD5] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_MD5 },
	[RTE_CRYPTO_AUTH_SHA1_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA1 },
	[RTE_CRYPTO_AUTH_SHA1] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA1 },
	[RTE_CRYPTO_AUTH_SHA224] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_224 },
	[RTE_CRYPTO_AUTH_SHA256_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA256] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_256 },
	[RTE_CRYPTO_AUTH_SHA384_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA384] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_384 },
	[RTE_CRYPTO_AUTH_SHA512_HMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HMAC_SHA2_512 },
	[RTE_CRYPTO_AUTH_SHA512] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_HASH_SHA2_512 },
	[RTE_CRYPTO_AUTH_AES_GMAC] = {
		.supported = ALGO_SUPPORTED,
		.auth_alg = SAM_AUTH_AES_GMAC },
};
153
/**
 * Map of supported aead algorithms.
 *
 * Reuses struct cipher_params_mapping because for AEAD only the cipher
 * algorithm/mode and key length need to be mapped; the auth algorithm is
 * derived from the mode (see mrvl_crypto_set_aead_session_parameters).
 * Indexed by enum rte_crypto_aead_algorithm.
 */
static const
struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
	[RTE_CRYPTO_AEAD_AES_GCM] = {
		.supported = ALGO_SUPPORTED,
		.cipher_alg = SAM_CIPHER_AES,
		.cipher_mode = SAM_CIPHER_GCM,
		.max_key_len = BITS2BYTES(256) },
};
165
166 /*
167  *-----------------------------------------------------------------------------
168  * Forward declarations.
169  *-----------------------------------------------------------------------------
170  */
171 static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
172
173 /*
174  *-----------------------------------------------------------------------------
175  * Session Preparation.
176  *-----------------------------------------------------------------------------
177  */
178
179 /**
180  * Get xform chain order.
181  *
182  * @param xform Pointer to configuration structure chain for crypto operations.
183  * @returns Order of crypto operations.
184  */
185 static enum mrvl_crypto_chain_order
186 mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
187 {
188         /* Currently, Marvell supports max 2 operations in chain */
189         if (xform->next != NULL && xform->next->next != NULL)
190                 return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
191
192         if (xform->next != NULL) {
193                 if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
194                         (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
195                         return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
196
197                 if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
198                         (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
199                         return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
200         } else {
201                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
202                         return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
203
204                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
205                         return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
206
207                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
208                         return MRVL_CRYPTO_CHAIN_COMBINED;
209         }
210         return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
211 }
212
213 /**
214  * Set session parameters for cipher part.
215  *
216  * @param sess Crypto session pointer.
217  * @param cipher_xform Pointer to configuration structure for cipher operations.
218  * @returns 0 in case of success, negative value otherwise.
219  */
220 static int
221 mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
222                 const struct rte_crypto_sym_xform *cipher_xform)
223 {
224         /* Make sure we've got proper struct */
225         if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
226                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
227                 return -EINVAL;
228         }
229
230         /* See if map data is present and valid */
231         if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
232                 (cipher_map[cipher_xform->cipher.algo].supported
233                         != ALGO_SUPPORTED)) {
234                 MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
235                 return -EINVAL;
236         }
237
238         sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
239
240         sess->sam_sess_params.dir =
241                 (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
242                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
243         sess->sam_sess_params.cipher_alg =
244                 cipher_map[cipher_xform->cipher.algo].cipher_alg;
245         sess->sam_sess_params.cipher_mode =
246                 cipher_map[cipher_xform->cipher.algo].cipher_mode;
247
248         /* Assume IV will be passed together with data. */
249         sess->sam_sess_params.cipher_iv = NULL;
250
251         /* Get max key length. */
252         if (cipher_xform->cipher.key.length >
253                 cipher_map[cipher_xform->cipher.algo].max_key_len) {
254                 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
255                 return -EINVAL;
256         }
257
258         sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
259         sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
260
261         return 0;
262 }
263
264 /**
265  * Set session parameters for authentication part.
266  *
267  * @param sess Crypto session pointer.
268  * @param auth_xform Pointer to configuration structure for auth operations.
269  * @returns 0 in case of success, negative value otherwise.
270  */
271 static int
272 mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
273                 const struct rte_crypto_sym_xform *auth_xform)
274 {
275         /* Make sure we've got proper struct */
276         if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
277                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
278                 return -EINVAL;
279         }
280
281         /* See if map data is present and valid */
282         if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
283                 (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
284                 MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
285                 return -EINVAL;
286         }
287
288         sess->sam_sess_params.dir =
289                 (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
290                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
291         sess->sam_sess_params.auth_alg =
292                 auth_map[auth_xform->auth.algo].auth_alg;
293         sess->sam_sess_params.u.basic.auth_icv_len =
294                 auth_xform->auth.digest_length;
295         /* auth_key must be NULL if auth algorithm does not use HMAC */
296         sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
297                                          auth_xform->auth.key.data : NULL;
298         sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
299
300         return 0;
301 }
302
303 /**
304  * Set session parameters for aead part.
305  *
306  * @param sess Crypto session pointer.
307  * @param aead_xform Pointer to configuration structure for aead operations.
308  * @returns 0 in case of success, negative value otherwise.
309  */
310 static int
311 mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
312                 const struct rte_crypto_sym_xform *aead_xform)
313 {
314         /* Make sure we've got proper struct */
315         if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
316                 MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
317                 return -EINVAL;
318         }
319
320         /* See if map data is present and valid */
321         if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
322                 (aead_map[aead_xform->aead.algo].supported
323                         != ALGO_SUPPORTED)) {
324                 MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
325                 return -EINVAL;
326         }
327
328         sess->sam_sess_params.dir =
329                 (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
330                 SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
331         sess->sam_sess_params.cipher_alg =
332                 aead_map[aead_xform->aead.algo].cipher_alg;
333         sess->sam_sess_params.cipher_mode =
334                 aead_map[aead_xform->aead.algo].cipher_mode;
335
336         /* Assume IV will be passed together with data. */
337         sess->sam_sess_params.cipher_iv = NULL;
338
339         /* Get max key length. */
340         if (aead_xform->aead.key.length >
341                 aead_map[aead_xform->aead.algo].max_key_len) {
342                 MRVL_CRYPTO_LOG_ERR("Wrong key length!");
343                 return -EINVAL;
344         }
345
346         sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
347         sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
348
349         if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
350                 sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
351
352         sess->sam_sess_params.u.basic.auth_icv_len =
353                 aead_xform->aead.digest_length;
354
355         sess->sam_sess_params.u.basic.auth_aad_len =
356                 aead_xform->aead.aad_length;
357
358         return 0;
359 }
360
361 /**
362  * Parse crypto transform chain and setup session parameters.
363  *
364  * @param dev Pointer to crypto device
365  * @param sess Poiner to crypto session
366  * @param xform Pointer to configuration structure chain for crypto operations.
367  * @returns 0 in case of success, negative value otherwise.
368  */
369 int
370 mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
371                 const struct rte_crypto_sym_xform *xform)
372 {
373         const struct rte_crypto_sym_xform *cipher_xform = NULL;
374         const struct rte_crypto_sym_xform *auth_xform = NULL;
375         const struct rte_crypto_sym_xform *aead_xform = NULL;
376
377         /* Filter out spurious/broken requests */
378         if (xform == NULL)
379                 return -EINVAL;
380
381         sess->chain_order = mrvl_crypto_get_chain_order(xform);
382         switch (sess->chain_order) {
383         case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
384                 cipher_xform = xform;
385                 auth_xform = xform->next;
386                 break;
387         case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
388                 auth_xform = xform;
389                 cipher_xform = xform->next;
390                 break;
391         case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
392                 cipher_xform = xform;
393                 break;
394         case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
395                 auth_xform = xform;
396                 break;
397         case MRVL_CRYPTO_CHAIN_COMBINED:
398                 aead_xform = xform;
399                 break;
400         default:
401                 return -EINVAL;
402         }
403
404         if ((cipher_xform != NULL) &&
405                 (mrvl_crypto_set_cipher_session_parameters(
406                         sess, cipher_xform) < 0)) {
407                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
408                 return -EINVAL;
409         }
410
411         if ((auth_xform != NULL) &&
412                 (mrvl_crypto_set_auth_session_parameters(
413                         sess, auth_xform) < 0)) {
414                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
415                 return -EINVAL;
416         }
417
418         if ((aead_xform != NULL) &&
419                 (mrvl_crypto_set_aead_session_parameters(
420                         sess, aead_xform) < 0)) {
421                 MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
422                 return -EINVAL;
423         }
424
425         return 0;
426 }
427
428 /*
429  *-----------------------------------------------------------------------------
430  * Process Operations
431  *-----------------------------------------------------------------------------
432  */
433
/**
 * Prepare a single request.
 *
 * This function basically translates DPDK crypto request into one
 * understandable by MUDSK's SAM. If this is a first request in a session,
 * it starts the session.
 *
 * @param request Pointer to pre-allocated && reset request buffer [Out].
 * @param src_bd Pointer to pre-allocated source descriptor [Out].
 * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
 * @param op Pointer to DPDK crypto operation struct [In].
 * @returns 0 on success; -EINVAL for sessionless ops or missing session;
 *   -1 when buffer geometry or digest placement cannot be handled.
 */
static inline int
mrvl_request_prepare(struct sam_cio_op_params *request,
		struct sam_buf_info *src_bd,
		struct sam_buf_info *dst_bd,
		struct rte_crypto_op *op)
{
	struct mrvl_crypto_session *sess;
	struct rte_mbuf *dst_mbuf;
	uint8_t *digest;

	/* Only session-based operations are supported by this PMD. */
	if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
				"oriented requests, op (%p) is sessionless.",
				op);
		return -EINVAL;
	}

	sess = (struct mrvl_crypto_session *)get_session_private_data(
			op->sym->session, cryptodev_driver_id);
	if (unlikely(sess == NULL)) {
		MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
		return -EINVAL;
	}

	/*
	 * If application delivered us null dst buffer, it means it expects
	 * us to deliver the result in src buffer.
	 */
	dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

	request->sa = sess->sam_sess;
	/* The cookie comes back unchanged in the dequeue result. */
	request->cookie = op;

	/* Single buffers only, sorry. */
	request->num_bufs = 1;
	request->src = src_bd;
	src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
	src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
	src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);

	/* Empty source. */
	if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
		/* EIP does not support 0 length buffers. */
		MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
		return -1;
	}

	/* Empty destination. */
	if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
		/* Make dst buffer fit at least source data. */
		if (rte_pktmbuf_append(dst_mbuf,
			rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
			MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
			return -1;
		}
	}

	request->dst = dst_bd;
	dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
	dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);

	/*
	 * We can use all available space in dst_mbuf,
	 * not only what's used currently.
	 */
	dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);

	if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
		/* AEAD: auth range coincides with cipher range; AAD is
		 * carried separately. */
		request->cipher_len = op->sym->aead.data.length;
		request->cipher_offset = op->sym->aead.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
			sess->cipher_iv_offset);

		request->auth_aad = op->sym->aead.aad.data;
		request->auth_offset = request->cipher_offset;
		request->auth_len = request->cipher_len;
	} else {
		/* Plain cipher and/or auth: ranges come straight from op. */
		request->cipher_len = op->sym->cipher.data.length;
		request->cipher_offset = op->sym->cipher.data.offset;
		request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
				sess->cipher_iv_offset);

		request->auth_offset = op->sym->auth.data.offset;
		request->auth_len = op->sym->auth.data.length;
	}

	digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
		op->sym->aead.digest.data : op->sym->auth.digest.data;
	if (digest == NULL) {
		/* No auth - no worry. */
		return 0;
	}

	/* ICV is expected immediately after the authenticated region. */
	request->auth_icv_offset = request->auth_offset + request->auth_len;

	/*
	 * EIP supports only scenarios where ICV(digest buffer) is placed at
	 * auth_icv_offset. Any other placement means risking errors.
	 */
	if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
		/*
		 * This should be the most common case anyway,
		 * EIP will overwrite DST buffer at auth_icv_offset.
		 */
		if (rte_pktmbuf_mtod_offset(
				dst_mbuf, uint8_t *,
				request->auth_icv_offset) == digest) {
			return 0;
		}
	} else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
		/*
		 * EIP will look for digest at auth_icv_offset
		 * offset in SRC buffer.
		 */
		if (rte_pktmbuf_mtod_offset(
				op->sym->m_src, uint8_t *,
				request->auth_icv_offset) == digest) {
			return 0;
		}
	}

	/*
	 * If we landed here it means that digest pointer is
	 * at different than expected place.
	 */
	return -1;
}
573
574 /*
575  *-----------------------------------------------------------------------------
576  * PMD Framework handlers
577  *-----------------------------------------------------------------------------
578  */
579
/**
 * Enqueue burst.
 *
 * Translates each DPDK op into a SAM request and submits the resulting
 * burst in a single sam_cio_enq() call. Preparation stops at the first
 * op that cannot be translated; that op is counted as consumed (with
 * RTE_CRYPTO_OP_STATUS_ERROR) and the already-prepared ops are still sent.
 *
 * @param queue_pair Pointer to queue pair.
 * @param ops Pointer to ops requests array.
 * @param nb_ops Number of elements in ops requests array.
 * @returns Number of elements consumed from ops.
 */
static uint16_t
mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
		uint16_t nb_ops)
{
	uint16_t iter_ops = 0;
	uint16_t to_enq = 0;
	uint16_t consumed = 0;
	int ret;
	struct sam_cio_op_params requests[nb_ops];
	/*
	 * DPDK uses single fragment buffers, so we can KISS descriptors.
	 * SAM does not store bd pointers, so on-stack scope will be enough.
	 */
	struct sam_buf_info src_bd[nb_ops];
	struct sam_buf_info dst_bd[nb_ops];
	struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;

	if (nb_ops == 0)
		return 0;

	/* Prepare the burst. */
	memset(&requests, 0, sizeof(requests));

	/* Iterate through */
	for (; iter_ops < nb_ops; ++iter_ops) {
		if (mrvl_request_prepare(&requests[iter_ops],
					&src_bd[iter_ops],
					&dst_bd[iter_ops],
					ops[iter_ops]) < 0) {
			MRVL_CRYPTO_LOG_ERR(
				"Error while parameters preparation!");
			qp->stats.enqueue_err_count++;
			ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;

			/*
			 * Number of handled ops is increased
			 * (even if the result of handling is error).
			 */
			++consumed;
			break;
		}

		ops[iter_ops]->status =
			RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;

		/* Increase the number of ops to enqueue. */
		++to_enq;
	} /* for (; iter_ops < nb_ops;... */

	if (to_enq > 0) {
		/* Send the burst. NOTE(review): to_enq is passed by pointer —
		 * presumably SAM updates it to the count actually accepted;
		 * confirm against the MUSDK sam_cio_enq() contract. */
		ret = sam_cio_enq(qp->cio, requests, &to_enq);
		consumed += to_enq;
		if (ret < 0) {
			/*
			 * Trust SAM that in this case returned value will be at
			 * some point correct (now it is returned unmodified).
			 */
			qp->stats.enqueue_err_count += to_enq;
			for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
				ops[iter_ops]->status =
					RTE_CRYPTO_OP_STATUS_ERROR;
		}
	}

	qp->stats.enqueued_count += to_enq;
	return consumed;
}
656
657 /**
658  * Dequeue burst.
659  *
660  * @param queue_pair Pointer to queue pair.
661  * @param ops Pointer to ops requests array.
662  * @param nb_ops Number of elements in ops requests array.
663  * @returns Number of elements dequeued.
664  */
665 static uint16_t
666 mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
667                 struct rte_crypto_op **ops,
668                 uint16_t nb_ops)
669 {
670         int ret;
671         struct mrvl_crypto_qp *qp = queue_pair;
672         struct sam_cio *cio = qp->cio;
673         struct sam_cio_op_result results[nb_ops];
674         uint16_t i;
675
676         ret = sam_cio_deq(cio, results, &nb_ops);
677         if (ret < 0) {
678                 /* Count all dequeued as error. */
679                 qp->stats.dequeue_err_count += nb_ops;
680
681                 /* But act as they were dequeued anyway*/
682                 qp->stats.dequeued_count += nb_ops;
683
684                 return 0;
685         }
686
687         /* Unpack and check results. */
688         for (i = 0; i < nb_ops; ++i) {
689                 ops[i] = results[i].cookie;
690
691                 switch (results[i].status) {
692                 case SAM_CIO_OK:
693                         ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
694                         break;
695                 case SAM_CIO_ERR_ICV:
696                         MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
697                         ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
698                         break;
699                 default:
700                         MRVL_CRYPTO_LOG_DBG(
701                                 "CIO returned Error: %d", results[i].status);
702                         ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
703                         break;
704                 }
705         }
706
707         qp->stats.dequeued_count += nb_ops;
708         return nb_ops;
709 }
710
711 /**
712  * Create a new crypto device.
713  *
714  * @param name Driver name.
715  * @param vdev Pointer to device structure.
716  * @param init_params Pointer to initialization parameters.
717  * @returns 0 in case of success, negative value otherwise.
718  */
719 static int
720 cryptodev_mrvl_crypto_create(const char *name,
721                 struct rte_vdev_device *vdev,
722                 struct rte_cryptodev_pmd_init_params *init_params)
723 {
724         struct rte_cryptodev *dev;
725         struct mrvl_crypto_private *internals;
726         struct sam_init_params  sam_params;
727         int ret;
728
729         dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
730         if (dev == NULL) {
731                 MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
732                 goto init_error;
733         }
734
735         dev->driver_id = cryptodev_driver_id;
736         dev->dev_ops = rte_mrvl_crypto_pmd_ops;
737
738         /* Register rx/tx burst functions for data path. */
739         dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
740         dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
741
742         dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
743                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
744                         RTE_CRYPTODEV_FF_HW_ACCELERATED;
745
746         /* Set vector instructions mode supported */
747         internals = dev->data->dev_private;
748
749         internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
750         internals->max_nb_sessions = init_params->max_nb_sessions;
751
752         /*
753          * ret == -EEXIST is correct, it means DMA
754          * has been already initialized.
755          */
756         ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
757         if (ret < 0) {
758                 if (ret != -EEXIST)
759                         return ret;
760
761                 MRVL_CRYPTO_LOG_INFO(
762                         "DMA memory has been already initialized by a different driver.");
763         }
764
765         sam_params.max_num_sessions = internals->max_nb_sessions;
766
767         return sam_init(&sam_params);
768
769 init_error:
770         MRVL_CRYPTO_LOG_ERR(
771                 "driver %s: %s failed", init_params->name, __func__);
772
773         cryptodev_mrvl_crypto_uninit(vdev);
774         return -EFAULT;
775 }
776
777 /**
778  * Initialize the crypto device.
779  *
780  * @param vdev Pointer to device structure.
781  * @returns 0 in case of success, negative value otherwise.
782  */
783 static int
784 cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
785 {
786         struct rte_cryptodev_pmd_init_params init_params = { };
787         const char *name, *args;
788         int ret;
789
790         name = rte_vdev_device_name(vdev);
791         if (name == NULL)
792                 return -EINVAL;
793         args = rte_vdev_device_args(vdev);
794
795         init_params.private_data_size = sizeof(struct mrvl_crypto_private);
796         init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
797         init_params.max_nb_sessions =
798                 RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
799         init_params.socket_id = rte_socket_id();
800
801         ret = rte_cryptodev_pmd_parse_input_args(&init_params, args);
802         if (ret) {
803                 RTE_LOG(ERR, PMD,
804                         "Failed to parse initialisation arguments[%s]\n",
805                         args);
806                 return -EINVAL;
807         }
808
809         return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
810 }
811
812 /**
813  * Uninitialize the crypto device
814  *
815  * @param vdev Pointer to device structure.
816  * @returns 0 in case of success, negative value otherwise.
817  */
818 static int
819 cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
820 {
821         struct rte_cryptodev *cryptodev;
822         const char *name = rte_vdev_device_name(vdev);
823
824         if (name == NULL)
825                 return -EINVAL;
826
827         RTE_LOG(INFO, PMD,
828                 "Closing Marvell crypto device %s on numa socket %u\n",
829                 name, rte_socket_id());
830
831         sam_deinit();
832
833         cryptodev = rte_cryptodev_pmd_get_named_dev(name);
834         if (cryptodev == NULL)
835                 return -ENODEV;
836
837         return rte_cryptodev_pmd_destroy(cryptodev);
838 }
839
/**
 * Basic driver handlers for use in the constructor.
 */
static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
	.probe = cryptodev_mrvl_crypto_init,
	.remove = cryptodev_mrvl_crypto_uninit
};

/* Driver descriptor used by the cryptodev framework registration below. */
static struct cryptodev_driver mrvl_crypto_drv;

/* Register the driver in constructor. */
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
/* Advertise the devargs accepted by cryptodev_mrvl_crypto_init(). */
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");
/* Allocates cryptodev_driver_id, later matched in mrvl_request_prepare(). */
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
		cryptodev_driver_id);