deb_dpdk.git: drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c (imported upstream version 16.04)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "rte_aesni_mb_pmd_private.h"

/**
 * Global static parameter used to create a unique name for each AES-NI
 * multi-buffer crypto device.
 */
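/*
 * Note: the counter is read and incremented without locking; this assumes
 * device creation only ever runs from a single (initialisation) thread.
 */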
static unsigned unique_name_id;

static inline int
create_unique_device_name(char *name, size_t size)
{
        int ret;

        if (name == NULL)
                return -EINVAL;

        ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_MB_PMD,
                        unique_name_id++);
        if (ret < 0)
                return ret;
        return 0;
}

typedef void (*hash_one_block_t)(void *data, void *digest);
typedef void (*aes_keyexp_t)(void *key, void *enc_exp_keys, void *dec_exp_keys);

/**
 * Calculate the authentication pre-computes
 *
 * @param one_block_hash        Function pointer to calculate digest on ipad/opad
 * @param ipad                  Inner pad output byte array
 * @param opad                  Outer pad output byte array
 * @param hkey                  Authentication key
 * @param hkey_len              Authentication key length
 * @param blocksize             Block size of selected hash algo
 */
static void
calculate_auth_precomputes(hash_one_block_t one_block_hash,
                uint8_t *ipad, uint8_t *opad,
                uint8_t *hkey, uint16_t hkey_len,
                uint16_t blocksize)
{
        unsigned i, length;

        uint8_t ipad_buf[blocksize] __rte_aligned(16);
        uint8_t opad_buf[blocksize] __rte_aligned(16);

        /* Setup inner and outer pads */
        memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
        memset(opad_buf, HMAC_OPAD_VALUE, blocksize);

        /* XOR hash key with inner and outer pads */
        length = hkey_len > blocksize ? blocksize : hkey_len;

        for (i = 0; i < length; i++) {
                ipad_buf[i] ^= hkey[i];
                opad_buf[i] ^= hkey[i];
        }

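        /*
         * HMAC(key, msg) = H((key ^ opad) || H((key ^ ipad) || msg)). Hashing
         * the single padded-key blocks here yields the inner and outer
         * partial hash states that the multi-buffer library starts each
         * per-packet HMAC from, so the key material does not have to be
         * re-processed on every operation.
         */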
        /* Compute partial hashes */
        (*one_block_hash)(ipad_buf, ipad);
        (*one_block_hash)(opad_buf, opad);

        /* Clean up stack */
        memset(ipad_buf, 0, blocksize);
        memset(opad_buf, 0, blocksize);
}

/** Get xform chain order */
static int
aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
        /*
         * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
         * operations, all other options are invalid, so we must have exactly
         * two xform structs chained together
         */
        if (xform->next == NULL || xform->next->next != NULL)
                return -1;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
                return HASH_CIPHER;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
                return CIPHER_HASH;

        return -1;
}

/** Set session authentication parameters */
static int
aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
                struct aesni_mb_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        hash_one_block_t hash_oneblock_fn;

        if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
                MB_LOG_ERR("Crypto xform struct not of type auth");
                return -1;
        }

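        /*
         * AES-XCBC-MAC (RFC 3566) does not use the HMAC ipad/opad scheme;
         * instead three subkeys (K1, expanded into AES round keys, plus K2
         * and K3) are derived from the authentication key and stored in the
         * session.
         */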
        /* Set Authentication Parameters */
        if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
                sess->auth.algo = AES_XCBC;
                (*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
                                sess->auth.xcbc.k1_expanded,
                                sess->auth.xcbc.k2, sess->auth.xcbc.k3);
                return 0;
        }

        switch (xform->auth.algo) {
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                sess->auth.algo = MD5;
                hash_oneblock_fn = mb_ops->aux.one_block.md5;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                sess->auth.algo = SHA1;
                hash_oneblock_fn = mb_ops->aux.one_block.sha1;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                sess->auth.algo = SHA_224;
                hash_oneblock_fn = mb_ops->aux.one_block.sha224;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                sess->auth.algo = SHA_256;
                hash_oneblock_fn = mb_ops->aux.one_block.sha256;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                sess->auth.algo = SHA_384;
                hash_oneblock_fn = mb_ops->aux.one_block.sha384;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                sess->auth.algo = SHA_512;
                hash_oneblock_fn = mb_ops->aux.one_block.sha512;
                break;
        default:
                MB_LOG_ERR("Unsupported authentication algorithm selection");
                return -1;
        }

        /* Calculate Authentication precomputes */
        calculate_auth_precomputes(hash_oneblock_fn,
                        sess->auth.pads.inner, sess->auth.pads.outer,
                        xform->auth.key.data,
                        xform->auth.key.length,
                        get_auth_algo_blocksize(sess->auth.algo));

        return 0;
}

/** Set session cipher parameters */
static int
aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
                struct aesni_mb_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        aes_keyexp_t aes_keyexp_fn;

        if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
                MB_LOG_ERR("Crypto xform struct not of type cipher");
                return -1;
        }

        /* Select cipher direction */
        switch (xform->cipher.op) {
        case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
                sess->cipher.direction = ENCRYPT;
                break;
        case RTE_CRYPTO_CIPHER_OP_DECRYPT:
                sess->cipher.direction = DECRYPT;
                break;
        default:
                MB_LOG_ERR("Unsupported cipher operation parameter");
                return -1;
        }

        /* Select cipher mode */
        switch (xform->cipher.algo) {
        case RTE_CRYPTO_CIPHER_AES_CBC:
                sess->cipher.mode = CBC;
                break;
        default:
                MB_LOG_ERR("Unsupported cipher mode parameter");
                return -1;
        }

        /* Check key length and choose key expansion function */
        switch (xform->cipher.key.length) {
        case AES_128_BYTES:
                sess->cipher.key_length_in_bytes = AES_128_BYTES;
                aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
                break;
        case AES_192_BYTES:
                sess->cipher.key_length_in_bytes = AES_192_BYTES;
                aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
                break;
        case AES_256_BYTES:
                sess->cipher.key_length_in_bytes = AES_256_BYTES;
                aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
                break;
        default:
                MB_LOG_ERR("Unsupported cipher key length");
                return -1;
        }

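        /*
         * The multi-buffer library consumes pre-expanded AES round keys, so
         * both the encrypt and decrypt key schedules are generated once here
         * at session setup rather than on every operation.
         */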
        /* Expand cipher keys */
        (*aes_keyexp_fn)(xform->cipher.key.data,
                        sess->cipher.expanded_aes_keys.encode,
                        sess->cipher.expanded_aes_keys.decode);

        return 0;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
                struct aesni_mb_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *cipher_xform = NULL;

        /* Select Crypto operation - hash then cipher / cipher then hash */
        switch (aesni_mb_get_chain_order(xform)) {
        case HASH_CIPHER:
                sess->chain_order = HASH_CIPHER;
                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        case CIPHER_HASH:
                sess->chain_order = CIPHER_HASH;
                auth_xform = xform->next;
                cipher_xform = xform;
                break;
        default:
                MB_LOG_ERR("Unsupported operation chain order parameter");
                return -1;
        }

        if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
                MB_LOG_ERR("Invalid/unsupported authentication parameters");
                return -1;
        }

        if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
                        cipher_xform)) {
                MB_LOG_ERR("Invalid/unsupported cipher parameters");
                return -1;
        }

        return 0;
}

/** Get multi buffer session */
static struct aesni_mb_session *
get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
{
        struct aesni_mb_session *sess = NULL;

        if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
                if (unlikely(op->sym->session->dev_type !=
                                RTE_CRYPTODEV_AESNI_MB_PMD))
                        return NULL;

                sess = (struct aesni_mb_session *)op->sym->session->_private;
        } else {
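                /*
                 * Session-less operation: take a temporary session object
                 * from the queue pair's session mempool and initialise it
                 * from the op's xform chain. It is returned to the pool in
                 * post_process_mb_job() once the job completes.
                 */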
                void *_sess = NULL;

                if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
                        return NULL;

                sess = (struct aesni_mb_session *)
                        ((struct rte_cryptodev_sym_session *)_sess)->_private;

                if (unlikely(aesni_mb_set_session_parameters(qp->ops,
                                sess, op->sym->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        sess = NULL;
                }
        }

        return sess;
}

/**
 * Process a crypto operation, completing a JOB_AES_HMAC job structure for
 * submission to the multi-buffer library for processing.
 *
 * @param       qp              queue pair
 * @param       op              symmetric crypto operation to process
 * @param       session         session to use for the operation
 *
 * @return
 * - Completed JOB_AES_HMAC structure pointer on success
 * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
 */
static JOB_AES_HMAC *
process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
                struct aesni_mb_session *session)
{
        JOB_AES_HMAC *job;

        struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
        uint16_t m_offset = 0;

        job = (*qp->ops->job.get_next)(&qp->mb_mgr);
        if (unlikely(job == NULL))
                return job;

        /* Set crypto operation */
        job->chain_order = session->chain_order;

        /* Set cipher parameters */
        job->cipher_direction = session->cipher.direction;
        job->cipher_mode = session->cipher.mode;

        job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
        job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
        job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;

        /* Set authentication parameters */
        job->hash_alg = session->auth.algo;
        if (job->hash_alg == AES_XCBC) {
                job->_k1_expanded = session->auth.xcbc.k1_expanded;
                job->_k2 = session->auth.xcbc.k2;
                job->_k3 = session->auth.xcbc.k3;
        } else {
                job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
                job->hashed_auth_key_xor_opad = session->auth.pads.outer;
        }

        /*
         * Mutable crypto operation parameters: for an out-of-place operation
         * the source data is first copied into the destination mbuf so the
         * job can operate on a single buffer
         */
        if (op->sym->m_dst) {
                m_src = m_dst = op->sym->m_dst;

                /* append space for output data to mbuf */
                char *odata = rte_pktmbuf_append(m_dst,
                                rte_pktmbuf_data_len(op->sym->m_src));
                if (odata == NULL) {
                        MB_LOG_ERR("failed to allocate space in destination "
                                        "mbuf for source data");
                        return NULL;
                }

                memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void *),
                                rte_pktmbuf_data_len(op->sym->m_src));
        } else {
                m_dst = m_src;
                m_offset = op->sym->cipher.data.offset;
        }

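        /*
         * Digest handling: when the cipher direction is DECRYPT the digest is
         * computed into scratch space appended to the destination mbuf;
         * post_process_mb_job() compares it against the supplied digest (for
         * HASH_CIPHER chains) and trims it again. For ENCRYPT the digest is
         * written directly to the caller-supplied location.
         */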
        /* Set digest output location */
        if (job->cipher_direction == DECRYPT) {
                job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
                                get_digest_byte_length(job->hash_alg));

                if (job->auth_tag_output == NULL) {
                        MB_LOG_ERR("failed to allocate space in output mbuf "
                                        "for temp digest");
                        return NULL;
                }

                memset(job->auth_tag_output, 0,
                                get_digest_byte_length(job->hash_alg));
        } else {
                job->auth_tag_output = op->sym->auth.digest.data;
        }

        /*
         * The multi-buffer library currently only supports returning a
         * truncated digest length as specified in the relevant IPsec RFCs
         */
        job->auth_tag_output_len_in_bytes =
                        get_truncated_digest_byte_length(job->hash_alg);

        /* Set IV parameters */
        job->iv = op->sym->cipher.iv.data;
        job->iv_len_in_bytes = op->sym->cipher.iv.length;

        /* Data parameters */
        job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
        job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);

        job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
        job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;

        job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
        job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;

        /* Set user data to be crypto operation data struct */
        job->user_data = op;
        job->user_data2 = m_dst;

        return job;
}

/**
 * Process a completed job and return the associated crypto operation
 *
 * @param qp    queue pair which processed the job
 * @param job   JOB_AES_HMAC job to process
 *
 * @return
 * - Returns the processed crypto operation with its status set; for a
 *   HASH_CIPHER operation the digest appended for verification is trimmed
 *   from the output mbuf
 * - Returns NULL on invalid job
 */
static struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
        struct rte_crypto_op *op =
                        (struct rte_crypto_op *)job->user_data;
        struct rte_mbuf *m_dst =
                        (struct rte_mbuf *)job->user_data2;

        if (op == NULL || m_dst == NULL)
                return NULL;

        /* set status as successful by default */
        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

        /* check if job has been processed */
        if (unlikely(job->status != STS_COMPLETED)) {
                op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                return op;
        } else if (job->chain_order == HASH_CIPHER) {
                /* Verify digest if required */
                if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
                                job->auth_tag_output_len_in_bytes) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

                /* trim area used for digest from mbuf */
                rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
        }

        /* Free session if a session-less crypto op */
        if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }

        return op;
}

/**
 * Process a completed JOB_AES_HMAC job and keep processing jobs until
 * get_completed_job returns NULL
 *
 * @param qp            Queue Pair to process
 * @param job           JOB_AES_HMAC job
 *
 * @return
 * - Number of processed jobs
 */
static unsigned
handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
        struct rte_crypto_op *op = NULL;
        unsigned processed_jobs = 0;

        while (job) {
                processed_jobs++;
                op = post_process_mb_job(qp, job);
                if (op)
                        rte_ring_enqueue(qp->processed_ops, (void *)op);
                else
                        qp->stats.dequeue_err_count++;
                job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
        }

        return processed_jobs;
}

static uint16_t
aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct aesni_mb_session *sess;
        struct aesni_mb_qp *qp = queue_pair;

        JOB_AES_HMAC *job = NULL;

        int i, processed_jobs = 0;

        for (i = 0; i < nb_ops; i++) {
#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
                if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
                        MB_LOG_ERR("PMD only supports symmetric crypto "
                                "operation requests, op (%p) is not a "
                                "symmetric operation.", ops[i]);
                        qp->stats.enqueue_err_count++;
                        goto flush_jobs;
                }
#endif
                sess = get_session(qp, ops[i]);
                if (unlikely(sess == NULL)) {
                        qp->stats.enqueue_err_count++;
                        goto flush_jobs;
                }

                job = process_crypto_op(qp, ops[i], sess);
                if (unlikely(job == NULL)) {
                        qp->stats.enqueue_err_count++;
                        goto flush_jobs;
                }

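                /*
                 * The multi-buffer manager batches jobs internally to fill
                 * the SIMD lanes; submit may therefore return NULL, or a
                 * previously submitted job that has now completed.
                 */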
                /* Submit job */
                job = (*qp->ops->job.submit)(&qp->mb_mgr);

                /*
                 * If submit returns a processed job then handle it,
                 * before submitting subsequent jobs
                 */
                if (job)
                        processed_jobs += handle_completed_jobs(qp, job);
        }

        if (processed_jobs == 0)
                goto flush_jobs;

        qp->stats.enqueued_count += processed_jobs;
        return i;

flush_jobs:
        /*
         * If we haven't processed any jobs in the submit loop, then flush the
         * jobs queue to stop the output stalling
         */
        job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
        if (job)
                qp->stats.enqueued_count += handle_completed_jobs(qp, job);

        return i;
}

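/*
 * Dequeue burst: completed operations are placed on the queue pair's
 * processed_ops ring by the enqueue/flush path above, so dequeueing simply
 * drains that ring.
 */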
static uint16_t
aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct aesni_mb_qp *qp = queue_pair;

        unsigned nb_dequeued;

        nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
                        (void **)ops, nb_ops);
        qp->stats.dequeued_count += nb_dequeued;

        return nb_dequeued;
}

static int cryptodev_aesni_mb_uninit(const char *name);

static int
cryptodev_aesni_mb_create(const char *name,
                struct rte_crypto_vdev_init_params *init_params)
{
        struct rte_cryptodev *dev;
        char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        struct aesni_mb_private *internals;
        enum aesni_mb_vector_mode vector_mode;

        /* Check CPU for support for AES instruction set */
        if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
                MB_LOG_ERR("AES instructions not supported by CPU");
                return -EFAULT;
        }

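        /*
         * The vector mode chosen here selects which build of the
         * multi-buffer library (SSE, AVX or AVX2) the device's queue pairs
         * will bind their job and auxiliary function pointers to.
         */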
        /* Check CPU for supported vector instruction set */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                vector_mode = RTE_AESNI_MB_AVX2;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
                vector_mode = RTE_AESNI_MB_AVX;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
                vector_mode = RTE_AESNI_MB_SSE;
        else {
                MB_LOG_ERR("Vector instructions are not supported by CPU");
                return -EFAULT;
        }

        /* create a unique device name */
        if (create_unique_device_name(crypto_dev_name,
                        RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
                MB_LOG_ERR("failed to create unique cryptodev name");
                return -EINVAL;
        }

        dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
                        sizeof(struct aesni_mb_private), init_params->socket_id);
        if (dev == NULL) {
                MB_LOG_ERR("failed to create cryptodev vdev");
                goto init_error;
        }

        dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
        dev->dev_ops = rte_aesni_mb_pmd_ops;

        /* register rx/tx burst functions for data path */
        dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
        dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;

        dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_CPU_AESNI;

        switch (vector_mode) {
        case RTE_AESNI_MB_SSE:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
                break;
        case RTE_AESNI_MB_AVX:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
                break;
        case RTE_AESNI_MB_AVX2:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
                break;
        default:
                break;
        }

        /* Set vector instructions mode supported */
        internals = dev->data->dev_private;

        internals->vector_mode = vector_mode;
        internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
        internals->max_nb_sessions = init_params->max_nb_sessions;

        return 0;
init_error:
        MB_LOG_ERR("driver %s: cryptodev_aesni_mb_create failed", name);

        cryptodev_aesni_mb_uninit(crypto_dev_name);
        return -EFAULT;
}

static int
cryptodev_aesni_mb_init(const char *name,
                const char *input_args)
{
        struct rte_crypto_vdev_init_params init_params = {
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
                rte_socket_id()
        };

        rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

        RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
                        init_params.socket_id);
        RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
                        init_params.max_nb_queue_pairs);
        RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
                        init_params.max_nb_sessions);

        return cryptodev_aesni_mb_create(name, &init_params);
}

static int
cryptodev_aesni_mb_uninit(const char *name)
{
        if (name == NULL)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
                        name, rte_socket_id());

        return 0;
}

static struct rte_driver cryptodev_aesni_mb_pmd_drv = {
        .name = CRYPTODEV_NAME_AESNI_MB_PMD,
        .type = PMD_VDEV,
        .init = cryptodev_aesni_mb_init,
        .uninit = cryptodev_aesni_mb_uninit
};

PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv);