/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <openssl/aes.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "aesni_gcm_pmd_private.h"

/**
 * Global static parameter used to create a unique name for each AES-NI GCM
 * crypto device.
 */
static unsigned unique_name_id;

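/*
 * Compose "<PMD name>_<id>" into the caller's buffer. Assuming the 16.11
 * definition of CRYPTODEV_NAME_AESNI_GCM_PMD, this produces names such as
 * "crypto_aesni_gcm_0" (example only; the macro value is the source of
 * truth).
 */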
static inline int
create_unique_device_name(char *name, size_t size)
{
        int ret;

        if (name == NULL)
                return -EINVAL;

        ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD),
                        unique_name_id++);
        if (ret < 0)
                return ret;
        return 0;
}

static int
aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
                uint8_t *aeskey, unsigned aeskey_length)
{
        uint8_t key[aeskey_length] __rte_aligned(16);
        AES_KEY enc_key;

        /* Reject if either length is not a whole number of AES blocks */
        if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
                return -EFAULT;

        memcpy(key, aeskey, aeskey_length);

        if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
                return -EFAULT;

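        /*
         * The GHASH hash subkey is H = AES-K(0^128) (NIST SP 800-38D); the
         * caller passes in a zeroed 16B buffer, so encrypting it in place
         * below yields H.
         */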
        AES_encrypt(hsubkey, hsubkey, &enc_key);

        return 0;
}

/** Determine the GCM operation mode from the xform chain order */
static int
aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
{
        /*
         * GCM only supports authenticated encryption or authenticated
         * decryption; all other options are invalid, so exactly two xform
         * structs must be chained together.
         */
        if (xform->next == NULL || xform->next->next != NULL)
                return -1;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
        }

        return -1;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
                struct aesni_gcm_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *cipher_xform = NULL;

        uint8_t hsubkey[16] __rte_aligned(16) = { 0 };

        /* Select Crypto operation - hash then cipher / cipher then hash */
        switch (aesni_gcm_get_mode(xform)) {
        case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
                sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;

                cipher_xform = xform;
                auth_xform = xform->next;
                break;
        case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
                sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;

                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        default:
                GCM_LOG_ERR("Unsupported operation chain order parameter");
                return -EINVAL;
        }

        /* We only support AES GCM (both xforms must specify it) */
        if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM ||
                        auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
                return -EINVAL;

        /* Select cipher direction */
        if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
                        cipher_xform->cipher.op !=
                                        RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                GCM_LOG_ERR("xform chain (CIPHER/AUTH) requires cipher "
                                "operation ENCRYPT");
                return -EINVAL;
        } else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
                        cipher_xform->cipher.op !=
                                        RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                GCM_LOG_ERR("xform chain (AUTH/CIPHER) requires cipher "
                                "operation DECRYPT");
                return -EINVAL;
        }

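        /*
         * Note: only the AES-128 key-expansion hook is wired up below; the
         * supplied key length is not validated against 16B at this point.
         */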
        /* Expand GCM AES128 key */
        (*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
                        sess->gdata.expanded_keys);

        /* Calculate hash sub key, checking for failure */
        if (aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
                        cipher_xform->cipher.key.data,
                        cipher_xform->cipher.key.length) != 0)
                return -EFAULT;

        /* Calculate GCM pre-compute */
        (*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);

        return 0;
}

/** Get GCM session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
{
        struct aesni_gcm_session *sess = NULL;

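        /*
         * Session-based ops reuse the application's pre-configured session;
         * session-less ops borrow a session object from the queue pair's
         * mempool and configure it from the op's xform chain.
         */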
        if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
                if (unlikely(op->session->dev_type
                                        != RTE_CRYPTODEV_AESNI_GCM_PMD))
                        return sess;

                sess = (struct aesni_gcm_session *)op->session->_private;
        } else {
                void *_sess;

                if (rte_mempool_get(qp->sess_mp, &_sess))
                        return sess;

                sess = (struct aesni_gcm_session *)
                        ((struct rte_cryptodev_session *)_sess)->_private;

                if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
                                sess, op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        sess = NULL;
                }
        }
        return sess;
}

/**
 * Process a crypto operation: run it through the GCM library using the
 * session parameters and write the result into the destination mbuf.
 *
 * @param       qp              queue pair
 * @param       op              symmetric crypto operation
 * @param       session         GCM session
 *
 * @return
 * - 0 on success
 * - -1 on any parameter or processing failure
 */
static int
process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
                struct aesni_gcm_session *session)
{
        uint8_t *src, *dst;
        struct rte_mbuf *m = op->m_src;

        src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
        dst = op->m_dst ?
                        rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
                                        op->cipher.data.offset) :
                        rte_pktmbuf_mtod_offset(m, uint8_t *,
                                        op->cipher.data.offset);

        /* sanity checks */
        if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
                        op->cipher.iv.length != 0) {
                GCM_LOG_ERR("iv");
                return -1;
        }

        /*
         * When GCM is given a 12B IV, the library builds the 16B pre-counter
         * block itself; set the big-endian LSB of that block to 1. The driver
         * expects the application to have allocated all 16B for the IV.
         */
        if (op->cipher.iv.length == 12) {
                op->cipher.iv.data[15] = 1;
        }

        if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
                        op->auth.aad.length != 0) {
                GCM_LOG_ERR("aad");
                return -1;
        }

        if (op->auth.digest.length != 16 &&
                        op->auth.digest.length != 12 &&
                        op->auth.digest.length != 8 &&
                        op->auth.digest.length != 0) {
                GCM_LOG_ERR("digest");
                return -1;
        }

        if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

                (*qp->ops->gcm.enc)(&session->gdata, dst, src,
                                (uint64_t)op->cipher.data.length,
                                op->cipher.iv.data,
                                op->auth.aad.data,
                                (uint64_t)op->auth.aad.length,
                                op->auth.digest.data,
                                (uint64_t)op->auth.digest.length);
        } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
                /*
                 * Scratch space for the computed tag is appended to the mbuf;
                 * post-processing compares it against the supplied digest and
                 * then trims it off again.
                 */
                uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
                                op->auth.digest.length);

                if (!auth_tag) {
                        GCM_LOG_ERR("auth_tag");
                        return -1;
                }

                (*qp->ops->gcm.dec)(&session->gdata, dst, src,
                                (uint64_t)op->cipher.data.length,
                                op->cipher.iv.data,
                                op->auth.aad.data,
                                (uint64_t)op->auth.aad.length,
                                auth_tag,
                                (uint64_t)op->auth.digest.length);
        } else {
                GCM_LOG_ERR("unsupported session op");
                return -1;
        }

        return 0;
}


/**
 * Post-process a completed GCM crypto operation: set the op status and, for
 * authenticated decryption, verify the supplied digest against the computed
 * tag and trim the tag scratch area from the mbuf.
 *
 * @param op    crypto operation to post-process
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
        struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

        struct aesni_gcm_session *session =
                (struct aesni_gcm_session *)op->sym->session->_private;

        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

        /* Verify digest if required */
        if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {

                uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
                                m->data_len - op->sym->auth.digest.length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
                rte_hexdump(stdout, "auth tag (orig):",
                                op->sym->auth.digest.data, op->sym->auth.digest.length);
                rte_hexdump(stdout, "auth tag (calc):",
                                tag, op->sym->auth.digest.length);
#endif

                if (memcmp(tag, op->sym->auth.digest.data,
                                op->sym->auth.digest.length) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

                /* trim area used for digest from mbuf */
                rte_pktmbuf_trim(m, op->sym->auth.digest.length);
        }
}

/**
 * Complete a processed GCM request: post-process the op, release any
 * session-less session back to its mempool and enqueue the op on the
 * queue pair's processed ring.
 *
 * @param qp            queue pair the op was processed on
 * @param op            completed crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
                struct rte_crypto_op *op)
{
        post_process_gcm_crypto_op(op);

        /* Free session if a session-less crypto op */
        if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }

        rte_ring_enqueue(qp->processed_pkts, (void *)op);
}

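/*
 * This PMD processes synchronously: each op is run through the GCM library
 * during the enqueue call itself and pushed onto the processed ring, so the
 * dequeue burst below only drains that ring.
 */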
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_session *sess;
        struct aesni_gcm_qp *qp = queue_pair;

        int i, retval = 0;

        for (i = 0; i < nb_ops; i++) {

                sess = aesni_gcm_get_session(qp, ops[i]->sym);
                if (unlikely(sess == NULL)) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.enqueue_err_count++;
                        break;
                }

                retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
                if (retval < 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.enqueue_err_count++;
                        break;
                }

                handle_completed_gcm_crypto_op(qp, ops[i]);

                qp->qp_stats.enqueued_count++;
        }
        return i;
}

static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_qp *qp = queue_pair;

        unsigned nb_dequeued;

        nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
                        (void **)ops, nb_ops);
        qp->qp_stats.dequeued_count += nb_dequeued;

        return nb_dequeued;
}

static int aesni_gcm_remove(const char *name);

static int
aesni_gcm_create(const char *name,
                struct rte_crypto_vdev_init_params *init_params)
{
        struct rte_cryptodev *dev;
        char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        struct aesni_gcm_private *internals;
        enum aesni_gcm_vector_mode vector_mode;

        /* Check CPU for support for AES instruction set */
        if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
                GCM_LOG_ERR("AES instructions not supported by CPU");
                return -EFAULT;
        }

        /* Check CPU for supported vector instruction set */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                vector_mode = RTE_AESNI_GCM_AVX2;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
                vector_mode = RTE_AESNI_GCM_AVX;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
                vector_mode = RTE_AESNI_GCM_SSE;
        else {
                GCM_LOG_ERR("Vector instructions are not supported by CPU");
                return -EFAULT;
        }
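
        /*
         * vector_mode selects the SSE/AVX/AVX2 variant of the GCM ops table;
         * it is recorded in the device private data below, from which the
         * queue-pair setup code is expected to pick the matching ops.
         */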

        /* create a unique device name */
        if (create_unique_device_name(crypto_dev_name,
                        RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
                GCM_LOG_ERR("failed to create unique cryptodev name");
                return -EINVAL;
        }

        dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
                        sizeof(struct aesni_gcm_private), init_params->socket_id);
        if (dev == NULL) {
                GCM_LOG_ERR("failed to create cryptodev vdev");
                goto init_error;
        }

        dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
        dev->dev_ops = rte_aesni_gcm_pmd_ops;

        /* register rx/tx burst functions for data path */
        dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
        dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

        dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_CPU_AESNI;

        switch (vector_mode) {
        case RTE_AESNI_GCM_SSE:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
                break;
        case RTE_AESNI_GCM_AVX:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
                break;
        case RTE_AESNI_GCM_AVX2:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
                break;
        default:
                break;
        }

        /* Set vector instructions mode supported */
        internals = dev->data->dev_private;

        internals->vector_mode = vector_mode;

        internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
        internals->max_nb_sessions = init_params->max_nb_sessions;

        return 0;

init_error:
        GCM_LOG_ERR("driver %s: create failed", name);

        aesni_gcm_remove(crypto_dev_name);
        return -EFAULT;
}

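/*
 * Probe entry point for the vdev bus: parse any user-supplied arguments
 * (see the parameter string registered at the end of this file) and create
 * the device.
 */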
static int
aesni_gcm_probe(const char *name, const char *input_args)
{
        struct rte_crypto_vdev_init_params init_params = {
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
                rte_socket_id()
        };

        rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

        RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
                        init_params.socket_id);
        RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
                        init_params.max_nb_queue_pairs);
        RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
                        init_params.max_nb_sessions);

        return aesni_gcm_create(name, &init_params);
}

static int
aesni_gcm_remove(const char *name)
{
        if (name == NULL)
                return -EINVAL;

        GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
                        name, rte_socket_id());

        return 0;
}

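/*
 * Register the driver with the vdev bus; the alias keeps the pre-16.11
 * "cryptodev_aesni_gcm_pmd" name usable on the EAL command line.
 */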
static struct rte_vdev_driver aesni_gcm_pmd_drv = {
        .probe = aesni_gcm_probe,
        .remove = aesni_gcm_remove
};

RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
        "max_nb_queue_pairs=<int> "
        "max_nb_sessions=<int> "
        "socket_id=<int>");