/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <openssl/aes.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>

#include "aesni_gcm_pmd_private.h"

/**
 * Global static parameter used to create a unique name for each AES-NI GCM
 * crypto device.
 */
static unsigned unique_name_id;

static inline int
create_unique_device_name(char *name, size_t size)
{
        int ret;

        if (name == NULL)
                return -EINVAL;

        ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD),
                        unique_name_id++);
        if (ret < 0)
                return ret;
        return 0;
}

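/**
 * Compute the GCM hash subkey H by encrypting the (caller-zeroed) block
 * passed in hsubkey with the supplied AES key, i.e. H = E_K(0^128).
 * The result is written back into hsubkey in place.
 */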
static int
aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
                uint8_t *aeskey, unsigned aeskey_length)
{
        uint8_t key[aeskey_length] __rte_aligned(16);
        AES_KEY enc_key;

        /* both buffers must be a whole number of 16-byte AES blocks */
        if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
                return -EFAULT;

        memcpy(key, aeskey, aeskey_length);

        if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
                return -EFAULT;

        AES_encrypt(hsubkey, hsubkey, &enc_key);

        return 0;
}

/** Get xform chain order */
static int
aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
{
        /*
         * GCM only supports authenticated encryption or authenticated
         * decryption, all other options are invalid, so we must have exactly
         * 2 xform structs chained together
         */
        if (xform->next == NULL || xform->next->next != NULL)
                return -1;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                        xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
        }

        return -1;
}

/** Parse crypto xform chain and set private session parameters */
int
aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
                struct aesni_gcm_session *sess,
                const struct rte_crypto_sym_xform *xform)
{
        const struct rte_crypto_sym_xform *auth_xform = NULL;
        const struct rte_crypto_sym_xform *cipher_xform = NULL;

        uint8_t hsubkey[16] __rte_aligned(16) = { 0 };

        /* Select Crypto operation - hash then cipher / cipher then hash */
        switch (aesni_gcm_get_mode(xform)) {
        case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
                sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;

                cipher_xform = xform;
                auth_xform = xform->next;
                break;
        case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
                sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;

                auth_xform = xform;
                cipher_xform = xform->next;
                break;
        default:
                GCM_LOG_ERR("Unsupported operation chain order parameter");
                return -EINVAL;
        }

        /* We only support AES GCM, on both the cipher and auth xforms */
        if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM ||
                        auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
                return -EINVAL;

        /* Select cipher direction */
        if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
                        cipher_xform->cipher.op !=
                                        RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
                                "(DECRYPT) specified are an invalid selection");
                return -EINVAL;
        } else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
                        cipher_xform->cipher.op !=
                                        RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
                                "(ENCRYPT) specified are an invalid selection");
                return -EINVAL;
        }

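        /*
         * Note: the key expansion below uses the library's AES-128 routine,
         * so only 128-bit GCM keys are handled by this PMD.
         */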
        /* Expand GCM AES128 key */
        (*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
                        sess->gdata.expanded_keys);

        /* Calculate hash sub key here */
        aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
                        cipher_xform->cipher.key.data,
                        cipher_xform->cipher.key.length);

        /* Calculate GCM pre-compute */
        (*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);

        return 0;
}

/** Get gcm session */
static struct aesni_gcm_session *
aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
{
        struct aesni_gcm_session *sess = NULL;

        if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
                if (unlikely(op->session->dev_type
                                        != RTE_CRYPTODEV_AESNI_GCM_PMD))
                        return sess;

                sess = (struct aesni_gcm_session *)op->session->_private;
        } else {
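                /*
                 * Session-less operation: allocate a session object from the
                 * queue pair's mempool and derive its parameters from the
                 * per-operation xform chain.
                 */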
                void *_sess;

                if (rte_mempool_get(qp->sess_mp, &_sess))
                        return sess;

                sess = (struct aesni_gcm_session *)
                        ((struct rte_cryptodev_session *)_sess)->_private;

                if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
                                sess, op->xform) != 0)) {
                        rte_mempool_put(qp->sess_mp, _sess);
                        sess = NULL;
                }
        }
        return sess;
}

/**
 * Process a crypto operation, invoking the GCM library to perform the
 * authenticated encryption or decryption described by the session.
 *
 * @param       qp              queue pair
 * @param       op              symmetric crypto operation
 * @param       session         GCM session
 *
 * @return
 * - 0 on success
 * - -1 on failure
 */
static int
process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
                struct aesni_gcm_session *session)
{
        uint8_t *src, *dst;
        struct rte_mbuf *m = op->m_src;

        src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
        dst = op->m_dst ?
                        rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
                                        op->cipher.data.offset) :
                        rte_pktmbuf_mtod_offset(m, uint8_t *,
                                        op->cipher.data.offset);

        /* sanity checks */
        if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
                GCM_LOG_ERR("iv");
                return -1;
        }

        if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
                        op->auth.aad.length != 0) {
                GCM_LOG_ERR("aad");
                return -1;
        }

        if (op->auth.digest.length != 16 &&
                        op->auth.digest.length != 12 &&
                        op->auth.digest.length != 8 &&
                        op->auth.digest.length != 0) {
                GCM_LOG_ERR("digest");
                return -1;
        }

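        /*
         * Hand the operation to the GCM library: encryption writes the auth
         * tag straight into the op's digest buffer, while decryption writes
         * the computed tag into scratch space appended to the mbuf so it can
         * be compared against the supplied digest afterwards.
         */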
        if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {

                (*qp->ops->gcm.enc)(&session->gdata, dst, src,
                                (uint64_t)op->cipher.data.length,
                                op->cipher.iv.data,
                                op->auth.aad.data,
                                (uint64_t)op->auth.aad.length,
                                op->auth.digest.data,
                                (uint64_t)op->auth.digest.length);
        } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
                uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
                                op->auth.digest.length);

                if (!auth_tag) {
                        GCM_LOG_ERR("auth_tag");
                        return -1;
                }

                (*qp->ops->gcm.dec)(&session->gdata, dst, src,
                                (uint64_t)op->cipher.data.length,
                                op->cipher.iv.data,
                                op->auth.aad.data,
                                (uint64_t)op->auth.aad.length,
                                auth_tag,
                                (uint64_t)op->auth.digest.length);
        } else {
                GCM_LOG_ERR("unsupported session op");
                return -1;
        }

        return 0;
}

/**
 * Post-process a completed GCM operation: for authenticated decryption,
 * verify the computed digest against the one supplied with the operation
 * and trim the scratch digest area from the mbuf.
 *
 * @param op    Completed crypto operation
 */
static void
post_process_gcm_crypto_op(struct rte_crypto_op *op)
{
        struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;

        struct aesni_gcm_session *session =
                (struct aesni_gcm_session *)op->sym->session->_private;

        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

        /* Verify digest if required */
        if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {

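                /*
                 * The computed tag was written into space appended to the
                 * end of the mbuf by process_gcm_crypto_op().
                 */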
                uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
                                m->data_len - op->sym->auth.digest.length);

#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
                rte_hexdump(stdout, "auth tag (orig):",
                                op->sym->auth.digest.data, op->sym->auth.digest.length);
                rte_hexdump(stdout, "auth tag (calc):",
                                tag, op->sym->auth.digest.length);
#endif

                if (memcmp(tag, op->sym->auth.digest.data,
                                op->sym->auth.digest.length) != 0)
                        op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

                /* trim area used for digest from mbuf */
                rte_pktmbuf_trim(m, op->sym->auth.digest.length);
        }
}

/**
 * Handle a completed GCM request: post-process the operation, release the
 * session of a session-less operation and enqueue the operation on the
 * processed ring.
 *
 * @param qp            Queue Pair to process
 * @param op            Completed crypto operation
 */
static void
handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
                struct rte_crypto_op *op)
{
        post_process_gcm_crypto_op(op);

        /* Free session if a session-less crypto op */
        if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
                rte_mempool_put(qp->sess_mp, op->sym->session);
                op->sym->session = NULL;
        }

        rte_ring_enqueue(qp->processed_pkts, (void *)op);
}

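/*
 * Enqueue burst: operations are processed synchronously within the enqueue
 * call and placed on the queue pair's processed ring, from which the dequeue
 * burst function later retrieves them.
 */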
static uint16_t
aesni_gcm_pmd_enqueue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_session *sess;
        struct aesni_gcm_qp *qp = queue_pair;

        int i, retval = 0;

        for (i = 0; i < nb_ops; i++) {

                sess = aesni_gcm_get_session(qp, ops[i]->sym);
                if (unlikely(sess == NULL)) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.enqueue_err_count++;
                        break;
                }

                retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
                if (retval < 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                        qp->qp_stats.enqueue_err_count++;
                        break;
                }

                handle_completed_gcm_crypto_op(qp, ops[i]);

                qp->qp_stats.enqueued_count++;
        }
        return i;
}

static uint16_t
aesni_gcm_pmd_dequeue_burst(void *queue_pair,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct aesni_gcm_qp *qp = queue_pair;

        unsigned nb_dequeued;

        nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
                        (void **)ops, nb_ops);
        qp->qp_stats.dequeued_count += nb_dequeued;

        return nb_dequeued;
}

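/* Forward declaration: uninit is used in the error path of aesni_gcm_create() */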
static int aesni_gcm_uninit(const char *name);

static int
aesni_gcm_create(const char *name,
                struct rte_crypto_vdev_init_params *init_params)
{
        struct rte_cryptodev *dev;
        char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        struct aesni_gcm_private *internals;
        enum aesni_gcm_vector_mode vector_mode;

        /* Check CPU for support for AES instruction set */
        if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
                GCM_LOG_ERR("AES instructions not supported by CPU");
                return -EFAULT;
        }

        /* Check CPU for supported vector instruction set */
        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
                vector_mode = RTE_AESNI_GCM_AVX2;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
                vector_mode = RTE_AESNI_GCM_AVX;
        else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
                vector_mode = RTE_AESNI_GCM_SSE;
        else {
                GCM_LOG_ERR("Vector instructions are not supported by CPU");
                return -EFAULT;
        }

        /* create a unique device name */
        if (create_unique_device_name(crypto_dev_name,
                        RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
                GCM_LOG_ERR("failed to create unique cryptodev name");
                return -EINVAL;
        }

        dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
                        sizeof(struct aesni_gcm_private), init_params->socket_id);
        if (dev == NULL) {
                GCM_LOG_ERR("failed to create cryptodev vdev");
                goto init_error;
        }

        dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
        dev->dev_ops = rte_aesni_gcm_pmd_ops;

        /* register rx/tx burst functions for data path */
        dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
        dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;

        dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                        RTE_CRYPTODEV_FF_CPU_AESNI;

        switch (vector_mode) {
        case RTE_AESNI_GCM_SSE:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
                break;
        case RTE_AESNI_GCM_AVX:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
                break;
        case RTE_AESNI_GCM_AVX2:
                dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
                break;
        default:
                break;
        }

        /* Set vector instructions mode supported */
        internals = dev->data->dev_private;

        internals->vector_mode = vector_mode;

        internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
        internals->max_nb_sessions = init_params->max_nb_sessions;

        return 0;

init_error:
        GCM_LOG_ERR("driver %s: create failed", name);

        aesni_gcm_uninit(crypto_dev_name);
        return -EFAULT;
}

static int
aesni_gcm_init(const char *name, const char *input_args)
{
        struct rte_crypto_vdev_init_params init_params = {
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
                RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
                rte_socket_id()
        };

        rte_cryptodev_parse_vdev_init_params(&init_params, input_args);

        RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
                        init_params.socket_id);
        RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
                        init_params.max_nb_queue_pairs);
        RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
                        init_params.max_nb_sessions);

        return aesni_gcm_create(name, &init_params);
}

static int
aesni_gcm_uninit(const char *name)
{
        if (name == NULL)
                return -EINVAL;

        GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
                        name, rte_socket_id());

        return 0;
}

static struct rte_driver aesni_gcm_pmd_drv = {
        .type = PMD_VDEV,
        .init = aesni_gcm_init,
        .uninit = aesni_gcm_uninit
};

PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv, CRYPTODEV_NAME_AESNI_GCM_PMD);
DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
        "max_nb_queue_pairs=<int> "
        "max_nb_sessions=<int> "
        "socket_id=<int>");