/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation
 */
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_jhash.h>
#include <rte_mbuf.h>
#include <rte_cryptodev.h>

#include "rte_vhost_crypto.h"
#include "vhost.h"
#include "vhost_user.h"
#include "virtio_crypto.h"

#define INHDR_LEN               (sizeof(struct virtio_crypto_inhdr))
#define IV_OFFSET               (sizeof(struct rte_crypto_op) + \
                                sizeof(struct rte_crypto_sym_op))

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VC_LOG_ERR(fmt, args...)                                \
        RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",     \
                "Vhost-Crypto", __func__, __LINE__, ## args)
#define VC_LOG_INFO(fmt, args...)                               \
        RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",    \
                "Vhost-Crypto", __func__, __LINE__, ## args)

#define VC_LOG_DBG(fmt, args...)                                \
        RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",   \
                "Vhost-Crypto", __func__, __LINE__, ## args)
#else
#define VC_LOG_ERR(fmt, args...)                                \
        RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_INFO(fmt, args...)                               \
        RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
#define VC_LOG_DBG(fmt, args...)
#endif

#define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |       \
                (1 << VIRTIO_RING_F_INDIRECT_DESC) |                    \
                (1 << VIRTIO_RING_F_EVENT_IDX) |                        \
                (1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |                   \
                (1 << VIRTIO_CRYPTO_SERVICE_MAC) |                      \
                (1 << VIRTIO_NET_F_CTRL_VQ))

#define IOVA_TO_VVA(t, r, a, l, p)                                      \
        ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))

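/**
 * Translate a virtio-crypto cipher algorithm ID into its DPDK cryptodev
 * equivalent. Unknown IDs yield -VIRTIO_CRYPTO_BADMSG; recognized but
 * unsupported ones yield -VIRTIO_CRYPTO_NOTSUPP.
 */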
static int
cipher_algo_transform(uint32_t virtio_cipher_algo)
{
        int ret;

        switch (virtio_cipher_algo) {
        case VIRTIO_CRYPTO_CIPHER_AES_CBC:
                ret = RTE_CRYPTO_CIPHER_AES_CBC;
                break;
        case VIRTIO_CRYPTO_CIPHER_AES_CTR:
                ret = RTE_CRYPTO_CIPHER_AES_CTR;
                break;
        case VIRTIO_CRYPTO_CIPHER_DES_ECB:
                ret = -VIRTIO_CRYPTO_NOTSUPP;
                break;
        case VIRTIO_CRYPTO_CIPHER_DES_CBC:
                ret = RTE_CRYPTO_CIPHER_DES_CBC;
                break;
        case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
                ret = RTE_CRYPTO_CIPHER_3DES_ECB;
                break;
        case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
                ret = RTE_CRYPTO_CIPHER_3DES_CBC;
                break;
        case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
                ret = RTE_CRYPTO_CIPHER_3DES_CTR;
                break;
        case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
                ret = RTE_CRYPTO_CIPHER_KASUMI_F8;
                break;
        case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
                ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
                break;
        case VIRTIO_CRYPTO_CIPHER_AES_F8:
                ret = RTE_CRYPTO_CIPHER_AES_F8;
                break;
        case VIRTIO_CRYPTO_CIPHER_AES_XTS:
                ret = RTE_CRYPTO_CIPHER_AES_XTS;
                break;
        case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
                ret = RTE_CRYPTO_CIPHER_ZUC_EEA3;
                break;
        default:
                ret = -VIRTIO_CRYPTO_BADMSG;
                break;
        }

        return ret;
}

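/**
 * Translate a virtio-crypto MAC algorithm ID into its DPDK cryptodev auth
 * equivalent. Unknown IDs yield -VIRTIO_CRYPTO_BADMSG; recognized but
 * unsupported ones yield -VIRTIO_CRYPTO_NOTSUPP.
 */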
static int
auth_algo_transform(uint32_t virtio_auth_algo)
{
        int ret;

        switch (virtio_auth_algo) {
        case VIRTIO_CRYPTO_NO_MAC:
                ret = RTE_CRYPTO_AUTH_NULL;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_MD5:
                ret = RTE_CRYPTO_AUTH_MD5_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
                ret = RTE_CRYPTO_AUTH_SHA1_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
                ret = RTE_CRYPTO_AUTH_SHA224_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
                ret = RTE_CRYPTO_AUTH_SHA256_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
                ret = RTE_CRYPTO_AUTH_SHA384_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
                ret = RTE_CRYPTO_AUTH_SHA512_HMAC;
                break;
        case VIRTIO_CRYPTO_MAC_CMAC_3DES:
                ret = -VIRTIO_CRYPTO_NOTSUPP;
                break;
        case VIRTIO_CRYPTO_MAC_CMAC_AES:
                ret = RTE_CRYPTO_AUTH_AES_CMAC;
                break;
        case VIRTIO_CRYPTO_MAC_KASUMI_F9:
                ret = RTE_CRYPTO_AUTH_KASUMI_F9;
                break;
        case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
                ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
                break;
        case VIRTIO_CRYPTO_MAC_GMAC_AES:
                ret = RTE_CRYPTO_AUTH_AES_GMAC;
                break;
        case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
                ret = -VIRTIO_CRYPTO_NOTSUPP;
                break;
        case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
                ret = RTE_CRYPTO_AUTH_AES_CBC_MAC;
                break;
        case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
                ret = -VIRTIO_CRYPTO_NOTSUPP;
                break;
        case VIRTIO_CRYPTO_MAC_XCBC_AES:
                ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
                break;
        default:
                ret = -VIRTIO_CRYPTO_BADMSG;
                break;
        }

        return ret;
}

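/**
 * Return the IV length in bytes expected for the given cipher algorithm,
 * or -1 for algorithms not handled here.
 */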
static int
get_iv_len(enum rte_crypto_cipher_algorithm algo)
{
        int len;

        switch (algo) {
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                len = 8;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CTR:
                len = 8;
                break;
        case RTE_CRYPTO_CIPHER_3DES_ECB:
                len = 8;
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                len = 16;
                break;

        /* TODO: add common algos */

        default:
                len = -1;
                break;
        }

        return len;
}

/**
 * The vhost_crypto struct maintains a number of virtio_crypto sessions and
 * the one DPDK crypto device that handles all crypto workloads. It is
 * private to this file.
 */
struct vhost_crypto {
        /** Used to look up the DPDK cryptodev session based on the VIRTIO
         *  crypto session ID.
         */
        struct rte_hash *session_map;
        struct rte_mempool *mbuf_pool;
        struct rte_mempool *sess_pool;
        struct rte_mempool *wb_pool;

        /** DPDK cryptodev ID */
        uint8_t cid;
        uint16_t nb_qps;

        uint64_t last_session_id;

        uint64_t cache_session_id;
        struct rte_cryptodev_sym_session *cache_session;
        /** socket id for the device */
        int socket_id;

        struct virtio_net *dev;

        uint8_t option;
} __rte_cache_aligned;

struct vhost_crypto_writeback_data {
        uint8_t *src;
        uint8_t *dst;
        uint64_t len;
        struct vhost_crypto_writeback_data *next;
};

struct vhost_crypto_data_req {
        struct vring_desc *head;
        struct virtio_net *dev;
        struct virtio_crypto_inhdr *inhdr;
        struct vhost_virtqueue *vq;
        struct vhost_crypto_writeback_data *wb;
        struct rte_mempool *wb_pool;
        uint16_t desc_idx;
        uint16_t len;
        uint16_t zero_copy;
};

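/**
 * Build a single DPDK cipher transform from the parameters carried in a
 * VHOST_USER_CRYPTO_CREATE_SESS message.
 */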
static int
transform_cipher_param(struct rte_crypto_sym_xform *xform,
                VhostUserCryptoSessionParam *param)
{
        int ret;

        ret = cipher_algo_transform(param->cipher_algo);
        if (unlikely(ret < 0))
                return ret;

        xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
        xform->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
        xform->cipher.key.length = param->cipher_key_len;
        if (xform->cipher.key.length > 0)
                xform->cipher.key.data = param->cipher_key_buf;
        if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
                xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
        else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
                xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
        else {
                VC_LOG_DBG("Bad operation type");
                return -VIRTIO_CRYPTO_BADMSG;
        }

        ret = get_iv_len(xform->cipher.algo);
        if (unlikely(ret < 0))
                return ret;
        xform->cipher.iv.length = (uint16_t)ret;
        xform->cipher.iv.offset = IV_OFFSET;
        return 0;
}

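/**
 * Build a cipher + auth transform chain from the session parameters. The
 * chaining order selects decrypt/verify vs. encrypt/generate semantics.
 */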
static int
transform_chain_param(struct rte_crypto_sym_xform *xforms,
                VhostUserCryptoSessionParam *param)
{
        struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
        int ret;

        switch (param->chaining_dir) {
        case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
                xform_auth = xforms;
                xform_cipher = xforms->next;
                xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
                xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
                break;
        case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
                xform_cipher = xforms;
                xform_auth = xforms->next;
                xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
                xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
                break;
        default:
                return -VIRTIO_CRYPTO_BADMSG;
        }

        /* cipher */
        ret = cipher_algo_transform(param->cipher_algo);
        if (unlikely(ret < 0))
                return ret;
        xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
        xform_cipher->cipher.algo = (enum rte_crypto_cipher_algorithm)ret;
        xform_cipher->cipher.key.length = param->cipher_key_len;
        xform_cipher->cipher.key.data = param->cipher_key_buf;
        ret = get_iv_len(xform_cipher->cipher.algo);
        if (unlikely(ret < 0))
                return ret;
        xform_cipher->cipher.iv.length = (uint16_t)ret;
        xform_cipher->cipher.iv.offset = IV_OFFSET;

        /* auth */
        xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
        ret = auth_algo_transform(param->hash_algo);
        if (unlikely(ret < 0))
                return ret;
        xform_auth->auth.algo = (enum rte_crypto_auth_algorithm)ret;
        xform_auth->auth.digest_length = param->digest_len;
        xform_auth->auth.key.length = param->auth_key_len;
        xform_auth->auth.key.data = param->auth_key_buf;

        return 0;
}

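/**
 * Create a cryptodev session for the guest. On success the new session ID
 * is written into sess_param->session_id so that the reply message carries
 * it back to the guest; on failure a negative virtio-crypto status code is
 * written there instead.
 */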
static void
vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
                VhostUserCryptoSessionParam *sess_param)
{
        struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
        struct rte_cryptodev_sym_session *session;
        int ret;

        switch (sess_param->op_type) {
        case VIRTIO_CRYPTO_SYM_OP_NONE:
        case VIRTIO_CRYPTO_SYM_OP_CIPHER:
                ret = transform_cipher_param(&xform1, sess_param);
                if (unlikely(ret)) {
                        VC_LOG_ERR("Error transforming session message (%i)",
                                        ret);
                        sess_param->session_id = ret;
                        return;
                }
                break;
        case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
                if (unlikely(sess_param->hash_mode !=
                                VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
                        sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
                        VC_LOG_ERR("Error transforming session message (%i)",
                                        -VIRTIO_CRYPTO_NOTSUPP);
                        return;
                }

                xform1.next = &xform2;

                ret = transform_chain_param(&xform1, sess_param);
                if (unlikely(ret)) {
                        VC_LOG_ERR("Error transforming session message (%i)",
                                        ret);
                        sess_param->session_id = ret;
                        return;
                }

                break;
        default:
                VC_LOG_ERR("Algorithm not yet supported");
                sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
                return;
        }

        session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
        if (!session) {
                VC_LOG_ERR("Failed to create session");
                sess_param->session_id = -VIRTIO_CRYPTO_ERR;
                return;
        }

        if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
                        vcrypto->sess_pool) < 0) {
                VC_LOG_ERR("Failed to initialize session");
                /* do not leak the unusable session object */
                rte_cryptodev_sym_session_free(session);
                sess_param->session_id = -VIRTIO_CRYPTO_ERR;
                return;
        }

        /* insert the session into the session map */
        if (rte_hash_add_key_data(vcrypto->session_map,
                        &vcrypto->last_session_id, session) < 0) {
                VC_LOG_ERR("Failed to insert session to hash table");

                if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
                        VC_LOG_ERR("Failed to clear session");
                else {
                        if (rte_cryptodev_sym_session_free(session) < 0)
                                VC_LOG_ERR("Failed to free session");
                }
                sess_param->session_id = -VIRTIO_CRYPTO_ERR;
                return;
        }

        VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
                        vcrypto->last_session_id, vcrypto->dev->vid);

        sess_param->session_id = vcrypto->last_session_id;
        vcrypto->last_session_id++;
}

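/**
 * Destroy the cryptodev session bound to a guest session ID and remove it
 * from the session map.
 */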
static int
vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
{
        struct rte_cryptodev_sym_session *session;
        uint64_t sess_id = session_id;
        int ret;

        ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
                        (void **)&session);

        if (unlikely(ret < 0)) {
                VC_LOG_ERR("Failed to find session %"PRIu64" to delete.",
                                session_id);
                return -VIRTIO_CRYPTO_INVSESS;
        }

        if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
                VC_LOG_DBG("Failed to clear session");
                return -VIRTIO_CRYPTO_ERR;
        }

        if (rte_cryptodev_sym_session_free(session) < 0) {
                VC_LOG_DBG("Failed to free session");
                return -VIRTIO_CRYPTO_ERR;
        }

        if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
                VC_LOG_DBG("Failed to delete session from hash table.");
                return -VIRTIO_CRYPTO_ERR;
        }

        VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
                        vcrypto->dev->vid);

        return 0;
}

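/**
 * vhost-user post-message handler: services the crypto session create and
 * close requests after generic message processing has run.
 */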
static enum vh_result
vhost_crypto_msg_post_handler(int vid, void *msg)
{
        struct virtio_net *dev = get_device(vid);
        struct vhost_crypto *vcrypto;
        VhostUserMsg *vmsg = msg;
        enum vh_result ret = VH_RESULT_OK;

        if (dev == NULL) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return VH_RESULT_ERR;
        }

        vcrypto = dev->extern_data;
        if (vcrypto == NULL) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
                return VH_RESULT_ERR;
        }

        if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) {
                vhost_crypto_create_sess(vcrypto,
                                &vmsg->payload.crypto_session);
                vmsg->fd_num = 0;
                ret = VH_RESULT_REPLY;
        } else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS) {
                if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
                        ret = VH_RESULT_ERR;
        }

        return ret;
}

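/**
 * Walk the descriptor chain from @desc until the first write-only
 * descriptor is found, bounding the walk by the remaining descriptor
 * budget to defend against malicious or corrupted chains.
 */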
static __rte_always_inline struct vring_desc *
find_write_desc(struct vring_desc *head, struct vring_desc *desc,
                uint32_t *nb_descs, uint32_t vq_size)
{
        if (desc->flags & VRING_DESC_F_WRITE)
                return desc;

        while (desc->flags & VRING_DESC_F_NEXT) {
                if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
                        return NULL;
                (*nb_descs)--;

                desc = &head[desc->next];
                if (desc->flags & VRING_DESC_F_WRITE)
                        return desc;
        }

        return NULL;
}

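/**
 * Walk to the last descriptor of the chain, which carries the
 * virtio_crypto_inhdr status byte, and map it writable.
 */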
static struct virtio_crypto_inhdr *
reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
                uint32_t *nb_descs, uint32_t vq_size)
{
        uint64_t dlen;
        struct virtio_crypto_inhdr *inhdr;

        while (desc->flags & VRING_DESC_F_NEXT) {
                if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
                        return NULL;
                (*nb_descs)--;
                desc = &vc_req->head[desc->next];
        }

        dlen = desc->len;
        inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
                        &dlen, VHOST_ACCESS_WO);
        if (unlikely(!inhdr || dlen != desc->len))
                return NULL;

        return inhdr;
}

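/**
 * Advance *cur_desc past @size bytes of the chain. Returns -1 when the
 * chain is shorter than @size or a next index escapes the ring.
 */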
static __rte_always_inline int
move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
                uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
{
        struct vring_desc *desc = *cur_desc;
        int left = size - desc->len;

        while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
                (*nb_descs)--;
                if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
                        return -1;

                desc = &head[desc->next];
                rte_prefetch0(&head[desc->next]);
                left -= desc->len;
        }

        if (unlikely(left > 0))
                return -1;

        if (unlikely(*nb_descs == 0))
                *cur_desc = NULL;
        else {
                if (unlikely(desc->next >= vq_size))
                        return -1;
                *cur_desc = &head[desc->next];
        }

        return 0;
}

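/**
 * Map one descriptor's guest address to a host virtual address; fails if
 * the descriptor is not contiguous in host memory.
 */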
static __rte_always_inline void *
get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
                uint8_t perm)
{
        void *data;
        uint64_t dlen = cur_desc->len;

        data = IOVA_TO_VVA(void *, vc_req, cur_desc->addr, &dlen, perm);
        if (unlikely(!data || dlen != cur_desc->len)) {
                VC_LOG_ERR("Failed to map object");
                return NULL;
        }

        return data;
}

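/**
 * Copy @size bytes of guest data from the descriptor chain into @dst_data,
 * handling descriptors whose guest range maps to several discontiguous
 * host regions. *cur_desc is advanced past the copied data.
 */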
static int
copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
                struct vring_desc **cur_desc, uint32_t size,
                uint32_t *nb_descs, uint32_t vq_size)
{
        struct vring_desc *desc = *cur_desc;
        uint64_t remain, addr, dlen, len;
        uint32_t to_copy;
        uint8_t *data = dst_data;
        uint8_t *src;
        int left = size;

        to_copy = RTE_MIN(desc->len, (uint32_t)left);
        dlen = to_copy;
        src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
                        VHOST_ACCESS_RO);
        if (unlikely(!src || !dlen))
                return -1;

        rte_memcpy(data, src, dlen);
        data += dlen;

        if (unlikely(dlen < to_copy)) {
                remain = to_copy - dlen;
                addr = desc->addr + dlen;

                while (remain) {
                        len = remain;
                        src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
                                        VHOST_ACCESS_RO);
                        if (unlikely(!src || !len)) {
                                VC_LOG_ERR("Failed to map descriptor");
                                return -1;
                        }

                        rte_memcpy(data, src, len);
                        addr += len;
                        remain -= len;
                        data += len;
                }
        }

        left -= to_copy;

        while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
                if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
                        VC_LOG_ERR("Invalid descriptors");
                        return -1;
                }
                (*nb_descs)--;

                desc = &vc_req->head[desc->next];
                rte_prefetch0(&vc_req->head[desc->next]);
                to_copy = RTE_MIN(desc->len, (uint32_t)left);
                dlen = desc->len;
                src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
                                VHOST_ACCESS_RO);
                if (unlikely(!src || !dlen)) {
                        VC_LOG_ERR("Failed to map descriptor");
                        return -1;
                }

                rte_memcpy(data, src, dlen);
                data += dlen;

                if (unlikely(dlen < to_copy)) {
                        remain = to_copy - dlen;
                        addr = desc->addr + dlen;

                        while (remain) {
                                len = remain;
                                src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
                                                VHOST_ACCESS_RO);
                                if (unlikely(!src || !len)) {
                                        VC_LOG_ERR("Failed to map descriptor");
                                        return -1;
                                }

                                rte_memcpy(data, src, len);
                                addr += len;
                                remain -= len;
                                data += len;
                        }
                }

                left -= to_copy;
        }

        if (unlikely(left > 0)) {
                VC_LOG_ERR("Incorrect virtio descriptor");
                return -1;
        }

        if (unlikely(*nb_descs == 0))
                *cur_desc = NULL;
        else {
                if (unlikely(desc->next >= vq_size))
                        return -1;
                *cur_desc = &vc_req->head[desc->next];
        }

        return 0;
}

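/**
 * Copy the staged results back into guest memory once the crypto op has
 * completed, returning each list element to the write-back mempool as it
 * is consumed.
 */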
static void
write_back_data(struct vhost_crypto_data_req *vc_req)
{
        struct vhost_crypto_writeback_data *wb_data = vc_req->wb, *wb_last;

        while (wb_data) {
                rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
                wb_last = wb_data;
                wb_data = wb_data->next;
                rte_mempool_put(vc_req->wb_pool, wb_last);
        }
}

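/**
 * Return a (possibly partially built) write-back list to its mempool,
 * freeing the tail of the list before the given element.
 */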
static void
free_wb_data(struct vhost_crypto_writeback_data *wb_data,
                struct rte_mempool *mp)
{
        /* recurse to free the tail first; a "while" here would re-test a
         * pointer into memory already returned to the mempool
         */
        if (wb_data->next != NULL)
                free_wb_data(wb_data->next, mp);

        rte_mempool_put(mp, wb_data);
}

/**
 * Allocate a vhost_crypto_writeback_data linked list containing the source
 * and destination data pointers for the write back operation performed
 * after the op is dequeued from the Cryptodev PMD queue.
 *
 * @param vc_req
 *   The vhost crypto data request pointer
 * @param cur_desc
 *   Pointer to the current in-use descriptor pointer. The content of
 *   cur_desc is updated during the function's execution.
 * @param end_wb_data
 *   Returns the last write back data element. It is used only in cipher
 *   and hash chain operations.
 * @param src
 *   The source data pointer
 * @param offset
 *   The offset applied to both source and destination data. For the source
 *   data it is the number of bytes between src and the start point of the
 *   cipher operation. For the destination data it is the number of bytes
 *   from *cur_desc->addr to the point where the src data will be written.
 * @param write_back_len
 *   The length of the data to write back, in bytes.
 * @return
 *   The pointer to the start of the write back data linked list.
 */
static struct vhost_crypto_writeback_data *
prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
                struct vring_desc **cur_desc,
                struct vhost_crypto_writeback_data **end_wb_data,
                uint8_t *src,
                uint32_t offset,
                uint64_t write_back_len,
                uint32_t *nb_descs, uint32_t vq_size)
{
        /* head must start NULL so the error path can tell whether the
         * first allocation succeeded
         */
        struct vhost_crypto_writeback_data *wb_data, *head = NULL;
        struct vring_desc *desc = *cur_desc;
        uint64_t dlen;
        uint8_t *dst;
        int ret;

        ret = rte_mempool_get(vc_req->wb_pool, (void **)&head);
        if (unlikely(ret < 0)) {
                VC_LOG_ERR("no memory");
                goto error_exit;
        }

        wb_data = head;
        wb_data->next = NULL;

        if (likely(desc->len > offset)) {
                wb_data->src = src + offset;
                dlen = desc->len;
                /* check the mapping before applying the offset, so a NULL
                 * return from the map cannot be hidden by the addition
                 */
                dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr,
                        &dlen, VHOST_ACCESS_RW);
                if (unlikely(!dst || dlen != desc->len)) {
                        VC_LOG_ERR("Failed to map descriptor");
                        goto error_exit;
                }

                wb_data->dst = dst + offset;
                wb_data->len = desc->len - offset;
                write_back_len -= wb_data->len;
                src += offset + wb_data->len;
                offset = 0;

                if (unlikely(write_back_len)) {
                        ret = rte_mempool_get(vc_req->wb_pool,
                                        (void **)&(wb_data->next));
                        if (unlikely(ret < 0)) {
                                VC_LOG_ERR("no memory");
                                goto error_exit;
                        }

                        wb_data = wb_data->next;
                        /* terminate the list so error cleanup stops here */
                        wb_data->next = NULL;
                }
        } else
                offset -= desc->len;

        while (write_back_len) {
                if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
                        VC_LOG_ERR("Invalid descriptors");
                        goto error_exit;
                }
                (*nb_descs)--;

                desc = &vc_req->head[desc->next];
                if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
                        VC_LOG_ERR("incorrect descriptor");
                        goto error_exit;
                }

                if (desc->len <= offset) {
                        offset -= desc->len;
                        continue;
                }

                dlen = desc->len;
                dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
                                VHOST_ACCESS_RW);
                if (unlikely(dst == NULL || dlen != desc->len)) {
                        VC_LOG_ERR("Failed to map descriptor");
                        goto error_exit;
                }

                wb_data->src = src;
                wb_data->dst = dst + offset;
                wb_data->len = RTE_MIN(desc->len - offset, write_back_len);
                write_back_len -= wb_data->len;
                src += wb_data->len;
                offset = 0;

                if (write_back_len) {
                        ret = rte_mempool_get(vc_req->wb_pool,
                                        (void **)&(wb_data->next));
                        if (unlikely(ret < 0)) {
                                VC_LOG_ERR("no memory");
                                goto error_exit;
                        }

                        wb_data = wb_data->next;
                        wb_data->next = NULL;
                }
        }

        if (unlikely(*nb_descs == 0))
                *cur_desc = NULL;
        else {
                if (unlikely(desc->next >= vq_size))
                        goto error_exit;
                *cur_desc = &vc_req->head[desc->next];
        }

        *end_wb_data = wb_data;

        return head;

error_exit:
        if (head)
                free_wb_data(head, vc_req->wb_pool);

        return NULL;
}

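/**
 * Translate a virtio-crypto plain cipher request into a DPDK symmetric
 * crypto op: copy the IV, attach the source and destination data either by
 * zero-copy mbuf aliasing or by staged copies, and locate the inhdr.
 */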
static uint8_t
prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                struct vhost_crypto_data_req *vc_req,
                struct virtio_crypto_cipher_data_req *cipher,
                struct vring_desc *cur_desc,
                uint32_t *nb_descs, uint32_t vq_size)
{
        struct vring_desc *desc = cur_desc;
        struct vhost_crypto_writeback_data *ewb = NULL;
        struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
        uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
        uint8_t ret = 0;

        /* prepare */
        /* iv */
        if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
                        nb_descs, vq_size) < 0)) {
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        m_src->data_len = cipher->para.src_data_len;

        switch (vcrypto->option) {
        case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
                m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
                                cipher->para.src_data_len);
                m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
                if (unlikely(m_src->buf_iova == 0 ||
                                m_src->buf_addr == NULL)) {
                        VC_LOG_ERR("zero_copy may fail due to cross page data");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                if (unlikely(move_desc(vc_req->head, &desc,
                                cipher->para.src_data_len, nb_descs,
                                vq_size) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                break;
        case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                vc_req->wb_pool = vcrypto->wb_pool;

                if (unlikely(cipher->para.src_data_len >
                                RTE_MBUF_DEFAULT_BUF_SIZE)) {
                        VC_LOG_ERR("Not enough space to do data copy");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }
                if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
                                vc_req, &desc, cipher->para.src_data_len,
                                nb_descs, vq_size) < 0)) {
                        ret = VIRTIO_CRYPTO_BADMSG;
                        goto error_exit;
                }
                break;
        default:
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        /* dst */
        desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
        if (unlikely(!desc)) {
                VC_LOG_ERR("Cannot find write location");
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        switch (vcrypto->option) {
        case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
                m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
                                desc->addr, cipher->para.dst_data_len);
                m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
                if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
                        VC_LOG_ERR("zero_copy may fail due to cross page data");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                if (unlikely(move_desc(vc_req->head, &desc,
                                cipher->para.dst_data_len,
                                nb_descs, vq_size) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                m_dst->data_len = cipher->para.dst_data_len;
                break;
        case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
                                rte_pktmbuf_mtod(m_src, uint8_t *), 0,
                                cipher->para.dst_data_len, nb_descs, vq_size);
                if (unlikely(vc_req->wb == NULL)) {
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                break;
        default:
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        /* fill in the crypto op and locate the inhdr */
        op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
        op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

        op->sym->cipher.data.offset = 0;
        op->sym->cipher.data.length = cipher->para.src_data_len;

        vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
        if (unlikely(vc_req->inhdr == NULL)) {
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
        vc_req->len = cipher->para.dst_data_len + INHDR_LEN;

        return 0;

error_exit:
        if (vc_req->wb)
                free_wb_data(vc_req->wb, vc_req->wb_pool);

        vc_req->len = INHDR_LEN;
        return ret;
}

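/**
 * Translate a virtio-crypto cipher+hash chain request into a DPDK
 * symmetric crypto op: copy the IV, attach the source, destination and
 * digest regions (zero-copy or staged copies), and locate the inhdr.
 */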
static uint8_t
prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
                struct vhost_crypto_data_req *vc_req,
                struct virtio_crypto_alg_chain_data_req *chain,
                struct vring_desc *cur_desc,
                uint32_t *nb_descs, uint32_t vq_size)
{
        struct vring_desc *desc = cur_desc, *digest_desc;
        struct vhost_crypto_writeback_data *ewb = NULL, *ewb2 = NULL;
        struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
        uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
        uint32_t digest_offset;
        void *digest_addr;
        uint8_t ret = 0;

        /* prepare */
        /* iv */
        if (unlikely(copy_data(iv_data, vc_req, &desc,
                        chain->para.iv_len, nb_descs, vq_size) < 0)) {
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        m_src->data_len = chain->para.src_data_len;

        switch (vcrypto->option) {
        case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
                m_dst->data_len = chain->para.dst_data_len;

                m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
                                chain->para.src_data_len);
                m_src->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
                if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
                        VC_LOG_ERR("zero_copy may fail due to cross page data");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                if (unlikely(move_desc(vc_req->head, &desc,
                                chain->para.src_data_len,
                                nb_descs, vq_size) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }
                break;
        case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                vc_req->wb_pool = vcrypto->wb_pool;

                if (unlikely(chain->para.src_data_len >
                                RTE_MBUF_DEFAULT_BUF_SIZE)) {
                        VC_LOG_ERR("Not enough space to do data copy");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }
                /* the "< 0" must sit inside unlikely(); unlikely() maps its
                 * argument to 0/1 and so can never itself be negative
                 */
                if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
                                vc_req, &desc, chain->para.src_data_len,
                                nb_descs, vq_size) < 0)) {
                        ret = VIRTIO_CRYPTO_BADMSG;
                        goto error_exit;
                }

                break;
        default:
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        /* dst */
        desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
        if (unlikely(!desc)) {
                VC_LOG_ERR("Cannot find write location");
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        switch (vcrypto->option) {
        case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
                m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
                                desc->addr, chain->para.dst_data_len);
                m_dst->buf_addr = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
                if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
                        VC_LOG_ERR("zero_copy may fail due to cross page data");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                if (unlikely(move_desc(vc_req->head, &desc,
                                chain->para.dst_data_len,
                                nb_descs, vq_size) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
                                desc->addr, chain->para.hash_result_len);
                op->sym->auth.digest.data = get_data_ptr(vc_req, desc,
                                VHOST_ACCESS_RW);
                if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
                        VC_LOG_ERR("zero_copy may fail due to cross page data");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                if (unlikely(move_desc(vc_req->head, &desc,
                                chain->para.hash_result_len,
                                nb_descs, vq_size) < 0)) {
                        VC_LOG_ERR("Incorrect descriptor");
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                break;
        case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
                                rte_pktmbuf_mtod(m_src, uint8_t *),
                                chain->para.cipher_start_src_offset,
                                chain->para.dst_data_len -
                                chain->para.cipher_start_src_offset,
                                nb_descs, vq_size);
                if (unlikely(vc_req->wb == NULL)) {
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                digest_offset = m_src->data_len;
                digest_addr = rte_pktmbuf_mtod_offset(m_src, void *,
                                digest_offset);
                digest_desc = desc;

                /* create a wb_data for the digest */
                ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
                                digest_addr, 0, chain->para.hash_result_len,
                                nb_descs, vq_size);
                if (unlikely(ewb->next == NULL)) {
                        ret = VIRTIO_CRYPTO_ERR;
                        goto error_exit;
                }

                if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
                                chain->para.hash_result_len,
                                nb_descs, vq_size) < 0)) {
                        ret = VIRTIO_CRYPTO_BADMSG;
                        goto error_exit;
                }

                op->sym->auth.digest.data = digest_addr;
                op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_src,
                                digest_offset);
                break;
        default:
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        /* record inhdr */
        vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
        if (unlikely(vc_req->inhdr == NULL)) {
                ret = VIRTIO_CRYPTO_BADMSG;
                goto error_exit;
        }

        vc_req->inhdr->status = VIRTIO_CRYPTO_OK;

        op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
        op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;

        op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
        op->sym->cipher.data.length = chain->para.src_data_len -
                        chain->para.cipher_start_src_offset;

        op->sym->auth.data.offset = chain->para.hash_start_src_offset;
        op->sym->auth.data.length = chain->para.len_to_hash;

        vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
                        INHDR_LEN;
        return 0;

error_exit:
        if (vc_req->wb)
                free_wb_data(vc_req->wb, vc_req->wb_pool);
        vc_req->len = INHDR_LEN;
        return ret;
}

/**
 * Process one request from a descriptor chain.
 */
static __rte_always_inline int
vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                struct vhost_virtqueue *vq, struct rte_crypto_op *op,
                struct vring_desc *head, uint16_t desc_idx)
{
        struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
        struct rte_cryptodev_sym_session *session;
        struct virtio_crypto_op_data_req *req, tmp_req;
        struct virtio_crypto_inhdr *inhdr;
        struct vring_desc *desc = NULL;
        uint64_t session_id;
        uint64_t dlen;
        uint32_t nb_descs = vq->size;
        int err = 0;

        vc_req->desc_idx = desc_idx;
        vc_req->dev = vcrypto->dev;
        vc_req->vq = vq;

        if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
                dlen = head->len;
                nb_descs = dlen / sizeof(struct vring_desc);
                /* drop invalid descriptors */
                if (unlikely(nb_descs > vq->size))
                        return -1;
                desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
                                &dlen, VHOST_ACCESS_RO);
                if (unlikely(!desc || dlen != head->len))
                        return -1;
                desc_idx = 0;
                head = desc;
        } else {
                desc = head;
        }

        vc_req->head = head;
        vc_req->zero_copy = vcrypto->option;

        req = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
        if (unlikely(req == NULL)) {
                switch (vcrypto->option) {
                case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
                        err = VIRTIO_CRYPTO_BADMSG;
                        VC_LOG_ERR("Invalid descriptor");
                        goto error_exit;
                case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                        req = &tmp_req;
                        if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
                                        &nb_descs, vq->size) < 0)) {
                                err = VIRTIO_CRYPTO_BADMSG;
                                VC_LOG_ERR("Invalid descriptor");
                                goto error_exit;
                        }
                        break;
                default:
                        err = VIRTIO_CRYPTO_ERR;
                        VC_LOG_ERR("Invalid option");
                        goto error_exit;
                }
        } else {
                if (unlikely(move_desc(vc_req->head, &desc,
                                sizeof(*req), &nb_descs, vq->size) < 0)) {
                        /* set err so the inhdr does not report success */
                        err = VIRTIO_CRYPTO_ERR;
                        VC_LOG_ERR("Incorrect descriptor");
                        goto error_exit;
                }
        }

        switch (req->header.opcode) {
        case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
        case VIRTIO_CRYPTO_CIPHER_DECRYPT:
                session_id = req->header.session_id;

                /* one branch to avoid unnecessary table lookup */
                if (vcrypto->cache_session_id != session_id) {
                        err = rte_hash_lookup_data(vcrypto->session_map,
                                        &session_id, (void **)&session);
                        if (unlikely(err < 0)) {
                                err = VIRTIO_CRYPTO_ERR;
                                VC_LOG_ERR("Failed to find session %"PRIu64,
                                                session_id);
                                goto error_exit;
                        }

                        vcrypto->cache_session = session;
                        vcrypto->cache_session_id = session_id;
                }

                session = vcrypto->cache_session;

                err = rte_crypto_op_attach_sym_session(op, session);
                if (unlikely(err < 0)) {
                        err = VIRTIO_CRYPTO_ERR;
                        VC_LOG_ERR("Failed to attach session to op");
                        goto error_exit;
                }

                switch (req->u.sym_req.op_type) {
                case VIRTIO_CRYPTO_SYM_OP_NONE:
                        err = VIRTIO_CRYPTO_NOTSUPP;
                        break;
                case VIRTIO_CRYPTO_SYM_OP_CIPHER:
                        err = prepare_sym_cipher_op(vcrypto, op, vc_req,
                                        &req->u.sym_req.u.cipher, desc,
                                        &nb_descs, vq->size);
                        break;
                case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
                        err = prepare_sym_chain_op(vcrypto, op, vc_req,
                                        &req->u.sym_req.u.chain, desc,
                                        &nb_descs, vq->size);
                        break;
                }
                if (unlikely(err != 0)) {
                        VC_LOG_ERR("Failed to process sym request");
                        goto error_exit;
                }
                break;
        default:
                err = VIRTIO_CRYPTO_NOTSUPP;
                VC_LOG_ERR("Unsupported symmetric crypto request type %u",
                                req->header.opcode);
                goto error_exit;
        }

        return 0;

error_exit:

        inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
        if (likely(inhdr != NULL))
                inhdr->status = (uint8_t)err;

        return -1;
}

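/**
 * Complete one finished crypto op: perform any pending write-back, fill
 * the used ring entry, and release the mbufs. When the op belongs to a
 * different virtqueue than old_vq, that vq is returned without finalizing
 * so the caller can batch completions per virtqueue.
 */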
static __rte_always_inline struct vhost_virtqueue *
vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
                struct vhost_virtqueue *old_vq)
{
        struct rte_mbuf *m_src = op->sym->m_src;
        struct rte_mbuf *m_dst = op->sym->m_dst;
        struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
        uint16_t desc_idx;

        if (unlikely(!vc_req)) {
                VC_LOG_ERR("Failed to retrieve vc_req");
                return NULL;
        }

        if (old_vq && (vc_req->vq != old_vq))
                return vc_req->vq;

        desc_idx = vc_req->desc_idx;

        if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
                vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
        else {
                if (vc_req->zero_copy == 0)
                        write_back_data(vc_req);
        }

        vc_req->vq->used->ring[desc_idx].id = desc_idx;
        vc_req->vq->used->ring[desc_idx].len = vc_req->len;

        rte_mempool_put(m_src->pool, (void *)m_src);

        if (m_dst)
                rte_mempool_put(m_dst->pool, (void *)m_dst);

        return vc_req->vq;
}

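/**
 * Finalize a burst of ops that belong to the same virtqueue, stopping at
 * the first op from a different vq. Updates used->idx once for the whole
 * batch and reports the vq's callfd so the caller can kick the guest.
 */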
static __rte_always_inline uint16_t
vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
                uint16_t nb_ops, int *callfd)
{
        uint16_t processed = 1;
        struct vhost_virtqueue *vq, *tmp_vq;

        if (unlikely(nb_ops == 0))
                return 0;

        vq = vhost_crypto_finalize_one_request(ops[0], NULL);
        if (unlikely(vq == NULL))
                return 0;
        tmp_vq = vq;

        while (processed < nb_ops) {
                tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
                                tmp_vq);

                if (unlikely(vq != tmp_vq))
                        break;

                processed++;
        }

        *callfd = vq->callfd;

        *(volatile uint16_t *)&vq->used->idx += processed;

        return processed;
}

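/**
 * Per-device initialization: negotiate the crypto feature bits, allocate
 * the session map and the mbuf/write-back pools, and hook the vhost-user
 * post-message handler. A minimal usage sketch (the surrounding setup is
 * illustrative, not part of this file):
 *
 *   // cryptodev_id, sess_pool: a configured cryptodev and its session pool
 *   if (rte_vhost_crypto_create(vid, cryptodev_id, sess_pool,
 *                   rte_socket_id()) < 0)
 *           ; // handle the error
 */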
int __rte_experimental
rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
                struct rte_mempool *sess_pool, int socket_id)
{
        struct virtio_net *dev = get_device(vid);
        struct rte_hash_parameters params = {0};
        struct vhost_crypto *vcrypto;
        char name[128];
        int ret;

        if (!dev) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return -EINVAL;
        }

        ret = rte_vhost_driver_set_features(dev->ifname,
                        VIRTIO_CRYPTO_FEATURES);
        if (ret < 0) {
                VC_LOG_ERR("Error setting features");
                return -1;
        }

        vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (!vcrypto) {
                VC_LOG_ERR("Insufficient memory");
                return -ENOMEM;
        }

        vcrypto->sess_pool = sess_pool;
        vcrypto->cid = cryptodev_id;
        vcrypto->cache_session_id = UINT64_MAX;
        vcrypto->last_session_id = 1;
        vcrypto->dev = dev;
        vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;

        snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
        params.name = name;
        params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
        params.hash_func = rte_jhash;
        params.key_len = sizeof(uint64_t);
        params.socket_id = socket_id;
        vcrypto->session_map = rte_hash_create(&params);
        if (!vcrypto->session_map) {
                VC_LOG_ERR("Failed to create session map");
                ret = -ENOMEM;
                goto error_exit;
        }

        snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
        vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
                        VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
                        sizeof(struct vhost_crypto_data_req),
                        RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
                        rte_socket_id());
        if (!vcrypto->mbuf_pool) {
                VC_LOG_ERR("Failed to create mbuf pool");
                ret = -ENOMEM;
                goto error_exit;
        }

        snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
        vcrypto->wb_pool = rte_mempool_create(name,
                        VHOST_CRYPTO_MBUF_POOL_SIZE,
                        sizeof(struct vhost_crypto_writeback_data),
                        128, 0, NULL, NULL, NULL, NULL,
                        rte_socket_id(), 0);
        if (!vcrypto->wb_pool) {
                VC_LOG_ERR("Failed to create write-back pool");
                ret = -ENOMEM;
                goto error_exit;
        }

        dev->extern_data = vcrypto;
        dev->extern_ops.pre_msg_handle = NULL;
        dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;

        return 0;

error_exit:
        if (vcrypto->session_map)
                rte_hash_free(vcrypto->session_map);
        if (vcrypto->mbuf_pool)
                rte_mempool_free(vcrypto->mbuf_pool);

        rte_free(vcrypto);

        return ret;
}
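
/*
 * Usage sketch (illustrative, not part of this file): binding a freshly
 * connected vhost-user crypto device to a cryptodev from the new_device
 * callback. The socket path, cryptodev id 0, socket id 0 and the
 * pre-sized session mempool "example_sess_pool" are placeholder
 * assumptions the application must provide.
 */
static struct rte_mempool *example_sess_pool; /* sized for cryptodev sessions */

static int
example_new_device(int vid)
{
        /* Attach vid to cryptodev 0, allocating management data on socket 0 */
        if (rte_vhost_crypto_create(vid, 0, example_sess_pool, 0) < 0)
                return -1;
        return 0;
}

static const struct vhost_device_ops example_crypto_ops = {
        .new_device = example_new_device,
};

/* After rte_vhost_driver_register("/tmp/vhost-crypto.sock", 0):
 *      rte_vhost_driver_callback_register("/tmp/vhost-crypto.sock",
 *                      &example_crypto_ops);
 *      rte_vhost_driver_start("/tmp/vhost-crypto.sock");
 */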

int __rte_experimental
rte_vhost_crypto_free(int vid)
{
        struct virtio_net *dev = get_device(vid);
        struct vhost_crypto *vcrypto;

        if (unlikely(dev == NULL)) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return -EINVAL;
        }

        vcrypto = dev->extern_data;
        if (unlikely(vcrypto == NULL)) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
                return -ENOENT;
        }

        rte_hash_free(vcrypto->session_map);
        rte_mempool_free(vcrypto->mbuf_pool);
        rte_mempool_free(vcrypto->wb_pool);
        rte_free(vcrypto);

        dev->extern_data = NULL;
        dev->extern_ops.pre_msg_handle = NULL;
        dev->extern_ops.post_msg_handle = NULL;

        return 0;
}
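
/*
 * Teardown sketch (illustrative): rte_vhost_crypto_free() pairs with
 * rte_vhost_crypto_create() and is typically invoked from the
 * destroy_device callback of the same vhost_device_ops as above.
 */
static void
example_destroy_device(int vid)
{
        if (rte_vhost_crypto_free(vid) < 0)
                VC_LOG_ERR("Failed to free vhost crypto data of vid %i", vid);
}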

int __rte_experimental
rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
{
        struct virtio_net *dev = get_device(vid);
        struct vhost_crypto *vcrypto;

        if (unlikely(dev == NULL)) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return -EINVAL;
        }

        if (unlikely((uint32_t)option >=
                                RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
                VC_LOG_ERR("Invalid option %i", option);
                return -EINVAL;
        }

        vcrypto = (struct vhost_crypto *)dev->extern_data;
        if (unlikely(vcrypto == NULL)) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
                return -ENOENT;
        }

        if (vcrypto->option == (uint8_t)option)
                return 0;

        /* The option can only be switched while no requests are inflight,
         * i.e. when every object is back in its pool. Note the wb_pool is
         * NULL while zero copy is enabled, so guard the dereference.
         */
        if (!(rte_mempool_full(vcrypto->mbuf_pool)) ||
                        (vcrypto->wb_pool != NULL &&
                        !(rte_mempool_full(vcrypto->wb_pool)))) {
                VC_LOG_ERR("Cannot update zero copy as requests are inflight");
                return -EINVAL;
        }

        if (option == RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE) {
                char name[128];

                snprintf(name, 127, "WB_POOL_VM_%u", (uint32_t)vid);
                vcrypto->wb_pool = rte_mempool_create(name,
                                VHOST_CRYPTO_MBUF_POOL_SIZE,
                                sizeof(struct vhost_crypto_writeback_data),
                                128, 0, NULL, NULL, NULL, NULL,
                                rte_socket_id(), 0);
                if (!vcrypto->wb_pool) {
                        VC_LOG_ERR("Failed to create write-back pool");
                        return -ENOMEM;
                }
        } else {
                /* Zero copy does not need write-back buffers */
                rte_mempool_free(vcrypto->wb_pool);
                vcrypto->wb_pool = NULL;
        }

        vcrypto->option = (uint8_t)option;

        return 0;
}
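
/*
 * Sketch (illustrative): switching a device to zero copy. The switch is
 * only accepted while no requests are inflight, so it is typically done
 * right after rte_vhost_crypto_create(), e.g. from the new_device
 * callback sketched earlier.
 */
static int
example_enable_zero_copy(int vid)
{
        int ret = rte_vhost_crypto_set_zero_copy(vid,
                        RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE);

        if (ret < 0)
                VC_LOG_ERR("Cannot enable zero copy for vid %i", vid);
        return ret;
}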

uint16_t __rte_experimental
rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
        struct virtio_net *dev = get_device(vid);
        struct vhost_crypto *vcrypto;
        struct vhost_virtqueue *vq;
        uint16_t avail_idx;
        uint16_t start_idx;
        uint16_t count;
        uint16_t i = 0;

        /* the return type is a count, so error paths report 0 */
        if (unlikely(dev == NULL)) {
                VC_LOG_ERR("Invalid vid %i", vid);
                return 0;
        }

        if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
                VC_LOG_ERR("Invalid qid %u", qid);
                return 0;
        }

        vcrypto = (struct vhost_crypto *)dev->extern_data;
        if (unlikely(vcrypto == NULL)) {
                VC_LOG_ERR("Cannot find required data, is it initialized?");
                return 0;
        }

        vq = dev->virtqueue[qid];

        avail_idx = *((volatile uint16_t *)&vq->avail->idx);
        start_idx = vq->last_used_idx;
        count = avail_idx - start_idx;
        count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
        count = RTE_MIN(count, nb_ops);

        if (unlikely(count == 0))
                return 0;

        /* for zero copy, we need 2 empty mbufs for src and dst, otherwise
         * we need only 1 mbuf as src and dst
         */
        switch (vcrypto->option) {
        case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
                if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
                                (void **)mbufs, count * 2) < 0)) {
                        VC_LOG_ERR("Insufficient memory");
                        return 0;
                }

                for (i = 0; i < count; i++) {
                        uint16_t used_idx = (start_idx + i) & (vq->size - 1);
                        uint16_t desc_idx = vq->avail->ring[used_idx];
                        struct vring_desc *head = &vq->desc[desc_idx];
                        struct rte_crypto_op *op = ops[i];

                        op->sym->m_src = mbufs[i * 2];
                        op->sym->m_dst = mbufs[i * 2 + 1];
                        op->sym->m_src->data_off = 0;
                        op->sym->m_dst->data_off = 0;

                        if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
                                        op, head, desc_idx) < 0))
                                break;
                }

                if (unlikely(i < count))
                        rte_mempool_put_bulk(vcrypto->mbuf_pool,
                                        (void **)&mbufs[i * 2],
                                        (count - i) * 2);

                break;

        case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
                if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool,
                                (void **)mbufs, count) < 0)) {
                        VC_LOG_ERR("Insufficient memory");
                        return 0;
                }

                for (i = 0; i < count; i++) {
                        uint16_t used_idx = (start_idx + i) & (vq->size - 1);
                        uint16_t desc_idx = vq->avail->ring[used_idx];
                        struct vring_desc *head = &vq->desc[desc_idx];
                        struct rte_crypto_op *op = ops[i];

                        op->sym->m_src = mbufs[i];
                        op->sym->m_dst = NULL;
                        op->sym->m_src->data_off = 0;

                        if (unlikely(vhost_crypto_process_one_req(vcrypto, vq,
                                        op, head, desc_idx) < 0))
                                break;
                }

                if (unlikely(i < count))
                        rte_mempool_put_bulk(vcrypto->mbuf_pool,
                                        (void **)&mbufs[i],
                                        count - i);

                break;
        }

        vq->last_used_idx += i;

        return i;
}
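
/*
 * Data-path sketch (illustrative): feeding fetched requests into a
 * cryptodev. Queue pair 0, data virtqueue index 0 and the crypto-op
 * mempool "cop_pool" are placeholder assumptions; a real loop would also
 * retry the ops the cryptodev could not accept.
 */
static void
example_fetch_and_enqueue(int vid, uint8_t cdev_id,
                struct rte_mempool *cop_pool)
{
        struct rte_crypto_op *ops[VHOST_CRYPTO_MAX_BURST_SIZE];
        uint16_t nb_fetched, nb_enqd;

        if (rte_crypto_op_bulk_alloc(cop_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                        ops, VHOST_CRYPTO_MAX_BURST_SIZE) == 0)
                return;

        nb_fetched = rte_vhost_crypto_fetch_requests(vid, 0, ops,
                        VHOST_CRYPTO_MAX_BURST_SIZE);

        /* Return the ops we did not fill to the pool */
        if (nb_fetched < VHOST_CRYPTO_MAX_BURST_SIZE)
                rte_mempool_put_bulk(cop_pool, (void **)&ops[nb_fetched],
                                VHOST_CRYPTO_MAX_BURST_SIZE - nb_fetched);
        if (nb_fetched == 0)
                return;

        nb_enqd = rte_cryptodev_enqueue_burst(cdev_id, 0, ops, nb_fetched);
        if (unlikely(nb_enqd < nb_fetched))
                VC_LOG_ERR("%u op(s) not accepted by cryptodev",
                                nb_fetched - nb_enqd);
}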

uint16_t __rte_experimental
rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
                uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
{
        struct rte_crypto_op **tmp_ops = ops;
        uint16_t count = 0, left = nb_ops;
        int callfd;
        uint16_t idx = 0;

        /* Complete the ops virtqueue by virtqueue, recording one callfd
         * per touched vq so the caller can kick each guest queue once.
         */
        while (left) {
                count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
                                &callfd);
                if (unlikely(count == 0))
                        break;

                tmp_ops = &tmp_ops[count];
                left -= count;

                callfds[idx++] = callfd;

                if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
                        VC_LOG_ERR("Too many vqs");
                        break;
                }
        }

        *nb_callfds = idx;

        return nb_ops - left;
}
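
/*
 * Completion sketch (illustrative): draining the cryptodev and kicking
 * the guest. Queue pair 0 and the crypto-op mempool "cop_pool" are
 * placeholder assumptions, mbuf ownership handling is elided, and
 * eventfd_write() needs <sys/eventfd.h>.
 */
static void
example_dequeue_and_finalize(uint8_t cdev_id, struct rte_mempool *cop_pool)
{
        struct rte_crypto_op *ops[VHOST_CRYPTO_MAX_BURST_SIZE];
        int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
        uint16_t nb_deqd, nb_callfds, i;

        nb_deqd = rte_cryptodev_dequeue_burst(cdev_id, 0, ops,
                        VHOST_CRYPTO_MAX_BURST_SIZE);
        if (nb_deqd == 0)
                return;

        /* Write the results back to the guest and collect the callfds */
        rte_vhost_crypto_finalize_requests(ops, nb_deqd, callfds,
                        &nb_callfds);

        /* Interrupt each virtqueue that received completions */
        for (i = 0; i < nb_callfds; i++)
                eventfd_write(callfds[i], (eventfd_t)1);

        rte_mempool_put_bulk(cop_pool, (void **)ops, nb_deqd);
}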