1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2018 Intel Corporation
3  */
4 #include <rte_malloc.h>
5 #include <rte_hash.h>
6 #include <rte_jhash.h>
7 #include <rte_mbuf.h>
8 #include <rte_cryptodev.h>
9
10 #include "rte_vhost_crypto.h"
11 #include "vhost.h"
12 #include "vhost_user.h"
13 #include "virtio_crypto.h"
14
15 #define INHDR_LEN               (sizeof(struct virtio_crypto_inhdr))
16 #define IV_OFFSET               (sizeof(struct rte_crypto_op) + \
17                                 sizeof(struct rte_crypto_sym_op))
18
19 #ifdef RTE_LIBRTE_VHOST_DEBUG
20 #define VC_LOG_ERR(fmt, args...)                                \
21         RTE_LOG(ERR, USER1, "[%s] %s() line %u: " fmt "\n",     \
22                 "Vhost-Crypto", __func__, __LINE__, ## args)
23 #define VC_LOG_INFO(fmt, args...)                               \
24         RTE_LOG(INFO, USER1, "[%s] %s() line %u: " fmt "\n",    \
25                 "Vhost-Crypto", __func__, __LINE__, ## args)
26
27 #define VC_LOG_DBG(fmt, args...)                                \
28         RTE_LOG(DEBUG, USER1, "[%s] %s() line %u: " fmt "\n",   \
29                 "Vhost-Crypto", __func__, __LINE__, ## args)
30 #else
31 #define VC_LOG_ERR(fmt, args...)                                \
32         RTE_LOG(ERR, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
33 #define VC_LOG_INFO(fmt, args...)                               \
34         RTE_LOG(INFO, USER1, "[VHOST-Crypto]: " fmt "\n", ## args)
35 #define VC_LOG_DBG(fmt, args...)
36 #endif
37
38 #define VIRTIO_CRYPTO_FEATURES ((1 << VIRTIO_F_NOTIFY_ON_EMPTY) |       \
39                 (1 << VIRTIO_RING_F_INDIRECT_DESC) |                    \
40                 (1 << VIRTIO_RING_F_EVENT_IDX) |                        \
41                 (1 << VIRTIO_CRYPTO_SERVICE_CIPHER) |                   \
42                 (1 << VIRTIO_CRYPTO_SERVICE_MAC) |                      \
43                 (1 << VIRTIO_NET_F_CTRL_VQ))
44
45 #define IOVA_TO_VVA(t, r, a, l, p)                                      \
46         ((t)(uintptr_t)vhost_iova_to_vva(r->dev, r->vq, a, l, p))
47
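/*
 * Translate a virtio-crypto cipher algorithm ID into the corresponding
 * rte_cryptodev cipher algorithm. Returns -VIRTIO_CRYPTO_NOTSUPP for
 * algorithms without a DPDK counterpart and -VIRTIO_CRYPTO_BADMSG for
 * unknown values.
 */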
48 static int
49 cipher_algo_transform(uint32_t virtio_cipher_algo)
50 {
51         int ret;
52
53         switch (virtio_cipher_algo) {
54         case VIRTIO_CRYPTO_CIPHER_AES_CBC:
55                 ret = RTE_CRYPTO_CIPHER_AES_CBC;
56                 break;
57         case VIRTIO_CRYPTO_CIPHER_AES_CTR:
58                 ret = RTE_CRYPTO_CIPHER_AES_CTR;
59                 break;
60         case VIRTIO_CRYPTO_CIPHER_DES_ECB:
61                 ret = -VIRTIO_CRYPTO_NOTSUPP;
62                 break;
63         case VIRTIO_CRYPTO_CIPHER_DES_CBC:
64                 ret = RTE_CRYPTO_CIPHER_DES_CBC;
65                 break;
66         case VIRTIO_CRYPTO_CIPHER_3DES_ECB:
67                 ret = RTE_CRYPTO_CIPHER_3DES_ECB;
68                 break;
69         case VIRTIO_CRYPTO_CIPHER_3DES_CBC:
70                 ret = RTE_CRYPTO_CIPHER_3DES_CBC;
71                 break;
72         case VIRTIO_CRYPTO_CIPHER_3DES_CTR:
73                 ret = RTE_CRYPTO_CIPHER_3DES_CTR;
74                 break;
75         case VIRTIO_CRYPTO_CIPHER_KASUMI_F8:
76                 ret = RTE_CRYPTO_CIPHER_KASUMI_F8;
77                 break;
78         case VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2:
79                 ret = RTE_CRYPTO_CIPHER_SNOW3G_UEA2;
80                 break;
81         case VIRTIO_CRYPTO_CIPHER_AES_F8:
82                 ret = RTE_CRYPTO_CIPHER_AES_F8;
83                 break;
84         case VIRTIO_CRYPTO_CIPHER_AES_XTS:
85                 ret = RTE_CRYPTO_CIPHER_AES_XTS;
86                 break;
87         case VIRTIO_CRYPTO_CIPHER_ZUC_EEA3:
88                 ret = RTE_CRYPTO_CIPHER_ZUC_EEA3;
89                 break;
90         default:
91                 ret = -VIRTIO_CRYPTO_BADMSG;
92                 break;
93         }
94
95         return ret;
96 }
97
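/*
 * Translate a virtio-crypto MAC algorithm ID into the corresponding
 * rte_cryptodev auth algorithm, or a negative virtio status code if the
 * algorithm is unsupported or unknown.
 */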
98 static int
99 auth_algo_transform(uint32_t virtio_auth_algo)
100 {
101         int ret;
102
103         switch (virtio_auth_algo) {
104
105         case VIRTIO_CRYPTO_NO_MAC:
106                 ret = RTE_CRYPTO_AUTH_NULL;
107                 break;
108         case VIRTIO_CRYPTO_MAC_HMAC_MD5:
109                 ret = RTE_CRYPTO_AUTH_MD5_HMAC;
110                 break;
111         case VIRTIO_CRYPTO_MAC_HMAC_SHA1:
112                 ret = RTE_CRYPTO_AUTH_SHA1_HMAC;
113                 break;
114         case VIRTIO_CRYPTO_MAC_HMAC_SHA_224:
115                 ret = RTE_CRYPTO_AUTH_SHA224_HMAC;
116                 break;
117         case VIRTIO_CRYPTO_MAC_HMAC_SHA_256:
118                 ret = RTE_CRYPTO_AUTH_SHA256_HMAC;
119                 break;
120         case VIRTIO_CRYPTO_MAC_HMAC_SHA_384:
121                 ret = RTE_CRYPTO_AUTH_SHA384_HMAC;
122                 break;
123         case VIRTIO_CRYPTO_MAC_HMAC_SHA_512:
124                 ret = RTE_CRYPTO_AUTH_SHA512_HMAC;
125                 break;
126         case VIRTIO_CRYPTO_MAC_CMAC_3DES:
127                 ret = -VIRTIO_CRYPTO_NOTSUPP;
128                 break;
129         case VIRTIO_CRYPTO_MAC_CMAC_AES:
130                 ret = RTE_CRYPTO_AUTH_AES_CMAC;
131                 break;
132         case VIRTIO_CRYPTO_MAC_KASUMI_F9:
133                 ret = RTE_CRYPTO_AUTH_KASUMI_F9;
134                 break;
135         case VIRTIO_CRYPTO_MAC_SNOW3G_UIA2:
136                 ret = RTE_CRYPTO_AUTH_SNOW3G_UIA2;
137                 break;
138         case VIRTIO_CRYPTO_MAC_GMAC_AES:
139                 ret = RTE_CRYPTO_AUTH_AES_GMAC;
140                 break;
141         case VIRTIO_CRYPTO_MAC_GMAC_TWOFISH:
142                 ret = -VIRTIO_CRYPTO_NOTSUPP;
143                 break;
144         case VIRTIO_CRYPTO_MAC_CBCMAC_AES:
145                 ret = RTE_CRYPTO_AUTH_AES_CBC_MAC;
146                 break;
147         case VIRTIO_CRYPTO_MAC_CBCMAC_KASUMI_F9:
148                 ret = -VIRTIO_CRYPTO_NOTSUPP;
149                 break;
150         case VIRTIO_CRYPTO_MAC_XCBC_AES:
151                 ret = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
152                 break;
153         default:
154                 ret = -VIRTIO_CRYPTO_BADMSG;
155                 break;
156         }
157
158         return ret;
159 }
160
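/* Return the IV length in bytes expected by the cipher algorithm, or -1. */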
161 static int get_iv_len(enum rte_crypto_cipher_algorithm algo)
162 {
163         int len;
164
165         switch (algo) {
166         case RTE_CRYPTO_CIPHER_3DES_CBC:
167                 len = 8;
168                 break;
169         case RTE_CRYPTO_CIPHER_3DES_CTR:
170                 len = 8;
171                 break;
172         case RTE_CRYPTO_CIPHER_3DES_ECB:
173                 len = 8;
174                 break;
175         case RTE_CRYPTO_CIPHER_AES_CBC:
176                 len = 16;
177                 break;
178
179         /* TODO: add common algos */
180
181         default:
182                 len = -1;
183                 break;
184         }
185
186         return len;
187 }
188
189 /**
190  * vhost_crypto maintains the state of one vhost-user crypto device: the
191  * session map, the mbuf and session pools, and the single DPDK cryptodev
192  * that processes all crypto workloads for this device.
193  */
194 struct vhost_crypto {
195         /** Used to lookup DPDK Cryptodev Session based on VIRTIO crypto
196          *  session ID.
197          */
198         struct rte_hash *session_map;
199         struct rte_mempool *mbuf_pool;
200         struct rte_mempool *sess_pool;
201
202         /** DPDK cryptodev ID */
203         uint8_t cid;
204         uint16_t nb_qps;
205
206         uint64_t last_session_id;
207
208         uint64_t cache_session_id;
209         struct rte_cryptodev_sym_session *cache_session;
210         /** socket id for the device */
211         int socket_id;
212
213         struct virtio_net *dev;
214
215         uint8_t option;
216 } __rte_cache_aligned;
217
218 struct vhost_crypto_data_req {
219         struct vring_desc *head;
220         struct virtio_net *dev;
221         struct virtio_crypto_inhdr *inhdr;
222         struct vhost_virtqueue *vq;
223         struct vring_desc *wb_desc;
224         uint16_t wb_len;
225         uint16_t desc_idx;
226         uint16_t len;
227         uint16_t zero_copy;
228 };
229
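/*
 * Build a single cipher transform from the parameters carried in a
 * VHOST_USER_CRYPTO_CREATE_SESS message: algorithm, key, direction and
 * IV layout (the IV itself is read from the crypto op at IV_OFFSET).
 */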
230 static int
231 transform_cipher_param(struct rte_crypto_sym_xform *xform,
232                 VhostUserCryptoSessionParam *param)
233 {
234         int ret;
235
236         ret = cipher_algo_transform(param->cipher_algo);
237         if (unlikely(ret < 0))
238                 return ret;
239
240         xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
241         xform->cipher.algo = (uint32_t)ret;
242         xform->cipher.key.length = param->cipher_key_len;
243         if (xform->cipher.key.length > 0)
244                 xform->cipher.key.data = param->cipher_key_buf;
245         if (param->dir == VIRTIO_CRYPTO_OP_ENCRYPT)
246                 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
247         else if (param->dir == VIRTIO_CRYPTO_OP_DECRYPT)
248                 xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
249         else {
250                 VC_LOG_DBG("Bad operation type");
251                 return -VIRTIO_CRYPTO_BADMSG;
252         }
253
254         ret = get_iv_len(xform->cipher.algo);
255         if (unlikely(ret < 0))
256                 return ret;
257         xform->cipher.iv.length = (uint16_t)ret;
258         xform->cipher.iv.offset = IV_OFFSET;
259         return 0;
260 }
261
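/*
 * Build a two-element cipher+auth transform chain from the session
 * parameters. Cipher-then-hash maps to encrypt + generate digest,
 * hash-then-cipher maps to verify digest + decrypt.
 */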
262 static int
263 transform_chain_param(struct rte_crypto_sym_xform *xforms,
264                 VhostUserCryptoSessionParam *param)
265 {
266         struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
267         int ret;
268
269         switch (param->chaining_dir) {
270         case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER:
271                 xform_auth = xforms;
272                 xform_cipher = xforms->next;
273                 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
274                 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
275                 break;
276         case VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH:
277                 xform_cipher = xforms;
278                 xform_auth = xforms->next;
279                 xform_cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
280                 xform_auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
281                 break;
282         default:
283                 return -VIRTIO_CRYPTO_BADMSG;
284         }
285
286         /* cipher */
287         ret = cipher_algo_transform(param->cipher_algo);
288         if (unlikely(ret < 0))
289                 return ret;
290         xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
291         xform_cipher->cipher.algo = (uint32_t)ret;
292         xform_cipher->cipher.key.length = param->cipher_key_len;
293         xform_cipher->cipher.key.data = param->cipher_key_buf;
294         ret = get_iv_len(xform_cipher->cipher.algo);
295         if (unlikely(ret < 0))
296                 return ret;
297         xform_cipher->cipher.iv.length = (uint16_t)ret;
298         xform_cipher->cipher.iv.offset = IV_OFFSET;
299
300         /* auth */
301         xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
302         ret = auth_algo_transform(param->hash_algo);
303         if (unlikely(ret < 0))
304                 return ret;
305         xform_auth->auth.algo = (uint32_t)ret;
306         xform_auth->auth.digest_length = param->digest_len;
307         xform_auth->auth.key.length = param->auth_key_len;
308         xform_auth->auth.key.data = param->auth_key_buf;
309
310         return 0;
311 }
312
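/*
 * Handle a VHOST_USER_CRYPTO_CREATE_SESS request: translate the virtio
 * session parameters into DPDK transforms, create and initialize a
 * cryptodev session and record it in the session map. The new session ID
 * (or a negative virtio status on failure) is written back into
 * sess_param->session_id for the reply.
 */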
313 static void
314 vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
315                 VhostUserCryptoSessionParam *sess_param)
316 {
317         struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
318         struct rte_cryptodev_sym_session *session;
319         int ret;
320
321         switch (sess_param->op_type) {
322         case VIRTIO_CRYPTO_SYM_OP_NONE:
323         case VIRTIO_CRYPTO_SYM_OP_CIPHER:
324                 ret = transform_cipher_param(&xform1, sess_param);
325                 if (unlikely(ret)) {
326                         VC_LOG_ERR("Error transforming session message (%i)", ret);
327                         sess_param->session_id = ret;
328                         return;
329                 }
330                 break;
331         case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
332                 if (unlikely(sess_param->hash_mode !=
333                                 VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
334                         sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
335                         VC_LOG_ERR("Error transforming session message (%i)",
336                                         -VIRTIO_CRYPTO_NOTSUPP);
337                         return;
338                 }
339
340                 xform1.next = &xform2;
341
342                 ret = transform_chain_param(&xform1, sess_param);
343                 if (unlikely(ret)) {
344                         VC_LOG_ERR("Error transforming session message (%i)", ret);
345                         sess_param->session_id = ret;
346                         return;
347                 }
348
349                 break;
350         default:
351                 VC_LOG_ERR("Algorithm not yet supported");
352                 sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
353                 return;
354         }
355
356         session = rte_cryptodev_sym_session_create(vcrypto->sess_pool);
357         if (!session) {
358                 VC_LOG_ERR("Failed to create session");
359                 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
360                 return;
361         }
362
363         if (rte_cryptodev_sym_session_init(vcrypto->cid, session, &xform1,
364                         vcrypto->sess_pool) < 0) {
365                 VC_LOG_ERR("Failed to initialize session");
366                 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
367                 return;
368         }
369
370         /* insert hash to map */
371         if (rte_hash_add_key_data(vcrypto->session_map,
372                         &vcrypto->last_session_id, session) < 0) {
373                 VC_LOG_ERR("Failed to insert session to hash table");
374
375                 if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0)
376                         VC_LOG_ERR("Failed to clear session");
377                 else {
378                         if (rte_cryptodev_sym_session_free(session) < 0)
379                                 VC_LOG_ERR("Failed to free session");
380                 }
381                 sess_param->session_id = -VIRTIO_CRYPTO_ERR;
382                 return;
383         }
384
385         VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
386                         vcrypto->last_session_id, vcrypto->dev->vid);
387
388         sess_param->session_id = vcrypto->last_session_id;
389         vcrypto->last_session_id++;
390 }
391
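/*
 * Handle a VHOST_USER_CRYPTO_CLOSE_SESS request: look up the cryptodev
 * session by ID, clear and free it, then remove it from the session map.
 */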
392 static int
393 vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
394 {
395         struct rte_cryptodev_sym_session *session;
396         uint64_t sess_id = session_id;
397         int ret;
398
399         ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
400                         (void **)&session);
401
402         if (unlikely(ret < 0)) {
403                 VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
404                 return -VIRTIO_CRYPTO_INVSESS;
405         }
406
407         if (rte_cryptodev_sym_session_clear(vcrypto->cid, session) < 0) {
408                 VC_LOG_DBG("Failed to clear session");
409                 return -VIRTIO_CRYPTO_ERR;
410         }
411
412         if (rte_cryptodev_sym_session_free(session) < 0) {
413                 VC_LOG_DBG("Failed to free session");
414                 return -VIRTIO_CRYPTO_ERR;
415         }
416
417         if (rte_hash_del_key(vcrypto->session_map, &sess_id) < 0) {
418                 VC_LOG_DBG("Failed to delete session from hash table.");
419                 return -VIRTIO_CRYPTO_ERR;
420         }
421
422         VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
423                         vcrypto->dev->vid);
424
425         return 0;
426 }
427
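/*
 * Post-message handler registered through the device's extern_ops. It
 * services the crypto session create/close requests after the generic
 * vhost-user message handling has completed.
 */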
428 static enum vh_result
429 vhost_crypto_msg_post_handler(int vid, void *msg)
430 {
431         struct virtio_net *dev = get_device(vid);
432         struct vhost_crypto *vcrypto;
433         VhostUserMsg *vmsg = msg;
434         enum vh_result ret = VH_RESULT_OK;
435
436         if (dev == NULL) {
437                 VC_LOG_ERR("Invalid vid %i", vid);
438                 return VH_RESULT_ERR;
439         }
440
441         vcrypto = dev->extern_data;
442         if (vcrypto == NULL) {
443                 VC_LOG_ERR("Cannot find required data, is it initialized?");
444                 return VH_RESULT_ERR;
445         }
446
447         if (vmsg->request.master == VHOST_USER_CRYPTO_CREATE_SESS) {
448                 vhost_crypto_create_sess(vcrypto,
449                                 &vmsg->payload.crypto_session);
450                 vmsg->fd_num = 0;
451                 ret = VH_RESULT_REPLY;
452         } else if (vmsg->request.master == VHOST_USER_CRYPTO_CLOSE_SESS) {
453                 if (vhost_crypto_close_sess(vcrypto, vmsg->payload.u64))
454                         ret = VH_RESULT_ERR;
455         }
456
457         return ret;
458 }
459
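/*
 * Return the first device-writable descriptor in the chain, starting the
 * search at @desc, or NULL if the chain contains none.
 */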
460 static __rte_always_inline struct vring_desc *
461 find_write_desc(struct vring_desc *head, struct vring_desc *desc)
462 {
463         if (desc->flags & VRING_DESC_F_WRITE)
464                 return desc;
465
466         while (desc->flags & VRING_DESC_F_NEXT) {
467                 desc = &head[desc->next];
468                 if (desc->flags & VRING_DESC_F_WRITE)
469                         return desc;
470         }
471
472         return NULL;
473 }
474
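/*
 * Walk to the last descriptor of the chain, which carries the
 * virtio_crypto_inhdr status byte, and map it into host virtual memory.
 */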
475 static struct virtio_crypto_inhdr *
476 reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
477 {
478         uint64_t dlen;
479         struct virtio_crypto_inhdr *inhdr;
480
481         while (desc->flags & VRING_DESC_F_NEXT)
482                 desc = &vc_req->head[desc->next];
483
484         dlen = desc->len;
485         inhdr = IOVA_TO_VVA(struct virtio_crypto_inhdr *, vc_req, desc->addr,
486                         &dlen, VHOST_ACCESS_WO);
487         if (unlikely(!inhdr || dlen != desc->len))
488                 return NULL;
489
490         return inhdr;
491 }
492
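/*
 * Advance *cur_desc over @size bytes worth of descriptors without copying
 * any data. On success *cur_desc points to the descriptor following the
 * last one consumed.
 */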
493 static __rte_always_inline int
494 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
495                 uint32_t size)
496 {
497         struct vring_desc *desc = *cur_desc;
498         int left = size;
499
500         rte_prefetch0(&head[desc->next]);
501         left -= desc->len;
502
503         while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
504                 desc = &head[desc->next];
505                 rte_prefetch0(&head[desc->next]);
506                 left -= desc->len;
507         }
508
509         if (unlikely(left > 0)) {
510                 VC_LOG_ERR("Incorrect virtio descriptor");
511                 return -1;
512         }
513
514         *cur_desc = &head[desc->next];
515         return 0;
516 }
517
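/*
 * Copy @size bytes of guest data described by the chain at *cur_desc into
 * dst_data, handling descriptors whose guest-physical range maps to more
 * than one host region. On success *cur_desc is advanced past the copied
 * descriptors.
 */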
518 static int
519 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
520                 struct vring_desc **cur_desc, uint32_t size)
521 {
522         struct vring_desc *desc = *cur_desc;
523         uint64_t remain, addr, dlen, len;
524         uint32_t to_copy;
525         uint8_t *data = dst_data;
526         uint8_t *src;
527         int left = size;
528
529         rte_prefetch0(&vc_req->head[desc->next]);
530         to_copy = RTE_MIN(desc->len, (uint32_t)left);
531         dlen = to_copy;
532         src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
533                         VHOST_ACCESS_RO);
534         if (unlikely(!src || !dlen)) {
535                 VC_LOG_ERR("Failed to map descriptor");
536                 return -1;
537         }
538
539         rte_memcpy((uint8_t *)data, src, dlen);
540         data += dlen;
541
542         if (unlikely(dlen < to_copy)) {
543                 remain = to_copy - dlen;
544                 addr = desc->addr + dlen;
545
546                 while (remain) {
547                         len = remain;
548                         src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
549                                         VHOST_ACCESS_RO);
550                         if (unlikely(!src || !len)) {
551                                 VC_LOG_ERR("Failed to map descriptor");
552                                 return -1;
553                         }
554
555                         rte_memcpy(data, src, len);
556                         addr += len;
557                         remain -= len;
558                         data += len;
559                 }
560         }
561
562         left -= to_copy;
563
564         while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
565                 desc = &vc_req->head[desc->next];
566                 rte_prefetch0(&vc_req->head[desc->next]);
567                 to_copy = RTE_MIN(desc->len, (uint32_t)left);
568                 dlen = desc->len;
569                 src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
570                                 VHOST_ACCESS_RO);
571                 if (unlikely(!src || !dlen)) {
572                         VC_LOG_ERR("Failed to map descriptor");
573                         return -1;
574                 }
575
576                 rte_memcpy(data, src, dlen);
577                 data += dlen;
578
579                 if (unlikely(dlen < to_copy)) {
580                         remain = to_copy - dlen;
581                         addr = desc->addr + dlen;
582
583                         while (remain) {
584                                 len = remain;
585                                 src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
586                                                 VHOST_ACCESS_RO);
587                                 if (unlikely(!src || !len)) {
588                                         VC_LOG_ERR("Failed to map descriptor");
589                                         return -1;
590                                 }
591
592                                 rte_memcpy(data, src, len);
593                                 addr += len;
594                                 remain -= len;
595                                 data += len;
596                         }
597                 }
598
599                 left -= to_copy;
600         }
601
602         if (unlikely(left > 0)) {
603                 VC_LOG_ERR("Incorrect virtio descriptor");
604                 return -1;
605         }
606
607         *cur_desc = &vc_req->head[desc->next];
608
609         return 0;
610 }
611
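/*
 * Map the buffer behind *cur_desc into host virtual memory with the
 * requested permission and advance the descriptor pointer over @size
 * bytes. Returns NULL if the buffer is not contiguously mapped or the
 * chain is too short.
 */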
612 static __rte_always_inline void *
613 get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc **cur_desc,
614                 uint32_t size, uint8_t perm)
615 {
616         void *data;
617         uint64_t dlen = (*cur_desc)->len;
618
619         data = IOVA_TO_VVA(void *, vc_req, (*cur_desc)->addr, &dlen, perm);
620         if (unlikely(!data || dlen != (*cur_desc)->len)) {
621                 VC_LOG_ERR("Failed to map object");
622                 return NULL;
623         }
624
625         if (unlikely(move_desc(vc_req->head, cur_desc, size) < 0))
626                 return NULL;
627
628         return data;
629 }
630
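/*
 * Copy-mode completion path: copy the processed data from the destination
 * mbuf back into the guest's writable descriptors recorded in
 * vc_req->wb_desc (vc_req->wb_len bytes in total).
 */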
631 static int
632 write_back_data(struct rte_crypto_op *op, struct vhost_crypto_data_req *vc_req)
633 {
634         struct rte_mbuf *mbuf = op->sym->m_dst;
635         struct vring_desc *head = vc_req->head;
636         struct vring_desc *desc = vc_req->wb_desc;
637         int left = vc_req->wb_len;
638         uint32_t to_write;
639         uint8_t *src_data = mbuf->buf_addr, *dst;
640         uint64_t dlen;
641
642         rte_prefetch0(&head[desc->next]);
643         to_write = RTE_MIN(desc->len, (uint32_t)left);
644         dlen = desc->len;
645         dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
646                         VHOST_ACCESS_RW);
647         if (unlikely(!dst || dlen != desc->len)) {
648                 VC_LOG_ERR("Failed to map descriptor");
649                 return -1;
650         }
651
652         rte_memcpy(dst, src_data, to_write);
653         left -= to_write;
654         src_data += to_write;
655
656         while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
657                 desc = &head[desc->next];
658                 rte_prefetch0(&head[desc->next]);
659                 to_write = RTE_MIN(desc->len, (uint32_t)left);
660                 dlen = desc->len;
661                 dst = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
662                                 VHOST_ACCESS_RW);
663                 if (unlikely(!dst || dlen != desc->len)) {
664                         VC_LOG_ERR("Failed to map descriptor");
665                         return -1;
666                 }
667
668                 rte_memcpy(dst, src_data, to_write);
669                 left -= to_write;
670                 src_data += to_write;
671         }
672
673         if (unlikely(left > 0)) {
674                 VC_LOG_ERR("Incorrect virtio descriptor");
675                 return -1;
676         }
677
678         return 0;
679 }
680
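/*
 * Translate a virtio-crypto plain cipher request into a DPDK symmetric
 * crypto op: copy the IV, attach the source and destination buffers
 * (mapping guest pages directly in zero-copy mode, or copying into the
 * mbufs otherwise) and locate the inhdr used to report status back to the
 * guest. Returns VIRTIO_CRYPTO_OK (0) or a virtio error status.
 */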
681 static uint8_t
682 prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
683                 struct vhost_crypto_data_req *vc_req,
684                 struct virtio_crypto_cipher_data_req *cipher,
685                 struct vring_desc *cur_desc)
686 {
687         struct vring_desc *desc = cur_desc;
688         struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
689         uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
690         uint8_t ret = 0;
691
692         /* prepare */
693         /* iv */
694         if (unlikely(copy_data(iv_data, vc_req, &desc,
695                         cipher->para.iv_len) < 0)) {
696                 ret = VIRTIO_CRYPTO_BADMSG;
697                 goto error_exit;
698         }
699
700         m_src->data_len = cipher->para.src_data_len;
701
702         switch (vcrypto->option) {
703         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
704                 m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
705                                 cipher->para.src_data_len);
706                 m_src->buf_addr = get_data_ptr(vc_req, &desc,
707                                 cipher->para.src_data_len, VHOST_ACCESS_RO);
708                 if (unlikely(m_src->buf_iova == 0 ||
709                                 m_src->buf_addr == NULL)) {
710                         VC_LOG_ERR("zero_copy may fail due to cross page data");
711                         ret = VIRTIO_CRYPTO_ERR;
712                         goto error_exit;
713                 }
714                 break;
715         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
716                 if (unlikely(cipher->para.src_data_len >
717                                 RTE_MBUF_DEFAULT_BUF_SIZE)) {
718                         VC_LOG_ERR("Not enough space to do data copy");
719                         ret = VIRTIO_CRYPTO_ERR;
720                         goto error_exit;
721                 }
722                 if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
723                                 vc_req, &desc, cipher->para.src_data_len)
724                                 < 0)) {
725                         ret = VIRTIO_CRYPTO_BADMSG;
726                         goto error_exit;
727                 }
728                 break;
729         default:
730                 ret = VIRTIO_CRYPTO_BADMSG;
731                 goto error_exit;
732         }
733
734         /* dst */
735         desc = find_write_desc(vc_req->head, desc);
736         if (unlikely(!desc)) {
737                 VC_LOG_ERR("Cannot find write location");
738                 ret = VIRTIO_CRYPTO_BADMSG;
739                 goto error_exit;
740         }
741
742         switch (vcrypto->option) {
743         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
744                 m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
745                                 desc->addr, cipher->para.dst_data_len);
746                 m_dst->buf_addr = get_data_ptr(vc_req, &desc,
747                                 cipher->para.dst_data_len, VHOST_ACCESS_RW);
748                 if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
749                         VC_LOG_ERR("zero_copy may fail due to cross page data");
750                         ret = VIRTIO_CRYPTO_ERR;
751                         goto error_exit;
752                 }
753
754                 m_dst->data_len = cipher->para.dst_data_len;
755                 break;
756         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
757                 vc_req->wb_desc = desc;
758                 vc_req->wb_len = cipher->para.dst_data_len;
759                 if (unlikely(move_desc(vc_req->head, &desc,
760                                 vc_req->wb_len) < 0)) {
761                         ret = VIRTIO_CRYPTO_ERR;
762                         goto error_exit;
763                 }
764                 break;
765         default:
766                 ret = VIRTIO_CRYPTO_BADMSG;
767                 goto error_exit;
768         }
769
770         /* src data */
771         op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
772         op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
773
774         op->sym->cipher.data.offset = 0;
775         op->sym->cipher.data.length = cipher->para.src_data_len;
776
777         vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
778         if (unlikely(vc_req->inhdr == NULL)) {
779                 ret = VIRTIO_CRYPTO_BADMSG;
780                 goto error_exit;
781         }
782
783         vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
784         vc_req->len = cipher->para.dst_data_len + INHDR_LEN;
785
786         return 0;
787
788 error_exit:
789         vc_req->len = INHDR_LEN;
790         return ret;
791 }
792
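/*
 * Translate a virtio-crypto cipher+hash chained request into a DPDK
 * symmetric crypto op. In addition to the cipher setup, the digest area is
 * located either directly in guest memory (zero-copy mode) or right after
 * the payload in the destination mbuf (copy mode).
 */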
793 static uint8_t
794 prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
795                 struct vhost_crypto_data_req *vc_req,
796                 struct virtio_crypto_alg_chain_data_req *chain,
797                 struct vring_desc *cur_desc)
798 {
799         struct vring_desc *desc = cur_desc;
800         struct rte_mbuf *m_src = op->sym->m_src, *m_dst = op->sym->m_dst;
801         uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
802         uint32_t digest_offset;
803         void *digest_addr;
804         uint8_t ret = 0;
805
806         /* prepare */
807         /* iv */
808         if (unlikely(copy_data(iv_data, vc_req, &desc,
809                         chain->para.iv_len) < 0)) {
810                 ret = VIRTIO_CRYPTO_BADMSG;
811                 goto error_exit;
812         }
813
814         m_src->data_len = chain->para.src_data_len;
815         m_dst->data_len = chain->para.dst_data_len;
816
817         switch (vcrypto->option) {
818         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
819                 m_src->buf_iova = gpa_to_hpa(vcrypto->dev, desc->addr,
820                                 chain->para.src_data_len);
821                 m_src->buf_addr = get_data_ptr(vc_req, &desc,
822                                 chain->para.src_data_len, VHOST_ACCESS_RO);
823                 if (unlikely(m_src->buf_iova == 0 || m_src->buf_addr == NULL)) {
824                         VC_LOG_ERR("zero_copy may fail due to cross page data");
825                         ret = VIRTIO_CRYPTO_ERR;
826                         goto error_exit;
827                 }
828                 break;
829         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
830                 if (unlikely(chain->para.src_data_len >
831                                 RTE_MBUF_DEFAULT_BUF_SIZE)) {
832                         VC_LOG_ERR("Not enough space to do data copy");
833                         ret = VIRTIO_CRYPTO_ERR;
834                         goto error_exit;
835                 }
836                 if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
837                                 vc_req, &desc, chain->para.src_data_len) < 0)) {
838                         ret = VIRTIO_CRYPTO_BADMSG;
839                         goto error_exit;
840                 }
841                 break;
842         default:
843                 ret = VIRTIO_CRYPTO_BADMSG;
844                 goto error_exit;
845         }
846
847         /* dst */
848         desc = find_write_desc(vc_req->head, desc);
849         if (unlikely(!desc)) {
850                 VC_LOG_ERR("Cannot find write location");
851                 ret = VIRTIO_CRYPTO_BADMSG;
852                 goto error_exit;
853         }
854
855         switch (vcrypto->option) {
856         case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
857                 m_dst->buf_iova = gpa_to_hpa(vcrypto->dev,
858                                 desc->addr, chain->para.dst_data_len);
859                 m_dst->buf_addr = get_data_ptr(vc_req, &desc,
860                                 chain->para.dst_data_len, VHOST_ACCESS_RW);
861                 if (unlikely(m_dst->buf_iova == 0 || m_dst->buf_addr == NULL)) {
862                         VC_LOG_ERR("zero_copy may fail due to cross page data");
863                         ret = VIRTIO_CRYPTO_ERR;
864                         goto error_exit;
865                 }
866
867                 op->sym->auth.digest.phys_addr = gpa_to_hpa(vcrypto->dev,
868                                 desc->addr, chain->para.hash_result_len);
869                 op->sym->auth.digest.data = get_data_ptr(vc_req, &desc,
870                                 chain->para.hash_result_len, VHOST_ACCESS_RW);
871                 if (unlikely(op->sym->auth.digest.phys_addr == 0)) {
872                         VC_LOG_ERR("zero_copy may fail due to cross page data");
873                         ret = VIRTIO_CRYPTO_ERR;
874                         goto error_exit;
875                 }
876                 break;
877         case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
878                 digest_offset = m_dst->data_len;
879                 digest_addr = rte_pktmbuf_mtod_offset(m_dst, void *,
880                                 digest_offset);
881
882                 vc_req->wb_desc = desc;
883                 vc_req->wb_len = m_dst->data_len + chain->para.hash_result_len;
884
885                 if (unlikely(move_desc(vc_req->head, &desc,
886                                 chain->para.dst_data_len) < 0)) {
887                         ret = VIRTIO_CRYPTO_BADMSG;
888                         goto error_exit;
889                 }
890
891                 if (unlikely(copy_data(digest_addr, vc_req, &desc,
892                                 chain->para.hash_result_len) < 0)) {
893                         ret = VIRTIO_CRYPTO_BADMSG;
894                         goto error_exit;
895                 }
896
897                 op->sym->auth.digest.data = digest_addr;
898                 op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m_dst,
899                                 digest_offset);
900                 break;
901         default:
902                 ret = VIRTIO_CRYPTO_BADMSG;
903                 goto error_exit;
904         }
905
906         /* record inhdr */
907         vc_req->inhdr = get_data_ptr(vc_req, &desc, INHDR_LEN, VHOST_ACCESS_WO);
908         if (unlikely(vc_req->inhdr == NULL)) {
909                 ret = VIRTIO_CRYPTO_BADMSG;
910                 goto error_exit;
911         }
912
913         vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
914
915         op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
916         op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
917
918         op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
919         op->sym->cipher.data.length = chain->para.src_data_len -
920                         chain->para.cipher_start_src_offset;
921
922         op->sym->auth.data.offset = chain->para.hash_start_src_offset;
923         op->sym->auth.data.length = chain->para.len_to_hash;
924
925         vc_req->len = chain->para.dst_data_len + chain->para.hash_result_len +
926                         INHDR_LEN;
927         return 0;
928
929 error_exit:
930         vc_req->len = INHDR_LEN;
931         return ret;
932 }
933
934 /**
935  * Process one virtio-crypto request (one descriptor chain) into a DPDK op.
936  */
937 static __rte_always_inline int
938 vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
939                 struct vhost_virtqueue *vq, struct rte_crypto_op *op,
940                 struct vring_desc *head, uint16_t desc_idx)
941 {
942         struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
943         struct rte_cryptodev_sym_session *session;
944         struct virtio_crypto_op_data_req *req, tmp_req;
945         struct virtio_crypto_inhdr *inhdr;
946         struct vring_desc *desc = NULL;
947         uint64_t session_id;
948         uint64_t dlen;
949         int err = 0;
950
951         vc_req->desc_idx = desc_idx;
952         vc_req->dev = vcrypto->dev;
953         vc_req->vq = vq;
954
955         if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
956                 dlen = head->len;
957                 desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
958                                 &dlen, VHOST_ACCESS_RO);
959                 if (unlikely(!desc || dlen != head->len))
960                         return -1;
961                 desc_idx = 0;
962                 head = desc;
963         } else {
964                 desc = head;
965         }
966
967         vc_req->head = head;
968         vc_req->zero_copy = vcrypto->option;
969
970         req = get_data_ptr(vc_req, &desc, sizeof(*req), VHOST_ACCESS_RO);
971         if (unlikely(req == NULL)) {
972                 switch (vcrypto->option) {
973                 case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
974                         err = VIRTIO_CRYPTO_BADMSG;
975                         VC_LOG_ERR("Invalid descriptor");
976                         goto error_exit;
977                 case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
978                         req = &tmp_req;
979                         if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
980                                         < 0)) {
981                                 err = VIRTIO_CRYPTO_BADMSG;
982                                 VC_LOG_ERR("Invalid descriptor");
983                                 goto error_exit;
984                         }
985                         break;
986                 default:
987                         err = VIRTIO_CRYPTO_ERR;
988                         VC_LOG_ERR("Invalid option");
989                         goto error_exit;
990                 }
991         }
992
993         switch (req->header.opcode) {
994         case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
995         case VIRTIO_CRYPTO_CIPHER_DECRYPT:
996                 session_id = req->header.session_id;
997
998                 /* one branch to avoid unnecessary table lookup */
999                 if (vcrypto->cache_session_id != session_id) {
1000                         err = rte_hash_lookup_data(vcrypto->session_map,
1001                                         &session_id, (void **)&session);
1002                         if (unlikely(err < 0)) {
1003                                 err = VIRTIO_CRYPTO_ERR;
1004                                 VC_LOG_ERR("Failed to find session %"PRIu64,
1005                                                 session_id);
1006                                 goto error_exit;
1007                         }
1008
1009                         vcrypto->cache_session = session;
1010                         vcrypto->cache_session_id = session_id;
1011                 }
1012
1013                 session = vcrypto->cache_session;
1014
1015                 err = rte_crypto_op_attach_sym_session(op, session);
1016                 if (unlikely(err < 0)) {
1017                         err = VIRTIO_CRYPTO_ERR;
1018                         VC_LOG_ERR("Failed to attach session to op");
1019                         goto error_exit;
1020                 }
1021
1022                 switch (req->u.sym_req.op_type) {
1023                 case VIRTIO_CRYPTO_SYM_OP_NONE:
1024                         err = VIRTIO_CRYPTO_NOTSUPP;
1025                         break;
1026                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
1027                         err = prepare_sym_cipher_op(vcrypto, op, vc_req,
1028                                         &req->u.sym_req.u.cipher, desc);
1029                         break;
1030                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
1031                         err = prepare_sym_chain_op(vcrypto, op, vc_req,
1032                                         &req->u.sym_req.u.chain, desc);
1033                         break;
1034                 }
1035                 if (unlikely(err != 0)) {
1036                         VC_LOG_ERR("Failed to process sym request");
1037                         goto error_exit;
1038                 }
1039                 break;
1040         default:
1041                 VC_LOG_ERR("Unsupported symmetric crypto request type %u",
1042                                 req->header.opcode);
1043                 err = VIRTIO_CRYPTO_NOTSUPP;
                goto error_exit;
1044         }
1045
1046         return 0;
1047
1048 error_exit:
1049
1050         inhdr = reach_inhdr(vc_req, desc);
1051         if (likely(inhdr != NULL))
1052                 inhdr->status = (uint8_t)err;
1053
1054         return -1;
1055 }
1056
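/*
 * Complete one dequeued crypto op. If the request belongs to a different
 * virtqueue than @old_vq it is left untouched and its own queue is
 * returned so the caller can stop the current batch. Otherwise the result
 * is written back (copy mode), the used ring entry is filled and the
 * mbufs are returned to their pool.
 */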
1057 static __rte_always_inline struct vhost_virtqueue *
1058 vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
1059                 struct vhost_virtqueue *old_vq)
1060 {
1061         struct rte_mbuf *m_src = op->sym->m_src;
1062         struct rte_mbuf *m_dst = op->sym->m_dst;
1063         struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
1064         uint16_t desc_idx;
1065         int ret = 0;
1066
1067         if (unlikely(!vc_req)) {
1068                 VC_LOG_ERR("Failed to retrieve vc_req");
1069                 return NULL;
1070         }
1071
1072         if (old_vq && (vc_req->vq != old_vq))
1073                 return vc_req->vq;
1074
1075         desc_idx = vc_req->desc_idx;
1076
1077         if (unlikely(op->status != RTE_CRYPTO_OP_STATUS_SUCCESS))
1078                 vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
1079         else {
1080                 if (vc_req->zero_copy == 0) {
1081                         ret = write_back_data(op, vc_req);
1082                         if (unlikely(ret != 0))
1083                                 vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
1084                 }
1085         }
1086
1087         vc_req->vq->used->ring[desc_idx].id = desc_idx;
1088         vc_req->vq->used->ring[desc_idx].len = vc_req->len;
1089
1090         rte_mempool_put(m_dst->pool, (void *)m_dst);
1091         rte_mempool_put(m_src->pool, (void *)m_src);
1092
1093         return vc_req->vq;
1094 }
1095
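/*
 * Finalize a burst of crypto ops belonging to the same virtqueue, update
 * the used index once for the whole batch and report the queue's callfd so
 * the caller can notify the guest.
 */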
1096 static __rte_always_inline uint16_t
1097 vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
1098                 uint16_t nb_ops, int *callfd)
1099 {
1100         uint16_t processed = 1;
1101         struct vhost_virtqueue *vq, *tmp_vq;
1102
1103         if (unlikely(nb_ops == 0))
1104                 return 0;
1105
1106         vq = vhost_crypto_finalize_one_request(ops[0], NULL);
1107         if (unlikely(vq == NULL))
1108                 return 0;
1109         tmp_vq = vq;
1110
1111         while ((processed < nb_ops)) {
1112                 tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
1113                                 tmp_vq);
1114
1115                 if (unlikely(vq != tmp_vq))
1116                         break;
1117
1118                 processed++;
1119         }
1120
1121         *callfd = vq->callfd;
1122
1123         *(volatile uint16_t *)&vq->used->idx += processed;
1124
1125         return processed;
1126 }
1127
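/*
 * Typical use of this API by a vhost-user crypto backend (a sketch only;
 * the polling loop, cryptodev setup and names such as cdev_id, qp and
 * MAX_BURST are application-specific placeholders):
 *
 *   rte_vhost_crypto_create(vid, cdev_id, sess_pool, socket_id);
 *   rte_vhost_crypto_set_zero_copy(vid, RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE);
 *   ...
 *   nb = rte_vhost_crypto_fetch_requests(vid, qid, ops, MAX_BURST);
 *   nb = rte_cryptodev_enqueue_burst(cdev_id, qp, ops, nb);
 *   ...
 *   nb = rte_cryptodev_dequeue_burst(cdev_id, qp, ops, MAX_BURST);
 *   rte_vhost_crypto_finalize_requests(ops, nb, callfds, &nb_callfds);
 *   ...then write to each returned callfd (an eventfd) to notify the guest.
 */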
1128 int __rte_experimental
1129 rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
1130                 struct rte_mempool *sess_pool, int socket_id)
1131 {
1132         struct virtio_net *dev = get_device(vid);
1133         struct rte_hash_parameters params = {0};
1134         struct vhost_crypto *vcrypto;
1135         char name[128];
1136         int ret;
1137
1138         if (!dev) {
1139                 VC_LOG_ERR("Invalid vid %i", vid);
1140                 return -EINVAL;
1141         }
1142
1143         ret = rte_vhost_driver_set_features(dev->ifname,
1144                         VIRTIO_CRYPTO_FEATURES);
1145         if (ret < 0) {
1146                 VC_LOG_ERR("Error setting features");
1147                 return -1;
1148         }
1149
1150         vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
1151                         RTE_CACHE_LINE_SIZE, socket_id);
1152         if (!vcrypto) {
1153                 VC_LOG_ERR("Insufficient memory");
1154                 return -ENOMEM;
1155         }
1156
1157         vcrypto->sess_pool = sess_pool;
1158         vcrypto->cid = cryptodev_id;
1159         vcrypto->cache_session_id = UINT64_MAX;
1160         vcrypto->last_session_id = 1;
1161         vcrypto->dev = dev;
1162         vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
1163
1164         snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
1165         params.name = name;
1166         params.entries = VHOST_CRYPTO_SESSION_MAP_ENTRIES;
1167         params.hash_func = rte_jhash;
1168         params.key_len = sizeof(uint64_t);
1169         params.socket_id = socket_id;
1170         vcrypto->session_map = rte_hash_create(&params);
1171         if (!vcrypto->session_map) {
1172                 VC_LOG_ERR("Failed to create session map");
1173                 ret = -ENOMEM;
1174                 goto error_exit;
1175         }
1176
1177         snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
1178         vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
1179                         VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
1180                         sizeof(struct vhost_crypto_data_req),
1181                         RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
1182                         rte_socket_id());
1183         if (!vcrypto->mbuf_pool) {
1184                 VC_LOG_ERR("Failed to create mbuf pool");
1185                 ret = -ENOMEM;
1186                 goto error_exit;
1187         }
1188
1189         dev->extern_data = vcrypto;
1190         dev->extern_ops.pre_msg_handle = NULL;
1191         dev->extern_ops.post_msg_handle = vhost_crypto_msg_post_handler;
1192
1193         return 0;
1194
1195 error_exit:
1196         if (vcrypto->session_map)
1197                 rte_hash_free(vcrypto->session_map);
1198         if (vcrypto->mbuf_pool)
1199                 rte_mempool_free(vcrypto->mbuf_pool);
1200
1201         rte_free(vcrypto);
1202
1203         return ret;
1204 }
1205
1206 int __rte_experimental
1207 rte_vhost_crypto_free(int vid)
1208 {
1209         struct virtio_net *dev = get_device(vid);
1210         struct vhost_crypto *vcrypto;
1211
1212         if (unlikely(dev == NULL)) {
1213                 VC_LOG_ERR("Invalid vid %i", vid);
1214                 return -EINVAL;
1215         }
1216
1217         vcrypto = dev->extern_data;
1218         if (unlikely(vcrypto == NULL)) {
1219                 VC_LOG_ERR("Cannot find required data, is it initialized?");
1220                 return -ENOENT;
1221         }
1222
1223         rte_hash_free(vcrypto->session_map);
1224         rte_mempool_free(vcrypto->mbuf_pool);
1225         rte_free(vcrypto);
1226
1227         dev->extern_data = NULL;
1228         dev->extern_ops.pre_msg_handle = NULL;
1229         dev->extern_ops.post_msg_handle = NULL;
1230
1231         return 0;
1232 }
1233
1234 int __rte_experimental
1235 rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
1236 {
1237         struct virtio_net *dev = get_device(vid);
1238         struct vhost_crypto *vcrypto;
1239
1240         if (unlikely(dev == NULL)) {
1241                 VC_LOG_ERR("Invalid vid %i", vid);
1242                 return -EINVAL;
1243         }
1244
1245         if (unlikely((uint32_t)option >=
1246                                 RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
1247                 VC_LOG_ERR("Invalid option %i", option);
1248                 return -EINVAL;
1249         }
1250
1251         vcrypto = (struct vhost_crypto *)dev->extern_data;
1252         if (unlikely(vcrypto == NULL)) {
1253                 VC_LOG_ERR("Cannot find required data, is it initialized?");
1254                 return -ENOENT;
1255         }
1256
1257         if (vcrypto->option == (uint8_t)option)
1258                 return 0;
1259
1260         if (!(rte_mempool_full(vcrypto->mbuf_pool))) {
1261                 VC_LOG_ERR("Cannot update zero copy as mempool is not full");
1262                 return -EINVAL;
1263         }
1264
1265         vcrypto->option = (uint8_t)option;
1266
1267         return 0;
1268 }
1269
1270 uint16_t __rte_experimental
1271 rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
1272                 struct rte_crypto_op **ops, uint16_t nb_ops)
1273 {
1274         struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
1275         struct virtio_net *dev = get_device(vid);
1276         struct vhost_crypto *vcrypto;
1277         struct vhost_virtqueue *vq;
1278         uint16_t avail_idx;
1279         uint16_t start_idx;
1280         uint16_t required;
1281         uint16_t count;
1282         uint16_t i;
1283
1284         if (unlikely(dev == NULL)) {
1285                 VC_LOG_ERR("Invalid vid %i", vid);
1286                 return 0;
1287         }
1288
1289         if (unlikely(qid >= VHOST_MAX_QUEUE_PAIRS)) {
1290                 VC_LOG_ERR("Invalid qid %u", qid);
1291                 return 0;
1292         }
1293
1294         vcrypto = (struct vhost_crypto *)dev->extern_data;
1295         if (unlikely(vcrypto == NULL)) {
1296                 VC_LOG_ERR("Cannot find required data, is it initialized?");
1297                 return 0;
1298         }
1299
1300         vq = dev->virtqueue[qid];
1301
1302         avail_idx = *((volatile uint16_t *)&vq->avail->idx);
1303         start_idx = vq->last_used_idx;
1304         count = avail_idx - start_idx;
1305         count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
1306         count = RTE_MIN(count, nb_ops);
1307
1308         if (unlikely(count == 0))
1309                 return 0;
1310
1311         /* each request consumes two mbufs from the pool: one for the src
1312          * buffer and one for the dst buffer
1313          */
1314         required = count * 2;
1315         if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs,
1316                         required) < 0)) {
1317                 VC_LOG_ERR("Insufficient memory");
1318                 return 0;
1319         }
1320
1321         for (i = 0; i < count; i++) {
1322                 uint16_t used_idx = (start_idx + i) & (vq->size - 1);
1323                 uint16_t desc_idx = vq->avail->ring[used_idx];
1324                 struct vring_desc *head = &vq->desc[desc_idx];
1325                 struct rte_crypto_op *op = ops[i];
1326
1327                 op->sym->m_src = mbufs[i * 2];
1328                 op->sym->m_dst = mbufs[i * 2 + 1];
1329                 op->sym->m_src->data_off = 0;
1330                 op->sym->m_dst->data_off = 0;
1331
1332                 if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head,
1333                                 desc_idx) < 0))
1334                         break;
1335         }
1336
1337         vq->last_used_idx += i;
1338
1339         return i;
1340 }
1341
1342 uint16_t __rte_experimental
1343 rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
1344                 uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
1345 {
1346         struct rte_crypto_op **tmp_ops = ops;
1347         uint16_t count = 0, left = nb_ops;
1348         int callfd;
1349         uint16_t idx = 0;
1350
1351         while (left) {
1352                 count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
1353                                 &callfd);
1354                 if (unlikely(count == 0))
1355                         break;
1356
1357                 tmp_ops = &tmp_ops[count];
1358                 left -= count;
1359
1360                 callfds[idx++] = callfd;
1361
1362                 if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
1363                         VC_LOG_ERR("Too many vqs");
1364                         break;
1365                 }
1366         }
1367
1368         *nb_callfds = idx;
1369
1370         return nb_ops - left;
1371 }