drivers/crypto/virtio/virtio_cryptodev.c (deb_dpdk.git, upstream DPDK 18.08)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
3  */
4 #include <stdbool.h>
5 #include <unistd.h>
6
7 #include <rte_common.h>
8 #include <rte_errno.h>
9 #include <rte_pci.h>
10 #include <rte_bus_pci.h>
11 #include <rte_cryptodev.h>
12 #include <rte_cryptodev_pmd.h>
13 #include <rte_eal.h>
14
15 #include "virtio_cryptodev.h"
16 #include "virtqueue.h"
17 #include "virtio_crypto_algs.h"
18 #include "virtio_crypto_capabilities.h"
19
20 int virtio_crypto_logtype_init;
21 int virtio_crypto_logtype_session;
22 int virtio_crypto_logtype_rx;
23 int virtio_crypto_logtype_tx;
24 int virtio_crypto_logtype_driver;
25
26 static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
27                 struct rte_cryptodev_config *config);
28 static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
29 static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
30 static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
31 static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
32                 struct rte_cryptodev_info *dev_info);
33 static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
34                 struct rte_cryptodev_stats *stats);
35 static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
36 static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
37                 uint16_t queue_pair_id,
38                 const struct rte_cryptodev_qp_conf *qp_conf,
39                 int socket_id,
40                 struct rte_mempool *session_pool);
41 static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
42                 uint16_t queue_pair_id);
43 static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
44 static unsigned int virtio_crypto_sym_get_session_private_size(
45                 struct rte_cryptodev *dev);
46 static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
47                 struct rte_cryptodev_sym_session *sess);
48 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
49                 struct rte_crypto_sym_xform *xform,
50                 struct rte_cryptodev_sym_session *session,
51                 struct rte_mempool *mp);
52
53 /*
54  * The set of PCI devices this driver supports
55  */
56 static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
57         { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
58                                 VIRTIO_CRYPTO_PCI_DEVICEID) },
59         { .vendor_id = 0, /* sentinel */ },
60 };
61
62 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
63         VIRTIO_SYM_CAPABILITIES,
64         RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
65 };
66
67 uint8_t cryptodev_virtio_driver_id;
68
69 #define NUM_ENTRY_SYM_CREATE_SESSION 4
70
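/*
 * Send a session-create request on the control virtqueue: the ctrl request,
 * the optional cipher/auth keys and a device-writable session input are laid
 * out in one contiguous buffer and described by an indirect chain of up to
 * NUM_ENTRY_SYM_CREATE_SESSION descriptors; the queue is then notified and
 * the used ring is polled for the result.
 */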
71 static int
72 virtio_crypto_send_command(struct virtqueue *vq,
73                 struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
74                 uint8_t *auth_key, struct virtio_crypto_session *session)
75 {
76         uint8_t idx = 0;
77         uint8_t needed = 1;
78         uint32_t head = 0;
79         uint32_t len_cipher_key = 0;
80         uint32_t len_auth_key = 0;
81         uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
82         uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
83         uint32_t len_total = 0;
84         uint32_t input_offset = 0;
85         void *virt_addr_started = NULL;
86         phys_addr_t phys_addr_started;
87         struct vring_desc *desc;
88         uint32_t desc_offset;
89         struct virtio_crypto_session_input *input;
90         int ret;
91
92         PMD_INIT_FUNC_TRACE();
93
94         if (session == NULL) {
95                 VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
96                 return -EINVAL;
97         }
98         /* cipher only is supported; auth_key may be NULL but cipher_key is mandatory */
99         if (!cipher_key) {
100                 VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
101                 return -EINVAL;
102         }
103
104         head = vq->vq_desc_head_idx;
105         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
106                                         head, vq);
107
108         if (vq->vq_free_cnt < needed) {
109                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough free entries in vq");
110                 return -ENOSPC;
111         }
112
113         /* calculate the length of cipher key */
114         if (cipher_key) {
115                 switch (ctrl->u.sym_create_session.op_type) {
116                 case VIRTIO_CRYPTO_SYM_OP_CIPHER:
117                         len_cipher_key
118                                 = ctrl->u.sym_create_session.u.cipher
119                                                         .para.keylen;
120                         break;
121                 case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
122                         len_cipher_key
123                                 = ctrl->u.sym_create_session.u.chain
124                                         .para.cipher_param.keylen;
125                         break;
126                 default:
127                         VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
128                         return -EINVAL;
129                 }
130         }
131
132         /* calculate the length of auth key */
133         if (auth_key) {
134                 len_auth_key =
135                         ctrl->u.sym_create_session.u.chain.para.u.mac_param
136                                 .auth_key_len;
137         }
138
139         /*
140          * Allocate one buffer to hold the ctrl request, cipher key, auth key,
141          * session input and the indirect vring_desc entries
142          */
143         desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
144                 + len_session_input;
145         virt_addr_started = rte_malloc(NULL,
146                 desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
147                         * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
148         if (virt_addr_started == NULL) {
149                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
150                 return -ENOSPC;
151         }
152         phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
153
154         /* address to store indirect vring desc entries */
155         desc = (struct vring_desc *)
156                 ((uint8_t *)virt_addr_started + desc_offset);
157
158         /*  ctrl req part */
159         memcpy(virt_addr_started, ctrl, len_ctrl_req);
160         desc[idx].addr = phys_addr_started;
161         desc[idx].len = len_ctrl_req;
162         desc[idx].flags = VRING_DESC_F_NEXT;
163         desc[idx].next = idx + 1;
164         idx++;
165         len_total += len_ctrl_req;
166         input_offset += len_ctrl_req;
167
168         /* cipher key part */
169         if (len_cipher_key > 0) {
170                 memcpy((uint8_t *)virt_addr_started + len_total,
171                         cipher_key, len_cipher_key);
172
173                 desc[idx].addr = phys_addr_started + len_total;
174                 desc[idx].len = len_cipher_key;
175                 desc[idx].flags = VRING_DESC_F_NEXT;
176                 desc[idx].next = idx + 1;
177                 idx++;
178                 len_total += len_cipher_key;
179                 input_offset += len_cipher_key;
180         }
181
182         /* auth key part */
183         if (len_auth_key > 0) {
184                 memcpy((uint8_t *)virt_addr_started + len_total,
185                         auth_key, len_auth_key);
186
187                 desc[idx].addr = phys_addr_started + len_total;
188                 desc[idx].len = len_auth_key;
189                 desc[idx].flags = VRING_DESC_F_NEXT;
190                 desc[idx].next = idx + 1;
191                 idx++;
192                 len_total += len_auth_key;
193                 input_offset += len_auth_key;
194         }
195
196         /* input part */
197         input = (struct virtio_crypto_session_input *)
198                 ((uint8_t *)virt_addr_started + input_offset);
199         input->status = VIRTIO_CRYPTO_ERR;
200         input->session_id = ~0ULL;
201         desc[idx].addr = phys_addr_started + len_total;
202         desc[idx].len = len_session_input;
203         desc[idx].flags = VRING_DESC_F_WRITE;
204         idx++;
205
206         /* use a single desc entry */
207         vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
208         vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
209         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
210         vq->vq_free_cnt--;
211
212         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
213
214         vq_update_avail_ring(vq, head);
215         vq_update_avail_idx(vq);
216
217         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
218                                         vq->vq_queue_index);
219
220         virtqueue_notify(vq);
221
222         rte_rmb();
223         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
224                 rte_rmb();
225                 usleep(100);
226         }
227
228         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
229                 uint32_t idx, desc_idx, used_idx;
230                 struct vring_used_elem *uep;
231
232                 used_idx = (uint32_t)(vq->vq_used_cons_idx
233                                 & (vq->vq_nentries - 1));
234                 uep = &vq->vq_ring.used->ring[used_idx];
235                 idx = (uint32_t) uep->id;
236                 desc_idx = idx;
237
238                 while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
239                         desc_idx = vq->vq_ring.desc[desc_idx].next;
240                         vq->vq_free_cnt++;
241                 }
242
243                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
244                 vq->vq_desc_head_idx = idx;
245
246                 vq->vq_used_cons_idx++;
247                 vq->vq_free_cnt++;
248         }
249
250         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
251                         "vq->vq_desc_head_idx=%d",
252                         vq->vq_free_cnt, vq->vq_desc_head_idx);
253
254         /* get the result */
255         if (input->status != VIRTIO_CRYPTO_OK) {
256                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
257                                 "status=%u, session_id=%" PRIu64 "",
258                                 input->status, input->session_id);
259                 rte_free(virt_addr_started);
260                 ret = -1;
261         } else {
262                 session->session_id = input->session_id;
263
264                 VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
265                                 "session_id=%" PRIu64 "", input->session_id);
266                 rte_free(virt_addr_started);
267                 ret = 0;
268         }
269
270         return ret;
271 }
272
273 void
274 virtio_crypto_queue_release(struct virtqueue *vq)
275 {
276         struct virtio_crypto_hw *hw;
277
278         PMD_INIT_FUNC_TRACE();
279
280         if (vq) {
281                 hw = vq->hw;
282                 /* Select and deactivate the queue */
283                 VTPCI_OPS(hw)->del_queue(hw, vq);
284
285                 rte_memzone_free(vq->mz);
286                 rte_mempool_free(vq->mpool);
287                 rte_free(vq);
288         }
289 }
290
291 #define MPOOL_MAX_NAME_SZ 32
292
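/*
 * Allocate and initialize a control or data virtqueue: read the queue size
 * from the device, allocate the virtqueue structure (plus, for data queues,
 * a mempool of per-descriptor op cookies) and reserve the memzone backing
 * the vring.
 */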
293 int
294 virtio_crypto_queue_setup(struct rte_cryptodev *dev,
295                 int queue_type,
296                 uint16_t vtpci_queue_idx,
297                 uint16_t nb_desc,
298                 int socket_id,
299                 struct virtqueue **pvq)
300 {
301         char vq_name[VIRTQUEUE_MAX_NAME_SZ];
302         char mpool_name[MPOOL_MAX_NAME_SZ];
303         const struct rte_memzone *mz;
304         unsigned int vq_size, size;
305         struct virtio_crypto_hw *hw = dev->data->dev_private;
306         struct virtqueue *vq = NULL;
307         uint32_t i = 0;
308         uint32_t j;
309
310         PMD_INIT_FUNC_TRACE();
311
312         VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
313
314         /*
315          * Read the virtqueue size from the Queue Size field.
316          * It is always a power of 2; a value of 0 means the virtqueue does not exist.
317          */
318         vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
319         if (vq_size == 0) {
320                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
321                 return -EINVAL;
322         }
323         VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
324
325         if (!rte_is_power_of_2(vq_size)) {
326                 VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
327                 return -EINVAL;
328         }
329
330         if (queue_type == VTCRYPTO_DATAQ) {
331                 snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
332                                 dev->data->dev_id, vtpci_queue_idx);
333                 snprintf(mpool_name, sizeof(mpool_name),
334                                 "dev%d_dataqueue%d_mpool",
335                                 dev->data->dev_id, vtpci_queue_idx);
336         } else if (queue_type == VTCRYPTO_CTRLQ) {
337                 snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
338                                 dev->data->dev_id);
339                 snprintf(mpool_name, sizeof(mpool_name),
340                                 "dev%d_controlqueue_mpool",
341                                 dev->data->dev_id);
342         }
343         size = RTE_ALIGN_CEIL(sizeof(*vq) +
344                                 vq_size * sizeof(struct vq_desc_extra),
345                                 RTE_CACHE_LINE_SIZE);
346         vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
347                                 socket_id);
348         if (vq == NULL) {
349                 VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
350                 return -ENOMEM;
351         }
352
353         if (queue_type == VTCRYPTO_DATAQ) {
354                 /* pre-allocate a mempool and use it in the data plane to
355                  * improve performance
356                  */
357                 vq->mpool = rte_mempool_lookup(mpool_name);
358                 if (vq->mpool == NULL)
359                         vq->mpool = rte_mempool_create(mpool_name,
360                                         vq_size,
361                                         sizeof(struct virtio_crypto_op_cookie),
362                                         RTE_CACHE_LINE_SIZE, 0,
363                                         NULL, NULL, NULL, NULL, socket_id,
364                                         0);
365                 if (!vq->mpool) {
366                         VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
367                                         "Cannot create mempool");
368                         goto mpool_create_err;
369                 }
370                 for (i = 0; i < vq_size; i++) {
371                         vq->vq_descx[i].cookie =
372                                 rte_zmalloc("crypto PMD op cookie pointer",
373                                         sizeof(struct virtio_crypto_op_cookie),
374                                         RTE_CACHE_LINE_SIZE);
375                         if (vq->vq_descx[i].cookie == NULL) {
376                                 VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
377                                                 "alloc mem for cookie");
378                                 goto cookie_alloc_err;
379                         }
380                 }
381         }
382
383         vq->hw = hw;
384         vq->dev_id = dev->data->dev_id;
385         vq->vq_queue_index = vtpci_queue_idx;
386         vq->vq_nentries = vq_size;
387
388         /*
389          * Using part of the vring entries is permitted, but the maximum
390          * is vq_size
391          */
392         if (nb_desc == 0 || nb_desc > vq_size)
393                 nb_desc = vq_size;
394         vq->vq_free_cnt = nb_desc;
395
396         /*
397          * Reserve a memzone for vring elements
398          */
399         size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
400         vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
401         VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
402                         (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
403                         size, vq->vq_ring_size);
404
405         mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
406                         socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
407         if (mz == NULL) {
408                 if (rte_errno == EEXIST)
409                         mz = rte_memzone_lookup(vq_name);
410                 if (mz == NULL) {
411                         VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
412                         goto mz_reserve_err;
413                 }
414         }
415
416         /*
417          * The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32-bit and
418          * only accepts a 32-bit page frame number.
419          * Check that the allocated physical memory does not exceed 16TB.
420          */
421         if ((mz->phys_addr + vq->vq_ring_size - 1)
422                                 >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
423                 VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
424                                         "above 16TB!");
425                 goto vring_addr_err;
426         }
427
428         memset(mz->addr, 0, mz->len);
429         vq->mz = mz;
430         vq->vq_ring_mem = mz->phys_addr;
431         vq->vq_ring_virt_mem = mz->addr;
432         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
433                                         (uint64_t)mz->phys_addr);
434         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
435                                         (uint64_t)(uintptr_t)mz->addr);
436
437         *pvq = vq;
438
439         return 0;
440
441 vring_addr_err:
442         rte_memzone_free(mz);
443 mz_reserve_err:
444 cookie_alloc_err:
445         rte_mempool_free(vq->mpool);
446         if (i != 0) {
447                 for (j = 0; j < i; j++)
448                         rte_free(vq->vq_descx[j].cookie);
449         }
450 mpool_create_err:
451         rte_free(vq);
452         return -ENOMEM;
453 }
454
455 static int
456 virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
457 {
458         int ret;
459         struct virtqueue *vq;
460         struct virtio_crypto_hw *hw = dev->data->dev_private;
461
462         /* if virtio device has started, do not touch the virtqueues */
463         if (dev->data->dev_started)
464                 return 0;
465
466         PMD_INIT_FUNC_TRACE();
467
468         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
469                         0, SOCKET_ID_ANY, &vq);
470         if (ret < 0) {
471                 VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
472                 return ret;
473         }
474
475         hw->cvq = vq;
476
477         return 0;
478 }
479
480 static void
481 virtio_crypto_free_queues(struct rte_cryptodev *dev)
482 {
483         unsigned int i;
484         struct virtio_crypto_hw *hw = dev->data->dev_private;
485
486         PMD_INIT_FUNC_TRACE();
487
488         /* control queue release */
489         virtio_crypto_queue_release(hw->cvq);
490
491         /* data queue release */
492         for (i = 0; i < hw->max_dataqueues; i++)
493                 virtio_crypto_queue_release(dev->data->queue_pairs[i]);
494 }
495
496 static int
497 virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
498 {
499         return 0;
500 }
501
502 /*
503  * dev_ops for virtio, bare necessities for basic operation
504  */
505 static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
506         /* Device related operations */
507         .dev_configure                   = virtio_crypto_dev_configure,
508         .dev_start                       = virtio_crypto_dev_start,
509         .dev_stop                        = virtio_crypto_dev_stop,
510         .dev_close                       = virtio_crypto_dev_close,
511         .dev_infos_get                   = virtio_crypto_dev_info_get,
512
513         .stats_get                       = virtio_crypto_dev_stats_get,
514         .stats_reset                     = virtio_crypto_dev_stats_reset,
515
516         .queue_pair_setup                = virtio_crypto_qp_setup,
517         .queue_pair_release              = virtio_crypto_qp_release,
518         .queue_pair_count                = NULL,
519
520         /* Crypto related operations */
521         .sym_session_get_size           = virtio_crypto_sym_get_session_private_size,
522         .sym_session_configure          = virtio_crypto_sym_configure_session,
523         .sym_session_clear              = virtio_crypto_sym_clear_session
524 };
525
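/*
 * Rough usage sketch (illustrative only; see the DPDK 18.08 cryptodev API for
 * the exact structures): applications reach these ops through the generic
 * rte_cryptodev calls, e.g.
 *
 *     struct rte_cryptodev_config conf = {
 *             .socket_id = rte_socket_id(), .nb_queue_pairs = 1 };
 *     struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *     rte_cryptodev_configure(dev_id, &conf);          // -> dev_configure
 *     rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *             rte_socket_id(), sess_mp);               // -> queue_pair_setup
 *     rte_cryptodev_start(dev_id);                     // -> dev_start
 */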
526 static void
527 virtio_crypto_update_stats(struct rte_cryptodev *dev,
528                 struct rte_cryptodev_stats *stats)
529 {
530         unsigned int i;
531         struct virtio_crypto_hw *hw = dev->data->dev_private;
532
533         PMD_INIT_FUNC_TRACE();
534
535         if (stats == NULL) {
536                 VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
537                 return;
538         }
539
540         for (i = 0; i < hw->max_dataqueues; i++) {
541                 const struct virtqueue *data_queue
542                         = dev->data->queue_pairs[i];
543                 if (data_queue == NULL)
544                         continue;
545
546                 stats->enqueued_count += data_queue->packets_sent_total;
547                 stats->enqueue_err_count += data_queue->packets_sent_failed;
548
549                 stats->dequeued_count += data_queue->packets_received_total;
550                 stats->dequeue_err_count
551                         += data_queue->packets_received_failed;
552         }
553 }
554
555 static void
556 virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
557                 struct rte_cryptodev_stats *stats)
558 {
559         PMD_INIT_FUNC_TRACE();
560
561         virtio_crypto_update_stats(dev, stats);
562 }
563
564 static void
565 virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
566 {
567         unsigned int i;
568         struct virtio_crypto_hw *hw = dev->data->dev_private;
569
570         PMD_INIT_FUNC_TRACE();
571
572         for (i = 0; i < hw->max_dataqueues; i++) {
573                 struct virtqueue *data_queue = dev->data->queue_pairs[i];
574                 if (data_queue == NULL)
575                         continue;
576
577                 data_queue->packets_sent_total = 0;
578                 data_queue->packets_sent_failed = 0;
579
580                 data_queue->packets_received_total = 0;
581                 data_queue->packets_received_failed = 0;
582         }
583 }
584
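/*
 * Queue-pair setup: each queue pair maps 1:1 onto a data virtqueue with the
 * same index, created here via virtio_crypto_queue_setup().
 */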
585 static int
586 virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
587                 const struct rte_cryptodev_qp_conf *qp_conf,
588                 int socket_id,
589                 struct rte_mempool *session_pool __rte_unused)
590 {
591         int ret;
592         struct virtqueue *vq;
593
594         PMD_INIT_FUNC_TRACE();
595
596         /* if virtio dev is started, do not touch the virtqueues */
597         if (dev->data->dev_started)
598                 return 0;
599
600         ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
601                         qp_conf->nb_descriptors, socket_id, &vq);
602         if (ret < 0) {
603                 VIRTIO_CRYPTO_INIT_LOG_ERR(
604                         "virtio crypto data queue initialization failed");
605                 return ret;
606         }
607
608         dev->data->queue_pairs[queue_pair_id] = vq;
609
610         return 0;
611 }
612
613 static int
614 virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
615 {
616         struct virtqueue *vq
617                 = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
618
619         PMD_INIT_FUNC_TRACE();
620
621         if (vq == NULL) {
622                 VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
623                 return 0;
624         }
625
626         virtio_crypto_queue_release(vq);
627         return 0;
628 }
629
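/*
 * Feature negotiation: read the device (host) feature bits, intersect them
 * with the features requested by the driver and, for modern devices, confirm
 * the result by setting and re-reading the FEATURES_OK status bit.
 */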
630 static int
631 virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
632 {
633         uint64_t host_features;
634
635         PMD_INIT_FUNC_TRACE();
636
637         /* Prepare guest_features: features the driver wants to support */
638         VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
639                 req_features);
640
641         /* Read device(host) feature bits */
642         host_features = VTPCI_OPS(hw)->get_features(hw);
643         VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
644                 host_features);
645
646         /*
647          * Negotiate features: the subset of device feature bits accepted by
648          * the driver is written back as the guest feature bits.
649          */
650         hw->guest_features = req_features;
651         hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
652                                                         host_features);
653         VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
654                 hw->guest_features);
655
656         if (hw->modern) {
657                 if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
658                         VIRTIO_CRYPTO_INIT_LOG_ERR(
659                                 "VIRTIO_F_VERSION_1 features is not enabled.");
660                         return -1;
661                 }
662                 vtpci_cryptodev_set_status(hw,
663                         VIRTIO_CONFIG_STATUS_FEATURES_OK);
664                 if (!(vtpci_cryptodev_get_status(hw) &
665                         VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
666                         VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
667                                                 "status!");
668                         return -1;
669                 }
670         }
671
672         hw->req_guest_features = req_features;
673
674         return 0;
675 }
676
677 /* reset device and renegotiate features if needed */
678 static int
679 virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
680         uint64_t req_features)
681 {
682         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
683         struct virtio_crypto_config local_config;
684         struct virtio_crypto_config *config = &local_config;
685
686         PMD_INIT_FUNC_TRACE();
687
688         /* Reset the device although not necessary at startup */
689         vtpci_cryptodev_reset(hw);
690
691         /* Tell the host we've noticed this device. */
692         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
693
694         /* Tell the host we know how to drive the device. */
695         vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
696         if (virtio_negotiate_features(hw, req_features) < 0)
697                 return -1;
698
699         /* Get status of the device */
700         vtpci_read_cryptodev_config(hw,
701                 offsetof(struct virtio_crypto_config, status),
702                 &config->status, sizeof(config->status));
703         if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
704                 VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
705                                 "not ready");
706                 return -1;
707         }
708
709         /* Get number of data queues */
710         vtpci_read_cryptodev_config(hw,
711                 offsetof(struct virtio_crypto_config, max_dataqueues),
712                 &config->max_dataqueues,
713                 sizeof(config->max_dataqueues));
714         hw->max_dataqueues = config->max_dataqueues;
715
716         VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
717                 hw->max_dataqueues);
718
719         return 0;
720 }
721
722 /*
723  * Create and initialize the crypto device; called from the PCI probe path.
724  * It returns 0 on success.
725  */
726 static int
727 crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
728                 struct rte_cryptodev_pmd_init_params *init_params)
729 {
730         struct rte_cryptodev *cryptodev;
731         struct virtio_crypto_hw *hw;
732
733         PMD_INIT_FUNC_TRACE();
734
735         cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
736                                         init_params);
737         if (cryptodev == NULL)
738                 return -ENODEV;
739
740         cryptodev->driver_id = cryptodev_virtio_driver_id;
741         cryptodev->dev_ops = &virtio_crypto_dev_ops;
742
743         cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
744         cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
745
746         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
747                 RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
748
749         hw = cryptodev->data->dev_private;
750         hw->dev_id = cryptodev->data->dev_id;
751         hw->virtio_dev_capabilities = virtio_capabilities;
752
753         VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
754                 cryptodev->data->dev_id, pci_dev->id.vendor_id,
755                 pci_dev->id.device_id);
756
757         /* pci device init */
758         if (vtpci_cryptodev_init(pci_dev, hw))
759                 return -1;
760
761         if (virtio_crypto_init_device(cryptodev,
762                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
763                 return -1;
764
765         return 0;
766 }
767
768 static int
769 virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
770 {
771         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
772
773         PMD_INIT_FUNC_TRACE();
774
775         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
776                 return -EPERM;
777
778         if (cryptodev->data->dev_started) {
779                 virtio_crypto_dev_stop(cryptodev);
780                 virtio_crypto_dev_close(cryptodev);
781         }
782
783         cryptodev->dev_ops = NULL;
784         cryptodev->enqueue_burst = NULL;
785         cryptodev->dequeue_burst = NULL;
786
787         /* release control queue */
788         virtio_crypto_queue_release(hw->cvq);
789
790         rte_free(cryptodev->data);
791         cryptodev->data = NULL;
792
793         VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
794
795         return 0;
796 }
797
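/*
 * dev_configure: re-initialize the device with the PMD's guest features, then
 * set up and start the control queue, which lives at index hw->max_dataqueues
 * (after all data queues).
 */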
798 static int
799 virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
800         struct rte_cryptodev_config *config __rte_unused)
801 {
802         struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
803
804         PMD_INIT_FUNC_TRACE();
805
806         if (virtio_crypto_init_device(cryptodev,
807                         VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
808                 return -1;
809
810         /* Set up the control queue:
811          * queues [0, 1, ..., (max_dataqueues - 1)] are data queues and
812          * queue max_dataqueues is the control queue
813          */
814         if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
815                 VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
816                 return -1;
817         }
818         virtio_crypto_ctrlq_start(cryptodev);
819
820         return 0;
821 }
822
823 static void
824 virtio_crypto_dev_stop(struct rte_cryptodev *dev)
825 {
826         struct virtio_crypto_hw *hw = dev->data->dev_private;
827
828         PMD_INIT_FUNC_TRACE();
829         VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
830
831         vtpci_cryptodev_reset(hw);
832
833         virtio_crypto_dev_free_mbufs(dev);
834         virtio_crypto_free_queues(dev);
835
836         dev->data->dev_started = 0;
837 }
838
839 static int
840 virtio_crypto_dev_start(struct rte_cryptodev *dev)
841 {
842         struct virtio_crypto_hw *hw = dev->data->dev_private;
843
844         if (dev->data->dev_started)
845                 return 0;
846
847         /* Do final configuration before queue engine starts */
848         virtio_crypto_dataq_start(dev);
849         vtpci_cryptodev_reinit_complete(hw);
850
851         dev->data->dev_started = 1;
852
853         return 0;
854 }
855
856 static void
857 virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
858 {
859         uint32_t i;
860         struct virtio_crypto_hw *hw = dev->data->dev_private;
861
862         for (i = 0; i < hw->max_dataqueues; i++) {
863                 VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
864                         "and unused buf", i);
865                 VIRTQUEUE_DUMP((struct virtqueue *)
866                         dev->data->queue_pairs[i]);
867
868                 VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
869                                 i, dev->data->queue_pairs[i]);
870
871                 virtqueue_detatch_unused(dev->data->queue_pairs[i]);
872
873                 VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
874                                         "unused buf", i);
875                 VIRTQUEUE_DUMP(
876                         (struct virtqueue *)dev->data->queue_pairs[i]);
877         }
878 }
879
880 static unsigned int
881 virtio_crypto_sym_get_session_private_size(
882                 struct rte_cryptodev *dev __rte_unused)
883 {
884         PMD_INIT_FUNC_TRACE();
885
886         return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
887 }
888
889 static int
890 virtio_crypto_check_sym_session_paras(
891                 struct rte_cryptodev *dev)
892 {
893         struct virtio_crypto_hw *hw;
894
895         PMD_INIT_FUNC_TRACE();
896
897         if (unlikely(dev == NULL)) {
898                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
899                 return -1;
900         }
901         if (unlikely(dev->data == NULL)) {
902                 VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
903                 return -1;
904         }
905         hw = dev->data->dev_private;
906         if (unlikely(hw == NULL)) {
907                 VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
908                 return -1;
909         }
910         if (unlikely(hw->cvq == NULL)) {
911                 VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
912                 return -1;
913         }
914
915         return 0;
916 }
917
918 static int
919 virtio_crypto_check_sym_clear_session_paras(
920                 struct rte_cryptodev *dev,
921                 struct rte_cryptodev_sym_session *sess)
922 {
923         PMD_INIT_FUNC_TRACE();
924
925         if (sess == NULL) {
926                 VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
927                 return -1;
928         }
929
930         return virtio_crypto_check_sym_session_paras(dev);
931 }
932
933 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
934
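/*
 * Destroy a device session: build a DESTROY_SESSION ctrl request plus a
 * device-writable status byte, submit them through a two-entry indirect
 * descriptor chain on the control queue, poll for completion and return the
 * private session object to its mempool.
 */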
935 static void
936 virtio_crypto_sym_clear_session(
937                 struct rte_cryptodev *dev,
938                 struct rte_cryptodev_sym_session *sess)
939 {
940         struct virtio_crypto_hw *hw;
941         struct virtqueue *vq;
942         struct virtio_crypto_session *session;
943         struct virtio_crypto_op_ctrl_req *ctrl;
944         struct vring_desc *desc;
945         uint8_t *status;
946         uint8_t needed = 1;
947         uint32_t head;
948         uint8_t *malloc_virt_addr;
949         uint64_t malloc_phys_addr;
950         uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
951         uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
952         uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
953
954         PMD_INIT_FUNC_TRACE();
955
956         if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
957                 return;
958
959         hw = dev->data->dev_private;
960         vq = hw->cvq;
961         session = (struct virtio_crypto_session *)get_sym_session_private_data(
962                 sess, cryptodev_virtio_driver_id);
963         if (session == NULL) {
964                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
965                 return;
966         }
967
968         VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
969                         "vq = %p", vq->vq_desc_head_idx, vq);
970
971         if (vq->vq_free_cnt < needed) {
972                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
973                                 "vq->vq_free_cnt = %d is less than %d, "
974                                 "not enough", vq->vq_free_cnt, needed);
975                 return;
976         }
977
978         /*
979          * Allocate one buffer to hold the ctrl request, the returned status
980          * and the indirect desc vring entries
981          */
982         malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
983                 + NUM_ENTRY_SYM_CLEAR_SESSION
984                 * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
985         if (malloc_virt_addr == NULL) {
986                 VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
987                 return;
988         }
989         malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
990
991         /* assign ctrl request op part */
992         ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
993         ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
994         /* default data virtqueue is 0 */
995         ctrl->header.queue_id = 0;
996         ctrl->u.destroy_session.session_id = session->session_id;
997
998         /* status part */
999         status = &(((struct virtio_crypto_inhdr *)
1000                 ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
1001         *status = VIRTIO_CRYPTO_ERR;
1002
1003         /* indirect desc vring part */
1004         desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
1005                 + desc_offset);
1006
1007         /* ctrl request part */
1008         desc[0].addr = malloc_phys_addr;
1009         desc[0].len = len_op_ctrl_req;
1010         desc[0].flags = VRING_DESC_F_NEXT;
1011         desc[0].next = 1;
1012
1013         /* status part */
1014         desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
1015         desc[1].len = len_inhdr;
1016         desc[1].flags = VRING_DESC_F_WRITE;
1017
1018         /* use only a single desc entry */
1019         head = vq->vq_desc_head_idx;
1020         vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
1021         vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
1022         vq->vq_ring.desc[head].len
1023                 = NUM_ENTRY_SYM_CLEAR_SESSION
1024                 * sizeof(struct vring_desc);
1025         vq->vq_free_cnt -= needed;
1026
1027         vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
1028
1029         vq_update_avail_ring(vq, head);
1030         vq_update_avail_idx(vq);
1031
1032         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
1033                                         vq->vq_queue_index);
1034
1035         virtqueue_notify(vq);
1036
1037         rte_rmb();
1038         while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
1039                 rte_rmb();
1040                 usleep(100);
1041         }
1042
1043         while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
1044                 uint32_t idx, desc_idx, used_idx;
1045                 struct vring_used_elem *uep;
1046
1047                 used_idx = (uint32_t)(vq->vq_used_cons_idx
1048                                 & (vq->vq_nentries - 1));
1049                 uep = &vq->vq_ring.used->ring[used_idx];
1050                 idx = (uint32_t) uep->id;
1051                 desc_idx = idx;
1052                 while (vq->vq_ring.desc[desc_idx].flags
1053                                 & VRING_DESC_F_NEXT) {
1054                         desc_idx = vq->vq_ring.desc[desc_idx].next;
1055                         vq->vq_free_cnt++;
1056                 }
1057
1058                 vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
1059                 vq->vq_desc_head_idx = idx;
1060                 vq->vq_used_cons_idx++;
1061                 vq->vq_free_cnt++;
1062         }
1063
1064         if (*status != VIRTIO_CRYPTO_OK) {
1065                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
1066                                 "status=%"PRIu32", session_id=%"PRIu64"",
1067                                 *status, session->session_id);
1068                 rte_free(malloc_virt_addr);
1069                 return;
1070         }
1071
1072         VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
1073                         "vq->vq_desc_head_idx=%d",
1074                         vq->vq_free_cnt, vq->vq_desc_head_idx);
1075
1076         VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
1077                         session->session_id);
1078
1079         memset(session, 0, sizeof(struct virtio_crypto_session));
1080         struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
1081         set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
1082         rte_mempool_put(sess_mp, session);
1083         rte_free(malloc_virt_addr);
1084 }
1085
1086 static struct rte_crypto_cipher_xform *
1087 virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
1088 {
1089         do {
1090                 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1091                         return &xform->cipher;
1092
1093                 xform = xform->next;
1094         } while (xform);
1095
1096         return NULL;
1097 }
1098
1099 static struct rte_crypto_auth_xform *
1100 virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
1101 {
1102         do {
1103                 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1104                         return &xform->auth;
1105
1106                 xform = xform->next;
1107         } while (xform);
1108
1109         return NULL;
1110 }
1111
1112 /** Get xform chain order */
1113 static int
1114 virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
1115 {
1116         if (xform == NULL)
1117                 return -1;
1118
1119         /* Cipher Only */
1120         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1121                         xform->next == NULL)
1122                 return VIRTIO_CRYPTO_CMD_CIPHER;
1123
1124         /* Authentication Only */
1125         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1126                         xform->next == NULL)
1127                 return VIRTIO_CRYPTO_CMD_AUTH;
1128
1129         /* Authenticate then Cipher */
1130         if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1131                         xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
1132                 return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
1133
1134         /* Cipher then Authenticate */
1135         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1136                         xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
1137                 return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
1138
1139         return -1;
1140 }
1141
1142 static int
1143 virtio_crypto_sym_pad_cipher_param(
1144                 struct virtio_crypto_cipher_session_para *para,
1145                 struct rte_crypto_cipher_xform *cipher_xform)
1146 {
1147         switch (cipher_xform->algo) {
1148         case RTE_CRYPTO_CIPHER_AES_CBC:
1149                 para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
1150                 break;
1151         default:
1152                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
1153                                 "Cipher alg %u", cipher_xform->algo);
1154                 return -1;
1155         }
1156
1157         para->keylen = cipher_xform->key.length;
1158         switch (cipher_xform->op) {
1159         case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
1160                 para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
1161                 break;
1162         case RTE_CRYPTO_CIPHER_OP_DECRYPT:
1163                 para->op = VIRTIO_CRYPTO_OP_DECRYPT;
1164                 break;
1165         default:
1166                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
1167                                         "parameter");
1168                 return -1;
1169         }
1170
1171         return 0;
1172 }
1173
1174 static int
1175 virtio_crypto_sym_pad_auth_param(
1176                 struct virtio_crypto_op_ctrl_req *ctrl,
1177                 struct rte_crypto_auth_xform *auth_xform)
1178 {
1179         uint32_t *algo;
1180         struct virtio_crypto_alg_chain_session_para *para =
1181                 &(ctrl->u.sym_create_session.u.chain.para);
1182
1183         switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
1184         case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
1185                 algo = &(para->u.hash_param.algo);
1186                 break;
1187         case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
1188                 algo = &(para->u.mac_param.algo);
1189                 break;
1190         default:
1191                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
1192                         "specified",
1193                         ctrl->u.sym_create_session.u.chain.para.hash_mode);
1194                 return -1;
1195         }
1196
1197         switch (auth_xform->algo) {
1198         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1199                 *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
1200                 break;
1201         default:
1202                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1203                         "Crypto: Undefined Hash algo %u specified",
1204                         auth_xform->algo);
1205                 return -1;
1206         }
1207
1208         return 0;
1209 }
1210
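/*
 * Fill the CREATE_SESSION ctrl request from the cipher and/or auth xforms
 * and return pointers to the raw key material that has to be sent along
 * with the request.
 */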
1211 static int
1212 virtio_crypto_sym_pad_op_ctrl_req(
1213                 struct virtio_crypto_op_ctrl_req *ctrl,
1214                 struct rte_crypto_sym_xform *xform, bool is_chained,
1215                 uint8_t **cipher_key_data, uint8_t **auth_key_data,
1216                 struct virtio_crypto_session *session)
1217 {
1218         int ret;
1219         struct rte_crypto_auth_xform *auth_xform = NULL;
1220         struct rte_crypto_cipher_xform *cipher_xform = NULL;
1221
1222         /* Get cipher xform from crypto xform chain */
1223         cipher_xform = virtio_crypto_get_cipher_xform(xform);
1224         if (cipher_xform) {
1225                 if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
1226                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1227                                 "cipher IV size cannot be longer than %u",
1228                                 VIRTIO_CRYPTO_MAX_IV_SIZE);
1229                         return -1;
1230                 }
1231                 if (is_chained)
1232                         ret = virtio_crypto_sym_pad_cipher_param(
1233                                 &ctrl->u.sym_create_session.u.chain.para
1234                                                 .cipher_param, cipher_xform);
1235                 else
1236                         ret = virtio_crypto_sym_pad_cipher_param(
1237                                 &ctrl->u.sym_create_session.u.cipher.para,
1238                                 cipher_xform);
1239
1240                 if (ret < 0) {
1241                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1242                                 "pad cipher parameter failed");
1243                         return -1;
1244                 }
1245
1246                 *cipher_key_data = cipher_xform->key.data;
1247
1248                 session->iv.offset = cipher_xform->iv.offset;
1249                 session->iv.length = cipher_xform->iv.length;
1250         }
1251
1252         /* Get auth xform from crypto xform chain */
1253         auth_xform = virtio_crypto_get_auth_xform(xform);
1254         if (auth_xform) {
1255                 /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
1256                 struct virtio_crypto_alg_chain_session_para *para =
1257                         &(ctrl->u.sym_create_session.u.chain.para);
1258                 if (auth_xform->key.length) {
1259                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
1260                         para->u.mac_param.auth_key_len =
1261                                 (uint32_t)auth_xform->key.length;
1262                         para->u.mac_param.hash_result_len =
1263                                 auth_xform->digest_length;
1264
1265                         *auth_key_data = auth_xform->key.data;
1266                 } else {
1267                         para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
1268                         para->u.hash_param.hash_result_len =
1269                                 auth_xform->digest_length;
1270                 }
1271
1272                 ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
1273                 if (ret < 0) {
1274                         VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
1275                                                 "failed");
1276                         return -1;
1277                 }
1278         }
1279
1280         return 0;
1281 }
1282
1283 static int
1284 virtio_crypto_check_sym_configure_session_paras(
1285                 struct rte_cryptodev *dev,
1286                 struct rte_crypto_sym_xform *xform,
1287                 struct rte_cryptodev_sym_session *sym_sess,
1288                 struct rte_mempool *mempool)
1289 {
1290         if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
1291                 unlikely(mempool == NULL)) {
1292                 VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
1293                 return -1;
1294         }
1295
1296         if (virtio_crypto_check_sym_session_paras(dev) < 0)
1297                 return -1;
1298
1299         return 0;
1300 }
1301
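/*
 * Create a session: take a private session object from the mempool, fill in
 * the CREATE_SESSION ctrl request according to the xform chain order
 * (cipher-only or cipher/hash chaining), send it on the control queue and
 * attach the result to the generic cryptodev session.
 */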
1302 static int
1303 virtio_crypto_sym_configure_session(
1304                 struct rte_cryptodev *dev,
1305                 struct rte_crypto_sym_xform *xform,
1306                 struct rte_cryptodev_sym_session *sess,
1307                 struct rte_mempool *mempool)
1308 {
1309         int ret;
1310         struct virtio_crypto_session crypto_sess;
1311         void *session_private = &crypto_sess;
1312         struct virtio_crypto_session *session;
1313         struct virtio_crypto_op_ctrl_req *ctrl_req;
1314         enum virtio_crypto_cmd_id cmd_id;
1315         uint8_t *cipher_key_data = NULL;
1316         uint8_t *auth_key_data = NULL;
1317         struct virtio_crypto_hw *hw;
1318         struct virtqueue *control_vq;
1319
1320         PMD_INIT_FUNC_TRACE();
1321
1322         ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
1323                         sess, mempool);
1324         if (ret < 0) {
1325                 VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
1326                 return ret;
1327         }
1328
1329         if (rte_mempool_get(mempool, &session_private)) {
1330                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1331                         "Couldn't get object from session mempool");
1332                 return -ENOMEM;
1333         }
1334
1335         session = (struct virtio_crypto_session *)session_private;
1336         memset(session, 0, sizeof(struct virtio_crypto_session));
1337         ctrl_req = &session->ctrl;
1338         ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
1339         /* FIXME: support multiqueue */
1340         ctrl_req->header.queue_id = 0;
1341
1342         hw = dev->data->dev_private;
1343         control_vq = hw->cvq;
1344
1345         cmd_id = virtio_crypto_get_chain_order(xform);
1346         if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
1347                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1348                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
1349         if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
1350                 ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
1351                         = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
1352
1353         switch (cmd_id) {
1354         case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
1355         case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
1356                 ctrl_req->u.sym_create_session.op_type
1357                         = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
1358
1359                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
1360                         xform, true, &cipher_key_data, &auth_key_data, session);
1361                 if (ret < 0) {
1362                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1363                                 "padding sym op ctrl req failed");
1364                         goto error_out;
1365                 }
1366                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1367                         cipher_key_data, auth_key_data, session);
1368                 if (ret < 0) {
1369                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1370                                 "create session failed: %d", ret);
1371                         goto error_out;
1372                 }
1373                 break;
1374         case VIRTIO_CRYPTO_CMD_CIPHER:
1375                 ctrl_req->u.sym_create_session.op_type
1376                         = VIRTIO_CRYPTO_SYM_OP_CIPHER;
1377                 ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
1378                         false, &cipher_key_data, &auth_key_data, session);
1379                 if (ret < 0) {
1380                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1381                                 "padding sym op ctrl req failed");
1382                         goto error_out;
1383                 }
1384                 ret = virtio_crypto_send_command(control_vq, ctrl_req,
1385                         cipher_key_data, NULL, session);
1386                 if (ret < 0) {
1387                         VIRTIO_CRYPTO_SESSION_LOG_ERR(
1388                                 "create session failed: %d", ret);
1389                         goto error_out;
1390                 }
1391                 break;
1392         default:
1393                 VIRTIO_CRYPTO_SESSION_LOG_ERR(
1394                         "Unsupported operation chain order parameter");
1395                 goto error_out;
1396         }
1397
1398         set_sym_session_private_data(sess, dev->driver_id,
1399                 session_private);
1400
1401         return 0;
1402
1403 error_out:
        /* return the unused private session object to its mempool */
        rte_mempool_put(mempool, session_private);
1404         return -1;
1405 }
1406
1407 static void
1408 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
1409                 struct rte_cryptodev_info *info)
1410 {
1411         struct virtio_crypto_hw *hw = dev->data->dev_private;
1412
1413         PMD_INIT_FUNC_TRACE();
1414
1415         if (info != NULL) {
1416                 info->driver_id = cryptodev_virtio_driver_id;
1417                 info->feature_flags = dev->feature_flags;
1418                 info->max_nb_queue_pairs = hw->max_dataqueues;
1419                 /* No limit on the number of sessions */
1420                 info->sym.max_nb_sessions = 0;
1421                 info->capabilities = hw->virtio_dev_capabilities;
1422         }
1423 }
1424
1425 static int
1426 crypto_virtio_pci_probe(
1427         struct rte_pci_driver *pci_drv __rte_unused,
1428         struct rte_pci_device *pci_dev)
1429 {
1430         struct rte_cryptodev_pmd_init_params init_params = {
1431                 .name = "",
1432                 .socket_id = rte_socket_id(),
1433                 .private_data_size = sizeof(struct virtio_crypto_hw)
1434         };
1435         char name[RTE_CRYPTODEV_NAME_MAX_LEN];
1436
1437         VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
1438                         pci_dev->addr.bus,
1439                         pci_dev->addr.devid,
1440                         pci_dev->addr.function);
1441
1442         rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
1443
1444         return crypto_virtio_create(name, pci_dev, &init_params);
1445 }
1446
1447 static int
1448 crypto_virtio_pci_remove(
1449         struct rte_pci_device *pci_dev __rte_unused)
1450 {
1451         struct rte_cryptodev *cryptodev;
1452         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
1453
1454         if (pci_dev == NULL)
1455                 return -EINVAL;
1456
1457         rte_pci_device_name(&pci_dev->addr, cryptodev_name,
1458                         sizeof(cryptodev_name));
1459
1460         cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
1461         if (cryptodev == NULL)
1462                 return -ENODEV;
1463
1464         return virtio_crypto_dev_uninit(cryptodev);
1465 }
1466
1467 static struct rte_pci_driver rte_virtio_crypto_driver = {
1468         .id_table = pci_id_virtio_crypto_map,
1469         .drv_flags = 0,
1470         .probe = crypto_virtio_pci_probe,
1471         .remove = crypto_virtio_pci_remove
1472 };
1473
1474 static struct cryptodev_driver virtio_crypto_drv;
1475
1476 RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
1477 RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
1478         rte_virtio_crypto_driver.driver,
1479         cryptodev_virtio_driver_id);
1480
1481 RTE_INIT(virtio_crypto_init_log)
1482 {
1483         virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
1484         if (virtio_crypto_logtype_init >= 0)
1485                 rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
1486
1487         virtio_crypto_logtype_session =
1488                 rte_log_register("pmd.crypto.virtio.session");
1489         if (virtio_crypto_logtype_session >= 0)
1490                 rte_log_set_level(virtio_crypto_logtype_session,
1491                                 RTE_LOG_NOTICE);
1492
1493         virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
1494         if (virtio_crypto_logtype_rx >= 0)
1495                 rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
1496
1497         virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
1498         if (virtio_crypto_logtype_tx >= 0)
1499                 rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
1500
1501         virtio_crypto_logtype_driver =
1502                 rte_log_register("pmd.crypto.virtio.driver");
1503         if (virtio_crypto_logtype_driver >= 0)
1504                 rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
1505 }