drivers/crypto/qat/qat_qp.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "qat_algs.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC                        4096
#define ADF_MIN_SYM_DESC                        128
#define ADF_SYM_TX_RING_DESC_SIZE               128
#define ADF_SYM_RX_RING_DESC_SIZE               32
/* Offset from bundle start to first Sym Tx queue */
#define ADF_SYM_TX_QUEUE_STARTOFF               2
/* Offset from bundle start to first Sym Rx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF               10
#define ADF_ARB_REG_SLOT                        0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
        (ADF_ARB_REG_SLOT * index), value)

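/*
 * Layout note (worked example, derived from the constants above): each
 * bundle owns one ring-service-arbiter enable register, and consecutive
 * bundles' registers are spaced ADF_ARB_REG_SLOT (0x1000) apart. For
 * bundle 1 the macro above therefore writes to
 * 0x19C + 0x1000 * 1 = 0x119C; bit N of the written value controls
 * arbitration for ring N of that bundle.
 */
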
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
        uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
        int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *qp);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
                        int socket_id)
{
        const struct rte_memzone *mz;
        unsigned int memzone_flags = 0;
        const struct rte_memseg *ms;

        PMD_INIT_FUNC_TRACE();
        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                                ((socket_id == SOCKET_ID_ANY) ||
                                        (socket_id == mz->socket_id))) {
                        PMD_DRV_LOG(DEBUG, "re-use memzone already "
                                        "allocated for %s", queue_name);
                        return mz;
                }

                PMD_DRV_LOG(ERR, "Incompatible memzone already "
                                "allocated %s, size %u, socket %d. "
                                "Requested size %u, socket %d",
                                queue_name, (uint32_t)mz->len,
                                mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %d",
                                        queue_name, queue_size, socket_id);
        ms = rte_eal_get_physmem_layout();
        switch (ms[0].hugepage_sz) {
        case RTE_PGSIZE_2M:
                memzone_flags = RTE_MEMZONE_2MB;
                break;
        case RTE_PGSIZE_1G:
                memzone_flags = RTE_MEMZONE_1GB;
                break;
        case RTE_PGSIZE_16M:
                memzone_flags = RTE_MEMZONE_16MB;
                break;
        case RTE_PGSIZE_16G:
                memzone_flags = RTE_MEMZONE_16GB;
                break;
        default:
                memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
        }
#ifdef RTE_LIBRTE_XEN_DOM0
        return rte_memzone_reserve_bounded(queue_name, queue_size,
                socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
#else
        /* Alignment equals queue_size: the ring base must be naturally
         * aligned to its size (see qat_qp_check_queue_alignment()).
         */
        return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
                memzone_flags, queue_size);
#endif
}

int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id, struct rte_mempool *session_pool __rte_unused)
{
        struct qat_qp *qp;
        struct rte_pci_device *pci_dev;
        int ret;
        char op_cookie_pool_name[RTE_RING_NAMESIZE];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        /* If qp is already in use free ring memory and qp metadata. */
        if (dev->data->queue_pairs[queue_pair_id] != NULL) {
                ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
                if (ret < 0)
                        return ret;
        }

        if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
                (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
                PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
                                qp_conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[0].addr == NULL) {
                PMD_DRV_LOG(ERR, "Could not find VF config space "
                                "(UIO driver attached?).");
                return -EINVAL;
        }

        if (queue_pair_id >=
                        (ADF_NUM_SYM_QPS_PER_BUNDLE *
                                        ADF_NUM_BUNDLES_PER_DEV)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
                                queue_pair_id);
                return -EINVAL;
        }
        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc("qat PMD qp metadata",
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
        if (qp == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
                return -ENOMEM;
        }
        qp->nb_descriptors = qp_conf->nb_descriptors;
        qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
                        qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
                        RTE_CACHE_LINE_SIZE);
        if (qp->op_cookies == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie array");
                rte_free(qp);
                return -ENOMEM;
        }

        qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
        rte_atomic16_init(&qp->inflights16);

        if (qat_tx_queue_create(dev, &(qp->tx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Tx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                goto create_err;
        }

        if (qat_rx_queue_create(dev, &(qp->rx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Rx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                qat_queue_delete(&(qp->tx_q));
                goto create_err;
        }

        adf_configure_queues(qp);
        adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
        snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
                pci_dev->driver->driver.name, dev->data->dev_id,
                queue_pair_id);

        qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
        if (qp->op_cookie_pool == NULL)
                qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
                                qp->nb_descriptors,
                                sizeof(struct qat_crypto_op_cookie), 64, 0,
                                NULL, NULL, NULL, NULL, socket_id,
                                0);
        if (!qp->op_cookie_pool) {
                PMD_DRV_LOG(ERR, "QAT PMD Cannot create op mempool");
                goto create_err;
        }

        for (i = 0; i < qp->nb_descriptors; i++) {
                if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
                        PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
                        goto create_err;
                }

                struct qat_crypto_op_cookie *sgl_cookie =
                                qp->op_cookies[i];

                sgl_cookie->qat_sgl_src_phys_addr =
                                rte_mempool_virt2phy(qp->op_cookie_pool,
                                sgl_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_src);

                sgl_cookie->qat_sgl_dst_phys_addr =
                                rte_mempool_virt2phy(qp->op_cookie_pool,
                                sgl_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_dst);
        }

        struct qat_pmd_private *internals
                = dev->data->dev_private;
        qp->qat_dev_gen = internals->qat_dev_gen;

        dev->data->queue_pairs[queue_pair_id] = qp;
        return 0;

create_err:
        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);
        rte_free(qp->op_cookies);
        rte_free(qp);
        return -EFAULT;
}

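/*
 * Usage sketch (illustrative, not part of this file): the setup op above
 * is reached through the public cryptodev API rather than called
 * directly. Assuming a probed QAT VF with dev_id 0 and a descriptor
 * count whose total ring size is an exact power of two, a caller might
 * do:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_queue_pair_setup(0, 0, &qp_conf,
 *			rte_socket_id(), NULL) < 0)
 *		rte_exit(EXIT_FAILURE, "queue pair setup failed\n");
 */
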
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
        struct qat_qp *qp =
                        (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();
        if (qp == NULL) {
                PMD_DRV_LOG(DEBUG, "qp already freed");
                return 0;
        }

        /* Don't free memory if there are still responses to be processed. */
        if (rte_atomic16_read(&(qp->inflights16)) != 0)
                return -EAGAIN;

        /* Stop the device arbitrating the Tx ring before its memory is
         * released.
         */
        adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

        qat_queue_delete(&(qp->tx_q));
        qat_queue_delete(&(qp->rx_q));

        for (i = 0; i < qp->nb_descriptors; i++)
                rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);

        rte_free(qp->op_cookies);
        rte_free(qp);
        dev->data->queue_pairs[queue_pair_id] = NULL;
        return 0;
}

static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t qp_id,
        uint32_t nb_desc, int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_TX_QUEUE_STARTOFF;
        PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %u, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);

        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

static int qat_rx_queue_create(struct rte_cryptodev *dev,
                struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
                int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_RX_QUEUE_STARTOFF;

        PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp_id %u, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);
        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

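/*
 * Ring numbering example (illustrative; the real value of
 * ADF_NUM_SYM_QPS_PER_BUNDLE comes from qat_crypto.h). Assuming two sym
 * qps per bundle, qp_id 5 maps to bundle 5 / 2 = 2, with Tx ring
 * (5 % 2) + ADF_SYM_TX_QUEUE_STARTOFF = 3 and Rx ring
 * (5 % 2) + ADF_SYM_RX_QUEUE_STARTOFF = 11 within that bundle.
 */
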
static void qat_queue_delete(struct qat_queue *queue)
{
        const struct rte_memzone *mz;
        int status = 0;

        if (queue == NULL) {
                PMD_DRV_LOG(DEBUG, "Invalid queue");
                return;
        }
        mz = rte_memzone_lookup(queue->memz_name);
        if (mz != NULL) {
                /* Write an unused pattern to the queue memory.
                 * Note: queue->queue_size holds the CSR size encoding,
                 * not a byte count, so use the memzone length here.
                 */
                memset(queue->base_addr, 0x7F, mz->len);
                status = rte_memzone_free(mz);
                if (status != 0)
                        PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
                                        status, queue->memz_name);
        } else {
                PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
                                queue->memz_name);
        }
}

static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
                uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
        uint64_t queue_base;
        void *io_addr;
        const struct rte_memzone *qp_mz;
        uint32_t queue_size_bytes = nb_desc * desc_size;
        struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();
        if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                PMD_DRV_LOG(ERR, "Invalid descriptor size %u", desc_size);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        /*
         * Allocate a memzone for the queue - create a unique name.
         */
        snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
                pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
                queue->hw_bundle_number, queue->hw_queue_number);
        qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
                        socket_id);
        if (qp_mz == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
                return -ENOMEM;
        }

        queue->base_addr = (char *)qp_mz->addr;
        queue->base_phys_addr = qp_mz->phys_addr;
        if (qat_qp_check_queue_alignment(queue->base_phys_addr,
                        queue_size_bytes)) {
                PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
                                        " 0x%"PRIx64"\n",
                                        queue->base_phys_addr);
                rte_memzone_free(qp_mz);
                return -EFAULT;
        }

        if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
                        != 0) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                rte_memzone_free(qp_mz);
                return -EINVAL;
        }

        queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
                                        ADF_BYTES_TO_MSG_SIZE(desc_size));
        queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
        PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
                                " msg_size %u, max_inflights %u modulo %u",
                                queue->queue_size, queue_size_bytes,
                                nb_desc, desc_size, queue->max_inflights,
                                queue->modulo);

        if (queue->max_inflights < 2) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                rte_memzone_free(qp_mz);
                return -EINVAL;
        }
        queue->head = 0;
        queue->tail = 0;
        queue->msg_size = desc_size;

        /*
         * Write an unused pattern to the queue memory.
         */
        memset(queue->base_addr, 0x7F, queue_size_bytes);

        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
                                        queue->queue_size);

        io_addr = pci_dev->mem_resource[0].addr;

        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_base);
        return 0;
}

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
                                        uint32_t queue_size_bytes)
{
        PMD_INIT_FUNC_TRACE();
        if (((queue_size_bytes - 1) & phys_addr) != 0)
                return -EINVAL;
        return 0;
}

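/*
 * Alignment example (illustrative): a 2048-descriptor Tx ring occupies
 * 2048 * 128 B = 256 KiB, so the check above requires the low 18 bits
 * of the base physical address to be zero, i.e. natural alignment to
 * the ring size. This only works because valid ring sizes are powers of
 * two, and it is why queue_dma_zone_reserve() passes the queue size as
 * the memzone alignment.
 */
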
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *p_queue_size_for_csr)
{
        uint8_t i = ADF_MIN_RING_SIZE;

        PMD_INIT_FUNC_TRACE();
        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) ==
                                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
                        *p_queue_size_for_csr = i;
                        return 0;
                }
        PMD_DRV_LOG(ERR, "Invalid ring size %u", msg_size * msg_num);
        return -EINVAL;
}

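/*
 * Sizing example (illustrative): the loop above accepts only descriptor
 * counts whose total byte size exactly matches one of the hardware ring
 * size encodings, e.g. 2048 Tx descriptors of 128 B (256 KiB) map to a
 * single CSR encoding, while a product that falls between two encodings
 * is rejected with -EINVAL.
 */
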
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value |= (0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        /* Clear (rather than XOR-toggle) the ring's arbitration bit so
         * the call is safe even if the ring was never enabled.
         */
        value &= ~(0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

static void adf_configure_queues(struct qat_qp *qp)
{
        uint32_t queue_config;
        struct qat_queue *queue = &qp->tx_q;

        PMD_INIT_FUNC_TRACE();
        queue_config = BUILD_RING_CONFIG(queue->queue_size);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);

        /* The response (Rx) ring config additionally carries
         * near-watermark thresholds (see BUILD_RESP_RING_CONFIG).
         */
        queue = &qp->rx_q;
        queue_config =
                        BUILD_RESP_RING_CONFIG(queue->queue_size,
                                        ADF_RING_NEAR_WATERMARK_512,
                                        ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);
}