New upstream version 17.11.1
[deb_dpdk.git] / drivers / crypto / qat / qat_qp.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_common.h>
35 #include <rte_dev.h>
36 #include <rte_malloc.h>
37 #include <rte_memzone.h>
38 #include <rte_cryptodev_pmd.h>
39 #include <rte_pci.h>
40 #include <rte_bus_pci.h>
41 #include <rte_atomic.h>
42 #include <rte_prefetch.h>
43
44 #include "qat_logs.h"
45 #include "qat_crypto.h"
46 #include "qat_algs.h"
47 #include "adf_transport_access_macros.h"
48
49 #define ADF_MAX_SYM_DESC                        4096
50 #define ADF_MIN_SYM_DESC                        128
51 #define ADF_SYM_TX_RING_DESC_SIZE               128
52 #define ADF_SYM_RX_RING_DESC_SIZE               32
53 #define ADF_SYM_TX_QUEUE_STARTOFF               2
54 /* Offset from bundle start to 1st Sym Tx queue */
55 #define ADF_SYM_RX_QUEUE_STARTOFF               10
56 #define ADF_ARB_REG_SLOT                        0x1000
57 #define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C
58
/*
 * Write the ring-arbitration-enable CSR for the bundle at @index.
 * Every macro argument is parenthesized in the expansion so callers
 * may pass arbitrary expressions without precedence surprises.
 */
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
	ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
	(ADF_ARB_REG_SLOT * (index)), (value))
62
63 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
64         uint32_t queue_size_bytes);
65 static int qat_tx_queue_create(struct rte_cryptodev *dev,
66         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
67         int socket_id);
68 static int qat_rx_queue_create(struct rte_cryptodev *dev,
69         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
70         int socket_id);
71 static void qat_queue_delete(struct qat_queue *queue);
72 static int qat_queue_create(struct rte_cryptodev *dev,
73         struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
74         int socket_id);
75 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
76         uint32_t *queue_size_for_csr);
77 static void adf_configure_queues(struct qat_qp *queue);
78 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
79 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
80
81 static const struct rte_memzone *
82 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
83                         int socket_id)
84 {
85         const struct rte_memzone *mz;
86         unsigned memzone_flags = 0;
87         const struct rte_memseg *ms;
88
89         PMD_INIT_FUNC_TRACE();
90         mz = rte_memzone_lookup(queue_name);
91         if (mz != 0) {
92                 if (((size_t)queue_size <= mz->len) &&
93                                 ((socket_id == SOCKET_ID_ANY) ||
94                                         (socket_id == mz->socket_id))) {
95                         PMD_DRV_LOG(DEBUG, "re-use memzone already "
96                                         "allocated for %s", queue_name);
97                         return mz;
98                 }
99
100                 PMD_DRV_LOG(ERR, "Incompatible memzone already "
101                                 "allocated %s, size %u, socket %d. "
102                                 "Requested size %u, socket %u",
103                                 queue_name, (uint32_t)mz->len,
104                                 mz->socket_id, queue_size, socket_id);
105                 return NULL;
106         }
107
108         PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
109                                         queue_name, queue_size, socket_id);
110         ms = rte_eal_get_physmem_layout();
111         switch (ms[0].hugepage_sz) {
112         case(RTE_PGSIZE_2M):
113                 memzone_flags = RTE_MEMZONE_2MB;
114         break;
115         case(RTE_PGSIZE_1G):
116                 memzone_flags = RTE_MEMZONE_1GB;
117         break;
118         case(RTE_PGSIZE_16M):
119                 memzone_flags = RTE_MEMZONE_16MB;
120         break;
121         case(RTE_PGSIZE_16G):
122                 memzone_flags = RTE_MEMZONE_16GB;
123         break;
124         default:
125                 memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
126         }
127         return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
128                 memzone_flags, queue_size);
129 }
130
131 int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
132         const struct rte_cryptodev_qp_conf *qp_conf,
133         int socket_id, struct rte_mempool *session_pool __rte_unused)
134 {
135         struct qat_qp *qp;
136         struct rte_pci_device *pci_dev;
137         int ret;
138         char op_cookie_pool_name[RTE_RING_NAMESIZE];
139         uint32_t i;
140
141         PMD_INIT_FUNC_TRACE();
142
143         /* If qp is already in use free ring memory and qp metadata. */
144         if (dev->data->queue_pairs[queue_pair_id] != NULL) {
145                 ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
146                 if (ret < 0)
147                         return ret;
148         }
149
150         if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
151                 (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
152                 PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
153                                 qp_conf->nb_descriptors);
154                 return -EINVAL;
155         }
156
157         pci_dev = RTE_DEV_TO_PCI(dev->device);
158
159         if (pci_dev->mem_resource[0].addr == NULL) {
160                 PMD_DRV_LOG(ERR, "Could not find VF config space "
161                                 "(UIO driver attached?).");
162                 return -EINVAL;
163         }
164
165         if (queue_pair_id >=
166                         (ADF_NUM_SYM_QPS_PER_BUNDLE *
167                                         ADF_NUM_BUNDLES_PER_DEV)) {
168                 PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
169                                 queue_pair_id);
170                 return -EINVAL;
171         }
172         /* Allocate the queue pair data structure. */
173         qp = rte_zmalloc("qat PMD qp metadata",
174                         sizeof(*qp), RTE_CACHE_LINE_SIZE);
175         if (qp == NULL) {
176                 PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
177                 return -ENOMEM;
178         }
179         qp->nb_descriptors = qp_conf->nb_descriptors;
180         qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
181                         qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
182                         RTE_CACHE_LINE_SIZE);
183         if (qp->op_cookies == NULL) {
184                 PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
185                 rte_free(qp);
186                 return -ENOMEM;
187         }
188
189         qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
190         qp->inflights16 = 0;
191
192         if (qat_tx_queue_create(dev, &(qp->tx_q),
193                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
194                 PMD_INIT_LOG(ERR, "Tx queue create failed "
195                                 "queue_pair_id=%u", queue_pair_id);
196                 goto create_err;
197         }
198
199         if (qat_rx_queue_create(dev, &(qp->rx_q),
200                 queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
201                 PMD_DRV_LOG(ERR, "Rx queue create failed "
202                                 "queue_pair_id=%hu", queue_pair_id);
203                 qat_queue_delete(&(qp->tx_q));
204                 goto create_err;
205         }
206
207         adf_configure_queues(qp);
208         adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
209         snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
210                 pci_dev->driver->driver.name, dev->data->dev_id,
211                 queue_pair_id);
212
213         qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
214         if (qp->op_cookie_pool == NULL)
215                 qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
216                                 qp->nb_descriptors,
217                                 sizeof(struct qat_crypto_op_cookie), 64, 0,
218                                 NULL, NULL, NULL, NULL, socket_id,
219                                 0);
220         if (!qp->op_cookie_pool) {
221                 PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
222                                 " op mempool");
223                 goto create_err;
224         }
225
226         for (i = 0; i < qp->nb_descriptors; i++) {
227                 if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
228                         PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
229                         goto create_err;
230                 }
231
232                 struct qat_crypto_op_cookie *sql_cookie =
233                                 qp->op_cookies[i];
234
235                 sql_cookie->qat_sgl_src_phys_addr =
236                                 rte_mempool_virt2iova(sql_cookie) +
237                                 offsetof(struct qat_crypto_op_cookie,
238                                 qat_sgl_list_src);
239
240                 sql_cookie->qat_sgl_dst_phys_addr =
241                                 rte_mempool_virt2iova(sql_cookie) +
242                                 offsetof(struct qat_crypto_op_cookie,
243                                 qat_sgl_list_dst);
244         }
245
246         struct qat_pmd_private *internals
247                 = dev->data->dev_private;
248         qp->qat_dev_gen = internals->qat_dev_gen;
249
250         dev->data->queue_pairs[queue_pair_id] = qp;
251         return 0;
252
253 create_err:
254         if (qp->op_cookie_pool)
255                 rte_mempool_free(qp->op_cookie_pool);
256         rte_free(qp->op_cookies);
257         rte_free(qp);
258         return -EFAULT;
259 }
260
261 int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
262 {
263         struct qat_qp *qp =
264                         (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
265         uint32_t i;
266
267         PMD_INIT_FUNC_TRACE();
268         if (qp == NULL) {
269                 PMD_DRV_LOG(DEBUG, "qp already freed");
270                 return 0;
271         }
272
273         /* Don't free memory if there are still responses to be processed */
274         if (qp->inflights16 == 0) {
275                 qat_queue_delete(&(qp->tx_q));
276                 qat_queue_delete(&(qp->rx_q));
277         } else {
278                 return -EAGAIN;
279         }
280
281         adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
282
283         for (i = 0; i < qp->nb_descriptors; i++)
284                 rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
285
286         if (qp->op_cookie_pool)
287                 rte_mempool_free(qp->op_cookie_pool);
288
289         rte_free(qp->op_cookies);
290         rte_free(qp);
291         dev->data->queue_pairs[queue_pair_id] = NULL;
292         return 0;
293 }
294
295 static int qat_tx_queue_create(struct rte_cryptodev *dev,
296         struct qat_queue *queue, uint8_t qp_id,
297         uint32_t nb_desc, int socket_id)
298 {
299         PMD_INIT_FUNC_TRACE();
300         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
301         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
302                                                 ADF_SYM_TX_QUEUE_STARTOFF;
303         PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
304                 nb_desc, qp_id, queue->hw_bundle_number,
305                 queue->hw_queue_number);
306
307         return qat_queue_create(dev, queue, nb_desc,
308                                 ADF_SYM_TX_RING_DESC_SIZE, socket_id);
309 }
310
311 static int qat_rx_queue_create(struct rte_cryptodev *dev,
312                 struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
313                 int socket_id)
314 {
315         PMD_INIT_FUNC_TRACE();
316         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
317         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
318                                                 ADF_SYM_RX_QUEUE_STARTOFF;
319
320         PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
321                 nb_desc, qp_id, queue->hw_bundle_number,
322                 queue->hw_queue_number);
323         return qat_queue_create(dev, queue, nb_desc,
324                                 ADF_SYM_RX_RING_DESC_SIZE, socket_id);
325 }
326
327 static void qat_queue_delete(struct qat_queue *queue)
328 {
329         const struct rte_memzone *mz;
330         int status = 0;
331
332         if (queue == NULL) {
333                 PMD_DRV_LOG(DEBUG, "Invalid queue");
334                 return;
335         }
336         mz = rte_memzone_lookup(queue->memz_name);
337         if (mz != NULL) {
338                 /* Write an unused pattern to the queue memory. */
339                 memset(queue->base_addr, 0x7F, queue->queue_size);
340                 status = rte_memzone_free(mz);
341                 if (status != 0)
342                         PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
343                                         status, queue->memz_name);
344         } else {
345                 PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
346                                 queue->memz_name);
347         }
348 }
349
350 static int
351 qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
352                 uint32_t nb_desc, uint8_t desc_size, int socket_id)
353 {
354         uint64_t queue_base;
355         void *io_addr;
356         const struct rte_memzone *qp_mz;
357         uint32_t queue_size_bytes = nb_desc*desc_size;
358         struct rte_pci_device *pci_dev;
359
360         PMD_INIT_FUNC_TRACE();
361         if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
362                 PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
363                 return -EINVAL;
364         }
365
366         pci_dev = RTE_DEV_TO_PCI(dev->device);
367
368         /*
369          * Allocate a memzone for the queue - create a unique name.
370          */
371         snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
372                 pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
373                 queue->hw_bundle_number, queue->hw_queue_number);
374         qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
375                         socket_id);
376         if (qp_mz == NULL) {
377                 PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
378                 return -ENOMEM;
379         }
380
381         queue->base_addr = (char *)qp_mz->addr;
382         queue->base_phys_addr = qp_mz->iova;
383         if (qat_qp_check_queue_alignment(queue->base_phys_addr,
384                         queue_size_bytes)) {
385                 PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
386                                         " 0x%"PRIx64"\n",
387                                         queue->base_phys_addr);
388                 return -EFAULT;
389         }
390
391         if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
392                         != 0) {
393                 PMD_DRV_LOG(ERR, "Invalid num inflights");
394                 return -EINVAL;
395         }
396
397         queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
398                                         ADF_BYTES_TO_MSG_SIZE(desc_size));
399         queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
400         PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
401                                 " msg_size %u, max_inflights %u modulo %u",
402                                 queue->queue_size, queue_size_bytes,
403                                 nb_desc, desc_size, queue->max_inflights,
404                                 queue->modulo);
405
406         if (queue->max_inflights < 2) {
407                 PMD_DRV_LOG(ERR, "Invalid num inflights");
408                 return -EINVAL;
409         }
410         queue->head = 0;
411         queue->tail = 0;
412         queue->msg_size = desc_size;
413
414         /*
415          * Write an unused pattern to the queue memory.
416          */
417         memset(queue->base_addr, 0x7F, queue_size_bytes);
418
419         queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
420                                         queue->queue_size);
421
422         io_addr = pci_dev->mem_resource[0].addr;
423
424         WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
425                         queue->hw_queue_number, queue_base);
426         return 0;
427 }
428
/*
 * Verify that @phys_addr is aligned on @queue_size_bytes (the mask test
 * assumes the size is a power of two, as produced by the ADF ring-size
 * macros). Returns 0 when aligned, -EINVAL otherwise.
 */
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
					uint32_t queue_size_bytes)
{
	PMD_INIT_FUNC_TRACE();

	if ((phys_addr & (queue_size_bytes - 1)) == 0)
		return 0;
	return -EINVAL;
}
437
438 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
439         uint32_t *p_queue_size_for_csr)
440 {
441         uint8_t i = ADF_MIN_RING_SIZE;
442
443         PMD_INIT_FUNC_TRACE();
444         for (; i <= ADF_MAX_RING_SIZE; i++)
445                 if ((msg_size * msg_num) ==
446                                 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
447                         *p_queue_size_for_csr = i;
448                         return 0;
449                 }
450         PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
451         return -EINVAL;
452 }
453
454 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
455 {
456         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
457                                         (ADF_ARB_REG_SLOT *
458                                                         txq->hw_bundle_number);
459         uint32_t value;
460
461         PMD_INIT_FUNC_TRACE();
462         value = ADF_CSR_RD(base_addr, arb_csr_offset);
463         value |= (0x01 << txq->hw_queue_number);
464         ADF_CSR_WR(base_addr, arb_csr_offset, value);
465 }
466
467 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
468 {
469         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
470                                         (ADF_ARB_REG_SLOT *
471                                                         txq->hw_bundle_number);
472         uint32_t value;
473
474         PMD_INIT_FUNC_TRACE();
475         value = ADF_CSR_RD(base_addr, arb_csr_offset);
476         value ^= (0x01 << txq->hw_queue_number);
477         ADF_CSR_WR(base_addr, arb_csr_offset, value);
478 }
479
480 static void adf_configure_queues(struct qat_qp *qp)
481 {
482         uint32_t queue_config;
483         struct qat_queue *queue = &qp->tx_q;
484
485         PMD_INIT_FUNC_TRACE();
486         queue_config = BUILD_RING_CONFIG(queue->queue_size);
487
488         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
489                         queue->hw_queue_number, queue_config);
490
491         queue = &qp->rx_q;
492         queue_config =
493                         BUILD_RESP_RING_CONFIG(queue->queue_size,
494                                         ADF_RING_NEAR_WATERMARK_512,
495                                         ADF_RING_NEAR_WATERMARK_0);
496
497         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
498                         queue->hw_queue_number, queue_config);
499 }