/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>

#include "qat_logs.h"
#include "qat_crypto.h"
#include "qat_algs.h"
#include "adf_transport_access_macros.h"

#define ADF_MAX_SYM_DESC                        4096
#define ADF_MIN_SYM_DESC                        128
#define ADF_SYM_TX_RING_DESC_SIZE               128
#define ADF_SYM_RX_RING_DESC_SIZE               32
#define ADF_SYM_TX_QUEUE_STARTOFF               2
/* Offset from bundle start to 1st Sym Tx queue */
#define ADF_SYM_RX_QUEUE_STARTOFF               10
#define ADF_ARB_REG_SLOT                        0x1000
#define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C

#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
        (ADF_ARB_REG_SLOT * index), value)

static int qat_qp_check_queue_alignment(uint64_t phys_addr,
        uint32_t queue_size_bytes);
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static int qat_rx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
        int socket_id);
static void qat_queue_delete(struct qat_queue *queue);
static int qat_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
        int socket_id);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);

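/*
 * Look up the ring memzone by name and re-use it if it is large enough
 * and on the requested socket; an incompatible existing zone is an error.
 * New reservations are aligned to the queue size and hinted towards the
 * hugepage size of the first memseg.
 */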
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
                        int socket_id)
{
        const struct rte_memzone *mz;
        unsigned memzone_flags = 0;
        const struct rte_memseg *ms;

        PMD_INIT_FUNC_TRACE();
        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                                ((socket_id == SOCKET_ID_ANY) ||
                                        (socket_id == mz->socket_id))) {
                        PMD_DRV_LOG(DEBUG, "re-use memzone already "
                                        "allocated for %s", queue_name);
                        return mz;
                }

                PMD_DRV_LOG(ERR, "Incompatible memzone already "
                                "allocated %s, size %u, socket %d. "
                                "Requested size %u, socket %u",
                                queue_name, (uint32_t)mz->len,
                                mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
                                        queue_name, queue_size, socket_id);
        ms = rte_eal_get_physmem_layout();
        switch (ms[0].hugepage_sz) {
        case(RTE_PGSIZE_2M):
                memzone_flags = RTE_MEMZONE_2MB;
        break;
        case(RTE_PGSIZE_1G):
                memzone_flags = RTE_MEMZONE_1GB;
        break;
        case(RTE_PGSIZE_16M):
                memzone_flags = RTE_MEMZONE_16MB;
        break;
        case(RTE_PGSIZE_16G):
                memzone_flags = RTE_MEMZONE_16GB;
        break;
        default:
                memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
        }
        return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
                memzone_flags, queue_size);
}

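/*
 * Create and initialise a queue pair: allocate the qp metadata and the
 * per-descriptor op cookies, create the Tx/Rx hardware rings, program
 * their config CSRs, enable Tx arbitration and pre-compute each cookie's
 * src/dst SGL IOVA addresses.
 */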
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
        const struct rte_cryptodev_qp_conf *qp_conf,
        int socket_id, struct rte_mempool *session_pool __rte_unused)
{
        struct qat_qp *qp;
        struct rte_pci_device *pci_dev;
        int ret;
        char op_cookie_pool_name[RTE_RING_NAMESIZE];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();

        /* If qp is already in use free ring memory and qp metadata. */
        if (dev->data->queue_pairs[queue_pair_id] != NULL) {
                ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
                if (ret < 0)
                        return ret;
        }

        if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
                (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
                PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
                                qp_conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[0].addr == NULL) {
                PMD_DRV_LOG(ERR, "Could not find VF config space "
                                "(UIO driver attached?).");
                return -EINVAL;
        }

        if (queue_pair_id >=
                        (ADF_NUM_SYM_QPS_PER_BUNDLE *
                                        ADF_NUM_BUNDLES_PER_DEV)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
                                queue_pair_id);
                return -EINVAL;
        }
        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc("qat PMD qp metadata",
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
        if (qp == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
                return -ENOMEM;
        }
        qp->nb_descriptors = qp_conf->nb_descriptors;
        qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
                        qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
                        RTE_CACHE_LINE_SIZE);
        if (qp->op_cookies == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
                rte_free(qp);
                return -ENOMEM;
        }

        qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
        qp->inflights16 = 0;

        if (qat_tx_queue_create(dev, &(qp->tx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_INIT_LOG(ERR, "Tx queue create failed "
                                "queue_pair_id=%u", queue_pair_id);
                goto create_err;
        }

        if (qat_rx_queue_create(dev, &(qp->rx_q),
                queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
                PMD_DRV_LOG(ERR, "Rx queue create failed "
                                "queue_pair_id=%hu", queue_pair_id);
                qat_queue_delete(&(qp->tx_q));
                goto create_err;
        }

        adf_configure_queues(qp);
        adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
        snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
                pci_dev->driver->driver.name, dev->data->dev_id,
                queue_pair_id);

        qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
        if (qp->op_cookie_pool == NULL)
                qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
                                qp->nb_descriptors,
                                sizeof(struct qat_crypto_op_cookie), 64, 0,
                                NULL, NULL, NULL, NULL, socket_id,
                                0);
        if (!qp->op_cookie_pool) {
                PMD_DRV_LOG(ERR, "QAT PMD Cannot create op mempool");
                goto create_err;
        }

        for (i = 0; i < qp->nb_descriptors; i++) {
                if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
                        PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
                        goto create_err;
                }

                struct qat_crypto_op_cookie *sql_cookie =
                                qp->op_cookies[i];

                sql_cookie->qat_sgl_src_phys_addr =
                                rte_mempool_virt2iova(sql_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_src);

                sql_cookie->qat_sgl_dst_phys_addr =
                                rte_mempool_virt2iova(sql_cookie) +
                                offsetof(struct qat_crypto_op_cookie,
                                qat_sgl_list_dst);
        }

        struct qat_pmd_private *internals = dev->data->dev_private;

        qp->qat_dev_gen = internals->qat_dev_gen;

        dev->data->queue_pairs[queue_pair_id] = qp;
        return 0;

create_err:
        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);
        rte_free(qp->op_cookies);
        rte_free(qp);
        return -EFAULT;
}

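/*
 * Release a queue pair. Returns -EAGAIN without freeing anything while
 * responses are still in flight; otherwise deletes the rings, disables
 * arbitration and frees the cookies, mempool and qp metadata.
 */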
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
        struct qat_qp *qp =
                        (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
        uint32_t i;

        PMD_INIT_FUNC_TRACE();
        if (qp == NULL) {
                PMD_DRV_LOG(DEBUG, "qp already freed");
                return 0;
        }

        /* Don't free memory if there are still responses to be processed */
        if (qp->inflights16 == 0) {
                qat_queue_delete(&(qp->tx_q));
                qat_queue_delete(&(qp->rx_q));
        } else {
                return -EAGAIN;
        }

        adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);

        for (i = 0; i < qp->nb_descriptors; i++)
                rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);

        if (qp->op_cookie_pool)
                rte_mempool_free(qp->op_cookie_pool);

        rte_free(qp->op_cookies);
        rte_free(qp);
        dev->data->queue_pairs[queue_pair_id] = NULL;
        return 0;
}

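/* Derive the bundle and ring numbers for a Tx queue and create its ring. */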
static int qat_tx_queue_create(struct rte_cryptodev *dev,
        struct qat_queue *queue, uint8_t qp_id,
        uint32_t nb_desc, int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_TX_QUEUE_STARTOFF;
        PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);

        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_TX_RING_DESC_SIZE, socket_id);
}

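/* Derive the bundle and ring numbers for an Rx queue and create its ring. */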
static int qat_rx_queue_create(struct rte_cryptodev *dev,
                struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
                int socket_id)
{
        PMD_INIT_FUNC_TRACE();
        queue->hw_bundle_number = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
        queue->hw_queue_number = (qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE) +
                                                ADF_SYM_RX_QUEUE_STARTOFF;

        PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
                nb_desc, qp_id, queue->hw_bundle_number,
                queue->hw_queue_number);
        return qat_queue_create(dev, queue, nb_desc,
                                ADF_SYM_RX_RING_DESC_SIZE, socket_id);
}

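/* Scrub the ring memory and free its backing memzone, if one exists. */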
static void qat_queue_delete(struct qat_queue *queue)
{
        const struct rte_memzone *mz;
        int status = 0;

        if (queue == NULL) {
                PMD_DRV_LOG(DEBUG, "Invalid queue");
                return;
        }
        mz = rte_memzone_lookup(queue->memz_name);
        if (mz != NULL) {
                /* Write an unused pattern to the queue memory. */
                memset(queue->base_addr, 0x7F, queue->queue_size);
                status = rte_memzone_free(mz);
                if (status != 0)
                        PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
                                        status, queue->memz_name);
        } else {
                PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
                                queue->memz_name);
        }
}

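/*
 * Reserve DMA-able memory for a ring, check that the base address is
 * aligned to the ring size as the hardware requires, translate the size
 * to its CSR encoding and program the ring base register.
 */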
static int
qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
                uint32_t nb_desc, uint8_t desc_size, int socket_id)
{
        uint64_t queue_base;
        void *io_addr;
        const struct rte_memzone *qp_mz;
        uint32_t queue_size_bytes = nb_desc * desc_size;
        struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();
        if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        /*
         * Allocate a memzone for the queue - create a unique name.
         */
        snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
                pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
                queue->hw_bundle_number, queue->hw_queue_number);
        qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
                        socket_id);
        if (qp_mz == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
                return -ENOMEM;
        }

        queue->base_addr = (char *)qp_mz->addr;
        queue->base_phys_addr = qp_mz->iova;
        if (qat_qp_check_queue_alignment(queue->base_phys_addr,
                        queue_size_bytes)) {
                PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
                                        " 0x%"PRIx64"\n",
                                        queue->base_phys_addr);
                return -EFAULT;
        }

        if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
                        != 0) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                return -EINVAL;
        }

        queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
                                        ADF_BYTES_TO_MSG_SIZE(desc_size));
        queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
        PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
                                " msg_size %u, max_inflights %u modulo %u",
                                queue->queue_size, queue_size_bytes,
                                nb_desc, desc_size, queue->max_inflights,
                                queue->modulo);

        if (queue->max_inflights < 2) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
                return -EINVAL;
        }
        queue->head = 0;
        queue->tail = 0;
        queue->msg_size = desc_size;

        /*
         * Write an unused pattern to the queue memory.
         */
        memset(queue->base_addr, 0x7F, queue_size_bytes);

        queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
                                        queue->queue_size);

        io_addr = pci_dev->mem_resource[0].addr;

        WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_base);
        return 0;
}

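/*
 * The hardware requires the ring base address to be naturally aligned
 * to the ring size, i.e. (base & (size - 1)) must be zero.
 */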
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
                                        uint32_t queue_size_bytes)
{
        PMD_INIT_FUNC_TRACE();
        if (((queue_size_bytes - 1) & phys_addr) != 0)
                return -EINVAL;
        return 0;
}

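/*
 * Translate a ring size in bytes (msg_size * msg_num) to its CSR size
 * encoding; only sizes with an exact encoding between ADF_MIN_RING_SIZE
 * and ADF_MAX_RING_SIZE are valid.
 */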
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
        uint32_t *p_queue_size_for_csr)
{
        uint8_t i = ADF_MIN_RING_SIZE;

        PMD_INIT_FUNC_TRACE();
        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) ==
                                (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
                        *p_queue_size_for_csr = i;
                        return 0;
                }
        PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
        return -EINVAL;
}

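/* Set this ring's bit in its bundle's arbiter enable CSR. */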
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value |= (0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

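/* Clear this ring's bit in its bundle's arbiter enable CSR. */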
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
{
        uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
                                        (ADF_ARB_REG_SLOT *
                                                        txq->hw_bundle_number);
        uint32_t value;

        PMD_INIT_FUNC_TRACE();
        value = ADF_CSR_RD(base_addr, arb_csr_offset);
        value &= ~(0x01 << txq->hw_queue_number);
        ADF_CSR_WR(base_addr, arb_csr_offset, value);
}

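/*
 * Program the ring config CSRs for both rings of a qp: a plain config
 * for Tx, and a response config with near-watermarks for Rx.
 */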
static void adf_configure_queues(struct qat_qp *qp)
{
        uint32_t queue_config;
        struct qat_queue *queue = &qp->tx_q;

        PMD_INIT_FUNC_TRACE();
        queue_config = BUILD_RING_CONFIG(queue->queue_size);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);

        queue = &qp->rx_q;
        queue_config =
                        BUILD_RESP_RING_CONFIG(queue->queue_size,
                                        ADF_RING_NEAR_WATERMARK_512,
                                        ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
                        queue->hw_queue_number, queue_config);
}