New upstream version 18.05
[deb_dpdk.git] / drivers / crypto / qat / qat_qp.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2018 Intel Corporation
3  */
4
5 #include <rte_common.h>
6 #include <rte_dev.h>
7 #include <rte_malloc.h>
8 #include <rte_memzone.h>
9 #include <rte_cryptodev_pmd.h>
10 #include <rte_pci.h>
11 #include <rte_bus_pci.h>
12 #include <rte_atomic.h>
13 #include <rte_prefetch.h>
14
15 #include "qat_logs.h"
16 #include "qat_crypto.h"
17 #include "qat_algs.h"
18 #include "adf_transport_access_macros.h"
19
20 #define ADF_MAX_SYM_DESC                        4096
21 #define ADF_MIN_SYM_DESC                        128
22 #define ADF_SYM_TX_RING_DESC_SIZE               128
23 #define ADF_SYM_RX_RING_DESC_SIZE               32
24 #define ADF_SYM_TX_QUEUE_STARTOFF               2
25 /* Offset from bundle start to 1st Sym Tx queue */
26 #define ADF_SYM_RX_QUEUE_STARTOFF               10
27 #define ADF_ARB_REG_SLOT                        0x1000
28 #define ADF_ARB_RINGSRVARBEN_OFFSET             0x19C
29
30 #define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
31         ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
32         (ADF_ARB_REG_SLOT * index), value)
33
34 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
35         uint32_t queue_size_bytes);
36 static int qat_tx_queue_create(struct rte_cryptodev *dev,
37         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
38         int socket_id);
39 static int qat_rx_queue_create(struct rte_cryptodev *dev,
40         struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
41         int socket_id);
42 static void qat_queue_delete(struct qat_queue *queue);
43 static int qat_queue_create(struct rte_cryptodev *dev,
44         struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
45         int socket_id);
46 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
47         uint32_t *queue_size_for_csr);
48 static void adf_configure_queues(struct qat_qp *queue);
49 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
50 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
51
52 static const struct rte_memzone *
53 queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
54                         int socket_id)
55 {
56         const struct rte_memzone *mz;
57
58         PMD_INIT_FUNC_TRACE();
59         mz = rte_memzone_lookup(queue_name);
60         if (mz != 0) {
61                 if (((size_t)queue_size <= mz->len) &&
62                                 ((socket_id == SOCKET_ID_ANY) ||
63                                         (socket_id == mz->socket_id))) {
64                         PMD_DRV_LOG(DEBUG, "re-use memzone already "
65                                         "allocated for %s", queue_name);
66                         return mz;
67                 }
68
69                 PMD_DRV_LOG(ERR, "Incompatible memzone already "
70                                 "allocated %s, size %u, socket %d. "
71                                 "Requested size %u, socket %u",
72                                 queue_name, (uint32_t)mz->len,
73                                 mz->socket_id, queue_size, socket_id);
74                 return NULL;
75         }
76
77         PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
78                                         queue_name, queue_size, socket_id);
79         return rte_memzone_reserve_aligned(queue_name, queue_size,
80                 socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
81 }
82
/*
 * Set up one symmetric-crypto queue pair (Tx request ring + Rx response
 * ring, plus a per-descriptor op-cookie mempool) for @queue_pair_id.
 *
 * An existing qp at this id is released first.  On success the qp pointer
 * is published in dev->data->queue_pairs[].  Returns 0 on success, a
 * negative errno on validation failure, -EFAULT on any setup failure
 * after allocation has begun (see create_err below).
 *
 * session_pool is unused by this PMD: QAT sessions are allocated by the
 * session ops, not per-qp.
 */
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
	const struct rte_cryptodev_qp_conf *qp_conf,
	int socket_id, struct rte_mempool *session_pool __rte_unused)
{
	struct qat_qp *qp;
	struct rte_pci_device *pci_dev;
	int ret;
	char op_cookie_pool_name[RTE_RING_NAMESIZE];
	uint32_t i;

	PMD_INIT_FUNC_TRACE();

	/* If qp is already in use free ring memory and qp metadata. */
	if (dev->data->queue_pairs[queue_pair_id] != NULL) {
		ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
		if (ret < 0)
			/* e.g. -EAGAIN: responses still in flight */
			return ret;
	}

	/* Descriptor count must be within the HW-supported ring sizes. */
	if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
		(qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
		PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
				qp_conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	/* BAR 0 holds the ring CSRs; absent if no UIO/VFIO driver is bound. */
	if (pci_dev->mem_resource[0].addr == NULL) {
		PMD_DRV_LOG(ERR, "Could not find VF config space "
				"(UIO driver attached?).");
		return -EINVAL;
	}

	/* qp id must map onto an existing bundle/ring pair on this device. */
	if (queue_pair_id >=
			(ADF_NUM_SYM_QPS_PER_BUNDLE *
					ADF_NUM_BUNDLES_PER_DEV)) {
		PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
				queue_pair_id);
		return -EINVAL;
	}
	/* Allocate the queue pair data structure. */
	qp = rte_zmalloc("qat PMD qp metadata",
			sizeof(*qp), RTE_CACHE_LINE_SIZE);
	if (qp == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
		return -ENOMEM;
	}
	qp->nb_descriptors = qp_conf->nb_descriptors;
	/* One cookie pointer per descriptor slot; filled from the pool below */
	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
			RTE_CACHE_LINE_SIZE);
	if (qp->op_cookies == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
		rte_free(qp);
		return -ENOMEM;
	}

	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
	qp->inflights16 = 0;

	if (qat_tx_queue_create(dev, &(qp->tx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		/* NOTE(review): PMD_INIT_LOG here vs PMD_DRV_LOG elsewhere
		 * in this function — looks inconsistent; confirm intent.
		 */
		PMD_INIT_LOG(ERR, "Tx queue create failed "
				"queue_pair_id=%u", queue_pair_id);
		goto create_err;
	}

	if (qat_rx_queue_create(dev, &(qp->rx_q),
		queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
		PMD_DRV_LOG(ERR, "Rx queue create failed "
				"queue_pair_id=%hu", queue_pair_id);
		/* Tx ring was created above; tear it down before bailing. */
		qat_queue_delete(&(qp->tx_q));
		goto create_err;
	}

	adf_configure_queues(qp);
	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
	/* Pool name is unique per device + qp so a restart can re-use it. */
	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
		pci_dev->driver->driver.name, dev->data->dev_id,
		queue_pair_id);

	/* Re-use a leftover pool from a previous incarnation if present. */
	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
	if (qp->op_cookie_pool == NULL)
		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
				qp->nb_descriptors,
				sizeof(struct qat_crypto_op_cookie), 64, 0,
				NULL, NULL, NULL, NULL, socket_id,
				0);
	if (!qp->op_cookie_pool) {
		PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
				" op mempool");
		goto create_err;
	}

	/* Pre-draw every cookie and pre-compute its SGL IOVAs so the
	 * datapath never touches the mempool for address translation.
	 */
	for (i = 0; i < qp->nb_descriptors; i++) {
		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
			goto create_err;
		}

		struct qat_crypto_op_cookie *sql_cookie =
				qp->op_cookies[i];

		/* IOVA of the embedded source SGL = cookie IOVA + offset */
		sql_cookie->qat_sgl_src_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_src);

		/* Likewise for the destination SGL */
		sql_cookie->qat_sgl_dst_phys_addr =
				rte_mempool_virt2iova(sql_cookie) +
				offsetof(struct qat_crypto_op_cookie,
				qat_sgl_list_dst);
	}

	/* Record device generation for request-building decisions later. */
	struct qat_pmd_private *internals
		= dev->data->dev_private;
	qp->qat_dev_gen = internals->qat_dev_gen;

	dev->data->queue_pairs[queue_pair_id] = qp;
	return 0;

create_err:
	/* NOTE(review): this frees the cookie pool even when it was found
	 * via rte_mempool_lookup() (i.e. created by an earlier setup) —
	 * confirm the pool is never shared with a live qp at this point.
	 */
	if (qp->op_cookie_pool)
		rte_mempool_free(qp->op_cookie_pool);
	rte_free(qp->op_cookies);
	rte_free(qp);
	return -EFAULT;
}
212
213 int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
214 {
215         struct qat_qp *qp =
216                         (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
217         uint32_t i;
218
219         PMD_INIT_FUNC_TRACE();
220         if (qp == NULL) {
221                 PMD_DRV_LOG(DEBUG, "qp already freed");
222                 return 0;
223         }
224
225         /* Don't free memory if there are still responses to be processed */
226         if (qp->inflights16 == 0) {
227                 qat_queue_delete(&(qp->tx_q));
228                 qat_queue_delete(&(qp->rx_q));
229         } else {
230                 return -EAGAIN;
231         }
232
233         adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
234
235         for (i = 0; i < qp->nb_descriptors; i++)
236                 rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
237
238         if (qp->op_cookie_pool)
239                 rte_mempool_free(qp->op_cookie_pool);
240
241         rte_free(qp->op_cookies);
242         rte_free(qp);
243         dev->data->queue_pairs[queue_pair_id] = NULL;
244         return 0;
245 }
246
247 static int qat_tx_queue_create(struct rte_cryptodev *dev,
248         struct qat_queue *queue, uint8_t qp_id,
249         uint32_t nb_desc, int socket_id)
250 {
251         PMD_INIT_FUNC_TRACE();
252         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
253         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
254                                                 ADF_SYM_TX_QUEUE_STARTOFF;
255         PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
256                 nb_desc, qp_id, queue->hw_bundle_number,
257                 queue->hw_queue_number);
258
259         return qat_queue_create(dev, queue, nb_desc,
260                                 ADF_SYM_TX_RING_DESC_SIZE, socket_id);
261 }
262
263 static int qat_rx_queue_create(struct rte_cryptodev *dev,
264                 struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
265                 int socket_id)
266 {
267         PMD_INIT_FUNC_TRACE();
268         queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
269         queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
270                                                 ADF_SYM_RX_QUEUE_STARTOFF;
271
272         PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
273                 nb_desc, qp_id, queue->hw_bundle_number,
274                 queue->hw_queue_number);
275         return qat_queue_create(dev, queue, nb_desc,
276                                 ADF_SYM_RX_RING_DESC_SIZE, socket_id);
277 }
278
/*
 * Delete a HW ring: scrub its memory with the unused-slot pattern and
 * free its backing memzone.  Safe to call on a NULL queue or one whose
 * memzone no longer exists (both are logged at DEBUG level only).
 */
static void qat_queue_delete(struct qat_queue *queue)
{
	const struct rte_memzone *mz;
	int status = 0;

	if (queue == NULL) {
		PMD_DRV_LOG(DEBUG, "Invalid queue");
		return;
	}
	mz = rte_memzone_lookup(queue->memz_name);
	if (mz != NULL) {
		/* Write an unused pattern to the queue memory. */
		/* NOTE(review): in qat_queue_create() queue->queue_size is
		 * set by adf_verify_queue_size() to the CSR size *encoding*,
		 * not the ring length in bytes (the create path memsets
		 * nb_desc * desc_size bytes).  This scrub therefore only
		 * touches a handful of bytes — confirm whether the whole
		 * ring should be overwritten here.
		 */
		memset(queue->base_addr, 0x7F, queue->queue_size);
		status = rte_memzone_free(mz);
		if (status != 0)
			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
					status, queue->memz_name);
	} else {
		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
				queue->memz_name);
	}
}
301
302 static int
303 qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
304                 uint32_t nb_desc, uint8_t desc_size, int socket_id)
305 {
306         uint64_t queue_base;
307         void *io_addr;
308         const struct rte_memzone *qp_mz;
309         uint32_t queue_size_bytes = nb_desc*desc_size;
310         struct rte_pci_device *pci_dev;
311
312         PMD_INIT_FUNC_TRACE();
313         if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
314                 PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
315                 return -EINVAL;
316         }
317
318         pci_dev = RTE_DEV_TO_PCI(dev->device);
319
320         /*
321          * Allocate a memzone for the queue - create a unique name.
322          */
323         snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
324                 pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
325                 queue->hw_bundle_number, queue->hw_queue_number);
326         qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
327                         socket_id);
328         if (qp_mz == NULL) {
329                 PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
330                 return -ENOMEM;
331         }
332
333         queue->base_addr = (char *)qp_mz->addr;
334         queue->base_phys_addr = qp_mz->iova;
335         if (qat_qp_check_queue_alignment(queue->base_phys_addr,
336                         queue_size_bytes)) {
337                 PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
338                                         " 0x%"PRIx64"\n",
339                                         queue->base_phys_addr);
340                 return -EFAULT;
341         }
342
343         if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
344                         != 0) {
345                 PMD_DRV_LOG(ERR, "Invalid num inflights");
346                 return -EINVAL;
347         }
348
349         queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
350                                         ADF_BYTES_TO_MSG_SIZE(desc_size));
351         queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
352         PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
353                                 " msg_size %u, max_inflights %u modulo %u",
354                                 queue->queue_size, queue_size_bytes,
355                                 nb_desc, desc_size, queue->max_inflights,
356                                 queue->modulo);
357
358         if (queue->max_inflights < 2) {
359                 PMD_DRV_LOG(ERR, "Invalid num inflights");
360                 return -EINVAL;
361         }
362         queue->head = 0;
363         queue->tail = 0;
364         queue->msg_size = desc_size;
365
366         /*
367          * Write an unused pattern to the queue memory.
368          */
369         memset(queue->base_addr, 0x7F, queue_size_bytes);
370
371         queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
372                                         queue->queue_size);
373
374         io_addr = pci_dev->mem_resource[0].addr;
375
376         WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
377                         queue->hw_queue_number, queue_base);
378         return 0;
379 }
380
381 static int qat_qp_check_queue_alignment(uint64_t phys_addr,
382                                         uint32_t queue_size_bytes)
383 {
384         PMD_INIT_FUNC_TRACE();
385         if (((queue_size_bytes - 1) & phys_addr) != 0)
386                 return -EINVAL;
387         return 0;
388 }
389
390 static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
391         uint32_t *p_queue_size_for_csr)
392 {
393         uint8_t i = ADF_MIN_RING_SIZE;
394
395         PMD_INIT_FUNC_TRACE();
396         for (; i <= ADF_MAX_RING_SIZE; i++)
397                 if ((msg_size * msg_num) ==
398                                 (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
399                         *p_queue_size_for_csr = i;
400                         return 0;
401                 }
402         PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
403         return -EINVAL;
404 }
405
406 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
407 {
408         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
409                                         (ADF_ARB_REG_SLOT *
410                                                         txq->hw_bundle_number);
411         uint32_t value;
412
413         PMD_INIT_FUNC_TRACE();
414         value = ADF_CSR_RD(base_addr, arb_csr_offset);
415         value |= (0x01 << txq->hw_queue_number);
416         ADF_CSR_WR(base_addr, arb_csr_offset, value);
417 }
418
419 static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
420 {
421         uint32_t arb_csr_offset =  ADF_ARB_RINGSRVARBEN_OFFSET +
422                                         (ADF_ARB_REG_SLOT *
423                                                         txq->hw_bundle_number);
424         uint32_t value;
425
426         PMD_INIT_FUNC_TRACE();
427         value = ADF_CSR_RD(base_addr, arb_csr_offset);
428         value ^= (0x01 << txq->hw_queue_number);
429         ADF_CSR_WR(base_addr, arb_csr_offset, value);
430 }
431
432 static void adf_configure_queues(struct qat_qp *qp)
433 {
434         uint32_t queue_config;
435         struct qat_queue *queue = &qp->tx_q;
436
437         PMD_INIT_FUNC_TRACE();
438         queue_config = BUILD_RING_CONFIG(queue->queue_size);
439
440         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
441                         queue->hw_queue_number, queue_config);
442
443         queue = &qp->rx_q;
444         queue_config =
445                         BUILD_RESP_RING_CONFIG(queue->queue_size,
446                                         ADF_RING_NEAR_WATERMARK_512,
447                                         ADF_RING_NEAR_WATERMARK_0);
448
449         WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
450                         queue->hw_queue_number, queue_config);
451 }