/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation.
 * All rights reserved.
 */

#include <string.h>
#include <stdbool.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_service_component.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"
#include "rte_event_crypto_adapter.h"

#define BATCH_SIZE 32
#define DEFAULT_MAX_NB 128
#define CRYPTO_ADAPTER_NAME_LEN 32
#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100

/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
 * iterations of eca_crypto_adapter_enq_run()
 */
#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
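
/* The threshold is applied as a bit mask (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)
 * in eca_crypto_adapter_enq_run(), so it must stay a power of two.
 */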

struct rte_event_crypto_adapter {
        /* Event device identifier */
        uint8_t eventdev_id;
        /* Event port identifier */
        uint8_t event_port_id;
        /* Store event device's implicit release capability */
        uint8_t implicit_release_disabled;
        /* Max crypto ops processed in any service function invocation */
        uint32_t max_nb;
        /* Lock to serialize config updates with service function */
        rte_spinlock_t lock;
        /* Next crypto device to be processed */
        uint16_t next_cdev_id;
        /* Per crypto device structure */
        struct crypto_device_info *cdevs;
        /* Loop counter to flush crypto ops */
        uint16_t transmit_loop_count;
        /* Per instance stats structure */
        struct rte_event_crypto_adapter_stats crypto_stats;
        /* Configuration callback for rte_service configuration */
        rte_event_crypto_adapter_conf_cb conf_cb;
        /* Configuration callback argument */
        void *conf_arg;
        /* Set if default_cb is being used */
        int default_cb_arg;
        /* Service initialization state */
        uint8_t service_inited;
        /* Memory allocation name */
        char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
        /* Socket identifier cached from eventdev */
        int socket_id;
        /* Per adapter EAL service */
        uint32_t service_id;
        /* Number of queue pairs configured */
        uint16_t nb_qps;
        /* Adapter mode */
        enum rte_event_crypto_adapter_mode mode;
} __rte_cache_aligned;

/* Per crypto device information */
struct crypto_device_info {
        /* Pointer to cryptodev */
        struct rte_cryptodev *dev;
        /* Pointer to queue pair info */
        struct crypto_queue_pair_info *qpairs;
        /* Next queue pair to be processed */
        uint16_t next_queue_pair_id;
        /* Set to indicate cryptodev->eventdev packet
         * transfer uses a hardware mechanism
         */
        uint8_t internal_event_port;
        /* Set to indicate processing has been started */
        uint8_t dev_started;
        /* If num_qpairs > 0, the start callback will
         * be invoked if not already invoked
         */
        uint16_t num_qpairs;
} __rte_cache_aligned;

/* Per queue pair information */
struct crypto_queue_pair_info {
        /* Set to indicate queue pair is enabled */
        bool qp_enabled;
        /* Pointer to hold rte_crypto_ops for batching */
        struct rte_crypto_op **op_buffer;
        /* Number of crypto ops accumulated */
        uint8_t len;
} __rte_cache_aligned;

static struct rte_event_crypto_adapter **event_crypto_adapter;

/* Macro to check for a valid adapter */
#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
        if (!eca_valid_id(id)) { \
                RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d", id); \
                return retval; \
        } \
} while (0)

static inline int
eca_valid_id(uint8_t id)
{
        return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
}

static int
eca_init(void)
{
        const char *name = "crypto_adapter_array";
        const struct rte_memzone *mz;
        unsigned int sz;

        sz = sizeof(*event_crypto_adapter) *
            RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
        sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);

        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
                                                 RTE_CACHE_LINE_SIZE);
                if (mz == NULL) {
                        RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
                                        PRId32, rte_errno);
                        return -rte_errno;
                }
        }

        event_crypto_adapter = mz->addr;
        return 0;
}
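
/* Note: the lookup-then-reserve sequence above lets secondary processes
 * attach to the adapter array created by the primary process instead of
 * reserving a second memzone under the same name.
 */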

static inline struct rte_event_crypto_adapter *
eca_id_to_adapter(uint8_t id)
{
        return event_crypto_adapter ?
                event_crypto_adapter[id] : NULL;
}

static int
eca_default_config_cb(uint8_t id, uint8_t dev_id,
                        struct rte_event_crypto_adapter_conf *conf, void *arg)
{
        struct rte_event_dev_config dev_conf;
        struct rte_eventdev *dev;
        uint8_t port_id;
        int started;
        int ret;
        struct rte_event_port_conf *port_conf = arg;
        struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);

        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        dev_conf = dev->data->dev_conf;

        started = dev->data->dev_started;
        if (started)
                rte_event_dev_stop(dev_id);
        port_id = dev_conf.nb_event_ports;
        dev_conf.nb_event_ports += 1;
        ret = rte_event_dev_configure(dev_id, &dev_conf);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to configure event dev %u", dev_id);
                if (started) {
                        if (rte_event_dev_start(dev_id))
                                return -EIO;
                }
                return ret;
        }

        ret = rte_event_port_setup(dev_id, port_id, port_conf);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to setup event port %u", port_id);
                return ret;
        }

        conf->event_port_id = port_id;
        conf->max_nb = DEFAULT_MAX_NB;
        if (started)
                ret = rte_event_dev_start(dev_id);

        adapter->default_cb_arg = 1;
        return ret;
}

int __rte_experimental
rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
                                rte_event_crypto_adapter_conf_cb conf_cb,
                                enum rte_event_crypto_adapter_mode mode,
                                void *conf_arg)
{
        struct rte_event_crypto_adapter *adapter;
        char mem_name[CRYPTO_ADAPTER_NAME_LEN];
        struct rte_event_dev_info dev_info;
        int socket_id;
        uint8_t i;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        if (conf_cb == NULL)
                return -EINVAL;

        if (event_crypto_adapter == NULL) {
                ret = eca_init();
                if (ret)
                        return ret;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter != NULL) {
                RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
                return -EEXIST;
        }

        socket_id = rte_event_dev_socket_id(dev_id);
        snprintf(mem_name, sizeof(mem_name),
                 "rte_event_crypto_adapter_%d", id);

        adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
                        RTE_CACHE_LINE_SIZE, socket_id);
        if (adapter == NULL) {
                RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
                return -ENOMEM;
        }

        ret = rte_event_dev_info_get(dev_id, &dev_info);
        if (ret < 0) {
                RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
                                 dev_id, dev_info.driver_name);
                /* Free the adapter so the slot is not leaked on failure */
                rte_free(adapter);
                return ret;
        }

        adapter->implicit_release_disabled = (dev_info.event_dev_cap &
                        RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
        adapter->eventdev_id = dev_id;
        adapter->socket_id = socket_id;
        adapter->conf_cb = conf_cb;
        adapter->conf_arg = conf_arg;
        adapter->mode = mode;
        strcpy(adapter->mem_name, mem_name);
        adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
                                        rte_cryptodev_count() *
                                        sizeof(struct crypto_device_info), 0,
                                        socket_id);
        if (adapter->cdevs == NULL) {
                RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices");
                rte_free(adapter);
                return -ENOMEM;
        }

        rte_spinlock_init(&adapter->lock);
        for (i = 0; i < rte_cryptodev_count(); i++)
                adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);

        event_crypto_adapter[id] = adapter;

        return 0;
}

int __rte_experimental
rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
                                struct rte_event_port_conf *port_config,
                                enum rte_event_crypto_adapter_mode mode)
{
        struct rte_event_port_conf *pc;
        int ret;

        if (port_config == NULL)
                return -EINVAL;
        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        pc = rte_malloc(NULL, sizeof(*pc), 0);
        if (pc == NULL)
                return -ENOMEM;
        *pc = *port_config;
        ret = rte_event_crypto_adapter_create_ext(id, dev_id,
                                                  eca_default_config_cb,
                                                  mode,
                                                  pc);
        if (ret)
                rte_free(pc);

        return ret;
}
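
/* Illustrative usage sketch, not part of the library: a minimal control
 * path built from the public calls in this file. CRYPTO_ADAPTER_ID,
 * evdev_id, cdev_id and port_conf values are application-chosen
 * placeholders, and passing a NULL event assumes the PMD does not
 * advertise RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND.
 *
 *      struct rte_event_port_conf port_conf = {
 *              .new_event_threshold = 4096,
 *              .dequeue_depth = 8,
 *              .enqueue_depth = 8,
 *      };
 *      int ret;
 *
 *      ret = rte_event_crypto_adapter_create(CRYPTO_ADAPTER_ID, evdev_id,
 *                      &port_conf, RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
 *      if (ret == 0)   // -1 adds every queue pair of cdev_id
 *              ret = rte_event_crypto_adapter_queue_pair_add(
 *                              CRYPTO_ADAPTER_ID, cdev_id, -1, NULL);
 *      if (ret == 0)
 *              ret = rte_event_crypto_adapter_start(CRYPTO_ADAPTER_ID);
 */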

int __rte_experimental
rte_event_crypto_adapter_free(uint8_t id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        if (adapter->nb_qps) {
                RTE_EDEV_LOG_ERR("%" PRIu16 " queue pairs not deleted",
                                adapter->nb_qps);
                return -EBUSY;
        }

        if (adapter->default_cb_arg)
                rte_free(adapter->conf_arg);
        rte_free(adapter->cdevs);
        rte_free(adapter);
        event_crypto_adapter[id] = NULL;

        return 0;
}

static inline unsigned int
eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
                 struct rte_event *ev, unsigned int cnt)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        union rte_event_crypto_metadata *m_data = NULL;
        struct crypto_queue_pair_info *qp_info = NULL;
        struct rte_crypto_op *crypto_op;
        unsigned int i, n;
        uint16_t qp_id, len, ret;
        uint8_t cdev_id;

        len = 0;
        ret = 0;
        n = 0;
        stats->event_deq_count += cnt;

        for (i = 0; i < cnt; i++) {
                crypto_op = ev[i].event_ptr;
                if (crypto_op == NULL)
                        continue;
                if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        m_data = rte_cryptodev_sym_session_get_user_data(
                                        crypto_op->sym->session);
                        if (m_data == NULL) {
                                rte_pktmbuf_free(crypto_op->sym->m_src);
                                rte_crypto_op_free(crypto_op);
                                continue;
                        }

                        cdev_id = m_data->request_info.cdev_id;
                        qp_id = m_data->request_info.queue_pair_id;
                        qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
                        if (!qp_info->qp_enabled) {
                                rte_pktmbuf_free(crypto_op->sym->m_src);
                                rte_crypto_op_free(crypto_op);
                                continue;
                        }
                        len = qp_info->len;
                        qp_info->op_buffer[len] = crypto_op;
                        len++;
                } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
                                crypto_op->private_data_offset) {
                        m_data = (union rte_event_crypto_metadata *)
                                 ((uint8_t *)crypto_op +
                                        crypto_op->private_data_offset);
                        cdev_id = m_data->request_info.cdev_id;
                        qp_id = m_data->request_info.queue_pair_id;
                        qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
                        if (!qp_info->qp_enabled) {
                                rte_pktmbuf_free(crypto_op->sym->m_src);
                                rte_crypto_op_free(crypto_op);
                                continue;
                        }
                        len = qp_info->len;
                        qp_info->op_buffer[len] = crypto_op;
                        len++;
                } else {
                        rte_pktmbuf_free(crypto_op->sym->m_src);
                        rte_crypto_op_free(crypto_op);
                        continue;
                }

                if (len == BATCH_SIZE) {
                        struct rte_crypto_op **op_buffer = qp_info->op_buffer;
                        ret = rte_cryptodev_enqueue_burst(cdev_id,
                                                          qp_id,
                                                          op_buffer,
                                                          BATCH_SIZE);

                        stats->crypto_enq_count += ret;
                        /* Count only the ops this flush actually enqueued;
                         * counting the stale ret on every later iteration
                         * would overstate the return value.
                         */
                        n += ret;

                        while (ret < len) {
                                struct rte_crypto_op *op;
                                op = op_buffer[ret++];
                                stats->crypto_enq_fail++;
                                rte_pktmbuf_free(op->sym->m_src);
                                rte_crypto_op_free(op);
                        }

                        len = 0;
                }

                if (qp_info)
                        qp_info->len = len;
        }

        return n;
}
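
/* Illustrative sketch, not part of the library: the request metadata read
 * above must be attached by the application beforehand. For the session
 * case (sess, cdev_id, qp_id and ev_qid are placeholders):
 *
 *      union rte_event_crypto_metadata m_data;
 *
 *      memset(&m_data, 0, sizeof(m_data));
 *      m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
 *      m_data.response_info.queue_id = ev_qid;
 *      m_data.request_info.cdev_id = cdev_id;
 *      m_data.request_info.queue_pair_id = qp_id;
 *      rte_cryptodev_sym_session_set_user_data(sess, &m_data,
 *                                              sizeof(m_data));
 *
 * For the sessionless case the same union is expected in the op's private
 * data area, at crypto_op->private_data_offset.
 */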

static unsigned int
eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct crypto_device_info *curr_dev;
        struct crypto_queue_pair_info *curr_queue;
        struct rte_crypto_op **op_buffer;
        struct rte_cryptodev *dev;
        uint8_t cdev_id;
        uint16_t qp;
        uint16_t ret;
        uint16_t num_cdev = rte_cryptodev_count();
        unsigned int nb_enqueued = 0;

        for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
                curr_dev = &adapter->cdevs[cdev_id];
                dev = curr_dev->dev;
                /* Skip devices never added to this adapter; their qpairs
                 * array is not allocated.
                 */
                if (dev == NULL || curr_dev->qpairs == NULL)
                        continue;
                for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {

                        curr_queue = &curr_dev->qpairs[qp];
                        if (!curr_queue->qp_enabled)
                                continue;

                        op_buffer = curr_queue->op_buffer;
                        ret = rte_cryptodev_enqueue_burst(cdev_id,
                                                          qp,
                                                          op_buffer,
                                                          curr_queue->len);
                        stats->crypto_enq_count += ret;
                        /* Accumulate across all queue pairs instead of
                         * returning only the last burst's count.
                         */
                        nb_enqueued += ret;

                        while (ret < curr_queue->len) {
                                struct rte_crypto_op *op;
                                op = op_buffer[ret++];
                                stats->crypto_enq_fail++;
                                rte_pktmbuf_free(op->sym->m_src);
                                rte_crypto_op_free(op);
                        }
                        curr_queue->len = 0;
                }
        }

        return nb_enqueued;
}

static int
eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
                        unsigned int max_enq)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct rte_event ev[BATCH_SIZE];
        unsigned int nb_enq, nb_enqueued;
        uint16_t n;
        uint8_t event_dev_id = adapter->eventdev_id;
        uint8_t event_port_id = adapter->event_port_id;

        nb_enqueued = 0;
        if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
                return 0;

        for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
                stats->event_poll_count++;
                n = rte_event_dequeue_burst(event_dev_id,
                                            event_port_id, ev, BATCH_SIZE, 0);

                if (!n)
                        break;

                nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
        }

        if ((++adapter->transmit_loop_count &
                (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
                nb_enqueued += eca_crypto_enq_flush(adapter);
        }

        return nb_enqueued;
}

static inline void
eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
                  struct rte_crypto_op **ops, uint16_t num)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        union rte_event_crypto_metadata *m_data = NULL;
        uint8_t event_dev_id = adapter->eventdev_id;
        uint8_t event_port_id = adapter->event_port_id;
        struct rte_event events[BATCH_SIZE];
        uint16_t nb_enqueued, nb_ev;
        uint8_t retry;
        uint8_t i;

        nb_ev = 0;
        retry = 0;
        nb_enqueued = 0;
        num = RTE_MIN(num, BATCH_SIZE);
        for (i = 0; i < num; i++) {
                struct rte_event *ev = &events[nb_ev];

                /* Reset for every op; a stale pointer from the previous
                 * iteration must not be reused for an op without metadata.
                 */
                m_data = NULL;
                if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        m_data = rte_cryptodev_sym_session_get_user_data(
                                        ops[i]->sym->session);
                } else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
                                ops[i]->private_data_offset) {
                        m_data = (union rte_event_crypto_metadata *)
                                 ((uint8_t *)ops[i] +
                                  ops[i]->private_data_offset);
                }

                if (unlikely(m_data == NULL)) {
                        rte_pktmbuf_free(ops[i]->sym->m_src);
                        rte_crypto_op_free(ops[i]);
                        continue;
                }

                rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
                ev->event_ptr = ops[i];
                ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
                if (adapter->implicit_release_disabled)
                        ev->op = RTE_EVENT_OP_FORWARD;
                else
                        ev->op = RTE_EVENT_OP_NEW;
                /* Count the slot only once the event is fully populated so
                 * that dropped ops do not leave garbage in events[].
                 */
                nb_ev++;
        }

        do {
                nb_enqueued += rte_event_enqueue_burst(event_dev_id,
                                                  event_port_id,
                                                  &events[nb_enqueued],
                                                  nb_ev - nb_enqueued);
        } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
                 nb_enqueued < nb_ev);

        /* Free mbufs and rte_crypto_ops for failed events */
        for (i = nb_enqueued; i < nb_ev; i++) {
                struct rte_crypto_op *op = events[i].event_ptr;
                rte_pktmbuf_free(op->sym->m_src);
                rte_crypto_op_free(op);
        }

        stats->event_enq_fail_count += nb_ev - nb_enqueued;
        stats->event_enq_count += nb_enqueued;
        stats->event_enq_retry_count += retry - 1;
}
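
/* Note: the event is seeded from the application-provided response_info
 * above, after which event_ptr, event_type and op are overwritten; the
 * application therefore controls fields such as queue_id, sched_type and
 * priority of the response event.
 */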

static inline unsigned int
eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
                        unsigned int max_deq)
{
        struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
        struct crypto_device_info *curr_dev;
        struct crypto_queue_pair_info *curr_queue;
        struct rte_crypto_op *ops[BATCH_SIZE];
        uint16_t n, nb_deq;
        struct rte_cryptodev *dev;
        uint8_t cdev_id;
        uint16_t qp, dev_qps;
        bool done;
        uint16_t num_cdev = rte_cryptodev_count();

        nb_deq = 0;
        do {
                uint16_t queues = 0;
                done = true;

                for (cdev_id = adapter->next_cdev_id;
                        cdev_id < num_cdev; cdev_id++) {
                        curr_dev = &adapter->cdevs[cdev_id];
                        dev = curr_dev->dev;
                        /* Skip devices never added to this adapter; their
                         * qpairs array is not allocated.
                         */
                        if (dev == NULL || curr_dev->qpairs == NULL)
                                continue;
                        dev_qps = dev->data->nb_queue_pairs;

                        for (qp = curr_dev->next_queue_pair_id;
                                queues < dev_qps; qp = (qp + 1) % dev_qps,
                                queues++) {

                                curr_queue = &curr_dev->qpairs[qp];
                                if (!curr_queue->qp_enabled)
                                        continue;

                                n = rte_cryptodev_dequeue_burst(cdev_id, qp,
                                        ops, BATCH_SIZE);
                                if (!n)
                                        continue;

                                done = false;
                                stats->crypto_deq_count += n;
                                eca_ops_enqueue_burst(adapter, ops, n);
                                nb_deq += n;

                                if (nb_deq > max_deq) {
                                        if ((qp + 1) == dev_qps) {
                                                adapter->next_cdev_id =
                                                        (cdev_id + 1)
                                                        % num_cdev;
                                        }
                                        curr_dev->next_queue_pair_id = (qp + 1)
                                                % dev->data->nb_queue_pairs;

                                        return nb_deq;
                                }
                        }
                }
        } while (done == false);
        return nb_deq;
}
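
/* next_cdev_id and next_queue_pair_id persist the scan position across
 * service invocations, so a queue pair that hits the max_deq quota does
 * not starve the remaining devices and queue pairs on the next run.
 */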

static void
eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
                        unsigned int max_ops)
{
        while (max_ops) {
                unsigned int e_cnt, d_cnt;

                e_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
                max_ops -= RTE_MIN(max_ops, e_cnt);

                d_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
                max_ops -= RTE_MIN(max_ops, d_cnt);

                if (e_cnt == 0 && d_cnt == 0)
                        break;
        }
}

static int
eca_service_func(void *args)
{
        struct rte_event_crypto_adapter *adapter = args;

        if (rte_spinlock_trylock(&adapter->lock) == 0)
                return 0;
        eca_crypto_adapter_run(adapter, adapter->max_nb);
        rte_spinlock_unlock(&adapter->lock);

        return 0;
}

static int
eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
{
        struct rte_event_crypto_adapter_conf adapter_conf;
        struct rte_service_spec service;
        int ret;

        if (adapter->service_inited)
                return 0;

        memset(&service, 0, sizeof(service));
        snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
                "rte_event_crypto_adapter_%d", id);
        service.socket_id = adapter->socket_id;
        service.callback = eca_service_func;
        service.callback_userdata = adapter;
        /* Service function handles locking for queue add/del updates */
        service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
        ret = rte_service_component_register(&service, &adapter->service_id);
        if (ret) {
                RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
                        service.name, ret);
                return ret;
        }

        ret = adapter->conf_cb(id, adapter->eventdev_id,
                &adapter_conf, adapter->conf_arg);
        if (ret) {
                RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
                        ret);
                return ret;
        }

        adapter->max_nb = adapter_conf.max_nb;
        adapter->event_port_id = adapter_conf.event_port_id;
        adapter->service_inited = 1;

        return ret;
}
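
/* Illustrative sketch, not part of the library: when the adapter runs as
 * a service (no internal port), the application maps the service to a
 * service lcore. lcore_id is a placeholder chosen by the application.
 *
 *      uint32_t service_id;
 *
 *      if (rte_event_crypto_adapter_service_id_get(CRYPTO_ADAPTER_ID,
 *                                                  &service_id) == 0) {
 *              rte_service_lcore_add(lcore_id);
 *              rte_service_map_lcore_set(service_id, lcore_id, 1);
 *              rte_service_lcore_start(lcore_id);
 *      }
 */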

static void
eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
                        struct crypto_device_info *dev_info,
                        int32_t queue_pair_id,
                        uint8_t add)
{
        struct crypto_queue_pair_info *qp_info;
        int enabled;
        uint16_t i;

        if (dev_info->qpairs == NULL)
                return;

        if (queue_pair_id == -1) {
                for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
                        eca_update_qp_info(adapter, dev_info, i, add);
        } else {
                qp_info = &dev_info->qpairs[queue_pair_id];
                enabled = qp_info->qp_enabled;
                if (add) {
                        adapter->nb_qps += !enabled;
                        dev_info->num_qpairs += !enabled;
                } else {
                        adapter->nb_qps -= enabled;
                        dev_info->num_qpairs -= enabled;
                }
                qp_info->qp_enabled = !!add;
        }
}

static int
eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
                uint8_t cdev_id,
                int queue_pair_id)
{
        struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
        struct crypto_queue_pair_info *qpairs;
        uint32_t i;

        if (dev_info->qpairs == NULL) {
                dev_info->qpairs =
                    rte_zmalloc_socket(adapter->mem_name,
                                        dev_info->dev->data->nb_queue_pairs *
                                        sizeof(struct crypto_queue_pair_info),
                                        0, adapter->socket_id);
                if (dev_info->qpairs == NULL)
                        return -ENOMEM;

                /* Allocate a batching buffer for every queue pair, not just
                 * the first one; ops are buffered per queue pair.
                 */
                for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++) {
                        qpairs = &dev_info->qpairs[i];
                        qpairs->op_buffer = rte_zmalloc_socket(
                                                adapter->mem_name,
                                                BATCH_SIZE *
                                                sizeof(struct rte_crypto_op *),
                                                0, adapter->socket_id);
                        if (qpairs->op_buffer == NULL) {
                                while (i--)
                                        rte_free(
                                            dev_info->qpairs[i].op_buffer);
                                rte_free(dev_info->qpairs);
                                dev_info->qpairs = NULL;
                                return -ENOMEM;
                        }
                }
        }

        if (queue_pair_id == -1) {
                for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
                        eca_update_qp_info(adapter, dev_info, i, 1);
        } else
                eca_update_qp_info(adapter, dev_info,
                                        (uint16_t)queue_pair_id, 1);

        return 0;
}

int __rte_experimental
rte_event_crypto_adapter_queue_pair_add(uint8_t id,
                        uint8_t cdev_id,
                        int32_t queue_pair_id,
                        const struct rte_event *event)
{
        struct rte_event_crypto_adapter *adapter;
        struct rte_eventdev *dev;
        struct crypto_device_info *dev_info;
        uint32_t cap;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
                RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
                return -EINVAL;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
                                                cdev_id,
                                                &cap);
        if (ret) {
                RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
                        " cdev %" PRIu8, id, cdev_id);
                return ret;
        }

        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
            (event == NULL)) {
                RTE_EDEV_LOG_ERR("Conf value cannot be NULL for dev_id=%u",
                                  cdev_id);
                return -EINVAL;
        }

        dev_info = &adapter->cdevs[cdev_id];

        if (queue_pair_id != -1 &&
            (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
                RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
                                 (uint16_t)queue_pair_id);
                return -EINVAL;
        }

        /* If the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
         * no service core is needed because the HW supports event forwarding.
         */
        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
                RTE_FUNC_PTR_OR_ERR_RET(
                        *dev->dev_ops->crypto_adapter_queue_pair_add,
                        -ENOTSUP);
                if (dev_info->qpairs == NULL) {
                        dev_info->qpairs =
                            rte_zmalloc_socket(adapter->mem_name,
                                        dev_info->dev->data->nb_queue_pairs *
                                        sizeof(struct crypto_queue_pair_info),
                                        0, adapter->socket_id);
                        if (dev_info->qpairs == NULL)
                                return -ENOMEM;
                }

                ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
                                dev_info->dev,
                                queue_pair_id,
                                event);
                if (ret)
                        return ret;

                eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
                                   queue_pair_id, 1);
        }

        /* If the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
         * or this is a SW adapter, initialize the service so the application
         * can choose whichever way it wants to use the adapter.
         * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
         *         The application may want one of the two modes below:
         *          a. OP_FORWARD mode -> HW dequeue + SW enqueue
         *          b. OP_NEW mode -> HW dequeue
         * Case 2: No HW caps, use the SW adapter
         *          a. OP_FORWARD mode -> SW enqueue & dequeue
         *          b. OP_NEW mode -> SW dequeue
         */
        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
             (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
              !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
              !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
               (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
                rte_spinlock_lock(&adapter->lock);
                ret = eca_init_service(adapter, id);
                if (ret == 0)
                        ret = eca_add_queue_pair(adapter, cdev_id,
                                                 queue_pair_id);
                rte_spinlock_unlock(&adapter->lock);

                if (ret)
                        return ret;

                rte_service_component_runstate_set(adapter->service_id, 1);
        }

        return 0;
}

int __rte_experimental
rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
                                        int32_t queue_pair_id)
{
        struct rte_event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        int ret;
        uint32_t cap;
        uint16_t i;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
                RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
                return -EINVAL;
        }

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
                                                cdev_id,
                                                &cap);
        if (ret)
                return ret;

        dev_info = &adapter->cdevs[cdev_id];

        if (queue_pair_id != -1 &&
            (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
                RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
                                 (uint16_t)queue_pair_id);
                return -EINVAL;
        }

        if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
            (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
             adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
                RTE_FUNC_PTR_OR_ERR_RET(
                        *dev->dev_ops->crypto_adapter_queue_pair_del,
                        -ENOTSUP);
                ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
                                                dev_info->dev,
                                                queue_pair_id);
                if (ret == 0) {
                        eca_update_qp_info(adapter,
                                        &adapter->cdevs[cdev_id],
                                        queue_pair_id,
                                        0);
                        if (dev_info->num_qpairs == 0) {
                                rte_free(dev_info->qpairs);
                                dev_info->qpairs = NULL;
                        }
                }
        } else {
                if (adapter->nb_qps == 0)
                        return 0;

                rte_spinlock_lock(&adapter->lock);
                if (queue_pair_id == -1) {
                        /* Disable each queue pair in turn, not the -1
                         * wildcard, which would recurse over all of them
                         * on every loop iteration.
                         */
                        for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
                                i++)
                                eca_update_qp_info(adapter, dev_info, i, 0);
                } else {
                        eca_update_qp_info(adapter, dev_info,
                                                (uint16_t)queue_pair_id, 0);
                }

                if (dev_info->num_qpairs == 0) {
                        rte_free(dev_info->qpairs);
                        dev_info->qpairs = NULL;
                }

                rte_spinlock_unlock(&adapter->lock);
                rte_service_component_runstate_set(adapter->service_id,
                                adapter->nb_qps);
        }

        return ret;
}

static int
eca_adapter_ctrl(uint8_t id, int start)
{
        struct rte_event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        uint32_t i;
        int use_service;
        int stop = !start;

        use_service = 0;
        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];

        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                /* on start, skip devices with no queue pairs added */
                if (start && !dev_info->num_qpairs)
                        continue;
                /* on stop, skip devices that were never started */
                if (stop && !dev_info->dev_started)
                        continue;
                use_service |= !dev_info->internal_event_port;
                dev_info->dev_started = start;
                if (dev_info->internal_event_port == 0)
                        continue;
                /* Pass the cryptodev pointer itself, not an offset off it */
                start ? (*dev->dev_ops->crypto_adapter_start)(dev,
                                                dev_info->dev) :
                        (*dev->dev_ops->crypto_adapter_stop)(dev,
                                                dev_info->dev);
        }

        if (use_service)
                rte_service_runstate_set(adapter->service_id, start);

        return 0;
}

int __rte_experimental
rte_event_crypto_adapter_start(uint8_t id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        return eca_adapter_ctrl(id, 1);
}

int __rte_experimental
rte_event_crypto_adapter_stop(uint8_t id)
{
        return eca_adapter_ctrl(id, 0);
}

int __rte_experimental
rte_event_crypto_adapter_stats_get(uint8_t id,
                                struct rte_event_crypto_adapter_stats *stats)
{
        struct rte_event_crypto_adapter *adapter;
        struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
        struct rte_event_crypto_adapter_stats dev_stats;
        struct rte_eventdev *dev;
        struct crypto_device_info *dev_info;
        uint32_t i;
        int ret;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || stats == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->crypto_adapter_stats_get == NULL)
                        continue;
                ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
                                                dev_info->dev,
                                                &dev_stats);
                if (ret)
                        continue;

                dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
                dev_stats_sum.event_enq_count +=
                        dev_stats.event_enq_count;
        }

        if (adapter->service_inited)
                *stats = adapter->crypto_stats;

        stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
        stats->event_enq_count += dev_stats_sum.event_enq_count;

        return 0;
}

int __rte_experimental
rte_event_crypto_adapter_stats_reset(uint8_t id)
{
        struct rte_event_crypto_adapter *adapter;
        struct crypto_device_info *dev_info;
        struct rte_eventdev *dev;
        uint32_t i;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL)
                return -EINVAL;

        dev = &rte_eventdevs[adapter->eventdev_id];
        for (i = 0; i < rte_cryptodev_count(); i++) {
                dev_info = &adapter->cdevs[i];
                if (dev_info->internal_event_port == 0 ||
                        dev->dev_ops->crypto_adapter_stats_reset == NULL)
                        continue;
                (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
                                                dev_info->dev);
        }

        memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
        return 0;
}

int __rte_experimental
rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || service_id == NULL)
                return -EINVAL;

        if (adapter->service_inited)
                *service_id = adapter->service_id;

        return adapter->service_inited ? 0 : -ESRCH;
}

int __rte_experimental
rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
        struct rte_event_crypto_adapter *adapter;

        EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);

        adapter = eca_id_to_adapter(id);
        if (adapter == NULL || event_port_id == NULL)
                return -EINVAL;

        *event_port_id = adapter->event_port_id;

        return 0;
}