/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

int scheduler_logtype_driver;

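/*
 * Illustrative usage sketch of the public API implemented in this file.
 * The device ids below are hypothetical, error handling is omitted, and the
 * scheduler vdev and its slave crypto devices are assumed to have been
 * created already:
 *
 *	uint8_t scheduler_id = 0, slave_id = 1;
 *
 *	rte_cryptodev_scheduler_slave_attach(scheduler_id, slave_id);
 *	rte_cryptodev_scheduler_mode_set(scheduler_id,
 *			CDEV_SCHED_MODE_ROUNDROBIN);
 *	rte_cryptodev_scheduler_ordering_set(scheduler_id, 1);
 *
 * The scheduler device is then configured and started through the usual
 * rte_cryptodev_configure() and rte_cryptodev_start() calls.
 */
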
/** Update the scheduler PMD's capabilities with the attached device's
 *  capabilities.
 *  Whenever a device is attached, the scheduler's capabilities must remain
 *  the common (intersected) capability set of all attached slaves.
 **/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
		uint32_t nb_caps,
		const struct rte_cryptodev_capabilities *slave_caps)
{
	uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
	uint32_t i;

	while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
		nb_slave_caps++;

	if (nb_caps == 0) {
		rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
		return nb_slave_caps;
	}

	for (i = 0; i < sync_nb_caps; i++) {
		struct rte_cryptodev_capabilities *cap = &caps[i];
		uint32_t j;

		for (j = 0; j < nb_slave_caps; j++) {
			const struct rte_cryptodev_capabilities *s_cap =
					&slave_caps[j];

			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
					cap->sym.xform_type)
				continue;

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH) {
				if (s_cap->sym.auth.algo !=
						cap->sym.auth.algo)
					continue;

				cap->sym.auth.digest_size.min =
					s_cap->sym.auth.digest_size.min <
					cap->sym.auth.digest_size.min ?
					s_cap->sym.auth.digest_size.min :
					cap->sym.auth.digest_size.min;
				cap->sym.auth.digest_size.max =
					s_cap->sym.auth.digest_size.max <
					cap->sym.auth.digest_size.max ?
					s_cap->sym.auth.digest_size.max :
					cap->sym.auth.digest_size.max;

			}

			if (s_cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_CIPHER)
				if (s_cap->sym.cipher.algo !=
						cap->sym.cipher.algo)
					continue;

			/* op, xform type and algo all match - this is a
			 * common capability, stop searching
			 */
			break;
		}

		if (j < nb_slave_caps)
			continue;

		/* remove an uncommon cap from the array */
		for (j = i; j < sync_nb_caps - 1; j++)
			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
		sync_nb_caps--;
		/* re-check the capability that was shifted into slot i
		 * (the unsigned wrap-around at i == 0 is undone by the
		 * loop's i++)
		 */
		i--;
	}

	return sync_nb_caps;
}

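/** Rebuild the scheduler's capability array as the common capability set of
 *  all currently attached slaves.
 *  Returns 0 on success, -1 if the slaves share no common capability, or
 *  -ENOMEM if the capability array cannot be allocated.
 **/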
static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
	uint32_t nb_caps = 0, i;

	if (sched_ctx->capabilities) {
		rte_free(sched_ctx->capabilities);
		sched_ctx->capabilities = NULL;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
		if (nb_caps == 0)
			return -1;
	}

	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
			sizeof(struct rte_cryptodev_capabilities) *
			(nb_caps + 1), 0, SOCKET_ID_ANY);
	if (!sched_ctx->capabilities)
		return -ENOMEM;

	rte_memcpy(sched_ctx->capabilities, tmp_caps,
			sizeof(struct rte_cryptodev_capabilities) * nb_caps);

	return 0;
}

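/** Recompute the scheduler device's feature flags as the bitwise OR of the
 *  feature flags of all attached slaves.
 **/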
static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
	uint32_t i;

	dev->feature_flags = 0;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

		dev->feature_flags |= dev_info.feature_flags;
	}
}

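/** Set the scheduler's maximum number of queue pairs to the smallest
 *  maximum reported by any attached slave.
 **/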
static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
	uint32_t i;
	uint32_t max_nb_qp;

	if (!sched_ctx->nb_slaves)
		return;

	max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;

	for (i = 0; i < sched_ctx->nb_slaves; i++) {
		struct rte_cryptodev_info dev_info;

		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
				dev_info.max_nb_queue_pairs : max_nb_qp;
	}

	sched_ctx->max_nb_queue_pairs = max_nb_qp;
}

/** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	struct scheduler_slave *slave;
	struct rte_cryptodev_info dev_info;
	uint32_t i;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;
	if (sched_ctx->nb_slaves >=
			RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
		CR_SCHED_LOG(ERR, "Too many slaves attached");
		return -ENOMEM;
	}

	for (i = 0; i < sched_ctx->nb_slaves; i++)
		if (sched_ctx->slaves[i].dev_id == slave_id) {
			CR_SCHED_LOG(ERR, "Slave already added");
			return -ENOTSUP;
		}

	slave = &sched_ctx->slaves[sched_ctx->nb_slaves];

	rte_cryptodev_info_get(slave_id, &dev_info);

	slave->dev_id = slave_id;
	slave->driver_id = dev_info.driver_id;
	sched_ctx->nb_slaves++;

	if (update_scheduler_capability(sched_ctx) < 0) {
		slave->dev_id = 0;
		slave->driver_id = 0;
		sched_ctx->nb_slaves--;

		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

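/** Detach a device from the scheduler. */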
int
rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t i, slave_pos;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
		if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
			break;
	if (slave_pos == sched_ctx->nb_slaves) {
		CR_SCHED_LOG(ERR, "Cannot find slave");
		return -ENOTSUP;
	}

	if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
		CR_SCHED_LOG(ERR, "Failed to detach slave");
		return -ENOTSUP;
	}

	for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
		memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
				sizeof(struct scheduler_slave));
	}
	memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
			sizeof(struct scheduler_slave));
	sched_ctx->nb_slaves--;

	if (update_scheduler_capability(sched_ctx) < 0) {
		CR_SCHED_LOG(ERR, "capabilities update failed");
		return -ENOTSUP;
	}

	update_scheduler_feature_flag(dev);

	update_max_nb_qp(sched_ctx);

	return 0;
}

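/** Set the scheduling mode by loading the matching built-in scheduler. */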
int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
		enum rte_cryptodev_scheduler_mode mode)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (mode == sched_ctx->mode)
		return 0;

	switch (mode) {
	case CDEV_SCHED_MODE_ROUNDROBIN:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_roundrobin) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_pkt_size_based_distr) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_FAILOVER:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_failover) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	case CDEV_SCHED_MODE_MULTICORE:
		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
				crypto_scheduler_multicore) < 0) {
			CR_SCHED_LOG(ERR, "Failed to load scheduler");
			return -1;
		}
		break;
	default:
		CR_SCHED_LOG(ERR, "Not yet supported");
		return -ENOTSUP;
	}

	return 0;
}

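/** Return the scheduler's current scheduling mode. */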
enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return sched_ctx->mode;
}

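/** Enable or disable the crypto operation reordering feature. */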
int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
		uint32_t enable_reorder)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	sched_ctx->reordering_enabled = enable_reorder;

	return 0;
}

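/** Return the current state of the crypto operation reordering feature. */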
int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	return (int)sched_ctx->reordering_enabled;
}

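/** Load a scheduler (built-in or user defined) into the scheduler PMD:
 *  record its name, description, mode and operation callbacks, and create
 *  its private context if a create_private_ctx callback is provided.
 **/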
int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
		struct rte_cryptodev_scheduler *scheduler)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid name %s, should be less than "
				"%u bytes.", scheduler->name,
				RTE_CRYPTODEV_NAME_MAX_LEN);
		return -EINVAL;
	}
	snprintf(sched_ctx->name, sizeof(sched_ctx->name), "%s",
			scheduler->name);

	if (strlen(scheduler->description) >
			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
		CR_SCHED_LOG(ERR, "Invalid description %s, should be less than "
				"%u bytes.", scheduler->description,
				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
		return -EINVAL;
	}
	snprintf(sched_ctx->description, sizeof(sched_ctx->description), "%s",
			scheduler->description);

	/* load the scheduler instance's operation functions */
	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
	sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
	sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
	sched_ctx->ops.option_set = scheduler->ops->option_set;
	sched_ctx->ops.option_get = scheduler->ops->option_get;

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;
	}

	if (sched_ctx->ops.create_private_ctx) {
		int ret = (*sched_ctx->ops.create_private_ctx)(dev);

		if (ret < 0) {
			CR_SCHED_LOG(ERR, "Unable to create scheduler private "
					"context");
			return ret;
		}
	}

	sched_ctx->mode = scheduler->mode;

	return 0;
}

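/** Write the attached slaves' device ids into 'slaves' (when not NULL) and
 *  return the number of attached slaves.
 **/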
int
rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;
	uint32_t nb_slaves = 0;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	nb_slaves = sched_ctx->nb_slaves;

	if (slaves && nb_slaves) {
		uint32_t i;

		for (i = 0; i < nb_slaves; i++)
			slaves[i] = sched_ctx->slaves[i].dev_id;
	}

	return (int)nb_slaves;
}

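/** Set a scheduler-specific option through the loaded scheduler's
 *  option_set callback.
 **/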
int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
			option_type >= CDEV_SCHED_OPTION_COUNT) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (dev->data->dev_started) {
		CR_SCHED_LOG(ERR, "Illegal operation");
		return -EBUSY;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);

	return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

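/** Retrieve a scheduler-specific option through the loaded scheduler's
 *  option_get callback.
 **/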
int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
		enum rte_cryptodev_schedule_option_type option_type,
		void *option)
{
	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
	struct scheduler_ctx *sched_ctx;

	if (!dev) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	if (!option) {
		CR_SCHED_LOG(ERR, "Invalid option parameter");
		return -EINVAL;
	}

	if (dev->driver_id != cryptodev_scheduler_driver_id) {
		CR_SCHED_LOG(ERR, "Operation not supported");
		return -ENOTSUP;
	}

	sched_ctx = dev->data->dev_private;

	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);

	return (*sched_ctx->ops.option_get)(dev, option_type, option);
}

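/* Register the scheduler PMD driver log type at startup. */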
RTE_INIT(scheduler_init_log)
{
	scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler");
}