drivers/crypto/scheduler/rte_cryptodev_scheduler.c (DPDK 18.02)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <rte_reorder.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"

/** Update the scheduler PMD's capabilities with the capabilities of the
 *  device being attached.
 *  For each attached device, the scheduler's capabilities must be the
 *  common subset of all the slaves' capabilities.
 **/
static uint32_t
sync_caps(struct rte_cryptodev_capabilities *caps,
                uint32_t nb_caps,
                const struct rte_cryptodev_capabilities *slave_caps)
{
        uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
        uint32_t i;

        while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
                nb_slave_caps++;

        if (nb_caps == 0) {
                rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
                return nb_slave_caps;
        }

        for (i = 0; i < sync_nb_caps; i++) {
                struct rte_cryptodev_capabilities *cap = &caps[i];
                uint32_t j;

                for (j = 0; j < nb_slave_caps; j++) {
                        const struct rte_cryptodev_capabilities *s_cap =
                                        &slave_caps[j];

                        if (s_cap->op != cap->op || s_cap->sym.xform_type !=
                                        cap->sym.xform_type)
                                continue;

                        if (s_cap->sym.xform_type ==
                                        RTE_CRYPTO_SYM_XFORM_AUTH) {
                                if (s_cap->sym.auth.algo !=
                                                cap->sym.auth.algo)
                                        continue;

                                cap->sym.auth.digest_size.min =
                                        s_cap->sym.auth.digest_size.min <
                                        cap->sym.auth.digest_size.min ?
                                        s_cap->sym.auth.digest_size.min :
                                        cap->sym.auth.digest_size.min;
                                cap->sym.auth.digest_size.max =
                                        s_cap->sym.auth.digest_size.max <
                                        cap->sym.auth.digest_size.max ?
                                        s_cap->sym.auth.digest_size.max :
                                        cap->sym.auth.digest_size.max;

                        }

                        if (s_cap->sym.xform_type ==
                                        RTE_CRYPTO_SYM_XFORM_CIPHER)
                                if (s_cap->sym.cipher.algo !=
                                                cap->sym.cipher.algo)
                                        continue;

                        /* a matching (common) capability was found */
                        break;
                }

                /* the capability is supported by this slave, keep it */
                if (j < nb_slave_caps)
                        continue;

                /* remove the uncommon capability from the array */
                for (j = i; j < sync_nb_caps - 1; j++)
                        rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));

                memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
                sync_nb_caps--;
                /* re-examine the capability shifted into this slot */
                i--;
        }

        return sync_nb_caps;
}
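
/*
 * Worked example of the intersection above (editor's illustration, not
 * executed code): if an already-synced SHA1-HMAC capability advertises a
 * digest size range of 12..20 bytes and the newly attached slave advertises
 * 12..16 bytes for the same algorithm, the synced capability keeps
 * min = 12 (the smaller of the two minima) and max = 16 (the smaller of the
 * two maxima). A capability that no slave capability matches on op, xform
 * type and algorithm is dropped from the scheduler's capability array.
 */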

static int
update_scheduler_capability(struct scheduler_ctx *sched_ctx)
{
        struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
        uint32_t nb_caps = 0, i;

        if (sched_ctx->capabilities) {
                rte_free(sched_ctx->capabilities);
                /* clear the pointer so a failed rebuild below does not
                 * leave a dangling reference behind
                 */
                sched_ctx->capabilities = NULL;
        }

        for (i = 0; i < sched_ctx->nb_slaves; i++) {
                struct rte_cryptodev_info dev_info;

                rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

                nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
                if (nb_caps == 0)
                        return -1;
        }

        sched_ctx->capabilities = rte_zmalloc_socket(NULL,
                        sizeof(struct rte_cryptodev_capabilities) *
                        (nb_caps + 1), 0, SOCKET_ID_ANY);
        if (!sched_ctx->capabilities)
                return -ENOMEM;

        rte_memcpy(sched_ctx->capabilities, tmp_caps,
                        sizeof(struct rte_cryptodev_capabilities) * nb_caps);

        return 0;
}

static void
update_scheduler_feature_flag(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        uint32_t i;

        dev->feature_flags = 0;

        for (i = 0; i < sched_ctx->nb_slaves; i++) {
                struct rte_cryptodev_info dev_info;

                rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);

                dev->feature_flags |= dev_info.feature_flags;
        }
}

static void
update_max_nb_qp(struct scheduler_ctx *sched_ctx)
{
        uint32_t i;
        uint32_t max_nb_qp;

        if (!sched_ctx->nb_slaves)
                return;

        max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;

        for (i = 0; i < sched_ctx->nb_slaves; i++) {
                struct rte_cryptodev_info dev_info;

                rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
                max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
                                dev_info.max_nb_queue_pairs : max_nb_qp;
        }

        sched_ctx->max_nb_queue_pairs = max_nb_qp;
}

/** Attach a device to the scheduler. */
int
rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;
        struct scheduler_slave *slave;
        struct rte_cryptodev_info dev_info;
        uint32_t i;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CS_LOG_ERR("Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;
        if (sched_ctx->nb_slaves >=
                        RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
                CS_LOG_ERR("Too many slaves attached");
                return -ENOMEM;
        }

        for (i = 0; i < sched_ctx->nb_slaves; i++)
                if (sched_ctx->slaves[i].dev_id == slave_id) {
                        CS_LOG_ERR("Slave already added");
                        return -ENOTSUP;
                }

        slave = &sched_ctx->slaves[sched_ctx->nb_slaves];

        rte_cryptodev_info_get(slave_id, &dev_info);

        slave->dev_id = slave_id;
        slave->driver_id = dev_info.driver_id;
        sched_ctx->nb_slaves++;

        if (update_scheduler_capability(sched_ctx) < 0) {
                slave->dev_id = 0;
                slave->driver_id = 0;
                sched_ctx->nb_slaves--;

                CS_LOG_ERR("capabilities update failed");
                return -ENOTSUP;
        }

        update_scheduler_feature_flag(dev);

        update_max_nb_qp(sched_ctx);

        return 0;
}
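
/*
 * Usage sketch (editor's illustration, not part of the driver): attach two
 * already-created crypto devices to a scheduler instance. The device names
 * below are assumptions chosen for the example; real applications would use
 * the ids of whatever devices they created or probed. Attaching must be done
 * while the scheduler device is stopped.
 *
 *     int sched_id = rte_cryptodev_get_dev_id("crypto_scheduler0");
 *     int slave_a = rte_cryptodev_get_dev_id("crypto_aesni_mb0");
 *     int slave_b = rte_cryptodev_get_dev_id("crypto_null0");
 *
 *     if (sched_id < 0 || slave_a < 0 || slave_b < 0)
 *             return -ENODEV;
 *
 *     if (rte_cryptodev_scheduler_slave_attach(sched_id, slave_a) < 0 ||
 *             rte_cryptodev_scheduler_slave_attach(sched_id, slave_b) < 0)
 *             return -EINVAL;
 */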

/** Detach a device from the scheduler. */
int
rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;
        uint32_t i, slave_pos;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CS_LOG_ERR("Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
                if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
                        break;
        if (slave_pos == sched_ctx->nb_slaves) {
                CS_LOG_ERR("Cannot find slave");
                return -ENOTSUP;
        }

        if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
                CS_LOG_ERR("Failed to detach slave");
                return -ENOTSUP;
        }

        for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
                memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
                                sizeof(struct scheduler_slave));
        }
        memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
                        sizeof(struct scheduler_slave));
        sched_ctx->nb_slaves--;

        if (update_scheduler_capability(sched_ctx) < 0) {
                CS_LOG_ERR("capabilities update failed");
                return -ENOTSUP;
        }

        update_scheduler_feature_flag(dev);

        update_max_nb_qp(sched_ctx);

        return 0;
}
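
/*
 * Usage sketch (editor's illustration): detaching reverses the attach above
 * and likewise requires the scheduler device to be stopped first, e.g.
 * before the slave device itself is closed:
 *
 *     rte_cryptodev_stop(sched_id);
 *     rte_cryptodev_scheduler_slave_detach(sched_id, slave_b);
 */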

int
rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
                enum rte_cryptodev_scheduler_mode mode)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CS_LOG_ERR("Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        if (mode == sched_ctx->mode)
                return 0;

        switch (mode) {
        case CDEV_SCHED_MODE_ROUNDROBIN:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                roundrobin_scheduler) < 0) {
                        CS_LOG_ERR("Failed to load scheduler");
                        return -1;
                }
                break;
        case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                pkt_size_based_distr_scheduler) < 0) {
                        CS_LOG_ERR("Failed to load scheduler");
                        return -1;
                }
                break;
        case CDEV_SCHED_MODE_FAILOVER:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                failover_scheduler) < 0) {
                        CS_LOG_ERR("Failed to load scheduler");
                        return -1;
                }
                break;
        case CDEV_SCHED_MODE_MULTICORE:
                if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
                                multicore_scheduler) < 0) {
                        CS_LOG_ERR("Failed to load scheduler");
                        return -1;
                }
                break;
        default:
                CS_LOG_ERR("Not yet supported");
                return -ENOTSUP;
        }

        return 0;
}

enum rte_cryptodev_scheduler_mode
rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        return sched_ctx->mode;
}
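
/*
 * Usage sketch (editor's illustration, sched_id as in the attach sketch
 * above): select the packet-size based distribution mode while the device
 * is stopped, then read the mode back.
 *
 *     if (rte_cryptodev_scheduler_mode_set(sched_id,
 *                     CDEV_SCHED_MODE_PKT_SIZE_DISTR) < 0)
 *             return -EINVAL;
 *
 *     if (rte_cryptodev_scheduler_mode_get(sched_id) !=
 *                     CDEV_SCHED_MODE_PKT_SIZE_DISTR)
 *             return -EINVAL;
 */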

int
rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
                uint32_t enable_reorder)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CS_LOG_ERR("Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        sched_ctx->reordering_enabled = enable_reorder;

        return 0;
}

int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        return (int)sched_ctx->reordering_enabled;
}
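
/*
 * Usage sketch (editor's illustration, sched_id as above): enable crypto
 * operation reordering so dequeued operations keep their enqueue order, and
 * confirm the setting.
 *
 *     rte_cryptodev_scheduler_ordering_set(sched_id, 1);
 *
 *     if (rte_cryptodev_scheduler_ordering_get(sched_id) != 1)
 *             return -EINVAL;
 */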

int
rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
                struct rte_cryptodev_scheduler *scheduler)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CS_LOG_ERR("Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
                CS_LOG_ERR("Invalid name %s, should be less than "
                                "%u bytes.\n", scheduler->name,
                                RTE_CRYPTODEV_NAME_MAX_LEN);
                return -EINVAL;
        }
        snprintf(sched_ctx->name, sizeof(sched_ctx->name), "%s",
                        scheduler->name);

        if (strlen(scheduler->description) >
                        RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
                CS_LOG_ERR("Invalid description %s, should be less than "
                                "%u bytes.\n", scheduler->description,
                                RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
                return -EINVAL;
        }
        snprintf(sched_ctx->description, sizeof(sched_ctx->description), "%s",
                        scheduler->description);

        /* load the scheduler instance's operation functions */
        sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
        sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
        sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
        sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
        sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
        sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
        sched_ctx->ops.option_set = scheduler->ops->option_set;
        sched_ctx->ops.option_get = scheduler->ops->option_get;

        if (sched_ctx->private_ctx) {
                rte_free(sched_ctx->private_ctx);
                /* clear the pointer so a later failure does not leave a
                 * dangling reference behind
                 */
                sched_ctx->private_ctx = NULL;
        }

        if (sched_ctx->ops.create_private_ctx) {
                int ret = (*sched_ctx->ops.create_private_ctx)(dev);

                if (ret < 0) {
                        CS_LOG_ERR("Unable to create scheduler private "
                                        "context");
                        return ret;
                }
        }

        sched_ctx->mode = scheduler->mode;

        return 0;
}
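
/*
 * Sketch of loading a user-defined scheduler (editor's illustration). The
 * callbacks named my_* are hypothetical placeholders; a real scheduler has
 * to supply implementations matching struct rte_cryptodev_scheduler_ops.
 *
 *     static struct rte_cryptodev_scheduler_ops my_ops = {
 *             .slave_attach = my_slave_attach,
 *             .slave_detach = my_slave_detach,
 *             .scheduler_start = my_start,
 *             .scheduler_stop = my_stop,
 *             .config_queue_pair = my_config_qp,
 *             .create_private_ctx = my_create_ctx,
 *             .option_set = NULL,
 *             .option_get = NULL,
 *     };
 *
 *     static struct rte_cryptodev_scheduler my_scheduler = {
 *             .name = "my-scheduler",
 *             .description = "example user-defined scheduler",
 *             .mode = CDEV_SCHED_MODE_USERDEFINED,
 *             .ops = &my_ops,
 *     };
 *
 *     rte_cryptodev_scheduler_load_user_scheduler(sched_id, &my_scheduler);
 */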

int
rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;
        uint32_t nb_slaves = 0;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        nb_slaves = sched_ctx->nb_slaves;

        if (slaves && nb_slaves) {
                uint32_t i;

                for (i = 0; i < nb_slaves; i++)
                        slaves[i] = sched_ctx->slaves[i].dev_id;
        }

        return (int)nb_slaves;
}
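
/*
 * Usage sketch (editor's illustration): query how many slaves are attached
 * and their crypto device ids. Passing NULL as the slaves array returns the
 * count only.
 *
 *     uint8_t slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
 *     int nb = rte_cryptodev_scheduler_slaves_get(sched_id, slaves);
 *
 *     if (nb < 0)
 *             return nb;
 */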

int
rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
                enum rte_cryptodev_schedule_option_type option_type,
                void *option)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
                        option_type >= CDEV_SCHED_OPTION_COUNT) {
                CS_LOG_ERR("Invalid option parameter");
                return -EINVAL;
        }

        if (!option) {
                CS_LOG_ERR("Invalid option parameter");
                return -EINVAL;
        }

        /* reject invalid or non-scheduler devices, as the other APIs do */
        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (dev->data->dev_started) {
                CS_LOG_ERR("Illegal operation");
                return -EBUSY;
        }

        sched_ctx = dev->data->dev_private;

        RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);

        return (*sched_ctx->ops.option_set)(dev, option_type, option);
}

int
rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
                enum rte_cryptodev_schedule_option_type option_type,
                void *option)
{
        struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
        struct scheduler_ctx *sched_ctx;

        if (!dev) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        if (!option) {
                CS_LOG_ERR("Invalid option parameter");
                return -EINVAL;
        }

        if (dev->driver_id != cryptodev_driver_id) {
                CS_LOG_ERR("Operation not supported");
                return -ENOTSUP;
        }

        sched_ctx = dev->data->dev_private;

        RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);

        return (*sched_ctx->ops.option_get)(dev, option_type, option);
}
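
/*
 * Usage sketch (editor's illustration): in packet-size distribution mode the
 * threshold option controls the packet size used to split traffic between
 * the two slaves. This assumes the threshold option structure declared with
 * the scheduler API in rte_cryptodev_scheduler.h; the 128-byte value is an
 * arbitrary example.
 *
 *     struct rte_cryptodev_scheduler_threshold_option opt = {
 *             .threshold = 128,
 *     };
 *
 *     if (rte_cryptodev_scheduler_option_set(sched_id,
 *                     CDEV_SCHED_OPTION_THRESHOLD, &opt) < 0)
 *             return -EINVAL;
 *
 *     rte_cryptodev_scheduler_option_get(sched_id,
 *                     CDEV_SCHED_OPTION_THRESHOLD, &opt);
 */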