[deb_dpdk.git] drivers/crypto/scheduler/scheduler_roundrobin.c (commit 01162764628e74f1715284e4b60c72f9b4d4e89d)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

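/*
 * Per queue pair context for the round-robin scheduler: the attached
 * slaves, their count, and the indices of the slaves last used for
 * enqueue and dequeue.
 */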
struct rr_scheduler_qp_ctx {
        struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
        uint32_t nb_slaves;

        uint32_t last_enq_slave_idx;
        uint32_t last_deq_slave_idx;
};

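/*
 * Enqueue a burst on the slave selected by last_enq_slave_idx. Each op
 * carries a scheduler-level session; it is swapped for that slave's own
 * session before the burst is passed down, and the originals are saved so
 * they can be restored for any ops the slave does not accept. The index is
 * then advanced so successive bursts rotate across the slaves, e.g.
 * slave 0, 1, 2, 0, 1, ... when three slaves are attached.
 */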
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rr_scheduler_qp_ctx *rr_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
        struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
        uint16_t i, processed_ops;
        struct rte_cryptodev_sym_session *sessions[nb_ops];
        struct scheduler_session *sess0, *sess1, *sess2, *sess3;

        if (unlikely(nb_ops == 0))
                return 0;

        for (i = 0; i < nb_ops && i < 4; i++)
                rte_prefetch0(ops[i]->sym->session);

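        /*
         * Main loop handles four ops per iteration and prefetches the
         * sessions of the following four; the scalar loop below picks up
         * the remaining tail.
         */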
        for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
                sess0 = (struct scheduler_session *)
                                ops[i]->sym->session->_private;
                sess1 = (struct scheduler_session *)
                                ops[i+1]->sym->session->_private;
                sess2 = (struct scheduler_session *)
                                ops[i+2]->sym->session->_private;
                sess3 = (struct scheduler_session *)
                                ops[i+3]->sym->session->_private;

                sessions[i] = ops[i]->sym->session;
                sessions[i + 1] = ops[i + 1]->sym->session;
                sessions[i + 2] = ops[i + 2]->sym->session;
                sessions[i + 3] = ops[i + 3]->sym->session;

                ops[i]->sym->session = sess0->sessions[slave_idx];
                ops[i + 1]->sym->session = sess1->sessions[slave_idx];
                ops[i + 2]->sym->session = sess2->sessions[slave_idx];
                ops[i + 3]->sym->session = sess3->sessions[slave_idx];

                rte_prefetch0(ops[i + 4]->sym->session);
                rte_prefetch0(ops[i + 5]->sym->session);
                rte_prefetch0(ops[i + 6]->sym->session);
                rte_prefetch0(ops[i + 7]->sym->session);
        }

        for (; i < nb_ops; i++) {
                sess0 = (struct scheduler_session *)
                                ops[i]->sym->session->_private;
                sessions[i] = ops[i]->sym->session;
                ops[i]->sym->session = sess0->sessions[slave_idx];
        }

        processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);

        slave->nb_inflight_cops += processed_ops;

        rr_qp_ctx->last_enq_slave_idx += 1;
        rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;

        /* restore the original sessions for ops the slave did not enqueue */
        if (unlikely(processed_ops < nb_ops)) {
                for (i = processed_ops; i < nb_ops; i++)
                        ops[i]->sym->session = sessions[i];
        }

        return processed_ops;
}

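/*
 * Ordering-aware enqueue: cap the burst at what the order ring can still
 * track, enqueue it round-robin, then record the enqueued ops in the ring
 * so they can later be drained in their original order.
 */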
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
                        nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}

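/*
 * Dequeue a burst from the next slave that has inflight ops, starting the
 * search at last_deq_slave_idx; returns 0 immediately if no slave has any
 * inflight ops.
 */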
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct rr_scheduler_qp_ctx *rr_qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct scheduler_slave *slave;
        uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
        uint16_t nb_deq_ops;

        if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
                do {
                        last_slave_idx += 1;

                        if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
                                last_slave_idx = 0;
                        /* looped back to the start: no slave has inflight cops */
                        if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
                                return 0;
                } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
                                == 0);
        }

        slave = &rr_qp_ctx->slaves[last_slave_idx];

        nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);

        last_slave_idx += 1;
        last_slave_idx %= rr_qp_ctx->nb_slaves;

        rr_qp_ctx->last_deq_slave_idx = last_slave_idx;

        slave->nb_inflight_cops -= nb_deq_ops;

        return nb_deq_ops;
}

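/*
 * Ordering-aware dequeue: pull completions from the slaves (the order ring
 * holds pointers to the same ops), then drain the head of the ring so ops
 * are handed back to the caller in the order they were enqueued.
 */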
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;

        schedule_dequeue(qp, ops, nb_ops);

        return scheduler_order_drain(order_ring, ops, nb_ops);
}

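/*
 * Round-robin mode keeps all of its state in the queue pair contexts, so
 * the per-slave attach/detach hooks have nothing to do.
 */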
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

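/*
 * Install the burst functions (ordering-aware variants when reordering is
 * enabled) and rebuild every queue pair's slave list from the scheduler
 * context, resetting the round-robin indices.
 */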
static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        uint16_t i;

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = &schedule_enqueue_ordering;
                dev->dequeue_burst = &schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = &schedule_enqueue;
                dev->dequeue_burst = &schedule_dequeue;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
                struct rr_scheduler_qp_ctx *rr_qp_ctx =
                                qp_ctx->private_qp_ctx;
                uint32_t j;

                memset(rr_qp_ctx->slaves, 0,
                                RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
                                sizeof(struct scheduler_slave));
                for (j = 0; j < sched_ctx->nb_slaves; j++) {
                        rr_qp_ctx->slaves[j].dev_id =
                                        sched_ctx->slaves[j].dev_id;
                        rr_qp_ctx->slaves[j].qp_id = i;
                }

                rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;

                rr_qp_ctx->last_enq_slave_idx = 0;
                rr_qp_ctx->last_deq_slave_idx = 0;
        }

        return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

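/*
 * Allocate the per queue pair round-robin context on the local socket and
 * attach it to the generic scheduler queue pair context.
 */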
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct rr_scheduler_qp_ctx *rr_qp_ctx;

        rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
                        rte_socket_id());
        if (!rr_qp_ctx) {
                CS_LOG_ERR("failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;

        return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

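/*
 * Register the round-robin callbacks with the generic scheduler PMD. As a
 * rough usage sketch (sched_dev_id below is a placeholder for an already
 * created scheduler vdev with slaves attached), an application selects
 * this mode with:
 *
 *      rte_cryptodev_scheduler_mode_set(sched_dev_id,
 *                      CDEV_SCHED_MODE_ROUNDROBIN);
 */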
struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        NULL,   /* option_set */
        NULL    /* option_get */
};

struct rte_cryptodev_scheduler scheduler = {
                .name = "roundrobin-scheduler",
                .description = "scheduler which will round robin bursts across "
                                "slave crypto devices",
                .mode = CDEV_SCHED_MODE_ROUNDROBIN,
                .ops = &scheduler_rr_ops
};

struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;