drivers/crypto/scheduler/scheduler_failover.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_cryptodev.h>
#include <rte_malloc.h>

#include "rte_cryptodev_scheduler_operations.h"
#include "scheduler_pmd_private.h"

#define PRIMARY_SLAVE_IDX       0
#define SECONDARY_SLAVE_IDX     1
#define NB_FAILOVER_SLAVES      2
#define SLAVE_SWITCH_MASK       (0x01)

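/*
 * Per queue pair context: copies of the primary and secondary slave
 * descriptors, plus deq_idx, which tracks which slave to poll first on
 * the next dequeue.
 */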
struct fo_scheduler_qp_ctx {
        struct scheduler_slave primary_slave;
        struct scheduler_slave secondary_slave;

        uint8_t deq_idx;
};

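/*
 * Enqueue a burst of ops to one slave. Each op carries the scheduler's
 * own session, whose private data holds the per-slave sessions; swap in
 * the session for this slave before enqueueing, and restore the original
 * session on any op the slave does not accept so the caller can retry it
 * on the other slave.
 */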
static inline uint16_t __attribute__((always_inline))
failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
                struct rte_crypto_op **ops, uint16_t nb_ops)
{
        uint16_t i, processed_ops;
        struct rte_cryptodev_sym_session *sessions[nb_ops];
        struct scheduler_session *sess0, *sess1, *sess2, *sess3;

        for (i = 0; i < nb_ops && i < 4; i++)
                rte_prefetch0(ops[i]->sym->session);

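        /* Main loop: handle four ops per iteration while prefetching the
         * sessions of the next four, leaving the remainder to the tail
         * loop below. */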
        for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
                rte_prefetch0(ops[i + 4]->sym->session);
                rte_prefetch0(ops[i + 5]->sym->session);
                rte_prefetch0(ops[i + 6]->sym->session);
                rte_prefetch0(ops[i + 7]->sym->session);

                sess0 = (struct scheduler_session *)
                                ops[i]->sym->session->_private;
                sess1 = (struct scheduler_session *)
                                ops[i+1]->sym->session->_private;
                sess2 = (struct scheduler_session *)
                                ops[i+2]->sym->session->_private;
                sess3 = (struct scheduler_session *)
                                ops[i+3]->sym->session->_private;

                sessions[i] = ops[i]->sym->session;
                sessions[i + 1] = ops[i + 1]->sym->session;
                sessions[i + 2] = ops[i + 2]->sym->session;
                sessions[i + 3] = ops[i + 3]->sym->session;

                ops[i]->sym->session = sess0->sessions[slave_idx];
                ops[i + 1]->sym->session = sess1->sessions[slave_idx];
                ops[i + 2]->sym->session = sess2->sessions[slave_idx];
                ops[i + 3]->sym->session = sess3->sessions[slave_idx];
        }

        for (; i < nb_ops; i++) {
                sess0 = (struct scheduler_session *)
                                ops[i]->sym->session->_private;
                sessions[i] = ops[i]->sym->session;
                ops[i]->sym->session = sess0->sessions[slave_idx];
        }

        processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);
        slave->nb_inflight_cops += processed_ops;

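        /* Restore the scheduler session on every op the slave rejected so
         * the remainder can be re-enqueued to the other slave. */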
        if (unlikely(processed_ops < nb_ops))
                for (i = processed_ops; i < nb_ops; i++)
                        ops[i]->sym->session = sessions[i];

        return processed_ops;
}

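/*
 * Enqueue entry point: try the full burst on the primary slave first,
 * then push whatever the primary could not take to the secondary slave.
 */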
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct fo_scheduler_qp_ctx *qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        uint16_t enqueued_ops;

        if (unlikely(nb_ops == 0))
                return 0;

        enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
                        PRIMARY_SLAVE_IDX, ops, nb_ops);

        if (enqueued_ops < nb_ops)
                enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
                                SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
                                nb_ops - enqueued_ops);

        return enqueued_ops;
}

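/*
 * Ordering-aware enqueue: cap the burst at the free space left in the
 * order ring, then record the enqueued ops so dequeue can drain them in
 * their original order.
 */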
static uint16_t
schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;
        uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
                        nb_ops);
        uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
                        nb_ops_to_enq);

        scheduler_order_insert(order_ring, ops, nb_ops_enqd);

        return nb_ops_enqd;
}

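/*
 * Dequeue entry point: poll the slave selected by deq_idx first, flip
 * deq_idx for the next call, and poll the other slave only if the first
 * one did not fill the burst.
 */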
static uint16_t
schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct fo_scheduler_qp_ctx *qp_ctx =
                        ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
        struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
                        &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
        struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
        uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;

        if (slave->nb_inflight_cops) {
                nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
                        slave->qp_id, ops, nb_ops);
                slave->nb_inflight_cops -= nb_deq_ops;
        }

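        /* Alternate the slave polled first on each call so neither
         * slave's completions are starved. */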
        qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;

        if (nb_deq_ops == nb_ops)
                return nb_deq_ops;

        slave = slaves[qp_ctx->deq_idx];

        if (slave->nb_inflight_cops) {
                nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
                        slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
                slave->nb_inflight_cops -= nb_deq_ops2;
        }

        return nb_deq_ops + nb_deq_ops2;
}

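/*
 * Ordering-aware dequeue: pull completions from the slaves, then return
 * only the ops that are next in the original enqueue order.
 */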
static uint16_t
schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
{
        struct rte_ring *order_ring =
                        ((struct scheduler_qp_ctx *)qp)->order_ring;

        schedule_dequeue(qp, ops, nb_ops);

        return scheduler_order_drain(order_ring, ops, nb_ops);
}

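/* The failover mode keeps no per-slave state, so attach/detach are no-ops. */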
static int
slave_attach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

static int
slave_detach(__rte_unused struct rte_cryptodev *dev,
                __rte_unused uint8_t slave_id)
{
        return 0;
}

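/*
 * Start hook: verify that both a primary and a secondary slave are
 * attached, install the burst functions that match the reordering
 * setting, and copy the slave descriptors into each queue pair context.
 */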
static int
scheduler_start(struct rte_cryptodev *dev)
{
        struct scheduler_ctx *sched_ctx = dev->data->dev_private;
        uint16_t i;

        if (sched_ctx->nb_slaves < 2) {
                CS_LOG_ERR("Number of slaves shall not be less than 2");
                return -ENOMEM;
        }

        if (sched_ctx->reordering_enabled) {
                dev->enqueue_burst = schedule_enqueue_ordering;
                dev->dequeue_burst = schedule_dequeue_ordering;
        } else {
                dev->enqueue_burst = schedule_enqueue;
                dev->dequeue_burst = schedule_dequeue;
        }

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                struct fo_scheduler_qp_ctx *qp_ctx =
                        ((struct scheduler_qp_ctx *)
                                dev->data->queue_pairs[i])->private_qp_ctx;

                rte_memcpy(&qp_ctx->primary_slave,
                                &sched_ctx->slaves[PRIMARY_SLAVE_IDX],
                                sizeof(struct scheduler_slave));
                rte_memcpy(&qp_ctx->secondary_slave,
                                &sched_ctx->slaves[SECONDARY_SLAVE_IDX],
                                sizeof(struct scheduler_slave));
        }

        return 0;
}

static int
scheduler_stop(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

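/* Allocate the private failover context for one queue pair. */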
static int
scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
        struct fo_scheduler_qp_ctx *fo_qp_ctx;

        fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
                        rte_socket_id());
        if (!fo_qp_ctx) {
                CS_LOG_ERR("failed to allocate memory for private queue pair");
                return -ENOMEM;
        }

        qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;

        return 0;
}

static int
scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
{
        return 0;
}

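/* Hooks registered with the scheduler framework for the failover mode. */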
struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
        slave_attach,
        slave_detach,
        scheduler_start,
        scheduler_stop,
        scheduler_config_qp,
        scheduler_create_private_ctx,
        NULL,   /* option_set */
        NULL    /* option_get */
};

struct rte_cryptodev_scheduler fo_scheduler = {
                .name = "failover-scheduler",
                .description = "scheduler which enqueues to the primary "
                                "slave, and falls back to the secondary "
                                "slave when the primary enqueue fails",
                .mode = CDEV_SCHED_MODE_FAILOVER,
                .ops = &scheduler_fo_ops
};

struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
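
/*
 * Usage sketch (illustrative, not part of this driver): the failover mode
 * is typically selected on a scheduler vdev backed by two slaves, e.g.
 * via EAL arguments such as
 *
 *   --vdev "crypto_scheduler,slave=<slave0>,slave=<slave1>,mode=fail-over"
 *
 * or programmatically, assuming scheduler_dev_id names an existing
 * scheduler device:
 *
 *   rte_cryptodev_scheduler_mode_set(scheduler_dev_id,
 *                   CDEV_SCHED_MODE_FAILOVER);
 */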