crypto: SW scheduler async crypto engine
src/vnet/crypto/node.c
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdbool.h>
#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

typedef enum
{
#define _(sym,str) VNET_CRYPTO_ASYNC_ERROR_##sym,
  foreach_crypto_op_status
#undef _
    VNET_CRYPTO_ASYNC_N_ERROR,
} vnet_crypto_async_error_t;

static char *vnet_crypto_async_error_strings[] = {
#define _(sym,string) string,
  foreach_crypto_op_status
#undef _
};

#define foreach_crypto_dispatch_next \
  _(ERR_DROP, "error-drop")

typedef enum
{
#define _(n, s) CRYPTO_DISPATCH_NEXT_##n,
  foreach_crypto_dispatch_next
#undef _
    CRYPTO_DISPATCH_N_NEXT,
} crypto_dispatch_next_t;

typedef struct
{
  vnet_crypto_op_status_t op_status;
  vnet_crypto_async_op_id_t op;
} crypto_dispatch_trace_t;

static u8 *
format_crypto_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  crypto_dispatch_trace_t *t = va_arg (*args, crypto_dispatch_trace_t *);

  s = format (s, "%U: %U", format_vnet_crypto_async_op, t->op,
              format_vnet_crypto_op_status, t->op_status);
  return s;
}

static void
vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_buffer_t * b,
                             vnet_crypto_async_op_id_t op_id,
                             vnet_crypto_op_status_t status)
{
  crypto_dispatch_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
  tr->op_status = status;
  tr->op = op_id;
}
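
/*
 * Drain one engine's dequeue handler. Each handler call may return a
 * completed frame; its buffer indices and per-element next nodes are
 * appended to the per-thread cache (ct->buffer_indice / ct->nexts) and
 * flushed to the graph whenever a full VLIB frame worth of buffers has
 * accumulated. Failed elements are redirected to error-drop and counted
 * against this node. Returns the updated cache occupancy.
 */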
static_always_inline u32
crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vnet_crypto_thread_t * ct,
                      vnet_crypto_frame_dequeue_t * hdl, u32 n_cache,
                      u32 * n_total)
{
  vnet_crypto_main_t *cm = &crypto_main;
  u32 n_elts = 0;
  u32 enqueue_thread_idx = ~0;
  vnet_crypto_async_frame_t *cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
  *n_total += n_elts;

  while (cf || n_elts)
    {
      if (cf)
        {
          vec_validate (ct->buffer_indice, n_cache + cf->n_elts);
          vec_validate (ct->nexts, n_cache + cf->n_elts);
          clib_memcpy_fast (ct->buffer_indice + n_cache, cf->buffer_indices,
                            sizeof (u32) * cf->n_elts);
          if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
            {
              /* all elements completed - take the precomputed next nodes */
              clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
                                sizeof (u16) * cf->n_elts);
            }
          else
            {
              u32 i;

              /* at least one element failed - drop and count the failures,
                 forward the rest to their next nodes */
              for (i = 0; i < cf->n_elts; i++)
                {
                  if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
                    {
                      ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
                      vlib_node_increment_counter (vm, node->node_index,
                                                   cf->elts[i].status, 1);
                    }
                  else
                    ct->nexts[i + n_cache] = cf->next_node_index[i];
                }
            }
          n_cache += cf->n_elts;
          if (n_cache >= VLIB_FRAME_SIZE)
            {
              vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice,
                                           ct->nexts, n_cache);
              n_cache = 0;
            }

          if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
            {
              u32 i;

              for (i = 0; i < cf->n_elts; i++)
                {
                  vlib_buffer_t *b = vlib_get_buffer (vm,
                                                      cf->buffer_indices[i]);
                  if (b->flags & VLIB_BUFFER_IS_TRACED)
                    vnet_crypto_async_add_trace (vm, node, b, cf->op,
                                                 cf->elts[i].status);
                }
            }
          vnet_crypto_async_free_frame (vm, cf);
        }
      /* interrupt mode: signal the enqueue-thread's dispatch node so it
         dequeues the processed frame (n_elts > 0) */
      if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT
          && n_elts > 0)
        {
          vlib_node_set_interrupt_pending (vlib_mains[enqueue_thread_idx],
                                           cm->crypto_node_index);
        }

      n_elts = 0;
      enqueue_thread_idx = 0;
      cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
      *n_total += n_elts;
    }

  return n_cache;
}
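
/*
 * crypto-dispatch input node: polls the dequeue handler of every active
 * async op on this thread, then flushes whatever remains in the per-thread
 * cache. The number of dequeued elements is returned as this node's vector
 * count.
 */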
VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  vnet_crypto_main_t *cm = &crypto_main;
  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
  u32 n_dispatched = 0, n_cache = 0;
  u32 index;

  /* *INDENT-OFF* */
  clib_bitmap_foreach (index, cm->async_active_ids, ({
    n_cache = crypto_dequeue_frame (vm, node, ct, cm->dequeue_handlers[index],
                                    n_cache, &n_dispatched);
  }));
  /* *INDENT-ON* */
  if (n_cache)
    vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice, ct->nexts,
                                 n_cache);

  return n_dispatched;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (crypto_dispatch_node) = {
  .name = "crypto-dispatch",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .format_trace = format_crypto_dispatch_trace,

  .n_errors = ARRAY_LEN(vnet_crypto_async_error_strings),
  .error_strings = vnet_crypto_async_error_strings,

  .n_next_nodes = CRYPTO_DISPATCH_N_NEXT,
  .next_nodes = {
#define _(n, s) \
  [CRYPTO_DISPATCH_NEXT_##n] = s,
      foreach_crypto_dispatch_next
#undef _
  },
};
/* *INDENT-ON* */
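
/*
 * How an engine plugs into this node - a minimal sketch, not part of this
 * file. An async engine registers an enqueue/dequeue handler pair per async
 * op id (see vnet/crypto/crypto.h); the handler and engine names below are
 * hypothetical, and the dequeue signature matches vnet_crypto_frame_dequeue_t
 * as called above:
 *
 *   static vnet_crypto_async_frame_t *
 *   my_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
 *               u32 * enqueue_thread_idx)
 *   {
 *     // return a completed frame (or 0), report how many elements were
 *     // processed and which thread originally enqueued the frame
 *   }
 *
 *   vnet_crypto_register_async_handler (vm, engine_index,
 *                                       VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_ENC,
 *                                       my_enqueue, my_dequeue);
 *
 * Once the op is made active, its bit is set in cm->async_active_ids and
 * crypto-dispatch begins polling my_dequeue on each thread.
 */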

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */