crypto: make crypto-dispatch node work in adaptive mode
[vpp.git] src/vnet/crypto/node.c
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdbool.h>
#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

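/* Per-node error counters: one counter per crypto operation status,
   generated from foreach_crypto_op_status. */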
typedef enum
{
#define _(sym,str) VNET_CRYPTO_ASYNC_ERROR_##sym,
  foreach_crypto_op_status
#undef _
    VNET_CRYPTO_ASYNC_N_ERROR,
} vnet_crypto_async_error_t;

static char *vnet_crypto_async_error_strings[] = {
#define _(sym,string) string,
  foreach_crypto_op_status
#undef _
};

#define foreach_crypto_dispatch_next \
  _(ERR_DROP, "error-drop")

typedef enum
{
#define _(n, s) CRYPTO_DISPATCH_NEXT_##n,
  foreach_crypto_dispatch_next
#undef _
    CRYPTO_DISPATCH_N_NEXT,
} crypto_dispatch_next_t;

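/* Per-buffer trace record: async operation id and its completion status. */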
typedef struct
{
  vnet_crypto_op_status_t op_status;
  vnet_crypto_async_op_id_t op;
} crypto_dispatch_trace_t;

static u8 *
format_crypto_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  crypto_dispatch_trace_t *t = va_arg (*args, crypto_dispatch_trace_t *);

  s = format (s, "%U: %U", format_vnet_crypto_async_op, t->op,
              format_vnet_crypto_op_status, t->op_status);
  return s;
}

static void
vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_buffer_t * b,
                             vnet_crypto_async_op_id_t op_id,
                             vnet_crypto_op_status_t status)
{
  crypto_dispatch_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
  tr->op_status = status;
  tr->op = op_id;
}

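/*
 * Drain completed frames from a single engine dequeue handler. Buffer
 * indices and next-node indices from each frame are copied into the
 * per-thread cache; once the cache holds a full vector frame worth of
 * buffers, they are enqueued to their next nodes. Buffers whose element
 * status is not COMPLETED are sent to error-drop and counted. When the
 * node runs in interrupt or adaptive mode, the crypto-dispatch node on
 * the thread that enqueued the frame is signalled so it can dequeue its
 * own completed work.
 */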
static_always_inline u32
crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vnet_crypto_thread_t * ct,
                      vnet_crypto_frame_dequeue_t * hdl, u32 n_cache,
                      u32 * n_total)
{
  vnet_crypto_main_t *cm = &crypto_main;
  u32 n_elts = 0;
  u32 enqueue_thread_idx = ~0;
  vnet_crypto_async_frame_t *cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
  *n_total += n_elts;

  while (cf || n_elts)
    {
      if (cf)
        {
          vec_validate (ct->buffer_indices, n_cache + cf->n_elts);
          vec_validate (ct->nexts, n_cache + cf->n_elts);
          clib_memcpy_fast (ct->buffer_indices + n_cache, cf->buffer_indices,
                            sizeof (u32) * cf->n_elts);
          if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
            {
              clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
                                sizeof (u16) * cf->n_elts);
            }
          else
            {
              u32 i;
              for (i = 0; i < cf->n_elts; i++)
                {
                  if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
                    {
                      ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
                      vlib_node_increment_counter (vm, node->node_index,
                                                   cf->elts[i].status, 1);
                    }
                  else
                    ct->nexts[i + n_cache] = cf->next_node_index[i];
                }
            }
          n_cache += cf->n_elts;
          if (n_cache >= VLIB_FRAME_SIZE)
            {
              vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices,
                                               &ct->nexts, n_cache);
              n_cache = 0;
            }

          if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
            {
              u32 i;

              for (i = 0; i < cf->n_elts; i++)
                {
                  vlib_buffer_t *b = vlib_get_buffer (vm,
                                                      cf->buffer_indices[i]);
                  if (b->flags & VLIB_BUFFER_IS_TRACED)
                    vnet_crypto_async_add_trace (vm, node, b, cf->op,
                                                 cf->elts[i].status);
                }
            }
          vnet_crypto_async_free_frame (vm, cf);
        }
      /* when elements were processed (n_elts > 0), signal the enqueue
         thread's crypto-dispatch node to dequeue the completed frames */
      if (n_elts > 0 &&
          ((node->state == VLIB_NODE_STATE_POLLING &&
            (node->flags &
             VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)) ||
           node->state == VLIB_NODE_STATE_INTERRUPT))
        {
          vlib_node_set_interrupt_pending (
            vlib_get_main_by_index (enqueue_thread_idx),
            cm->crypto_node_index);
        }

      n_elts = 0;
      enqueue_thread_idx = 0;
      cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
      *n_total += n_elts;
    }

  return n_cache;
}

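/*
 * crypto-dispatch input node: walks every registered dequeue handler and
 * hands completed buffers to their next nodes. The node is registered in
 * adaptive mode, so it may run polling or in interrupt state; while frames
 * are still pending it re-arms an interrupt on the current thread so the
 * remaining work is picked up on the next dispatch loop.
 */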
VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  vnet_crypto_main_t *cm = &crypto_main;
  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
  u32 n_dispatched = 0, n_cache = 0, index;
  vec_foreach_index (index, cm->dequeue_handlers)
    {
      n_cache = crypto_dequeue_frame (
        vm, node, ct, cm->dequeue_handlers[index], n_cache, &n_dispatched);
    }
  if (n_cache)
    vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices, &ct->nexts,
                                     n_cache);

  /* if there are still pending frames and the node is in interrupt or
     adaptive mode, signal the current thread to dequeue them on the
     next dispatch loop */
  if (pool_elts (ct->frame_pool) > 0 &&
      ((node->state == VLIB_NODE_STATE_POLLING &&
        (node->flags &
         VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)) ||
       node->state == VLIB_NODE_STATE_INTERRUPT))
    {
      vlib_node_set_interrupt_pending (vm, node->node_index);
    }

  return n_dispatched;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (crypto_dispatch_node) = {
  .name = "crypto-dispatch",
  .type = VLIB_NODE_TYPE_INPUT,
  .flags = VLIB_NODE_FLAG_ADAPTIVE_MODE,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .format_trace = format_crypto_dispatch_trace,

  .n_errors = ARRAY_LEN(vnet_crypto_async_error_strings),
  .error_strings = vnet_crypto_async_error_strings,

  .n_next_nodes = CRYPTO_DISPATCH_N_NEXT,
  .next_nodes = {
#define _(n, s) \
  [CRYPTO_DISPATCH_NEXT_##n] = s,
      foreach_crypto_dispatch_next
#undef _
  },
};
/* *INDENT-ON* */
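
/*
 * Illustrative sketch (not part of this file): how an async crypto engine
 * would typically hook into this node. The engine name and dequeue function
 * below are hypothetical; the registration calls and the
 * vnet_crypto_frame_dequeue_t signature are assumed to match the handlers
 * this node iterates over in cm->dequeue_handlers.
 *
 *   static vnet_crypto_async_frame_t *
 *   my_engine_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
 *                      u32 *enqueue_thread_idx)
 *   {
 *     // return a completed frame (or 0), report how many elements were
 *     // processed and which thread enqueued the frame
 *     ...
 *   }
 *
 *   u32 eidx = vnet_crypto_register_engine (vm, "my-engine", 100,
 *                                           "example async engine");
 *   vnet_crypto_register_dequeue_handler (vm, eidx, my_engine_dequeue);
 *
 * Every handler registered this way is polled by crypto-dispatch above.
 */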

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */