vlib: introduce vlib_get_main_by_index(), vlib_get_n_threads()
vpp.git: src/vnet/crypto/node.c
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

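/*
 * crypto-dispatch: input node that polls registered asynchronous crypto
 * engines for completed frames and hands the finished buffers off to
 * their next nodes.
 */
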
#include <stdbool.h>
#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>

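/*
 * Expand foreach_crypto_op_status into a per-status error counter enum
 * plus the matching string table, so a failed element's op status can be
 * used directly as this node's error index.
 */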
typedef enum
{
#define _(sym,str) VNET_CRYPTO_ASYNC_ERROR_##sym,
  foreach_crypto_op_status
#undef _
    VNET_CRYPTO_ASYNC_N_ERROR,
} vnet_crypto_async_error_t;

static char *vnet_crypto_async_error_strings[] = {
#define _(sym,string) string,
  foreach_crypto_op_status
#undef _
};

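/* Single static next node: buffers whose operation failed are dropped. */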
#define foreach_crypto_dispatch_next \
  _(ERR_DROP, "error-drop")

typedef enum
{
#define _(n, s) CRYPTO_DISPATCH_NEXT_##n,
  foreach_crypto_dispatch_next
#undef _
    CRYPTO_DISPATCH_N_NEXT,
} crypto_dispatch_next_t;

typedef struct
{
  vnet_crypto_op_status_t op_status;
  vnet_crypto_async_op_id_t op;
} crypto_dispatch_trace_t;

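/* Format one trace record as "<async op>: <op status>". */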
static u8 *
format_crypto_dispatch_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  crypto_dispatch_trace_t *t = va_arg (*args, crypto_dispatch_trace_t *);

  s = format (s, "%U: %U", format_vnet_crypto_async_op, t->op,
              format_vnet_crypto_op_status, t->op_status);
  return s;
}

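/* Record the async op id and completion status for a traced buffer. */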
static void
vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_buffer_t * b,
                             vnet_crypto_async_op_id_t op_id,
                             vnet_crypto_op_status_t status)
{
  crypto_dispatch_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
  tr->op_status = status;
  tr->op = op_id;
}

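/*
 * Drain one engine dequeue handler. Completed buffer indices and their
 * next-node indices are batched in the per-thread cache (ct->buffer_indices
 * / ct->nexts) and flushed via vlib_buffer_enqueue_to_next () whenever a
 * full VLIB_FRAME_SIZE worth of buffers has accumulated. Returns the number
 * of buffers still cached; *n_total is incremented by the number of
 * elements dequeued.
 */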
static_always_inline u32
crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vnet_crypto_thread_t * ct,
                      vnet_crypto_frame_dequeue_t * hdl, u32 n_cache,
                      u32 * n_total)
{
  vnet_crypto_main_t *cm = &crypto_main;
  u32 n_elts = 0;
  u32 enqueue_thread_idx = ~0;
  vnet_crypto_async_frame_t *cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
  *n_total += n_elts;

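  /* Keep polling the handler until it returns neither a completed frame
   * nor any pending elements. */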
  while (cf || n_elts)
    {
      if (cf)
        {
          vec_validate (ct->buffer_indices, n_cache + cf->n_elts);
          vec_validate (ct->nexts, n_cache + cf->n_elts);
          clib_memcpy_fast (ct->buffer_indices + n_cache, cf->buffer_indices,
                            sizeof (u32) * cf->n_elts);
          if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
            {
              clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
                                sizeof (u16) * cf->n_elts);
            }
          else
            {
              u32 i;
              for (i = 0; i < cf->n_elts; i++)
                {
                  if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
                    {
                      ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
                      vlib_node_increment_counter (vm, node->node_index,
                                                   cf->elts[i].status, 1);
                    }
                  else
                    ct->nexts[i + n_cache] = cf->next_node_index[i];
                }
            }
          n_cache += cf->n_elts;
          if (n_cache >= VLIB_FRAME_SIZE)
            {
              vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indices,
                                           ct->nexts, n_cache);
              n_cache = 0;
            }

          if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
            {
              u32 i;

              for (i = 0; i < cf->n_elts; i++)
                {
                  vlib_buffer_t *b = vlib_get_buffer (vm,
                                                      cf->buffer_indices[i]);
                  if (b->flags & VLIB_BUFFER_IS_TRACED)
                    vnet_crypto_async_add_trace (vm, node, b, cf->op,
                                                 cf->elts[i].status);
                }
            }
          vnet_crypto_async_free_frame (vm, cf);
        }
      /* signal enqueue-thread to dequeue the processed frame (n_elts>0) */
      if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT
          && n_elts > 0)
        {
          vlib_node_set_interrupt_pending (
            vlib_get_main_by_index (enqueue_thread_idx),
            cm->crypto_node_index);
        }

      n_elts = 0;
      enqueue_thread_idx = 0;
      cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
      *n_total += n_elts;
    }

  return n_cache;
}

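/*
 * Per-thread node function: walk the bitmap of active async op ids, drain
 * the dequeue handler registered for each, then flush whatever is still
 * cached. Returns the total number of elements dequeued on this run.
 */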
VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  vnet_crypto_main_t *cm = &crypto_main;
  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
  u32 n_dispatched = 0, n_cache = 0;
  u32 index;

  /* *INDENT-OFF* */
  clib_bitmap_foreach (index, cm->async_active_ids)  {
    n_cache = crypto_dequeue_frame (vm, node, ct, cm->dequeue_handlers[index],
                                    n_cache, &n_dispatched);
  }
  /* *INDENT-ON* */
  if (n_cache)
    vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indices, ct->nexts,
                                 n_cache);

  return n_dispatched;
}

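/*
 * Input node registration. The node starts disabled; cm->dispatch_mode
 * (checked in crypto_dequeue_frame above) determines whether completions
 * are picked up by polling or signalled as interrupts once it is enabled.
 */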
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (crypto_dispatch_node) = {
  .name = "crypto-dispatch",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
  .format_trace = format_crypto_dispatch_trace,

  .n_errors = ARRAY_LEN(vnet_crypto_async_error_strings),
  .error_strings = vnet_crypto_async_error_strings,

  .n_next_nodes = CRYPTO_DISPATCH_N_NEXT,
  .next_nodes = {
#define _(n, s) \
  [CRYPTO_DISPATCH_NEXT_##n] = s,
      foreach_crypto_dispatch_next
#undef _
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */