/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include <vlib/vlib.h>
18 #include <vnet/crypto/crypto.h>
22 #define _(sym,str) VNET_CRYPTO_ASYNC_ERROR_##sym,
23 foreach_crypto_op_status
25 VNET_CRYPTO_ASYNC_N_ERROR,
26 } vnet_crypto_async_error_t;
28 static char *vnet_crypto_async_error_strings[] = {
29 #define _(sym,string) string,
30 foreach_crypto_op_status
/* Next nodes reachable from crypto-dispatch. Buffers whose crypto element
 * failed are steered to error-drop; successful buffers use the per-element
 * next index recorded in the async frame instead. */
#define foreach_crypto_dispatch_next \
  _ (ERR_DROP, "error-drop")

typedef enum
{
#define _(n, s) CRYPTO_DISPATCH_NEXT_##n,
  foreach_crypto_dispatch_next
#undef _
    CRYPTO_DISPATCH_N_NEXT,
} crypto_dispatch_next_t;
47 vnet_crypto_op_status_t op_status;
48 vnet_crypto_async_op_id_t op;
49 } crypto_dispatch_trace_t;
52 format_crypto_dispatch_trace (u8 * s, va_list * args)
54 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
55 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
56 crypto_dispatch_trace_t *t = va_arg (*args, crypto_dispatch_trace_t *);
58 s = format (s, "%U: %U", format_vnet_crypto_async_op, t->op,
59 format_vnet_crypto_op_status, t->op_status);
64 vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
66 vnet_crypto_async_op_id_t op_id,
67 vnet_crypto_op_status_t status)
69 crypto_dispatch_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
70 tr->op_status = status;
74 static_always_inline u32
75 crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
76 vnet_crypto_thread_t * ct,
77 vnet_crypto_frame_dequeue_t * hdl, u32 n_cache,
80 vnet_crypto_main_t *cm = &crypto_main;
82 u32 enqueue_thread_idx = ~0;
83 vnet_crypto_async_frame_t *cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
90 vec_validate (ct->buffer_indices, n_cache + cf->n_elts);
91 vec_validate (ct->nexts, n_cache + cf->n_elts);
92 clib_memcpy_fast (ct->buffer_indices + n_cache, cf->buffer_indices,
93 sizeof (u32) * cf->n_elts);
94 if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
96 clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
97 sizeof (u16) * cf->n_elts);
102 for (i = 0; i < cf->n_elts; i++)
104 if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
106 ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
107 vlib_node_increment_counter (vm, node->node_index,
108 cf->elts[i].status, 1);
111 ct->nexts[i + n_cache] = cf->next_node_index[i];
114 n_cache += cf->n_elts;
115 if (n_cache >= VLIB_FRAME_SIZE)
117 vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices,
118 &ct->nexts, n_cache);
122 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
126 for (i = 0; i < cf->n_elts; i++)
128 vlib_buffer_t *b = vlib_get_buffer (vm,
129 cf->buffer_indices[i]);
130 if (b->flags & VLIB_BUFFER_IS_TRACED)
131 vnet_crypto_async_add_trace (vm, node, b, cf->op,
135 vnet_crypto_async_free_frame (vm, cf);
137 /* signal enqueue-thread to dequeue the processed frame (n_elts>0) */
139 ((node->state == VLIB_NODE_STATE_POLLING &&
141 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)) ||
142 node->state == VLIB_NODE_STATE_INTERRUPT))
144 vlib_node_set_interrupt_pending (
145 vlib_get_main_by_index (enqueue_thread_idx),
146 cm->crypto_node_index);
150 enqueue_thread_idx = 0;
151 cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
158 VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
159 vlib_node_runtime_t * node,
160 vlib_frame_t * frame)
162 vnet_crypto_main_t *cm = &crypto_main;
163 vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
164 u32 n_dispatched = 0, n_cache = 0, index;
165 vec_foreach_index (index, cm->dequeue_handlers)
167 n_cache = crypto_dequeue_frame (
168 vm, node, ct, cm->dequeue_handlers[index], n_cache, &n_dispatched);
172 vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices, &ct->nexts,
175 /* if there are still pending tasks and node in interrupt mode,
176 sending current thread signal to dequeue next loop */
177 if (pool_elts (ct->frame_pool) > 0 &&
178 ((node->state == VLIB_NODE_STATE_POLLING &&
180 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)) ||
181 node->state == VLIB_NODE_STATE_INTERRUPT))
183 vlib_node_set_interrupt_pending (vm, node->node_index);
190 VLIB_REGISTER_NODE (crypto_dispatch_node) = {
191 .name = "crypto-dispatch",
192 .type = VLIB_NODE_TYPE_INPUT,
193 .flags = VLIB_NODE_FLAG_ADAPTIVE_MODE,
194 .state = VLIB_NODE_STATE_INTERRUPT,
195 .format_trace = format_crypto_dispatch_trace,
197 .n_errors = ARRAY_LEN(vnet_crypto_async_error_strings),
198 .error_strings = vnet_crypto_async_error_strings,
200 .n_next_nodes = CRYPTO_DISPATCH_N_NEXT,
203 [CRYPTO_DISPATCH_NEXT_##n] = s,
204 foreach_crypto_dispatch_next
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */