1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright(c) 2021 Cisco Systems, Inc.
6 #include <vnet/feature/feature.h>
7 #include <snort/snort.h>
/* Trace formatter for the snort-deq node: renders a snort_deq_trace_t
 * (captured sw_if_index and chosen next index) into the format buffer s.
 * NOTE(review): this chunk is elided — the function's opening/closing
 * braces and return are not visible here. */
16 format_snort_deq_trace (u8 *s, va_list *args)
/* Standard vlib format-trace prologue: vm and node are consumed from the
 * va_list but intentionally unused. */
18 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
19 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
20 snort_deq_trace_t *t = va_arg (*args, snort_deq_trace_t *);
22 s = format (s, "snort-deq: sw_if_index %d, next index %d\n", t->sw_if_index,
/* Error counters for the snort-deq node, defined once via the usual
 * foreach/_() x-macro pattern and expanded below into enum values
 * (SNORT_DEQ_ERROR_*) and a parallel human-readable string table. */
28 #define foreach_snort_deq_error \
29 _ (BAD_DESC, "bad descriptor") \
30 _ (BAD_DESC_INDEX, "bad descriptor index")
/* Expand the list into enum constants (enum wrapper elided in this view). */
34 #define _(sym, str) SNORT_DEQ_ERROR_##sym,
35 foreach_snort_deq_error
/* String table referenced by .error_strings in the node registration. */
40 static char *snort_deq_error_strings[] = {
41 #define _(sym, string) string,
42 foreach_snort_deq_error
/* Interrupt-mode dequeue for one snort instance: drains up to max_recv
 * completed descriptors from the qpair's dequeue ring, filling the caller's
 * buffer_indices[] and nexts[] arrays with the buffer index and next-node
 * index (forward or drop) for each.  Returns the number dequeued
 * (return statement elided in this view — presumably n_recv). */
46 static_always_inline uword
47 snort_deq_instance (vlib_main_t *vm, u32 instance_index, snort_qpair_t *qp,
48 u32 *buffer_indices, u16 *nexts, u32 max_recv)
50 snort_main_t *sm = &snort_main;
51 snort_per_thread_data_t *ptd =
52 vec_elt_at_index (sm->per_thread_data, vm->thread_index);
/* Queue size is a power of two; mask is used both to wrap ring indices
 * and to range-check descriptor indices below. */
53 u32 mask = pow2_mask (qp->log2_queue_size);
54 u32 head, next, n_recv = 0, n_left;
/* Acquire-load of the producer's head — pairs with the snort side's
 * release store, so descriptor contents written before the head update
 * are visible here. */
56 head = __atomic_load_n (qp->deq_head, __ATOMIC_ACQUIRE);
/* More work available than fits this frame: clamp to max_recv and re-arm
 * the per-thread interrupt so the node runs again for the remainder. */
64 if (n_left > max_recv)
67 clib_interrupt_set (ptd->interrupts, instance_index);
68 vlib_node_set_interrupt_pending (vm, snort_deq_node.index);
76 /* check if descriptor index taken from dequeue ring is valid */
77 if ((desc_index = qp->deq_ring[next & mask]) & ~mask)
79 vlib_node_increment_counter (vm, snort_deq_node.index,
80 SNORT_DEQ_ERROR_BAD_DESC_INDEX, 1);
84 /* check if descriptor index taken from dequeue ring points to enqueued
/* ~0 marks a slot with no enqueued buffer — count as a bad descriptor. */
86 if ((bi = qp->buffer_indices[desc_index]) == ~0)
88 vlib_node_increment_counter (vm, snort_deq_node.index,
89 SNORT_DEQ_ERROR_BAD_DESC, 1);
93 /* put descriptor back to freelist */
94 vec_add1 (qp->freelist, desc_index);
95 d = qp->descriptors + desc_index;
/* Store the buffer index and advance the output cursor in one step. */
96 buffer_indices++[0] = bi;
/* DAQ verdict decides the next node: FORWARD keeps the next index saved
 * at enqueue time, anything else drops (else branch elided in this view). */
97 if (d->action == DAQ_VPP_ACTION_FORWARD)
98 nexts[0] = qp->next_indices[desc_index];
100 nexts[0] = SNORT_ENQ_NEXT_DROP;
/* Mark the slot free again (see the ~0 check above). */
101 qp->buffer_indices[desc_index] = ~0;
/* Record how far we consumed the ring for the next invocation. */
111 qp->next_desc = next;
/* Flush path helper: walks every slot of qp->buffer_indices (not just the
 * dequeue ring) and emits up to max_recv outstanding buffers into b[] and
 * nexts[].  Used by the "all" dequeue variants, presumably when the snort
 * client has gone away — TODO confirm against callers.  Returns the number
 * of buffers emitted (return elided in this view). */
116 static_always_inline u32
117 snort_process_all_buffer_indices (snort_qpair_t *qp, u32 *b, u16 *nexts,
118 u32 max_recv, u8 drop_on_disconnect)
120 u32 *bi, n_processed = 0;
123 vec_foreach (bi, qp->buffer_indices)
/* Stop once the caller's frame budget is exhausted. */
125 if (n_processed >= max_recv)
/* Recover the slot's descriptor index from the vec_foreach pointer. */
131 desc_index = bi - qp->buffer_indices;
/* Policy on disconnect: either drop the packet or forward it to the
 * next index saved at enqueue time (else branch elided in this view). */
134 if (drop_on_disconnect)
135 nexts[0] = SNORT_ENQ_NEXT_DROP;
137 nexts[0] = qp->next_indices[desc_index];
/* Mark the slot free (~0 is the empty-slot sentinel). */
138 qp->buffer_indices[desc_index] = ~0;
/* Interrupt-mode flush of ALL outstanding buffers for an instance whose
 * qpair is not ready (client disconnect path — TODO confirm).  Emits up to
 * max_recv buffers; if the frame filled before the drain completed, re-arms
 * the interrupt to continue later, otherwise resets the qpair and
 * republishes it as ready.  Return elided in this view. */
147 static_always_inline uword
148 snort_deq_instance_all_interrupt (vlib_main_t *vm, u32 instance_index,
149 snort_qpair_t *qp, u32 *buffer_indices,
150 u16 *nexts, u32 max_recv,
151 u8 drop_on_disconnect)
153 snort_main_t *sm = &snort_main;
154 snort_per_thread_data_t *ptd =
155 vec_elt_at_index (sm->per_thread_data, vm->thread_index);
158 n_processed = snort_process_all_buffer_indices (
159 qp, buffer_indices, nexts, max_recv, drop_on_disconnect);
/* Frame budget exhausted — more slots may remain, so schedule another
 * interrupt pass for this instance. */
161 if (n_processed == max_recv)
163 clib_interrupt_set (ptd->interrupts, instance_index);
164 vlib_node_set_interrupt_pending (vm, snort_deq_node.index);
/* Drain complete: reset both ring heads and the consumption cursor,
 * rebuild the descriptor freelist, then release-store ready = 1 so a
 * (re)connecting client observes a fully reset qpair. */
168 *qp->enq_head = *qp->deq_head = qp->next_desc = 0;
169 snort_freelist_init (qp->freelist);
170 __atomic_store_n (&qp->ready, 1, __ATOMIC_RELEASE);
/* Interrupt-mode body of the snort-deq node: services every instance with a
 * pending per-thread interrupt, dequeuing completed buffers into one
 * frame-sized batch and handing them to their next nodes.
 * NOTE(review): chunk is elided — return type line, loop braces and the
 * accounting that updates n_left/bi/nexts are not visible here. */
177 snort_deq_node_interrupt (vlib_main_t *vm, vlib_node_runtime_t *node,
180 snort_main_t *sm = &snort_main;
181 snort_per_thread_data_t *ptd =
182 vec_elt_at_index (sm->per_thread_data, vm->thread_index);
/* One frame's worth of output; bi/nexts act as fill cursors. */
183 u32 buffer_indices[VLIB_FRAME_SIZE], *bi = buffer_indices;
184 u16 next_indices[VLIB_FRAME_SIZE], *nexts = next_indices;
185 u32 n_left = VLIB_FRAME_SIZE, n;
187 snort_instance_t *si;
/* Iterate all instances flagged by clib_interrupt_set (-1 terminates). */
190 while ((inst = clib_interrupt_get_next (ptd->interrupts, inst)) != -1)
192 clib_interrupt_clear (ptd->interrupts, inst);
193 si = vec_elt_at_index (sm->instances, inst);
/* Each instance has one qpair per worker thread. */
194 qp = vec_elt_at_index (si->qpairs, vm->thread_index);
/* Acquire-load of the ready flag selects the path: not-ready qpairs get
 * the drain-everything variant, ready ones the normal ring dequeue
 * (branch structure partially elided in this view). */
195 u32 ready = __atomic_load_n (&qp->ready, __ATOMIC_ACQUIRE);
197 n = snort_deq_instance_all_interrupt (vm, inst, qp, bi, nexts, n_left,
198 si->drop_on_disconnect);
200 n = snort_deq_instance (vm, inst, qp, bi, nexts, n_left);
/* Nothing dequeued at all — presumably an early return (body elided). */
210 if (n_left == VLIB_FRAME_SIZE)
/* Hand the batch to the graph; n is the number of filled slots. */
214 n = VLIB_FRAME_SIZE - n_left;
215 vlib_buffer_enqueue_to_next (vm, node, buffer_indices, next_indices, n);
/* Polling-mode twin of snort_deq_instance: drains up to max_recv completed
 * descriptors from the qpair's dequeue ring.  Unlike the interrupt variant
 * it does not re-arm an interrupt when work remains — the poll loop will
 * simply come back.  Returns the number dequeued (return elided in this
 * view — presumably n_recv). */
219 static_always_inline uword
220 snort_deq_instance_poll (vlib_main_t *vm, snort_qpair_t *qp,
221 u32 *buffer_indices, u16 *nexts, u32 max_recv)
223 u32 mask = pow2_mask (qp->log2_queue_size);
224 u32 head, next, n_recv = 0, n_left;
/* Acquire-load pairs with the producer's release store on deq_head. */
226 head = __atomic_load_n (qp->deq_head, __ATOMIC_ACQUIRE);
227 next = qp->next_desc;
/* Unsigned wrap-around subtraction gives the outstanding count. */
229 n_left = head - next;
/* Clamp to the caller's frame budget (clamp body elided in this view). */
234 if (n_left > max_recv)
242 /* check if descriptor index taken from dequeue ring is valid */
243 if ((desc_index = qp->deq_ring[next & mask]) & ~mask)
245 vlib_node_increment_counter (vm, snort_deq_node.index,
246 SNORT_DEQ_ERROR_BAD_DESC_INDEX, 1);
250 /* check if descriptor index taken from dequeue ring points to enqueued
/* ~0 marks an empty slot — count as a bad descriptor. */
252 if ((bi = qp->buffer_indices[desc_index]) == ~0)
254 vlib_node_increment_counter (vm, snort_deq_node.index,
255 SNORT_DEQ_ERROR_BAD_DESC, 1);
259 /* put descriptor back to freelist */
260 vec_add1 (qp->freelist, desc_index);
261 d = qp->descriptors + desc_index;
/* Emit the buffer index and advance the output cursor. */
262 buffer_indices++[0] = bi;
/* DAQ verdict: FORWARD keeps the enqueue-time next index, otherwise drop
 * (else branch elided in this view). */
263 if (d->action == DAQ_VPP_ACTION_FORWARD)
264 nexts[0] = qp->next_indices[desc_index];
266 nexts[0] = SNORT_ENQ_NEXT_DROP;
/* Free the slot for reuse. */
267 qp->buffer_indices[desc_index] = ~0;
/* Persist the consumption cursor for the next poll. */
277 qp->next_desc = next;
/* Polling-mode twin of snort_deq_instance_all_interrupt: flushes all
 * outstanding buffers for a not-ready qpair.  If the drain finished within
 * the frame budget, resets the qpair and republishes it as ready; otherwise
 * the next poll pass continues the drain.  Return elided in this view. */
282 static_always_inline uword
283 snort_deq_instance_all_poll (vlib_main_t *vm, snort_qpair_t *qp,
284 u32 *buffer_indices, u16 *nexts, u32 max_recv,
285 u8 drop_on_disconnect)
287 u32 n_processed = snort_process_all_buffer_indices (
288 qp, buffer_indices, nexts, max_recv, drop_on_disconnect);
/* Under budget means every outstanding slot was flushed — safe to reset. */
289 if (n_processed < max_recv)
/* Reset heads/cursor, rebuild the freelist, then release-store ready = 1
 * so a (re)connecting client observes a fully reset qpair. */
291 *qp->enq_head = *qp->deq_head = qp->next_desc = 0;
292 snort_freelist_init (qp->freelist);
293 __atomic_store_n (&qp->ready, 1, __ATOMIC_RELEASE);
/* Polling-mode body of the snort-deq node: visits every snort instance's
 * per-thread qpair each pass, batching dequeued buffers into frame-sized
 * chunks and flushing to the graph whenever a chunk fills.
 * NOTE(review): chunk is elided — return type line, loop braces and the
 * cursor-advance/n_total accounting are not visible here. */
300 snort_deq_node_polling (vlib_main_t *vm, vlib_node_runtime_t *node,
303 snort_main_t *sm = &snort_main;
/* One frame's worth of output; bi/nexts act as fill cursors. */
304 u32 buffer_indices[VLIB_FRAME_SIZE], *bi = buffer_indices;
305 u16 next_indices[VLIB_FRAME_SIZE], *nexts = next_indices;
306 u32 n_left = VLIB_FRAME_SIZE, n, n_total = 0;
308 snort_instance_t *si;
310 vec_foreach (si, sm->instances)
/* Each instance has one qpair per worker thread. */
312 qp = vec_elt_at_index (si->qpairs, vm->thread_index);
/* Acquire-load of the ready flag: not-ready qpairs take the
 * drain-everything path, ready ones the normal ring dequeue
 * (branch structure partially elided in this view). */
313 u32 ready = __atomic_load_n (&qp->ready, __ATOMIC_ACQUIRE);
315 n = snort_deq_instance_all_poll (vm, qp, bi, nexts, n_left,
316 si->drop_on_disconnect);
318 n = snort_deq_instance_poll (vm, qp, bi, nexts, n_left);
/* Frame filled mid-scan: flush it and start a fresh batch. */
326 n = VLIB_FRAME_SIZE - n_left;
327 vlib_buffer_enqueue_to_next (vm, node, buffer_indices, next_indices,
329 n_left = VLIB_FRAME_SIZE;
331 nexts = next_indices;
/* Final flush of any partially filled batch after the scan. */
336 if (n_left < VLIB_FRAME_SIZE)
338 n = VLIB_FRAME_SIZE - n_left;
339 vlib_buffer_enqueue_to_next (vm, node, buffer_indices, next_indices, n);
/* Node entry point: dispatches to the polling or interrupt implementation
 * based on the plugin-wide input mode (braces elided in this view). */
345 VLIB_NODE_FN (snort_deq_node)
346 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
348 snort_main_t *sm = &snort_main;
349 if (sm->input_mode == VLIB_NODE_STATE_POLLING)
350 return snort_deq_node_polling (vm, node, frame);
351 return snort_deq_node_interrupt (vm, node, frame);
/* Node registration: an input node, disabled until the plugin enables it,
 * sharing snort-enq's next-node arc layout via .sibling_of.  (.name field
 * and closing brace elided in this view.) */
354 VLIB_REGISTER_NODE (snort_deq_node) = {
356 .vector_size = sizeof (u32),
357 .format_trace = format_snort_deq_trace,
358 .type = VLIB_NODE_TYPE_INPUT,
359 .state = VLIB_NODE_STATE_DISABLED,
360 .sibling_of = "snort-enq",
/* Error counters wired to the x-macro table defined above. */
362 .n_errors = ARRAY_LEN (snort_deq_error_strings),
363 .error_strings = snort_deq_error_strings,