1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright(c) 2021 Cisco Systems, Inc.
6 #include <vnet/feature/feature.h>
7 #include <snort/snort.h>
/* Trace formatter for the snort-enq node.  Renders one snort_enq_trace_t
 * (captured at enqueue time) as three aligned lines: interface/next-node,
 * instance/qpair/descriptor bookkeeping, and a snapshot of the DAQ
 * descriptor handed to the snort client.
 * NOTE(review): this listing is elided — the `s = format (s, ...` opener
 * and the trailing `return s;` are among the missing lines. */
21 format_snort_enq_trace (u8 *s, va_list *args)
/* Standard VPP trace-format prologue: vm and node are consumed from the
 * va_list but unused by this formatter. */
23 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
24 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
25 snort_enq_trace_t *t = va_arg (*args, snort_enq_trace_t *);
/* Indent of text already emitted, so the %U/format_white_space pairs
 * below align continuation lines under the first one. */
26 u32 indent = format_get_indent (s);
29 "sw-if-index %u next-index %u\n"
30 "%Uinstance %u qpair %u desc-index %u slot %u\n"
31 "%Udesc: buffer-pool %u offset %u len %u address-space-id %u\n",
32 t->sw_if_index, t->next_index, format_white_space, indent,
33 t->instance, t->qpair, t->desc_index, t->enq_slot,
34 format_white_space, indent, t->desc.buffer_pool, t->desc.offset,
35 t->desc.length, t->desc.address_space_id);
/* X-macro listing every counter this node can bump: expanded once with
 * `_` producing enum symbols (SNORT_ENQ_ERROR_<sym>) and once producing
 * the matching human-readable strings.  Keep symbol and string in one
 * place so the two expansions cannot drift apart.
 * (No comments inside the macro body — the lines are backslash-continued.) */
40 #define foreach_snort_enq_error \
41 _ (SOCKET_ERROR, "write socket error") \
42 _ (NO_INSTANCE, "no snort instance") \
43 _ (NO_ENQ_SLOTS, "no enqueue slots (packet dropped)")
/* First expansion: error enum values (enum opener/closer are outside this
 * elided excerpt). */
47 #define _(sym, str) SNORT_ENQ_ERROR_##sym,
48 foreach_snort_enq_error
/* Second expansion: string table indexed by the enum above; wired into the
 * node registration via .error_strings. */
53 static char *snort_enq_error_strings[] = {
54 #define _(sym, string) string,
55 foreach_snort_enq_error
/* Worker for the snort-enq graph node.  Two phases:
 *   1) per-buffer: resolve the target snort instance from the feature-arc
 *      data, stage a DAQ descriptor + next-index + buffer-index in the
 *      per-thread qpair's pending arrays (or short-circuit to drop/next
 *      when no client is connected);
 *   2) per-instance: move pending descriptors into the shared-memory
 *      enqueue ring, publish the new head with a release store, and (in
 *      interrupt mode) kick the client over enq_fd.
 * with_trace is a compile-time flag (see VLIB_NODE_FN wrapper) so the
 * tracing branches fold away in the non-traced instantiation.
 * NOTE(review): this listing is elided — loop headers, braces and several
 * statements are missing between the numbered lines below. */
59 static_always_inline uword
60 snort_enq_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
61 vlib_frame_t *frame, int with_trace)
63 snort_main_t *sm = &snort_main;
64 snort_instance_t *si = 0;
65 snort_qpair_t *qp = 0;
/* qpairs are indexed by worker thread, so each thread owns its queue and
 * phase 2 needs no locking on the pending arrays. */
66 u32 thread_index = vm->thread_index;
67 u32 n_left = frame->n_vectors;
69 u32 total_enq = 0, n_processed = 0;
70 u32 *from = vlib_frame_vector_args (frame);
71 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
72 u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
74 vlib_get_buffers (vm, from, bufs, n_left);
/* ---- phase 1: per-buffer staging loop (loop header elided) ---- */
78 u32 instance_index, next_index, n;
/* Feature-arc config data carries the u32 snort instance index for this
 * buffer; the assignment target (instance_index =) is on an elided line. */
80 *(u32 *) vnet_feature_next_with_data (&next_index, b[0], sizeof (u32));
81 si = vec_elt_at_index (sm->instances, instance_index);
83 /* if client isn't connected skip enqueue and take default action */
84 if (PREDICT_FALSE (si->client_index == ~0))
/* drop_on_disconnect selects drop vs. pass-through when snort is away. */
86 if (si->drop_on_disconnect)
87 next[0] = SNORT_ENQ_NEXT_DROP;
95 qp = vec_elt_at_index (si->qpairs, thread_index);
/* n indexes this qpair's pending slot (its update is elided). */
97 daq_vpp_desc_t *d = qp->pending_descs + n;
99 qp->pending_nexts[n] = next_index;
100 qp->pending_buffers[n] = from[0];
/* DAQ descriptors describe a single contiguous region, so chained
 * buffers must be linearized before the descriptor is filled. */
102 vlib_buffer_chain_linearize (vm, b[0]);
104 /* If this pkt is traced, snapshoot the data */
105 if (with_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
108 /* fill descriptor */
109 d->buffer_pool = b[0]->buffer_pool_index;
110 d->length = b[0]->current_length;
/* Offset is relative to the buffer pool's base address so the snort
 * client can resolve it inside the shared memory mapping. */
111 d->offset = (u8 *) b[0]->data + b[0]->current_data -
112 sm->buffer_pool_base_addrs[d->buffer_pool];
113 d->address_space_id = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
/* Buffers that never reached an instance are counted and sent on
 * immediately (surrounding control flow elided). */
123 vlib_node_increment_counter (vm, snort_enq_node.index,
124 SNORT_ENQ_ERROR_NO_INSTANCE, n_processed);
125 vlib_buffer_enqueue_to_next (vm, node, vlib_frame_vector_args (frame),
/* ---- phase 2: flush each instance's pending descriptors ---- */
129 vec_foreach (si, sm->instances)
131 u32 head, freelist_len, n_pending, n_enq, mask;
133 qp = vec_elt_at_index (si->qpairs, thread_index);
/* Queue size is a power of two; mask wraps the ring index cheaply. */
134 mask = pow2_mask (qp->log2_queue_size);
135 n_pending = qp->n_pending;
141 freelist_len = vec_len (qp->freelist);
/* Not enough free descriptors: enqueue what fits, free the overflow
 * buffers and count them as NO_ENQ_SLOTS drops. */
143 if (freelist_len < n_pending)
145 n_enq = freelist_len;
146 vlib_buffer_free (vm, qp->pending_buffers + n_enq,
148 vlib_node_increment_counter (vm, snort_enq_node.index,
149 SNORT_ENQ_ERROR_NO_ENQ_SLOTS,
/* Only this thread writes the head; the plain read is paired with the
 * release store below once the ring entries are populated. */
159 head = *qp->enq_head;
161 for (u32 i = 0; i < n_enq; i++)
/* Pop a free descriptor slot and park the buffer + next-index under it
 * until the client hands the descriptor back. */
163 u32 desc_index = qp->freelist[--freelist_len];
164 qp->next_indices[desc_index] = qp->pending_nexts[i];
/* Slot must be vacant (~0) — a live buffer here would mean a leak or a
 * double enqueue. */
165 ASSERT (qp->buffer_indices[desc_index] == ~0);
166 qp->buffer_indices[desc_index] = qp->pending_buffers[i];
167 clib_memcpy_fast (qp->descriptors + desc_index,
168 qp->pending_descs + i, sizeof (daq_vpp_desc_t));
169 qp->enq_ring[head & mask] = desc_index;
/* n_trace comes from an elided line — presumably vlib_get_trace_count;
 * verify against the full source. */
172 if (with_trace && n_trace)
174 vlib_buffer_t *tb = vlib_get_buffer (vm, qp->pending_buffers[i]);
175 if (tb->flags & VLIB_BUFFER_IS_TRACED)
177 snort_enq_trace_t *t =
178 vlib_add_trace (vm, node, tb, sizeof (*t));
179 t->sw_if_index = vnet_buffer (tb)->sw_if_index[VLIB_RX];
180 t->next_index = qp->pending_nexts[i];
181 t->instance = si->index;
/* Pointer arithmetic on the qpairs vector recovers this qpair's index. */
182 t->qpair = qp - si->qpairs;
183 t->enq_slot = head & mask;
184 t->desc_index = desc_index;
185 clib_memcpy_fast (&t->desc, qp->pending_descs + i,
186 sizeof (daq_vpp_desc_t));
/* Publish: release store orders the ring/descriptor writes above before
 * the new head becomes visible to the snort client. */
192 __atomic_store_n (qp->enq_head, head, __ATOMIC_RELEASE);
193 _vec_len (qp->freelist) = freelist_len;
/* Interrupt mode: wake the client by writing to enq_fd (ctr is declared
 * on an elided line; presumably an eventfd-style u64 — confirm). */
194 if (sm->input_mode == VLIB_NODE_STATE_INTERRUPT)
196 if (write (qp->enq_fd, &ctr, sizeof (ctr)) < 0)
197 vlib_node_increment_counter (vm, snort_enq_node.index,
198 SNORT_ENQ_ERROR_SOCKET_ERROR, 1);
/* Node entry point: dispatch to one of two specialized instantiations of
 * snort_enq_node_inline so the tracing code compiles away entirely on the
 * common (untraced) path. */
205 VLIB_NODE_FN (snort_enq_node)
206 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
208 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
209 return snort_enq_node_inline (vm, node, frame, 1 /* is_trace*/);
211 return snort_enq_node_inline (vm, node, frame, 0 /* is_trace*/);
/* Graph-node registration: internal node, per-packet vector of u32 buffer
 * indices, trace formatter and error-string table defined above.  The
 * .name member and closing brace fall on elided lines of this listing. */
214 VLIB_REGISTER_NODE (snort_enq_node) = {
216 .vector_size = sizeof (u32),
217 .format_trace = format_snort_enq_trace,
218 .type = VLIB_NODE_TYPE_INTERNAL,
219 .n_next_nodes = SNORT_ENQ_N_NEXT_NODES,
220 .next_nodes = SNORT_ENQ_NEXT_NODES,
/* n_errors / error_strings must stay in sync with foreach_snort_enq_error. */
221 .n_errors = ARRAY_LEN (snort_enq_error_strings),
222 .error_strings = snort_enq_error_strings,