1 /* SPDX-License-Identifier: Apache-2.0
2 * Copyright(c) 2021 Cisco Systems, Inc.
6 #include <vnet/feature/feature.h>
7 #include <snort/snort.h>
/* Trace formatter for the snort-enq node: renders a snort_enq_trace_t
 * captured at enqueue time for "show trace" output.
 * NOTE(review): fragmentary view - the function's type line and braces are
 * outside this chunk. */
format_snort_enq_trace (u8 *s, va_list *args)
  /* standard vlib trace-formatter prologue; vm and node are unused here */
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  snort_enq_trace_t *t = va_arg (*args, snort_enq_trace_t *);
  u32 indent = format_get_indent (s);
  /* three lines: interface/next, instance/qpair/slot, raw DAQ descriptor */
	      "sw-if-index %u next-index %u\n"
	      "%Uinstance %u qpair %u desc-index %u slot %u\n"
	      "%Udesc: buffer-pool %u offset %u len %u address-space-id %u\n",
	      t->sw_if_index, t->next_index, format_white_space, indent,
	      t->instance, t->qpair, t->desc_index, t->enq_slot,
	      format_white_space, indent, t->desc.buffer_pool, t->desc.offset,
	      t->desc.length, t->desc.address_space_id);
/* Error counter list: (symbol, human-readable string) pairs, expanded below
 * into both the SNORT_ENQ_ERROR_* enum and the strings table registered with
 * the node. */
#define foreach_snort_enq_error                                               \
  _ (SOCKET_ERROR, "write socket error")                                      \
  _ (NO_INSTANCE, "no snort instance")                                        \
  _ (NO_ENQ_SLOTS, "no enqueue slots (packet dropped)")
/* Expand the error list into SNORT_ENQ_ERROR_* enum members; the enum
 * typedef wrapping these lines is outside this view. */
#define _(sym, str) SNORT_ENQ_ERROR_##sym,
  foreach_snort_enq_error
/* Parallel table of error strings, indexed by SNORT_ENQ_ERROR_*; hooked up
 * to the node via .error_strings in the registration below. */
static char *snort_enq_error_strings[] = {
#define _(sym, string) string,
  foreach_snort_enq_error
/* snort-enq worker: for each packet on the frame, read the snort instance
 * index from the feature-arc metadata, stage a DAQ descriptor in the
 * per-thread qpair, then (flush pass) commit all pending descriptors of each
 * instance to its enqueue ring and, in interrupt mode, kick the client over
 * the qpair fd.  with_trace selects the specialization that records traces.
 * NOTE(review): fragmentary view - several loop headers, braces and local
 * declarations (fa_data, l3_offset, n, n_enq, n_trace, ctr) fall outside
 * this chunk. */
static_always_inline uword
snort_enq_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		       vlib_frame_t *frame, int with_trace)
  snort_main_t *sm = &snort_main;
  snort_instance_t *si = 0;
  snort_qpair_t *qp = 0;
  u32 thread_index = vm->thread_index;
  u32 n_left = frame->n_vectors;
  u32 total_enq = 0, n_processed = 0;
  u32 *from = vlib_frame_vector_args (frame);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, bufs, n_left);
      /* per-packet staging: feature-arc metadata carries the instance index
	 in the low 32 bits; a nonzero high half selects the saved rewrite
	 length as the L3 offset (fa_data is declared on a line outside this
	 view) */
      u32 instance_index, next_index, n;
	*(u64 *) vnet_feature_next_with_data (&next_index, b[0], sizeof (u64));
      instance_index = (u32) (fa_data & 0xffffffff);
	(fa_data >> 32) ? vnet_buffer (b[0])->ip.save_rewrite_length : 0;
      si = vec_elt_at_index (sm->instances, instance_index);
      /* if client isn't connected skip enqueue and take default action */
      if (PREDICT_FALSE (si->client_index == ~0))
	  if (si->drop_on_disconnect)
	    next[0] = SNORT_ENQ_NEXT_DROP;
      /* stage packet in this thread's qpair; the actual ring commit happens
	 in the flush pass below */
      qp = vec_elt_at_index (si->qpairs, thread_index);
      daq_vpp_desc_t *d = qp->pending_descs + n;
      qp->pending_nexts[n] = next_index;
      qp->pending_buffers[n] = from[0];
      /* descriptor addresses a single contiguous region, so collapse any
	 buffer chain first */
      vlib_buffer_chain_linearize (vm, b[0]);
      /* If this pkt is traced, snapshot the data */
      if (with_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
      /* fill descriptor */
      d->buffer_pool = b[0]->buffer_pool_index;
      d->length = b[0]->current_length;
      /* offset is relative to the buffer pool's base address so the snort
	 client can resolve it in its own mapping of the pool */
      d->offset = (u8 *) b[0]->data + b[0]->current_data + l3_offset -
		  sm->buffer_pool_base_addrs[d->buffer_pool];
      d->address_space_id = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
  /* packets that never reached a connected instance are counted and sent
     straight to their next nodes */
  vlib_node_increment_counter (vm, snort_enq_node.index,
			       SNORT_ENQ_ERROR_NO_INSTANCE, n_processed);
  vlib_buffer_enqueue_to_next (vm, node, vlib_frame_vector_args (frame),
  /* flush pass: commit each instance's pending descriptors to its
     per-thread enqueue ring */
  vec_foreach (si, sm->instances)
      u32 head, freelist_len, n_pending, n_enq, mask;
      qp = vec_elt_at_index (si->qpairs, thread_index);
      mask = pow2_mask (qp->log2_queue_size);
      n_pending = qp->n_pending;
      freelist_len = vec_len (qp->freelist);
      /* not enough free descriptors: enqueue what fits, free and count the
	 overflow as NO_ENQ_SLOTS drops */
      if (freelist_len < n_pending)
	  n_enq = freelist_len;
	  vlib_buffer_free (vm, qp->pending_buffers + n_enq,
	  vlib_node_increment_counter (vm, snort_enq_node.index,
				       SNORT_ENQ_ERROR_NO_ENQ_SLOTS,
      head = *qp->enq_head;
      for (u32 i = 0; i < n_enq; i++)
	  /* take a free descriptor slot, remember buffer index + next node
	     for the matching dequeue, copy the staged descriptor in, and
	     put its index on the ring */
	  u32 desc_index = qp->freelist[--freelist_len];
	  qp->next_indices[desc_index] = qp->pending_nexts[i];
	  ASSERT (qp->buffer_indices[desc_index] == ~0);
	  qp->buffer_indices[desc_index] = qp->pending_buffers[i];
	  clib_memcpy_fast (qp->descriptors + desc_index,
			    qp->pending_descs + i, sizeof (daq_vpp_desc_t));
	  qp->enq_ring[head & mask] = desc_index;
	  /* n_trace is maintained on lines outside this view */
	  if (with_trace && n_trace)
	      vlib_buffer_t *tb = vlib_get_buffer (vm, qp->pending_buffers[i]);
	      if (tb->flags & VLIB_BUFFER_IS_TRACED)
		  snort_enq_trace_t *t =
		    vlib_add_trace (vm, node, tb, sizeof (*t));
		  t->sw_if_index = vnet_buffer (tb)->sw_if_index[VLIB_RX];
		  t->next_index = qp->pending_nexts[i];
		  t->instance = si->index;
		  t->qpair = qp - si->qpairs;
		  t->enq_slot = head & mask;
		  t->desc_index = desc_index;
		  clib_memcpy_fast (&t->desc, qp->pending_descs + i,
				    sizeof (daq_vpp_desc_t));
      /* release store publishes the new head only after the descriptor
	 writes above are visible to the snort client */
      __atomic_store_n (qp->enq_head, head, __ATOMIC_RELEASE);
      vec_set_len (qp->freelist, freelist_len);
      /* interrupt mode: wake the client by writing to the qpair enqueue fd
	 (ctr is declared on a line outside this view); a failed write is
	 only counted, not retried */
      if (sm->input_mode == VLIB_NODE_STATE_INTERRUPT)
	  if (write (qp->enq_fd, &ctr, sizeof (ctr)) < 0)
	    vlib_node_increment_counter (vm, snort_enq_node.index,
					 SNORT_ENQ_ERROR_SOCKET_ERROR, 1);
/* Node entry point: dispatch to the traced or untraced specialization of
 * the inline worker so the per-packet trace test is compiled out of the
 * common (untraced) case. */
VLIB_NODE_FN (snort_enq_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    return snort_enq_node_inline (vm, node, frame, 1 /* is_trace*/);
  return snort_enq_node_inline (vm, node, frame, 0 /* is_trace*/);
/* Node registration, wiring in the trace formatter, next nodes and the
 * error strings table defined above.  NOTE(review): the .name member and
 * the closing brace are outside this view. */
VLIB_REGISTER_NODE (snort_enq_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_snort_enq_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_next_nodes = SNORT_ENQ_N_NEXT_NODES,
  .next_nodes = SNORT_ENQ_NEXT_NODES,
  .n_errors = ARRAY_LEN (snort_enq_error_strings),
  .error_strings = snort_enq_error_strings,