2 * Copyright (c) 2021 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/plugin/plugin.h>
17 #include <vpp/app/version.h>
20 #include <vnet/tcp/tcp_types.h>
/* Human-readable error counter strings for the HSI nodes, generated by
 * x-macro expansion of hsi_error.def (second macro argument is the
 * description string). */
22 char *hsi_error_strings[] = {
23 #define hsi_error(n, s) s,
24 #include <hsi/hsi_error.def>
/* Next-node indices shared by all four HSI feature nodes.  They are
 * mapped to concrete graph-node names per address family by the
 * foreach_hsi{4,6}_input_next lists below. */
28 typedef enum hsi_input_next_
30 HSI_INPUT_NEXT_UDP_INPUT,
31 HSI_INPUT_NEXT_TCP_INPUT,
32 HSI_INPUT_NEXT_TCP_INPUT_NOLOOKUP, /* connection already resolved; tcp input skips its lookup */
/* ip4 mapping: next-node index -> graph node a diverted packet goes to. */
36 #define foreach_hsi4_input_next \
37 _ (UDP_INPUT, "udp4-input") \
38 _ (TCP_INPUT, "tcp4-input") \
39 _ (TCP_INPUT_NOLOOKUP, "tcp4-input-nolookup")
/* ip6 mapping: same indices, ip6 host-stack input nodes. */
41 #define foreach_hsi6_input_next \
42 _ (UDP_INPUT, "udp6-input") \
43 _ (TCP_INPUT, "tcp6-input") \
44 _ (TCP_INPUT_NOLOOKUP, "tcp6-input-nolookup")
/* Trace formatter for the HSI nodes: prints whether a session was found
 * and the name of the node the packet was dispatched to.  A recorded
 * next index >= HSI_INPUT_N_NEXT means no session matched and the
 * packet continued along the feature arc, hence "not found". */
52 format_hsi_trace (u8 *s, va_list *args)
54 vlib_main_t *vm = va_arg (*args, vlib_main_t *);
55 vlib_node_t *node = va_arg (*args, vlib_node_t *);
56 hsi_trace_t *t = va_arg (*args, hsi_trace_t *);
/* Resolve the recorded next index to the actual graph node for its name */
59 nn = vlib_get_next_node (vm, node->index, t->next_node);
60 s = format (s, "session %sfound, next node: %v",
61 t->next_node < HSI_INPUT_N_NEXT ? "" : "not ", nn->name);
/* Look up a UDP session for the buffer's 4-tuple in the given fib.
 * dst/src are passed as local/remote, i.e. the lookup is done from the
 * receiving endpoint's point of view.  Uses the "safe" lookup variants,
 * which are usable outside the session owner thread.
 * NOTE(review): the return statement is not visible here -- presumably
 * returns non-zero when a session exists; confirm against full source. */
66 hsi_udp_lookup (vlib_buffer_t *b, void *ip_hdr, u8 is_ip4)
73 ip4_header_t *ip4 = (ip4_header_t *) ip_hdr;
/* UDP header immediately follows the ip4 header */
74 hdr = ip4_next_header (ip4);
75 s = session_lookup_safe4 (
76 vnet_buffer (b)->ip.fib_index, &ip4->dst_address, &ip4->src_address,
77 hdr->dst_port, hdr->src_port, TRANSPORT_PROTO_UDP);
81 ip6_header_t *ip6 = (ip6_header_t *) ip_hdr;
/* NOTE(review): ip6_next_header assumes no extension headers between
 * the ip6 header and UDP -- confirm upstream guarantees this */
82 hdr = ip6_next_header (ip6);
83 s = session_lookup_safe6 (
84 vnet_buffer (b)->ip.fib_index, &ip6->dst_address, &ip6->src_address,
85 hdr->dst_port, hdr->src_port, TRANSPORT_PROTO_UDP);
/* Look up a TCP connection for the buffer's 4-tuple (local = dst,
 * remote = src) using the thread-aware (_wt) lookup on the current
 * worker.  Returns the connection only on an exact match (result == 0);
 * any other lookup result is treated as "no session" and 0 is
 * returned. */
91 always_inline transport_connection_t *
92 hsi_tcp_lookup (vlib_buffer_t *b, void *ip_hdr, u8 is_ip4)
94 transport_connection_t *tc;
100 ip4_header_t *ip4 = (ip4_header_t *) ip_hdr;
/* TCP header immediately follows the ip4 header */
101 hdr = ip4_next_header (ip4);
102 tc = session_lookup_connection_wt4 (
103 vnet_buffer (b)->ip.fib_index, &ip4->dst_address, &ip4->src_address,
104 hdr->dst_port, hdr->src_port, TRANSPORT_PROTO_TCP,
105 vlib_get_thread_index (), &result);
109 ip6_header_t *ip6 = (ip6_header_t *) ip_hdr;
110 hdr = ip6_next_header (ip6);
111 tc = session_lookup_connection_wt6 (
112 vnet_buffer (b)->ip.fib_index, &ip6->dst_address, &ip6->src_address,
113 hdr->dst_port, hdr->src_port, TRANSPORT_PROTO_TCP,
114 vlib_get_thread_index (), &result);
/* result != 0 signals a non-exact/failed lookup -- report no session */
117 return result == 0 ? tc : 0;
/* Core per-packet classifier.  Locates the IP header (skipping any
 * saved L2 rewrite), reads the L4 protocol, and on a TCP/UDP session
 * hit diverts the packet to the host-stack input nodes; otherwise the
 * packet continues along the feature arc via vnet_feature_next(). */
121 hsi_lookup_and_update (vlib_buffer_t *b, u32 *next, u8 is_ip4)
123 transport_connection_t *tc;
124 u8 proto, state, have_udp;
/* On output arcs current data points at the rewrite; skip it to reach
 * the IP header.  rw_len is 0 when there is no rewrite. */
128 rw_len = vnet_buffer (b)->ip.save_rewrite_length;
129 ip_hdr = vlib_buffer_get_current (b) + rw_len;
132 proto = ((ip4_header_t *) ip_hdr)->protocol;
/* NOTE(review): for ip6 this reads the protocol (next-header) field
 * directly -- extension headers would not be skipped; confirm intent */
134 proto = ((ip6_header_t *) ip_hdr)->protocol;
138 case IP_PROTOCOL_TCP:
139 tc = hsi_tcp_lookup (b, ip_hdr, is_ip4);
142 state = ((tcp_connection_t *) tc)->state;
/* Listeners and half-open connections are not fully established, so
 * hand the packet to tcp input with its normal lookup path */
143 if (state == TCP_STATE_LISTEN)
145 *next = HSI_INPUT_NEXT_TCP_INPUT;
147 else if (state == TCP_STATE_SYN_SENT)
149 *next = HSI_INPUT_NEXT_TCP_INPUT;
153 /* Lookup already done, use result */
154 *next = HSI_INPUT_NEXT_TCP_INPUT_NOLOOKUP;
/* Pass the resolved connection so tcp input can skip its own lookup */
155 vnet_buffer (b)->tcp.connection_index = tc->c_index;
/* Position current data at the IP header before diverting */
157 vlib_buffer_advance (b, rw_len);
/* No TCP session: stay on the feature arc */
161 vnet_feature_next (next, b);
164 case IP_PROTOCOL_UDP:
165 have_udp = hsi_udp_lookup (b, ip_hdr, is_ip4);
168 *next = HSI_INPUT_NEXT_UDP_INPUT;
169 vlib_buffer_advance (b, rw_len);
/* No UDP session: stay on the feature arc */
173 vnet_feature_next (next, b);
/* Not TCP/UDP: never intercepted */
177 vnet_feature_next (next, b);
/* Record a trace entry (chosen next index) for every traced buffer in
 * the frame.  Buffers without VLIB_BUFFER_IS_TRACED are skipped. */
183 hsi_input_trace_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
184 vlib_buffer_t **bufs, u16 *nexts, u32 n_bufs, u8 is_ip4)
190 for (i = 0; i < n_bufs; i++)
193 if (!(b->flags & VLIB_BUFFER_IS_TRACED))
195 t = vlib_add_trace (vm, node, b, sizeof (*t));
/* Store the dispatch decision; decoded later by format_hsi_trace */
196 t->next_node = nexts[i];
/* Shared dispatch function for all four HSI nodes.  Classifies every
 * buffer in the frame with hsi_lookup_and_update(), then enqueues the
 * whole frame to the chosen next nodes in one call.  is_ip4 selects the
 * address family at compile time for the specialized node functions. */
201 hsi46_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
202 vlib_frame_t *frame, int is_ip4)
204 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
205 u16 nexts[VLIB_FRAME_SIZE], *next;
206 u32 n_left_from, *from;
208 from = vlib_frame_vector_args (frame);
209 n_left_from = frame->n_vectors;
/* Translate buffer indices to pointers up front */
211 vlib_get_buffers (vm, from, bufs, n_left_from);
/* Main loop: process buffers two at a time while prefetching the
 * headers and data of the next pair to hide memory latency */
215 while (n_left_from >= 4)
219 vlib_prefetch_buffer_header (b[2], LOAD);
220 CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
222 vlib_prefetch_buffer_header (b[3], LOAD);
223 CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
225 hsi_lookup_and_update (b[0], &next0, is_ip4);
226 hsi_lookup_and_update (b[1], &next1, is_ip4);
/* Scalar tail: remaining buffers one at a time */
240 hsi_lookup_and_update (b[0], &next0, is_ip4);
249 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
251 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
252 hsi_input_trace_frame (vm, node, bufs, nexts, frame->n_vectors, is_ip4);
254 return frame->n_vectors;
/* hsi4-in: ip4 RX-side interception -- thin wrapper over the shared
 * inline with is_ip4 = 1. */
257 VLIB_NODE_FN (hsi4_in_node)
258 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
260 return hsi46_input_inline (vm, node, frame, 1 /* is_ip4 */);
/* Graph-node registration; next nodes expanded from the ip4 map. */
263 VLIB_REGISTER_NODE (hsi4_in_node) = {
265 .vector_size = sizeof (u32),
266 .format_trace = format_hsi_trace,
267 .type = VLIB_NODE_TYPE_INTERNAL,
268 .n_errors = HSI_N_ERROR,
269 .error_strings = hsi_error_strings,
270 .n_next_nodes = HSI_INPUT_N_NEXT,
272 #define _(s, n) [HSI_INPUT_NEXT_##s] = n,
273 foreach_hsi4_input_next
/* Hook onto the ip4-unicast arc, before the fib lookup runs. */
278 VNET_FEATURE_INIT (hsi4_in_feature, static) = {
279 .arc_name = "ip4-unicast",
280 .node_name = "hsi4-in",
281 .runs_before = VNET_FEATURES ("ip4-lookup"),
/* hsi4-out: ip4 TX-side interception -- same dispatch as hsi4-in; the
 * rewrite skip in hsi_lookup_and_update handles the output-arc case. */
284 VLIB_NODE_FN (hsi4_out_node)
285 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
287 return hsi46_input_inline (vm, node, frame, 1 /* is_ip4 */);
/* Graph-node registration; next nodes expanded from the ip4 map. */
290 VLIB_REGISTER_NODE (hsi4_out_node) = {
292 .vector_size = sizeof (u32),
293 .format_trace = format_hsi_trace,
294 .type = VLIB_NODE_TYPE_INTERNAL,
295 .n_errors = HSI_N_ERROR,
296 .error_strings = hsi_error_strings,
297 .n_next_nodes = HSI_INPUT_N_NEXT,
299 #define _(s, n) [HSI_INPUT_NEXT_##s] = n,
300 foreach_hsi4_input_next
/* Hook onto the ip4-output arc, before the packet leaves the interface. */
305 VNET_FEATURE_INIT (hsi4_out_feature, static) = {
306 .arc_name = "ip4-output",
307 .node_name = "hsi4-out",
308 .runs_before = VNET_FEATURES ("interface-output"),
/* hsi6-in: ip6 RX-side interception -- shared inline with is_ip4 = 0. */
311 VLIB_NODE_FN (hsi6_in_node)
312 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
314 return hsi46_input_inline (vm, node, frame, 0 /* is_ip4 */);
/* Graph-node registration; next nodes expanded from the ip6 map. */
317 VLIB_REGISTER_NODE (hsi6_in_node) = {
319 .vector_size = sizeof (u32),
320 .format_trace = format_hsi_trace,
321 .type = VLIB_NODE_TYPE_INTERNAL,
322 .n_errors = HSI_N_ERROR,
323 .error_strings = hsi_error_strings,
324 .n_next_nodes = HSI_INPUT_N_NEXT,
326 #define _(s, n) [HSI_INPUT_NEXT_##s] = n,
327 foreach_hsi6_input_next
/* Hook onto the ip6-unicast arc, before the fib lookup runs. */
332 VNET_FEATURE_INIT (hsi6_in_feature, static) = {
333 .arc_name = "ip6-unicast",
334 .node_name = "hsi6-in",
335 .runs_before = VNET_FEATURES ("ip6-lookup"),
/* hsi6-out: ip6 TX-side interception -- shared inline with is_ip4 = 0. */
338 VLIB_NODE_FN (hsi6_out_node)
339 (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
341 return hsi46_input_inline (vm, node, frame, 0 /* is_ip4 */);
/* Graph-node registration; next nodes expanded from the ip6 map. */
344 VLIB_REGISTER_NODE (hsi6_out_node) = {
346 .vector_size = sizeof (u32),
347 .format_trace = format_hsi_trace,
348 .type = VLIB_NODE_TYPE_INTERNAL,
349 .n_errors = HSI_N_ERROR,
350 .error_strings = hsi_error_strings,
351 .n_next_nodes = HSI_INPUT_N_NEXT,
353 #define _(s, n) [HSI_INPUT_NEXT_##s] = n,
354 foreach_hsi6_input_next
/* Hook onto the ip6-output arc, before the packet leaves the interface. */
359 VNET_FEATURE_INIT (hsi6_out_feature, static) = {
360 .arc_name = "ip6-output",
361 .node_name = "hsi6-out",
362 .runs_before = VNET_FEATURES ("interface-output"),
/* Plugin registration: loaded and enabled by default
 * (default_disabled = 0). */
365 VLIB_PLUGIN_REGISTER () = {
366 .version = VPP_BUILD_VER,
367 .description = "Host Stack Intercept (HSI)",
368 .default_disabled = 0,
372 * fd.io coding-style-patch-verification: ON
375 * eval: (c-set-style "gnu")