/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include <vnet/ip/ip.h>
16 #include <vnet/classify/vnet_classify.h>
17 #include <vnet/classify/input_acl.h>
27 /* packet trace format function */
29 format_ip_inacl_trace (u8 * s, va_list * args)
31 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
32 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
33 ip_inacl_trace_t *t = va_arg (*args, ip_inacl_trace_t *);
35 s = format (s, "INACL: sw_if_index %d, next_index %d, table %d, offset %d",
36 t->sw_if_index, t->next_index, t->table_index, t->offset);
40 vlib_node_registration_t ip4_inacl_node;
41 vlib_node_registration_t ip6_inacl_node;
/* Error/counter taxonomy for the inacl nodes: one counter per outcome. */
#define foreach_ip_inacl_error                  \
_(MISS, "input ACL misses")                     \
_(HIT, "input ACL hits")                        \
_(CHAIN_HIT, "input ACL hits after chain walk")

typedef enum
{
#define _(sym,str) IP_INACL_ERROR_##sym,
  foreach_ip_inacl_error
#undef _
    IP_INACL_N_ERROR,
} ip_inacl_error_t;

/* Human-readable strings, generated in the same order as the enum above. */
static char *ip_inacl_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_inacl_error
#undef _
};
63 ip_inacl_inline (vlib_main_t * vm,
64 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip4)
66 u32 n_left_from, *from, *to_next;
67 acl_next_index_t next_index;
68 input_acl_main_t *am = &input_acl_main;
69 vnet_classify_main_t *vcm = am->vnet_classify_main;
70 f64 now = vlib_time_now (vm);
74 input_acl_table_id_t tid;
75 vlib_node_runtime_t *error_node;
78 n_next_nodes = node->n_next_nodes;
82 tid = INPUT_ACL_TABLE_IP4;
83 error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
87 tid = INPUT_ACL_TABLE_IP6;
88 error_node = vlib_node_get_runtime (vm, ip6_input_node.index);
91 from = vlib_frame_vector_args (frame);
92 n_left_from = frame->n_vectors;
94 /* First pass: compute hashes */
96 while (n_left_from > 2)
98 vlib_buffer_t *b0, *b1;
101 u32 sw_if_index0, sw_if_index1;
102 u32 table_index0, table_index1;
103 vnet_classify_table_t *t0, *t1;
105 /* prefetch next iteration */
107 vlib_buffer_t *p1, *p2;
109 p1 = vlib_get_buffer (vm, from[1]);
110 p2 = vlib_get_buffer (vm, from[2]);
112 vlib_prefetch_buffer_header (p1, STORE);
113 CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
114 vlib_prefetch_buffer_header (p2, STORE);
115 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
119 b0 = vlib_get_buffer (vm, bi0);
122 b1 = vlib_get_buffer (vm, bi1);
124 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
126 am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
128 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
130 am->classify_table_index_by_sw_if_index[tid][sw_if_index1];
132 t0 = pool_elt_at_index (vcm->tables, table_index0);
134 t1 = pool_elt_at_index (vcm->tables, table_index1);
136 if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
137 h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
141 vnet_buffer (b0)->l2_classify.hash =
142 vnet_classify_hash_packet (t0, (u8 *) h0);
144 vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
146 if (t1->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
147 h1 = (void *) vlib_buffer_get_current (b1) + t1->current_data_offset;
151 vnet_buffer (b1)->l2_classify.hash =
152 vnet_classify_hash_packet (t1, (u8 *) h1);
154 vnet_classify_prefetch_bucket (t1, vnet_buffer (b1)->l2_classify.hash);
156 vnet_buffer (b0)->l2_classify.table_index = table_index0;
158 vnet_buffer (b1)->l2_classify.table_index = table_index1;
164 while (n_left_from > 0)
171 vnet_classify_table_t *t0;
174 b0 = vlib_get_buffer (vm, bi0);
176 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
178 am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
180 t0 = pool_elt_at_index (vcm->tables, table_index0);
182 if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
183 h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset;
187 vnet_buffer (b0)->l2_classify.hash =
188 vnet_classify_hash_packet (t0, (u8 *) h0);
190 vnet_buffer (b0)->l2_classify.table_index = table_index0;
191 vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash);
197 next_index = node->cached_next_index;
198 from = vlib_frame_vector_args (frame);
199 n_left_from = frame->n_vectors;
201 while (n_left_from > 0)
205 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
207 /* Not enough load/store slots to dual loop... */
208 while (n_left_from > 0 && n_left_to_next > 0)
212 u32 next0 = ACL_NEXT_INDEX_DENY;
214 vnet_classify_table_t *t0;
215 vnet_classify_entry_t *e0;
220 /* Stride 3 seems to work best */
221 if (PREDICT_TRUE (n_left_from > 3))
223 vlib_buffer_t *p1 = vlib_get_buffer (vm, from[3]);
224 vnet_classify_table_t *tp1;
228 table_index1 = vnet_buffer (p1)->l2_classify.table_index;
230 if (PREDICT_TRUE (table_index1 != ~0))
232 tp1 = pool_elt_at_index (vcm->tables, table_index1);
233 phash1 = vnet_buffer (p1)->l2_classify.hash;
234 vnet_classify_prefetch_entry (tp1, phash1);
238 /* speculatively enqueue b0 to the current next frame */
246 b0 = vlib_get_buffer (vm, bi0);
247 table_index0 = vnet_buffer (b0)->l2_classify.table_index;
250 vnet_get_config_data (am->vnet_config_main[tid],
251 &b0->current_config_index, &next0,
252 /* # bytes of config data */ 0);
254 vnet_buffer (b0)->l2_classify.opaque_index = ~0;
256 if (PREDICT_TRUE (table_index0 != ~0))
258 hash0 = vnet_buffer (b0)->l2_classify.hash;
259 t0 = pool_elt_at_index (vcm->tables, table_index0);
261 if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
263 (void *) vlib_buffer_get_current (b0) +
264 t0->current_data_offset;
268 e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now);
271 vnet_buffer (b0)->l2_classify.opaque_index
273 vlib_buffer_advance (b0, e0->advance);
275 next0 = (e0->next_index < n_next_nodes) ?
276 e0->next_index : next0;
281 error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
282 IP4_ERROR_INACL_SESSION_DENY : IP4_ERROR_NONE;
284 error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
285 IP6_ERROR_INACL_SESSION_DENY : IP6_ERROR_NONE;
286 b0->error = error_node->errors[error0];
288 if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
289 e0->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
290 vnet_buffer (b0)->sw_if_index[VLIB_TX] = e0->metadata;
291 else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
292 vnet_buffer (b0)->ip.adj_index[VLIB_TX] = e0->metadata;
298 if (PREDICT_TRUE (t0->next_table_index != ~0))
299 t0 = pool_elt_at_index (vcm->tables,
300 t0->next_table_index);
303 next0 = (t0->miss_next_index < n_next_nodes) ?
304 t0->miss_next_index : next0;
309 error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
310 IP4_ERROR_INACL_TABLE_MISS : IP4_ERROR_NONE;
312 error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
313 IP6_ERROR_INACL_TABLE_MISS : IP6_ERROR_NONE;
314 b0->error = error_node->errors[error0];
318 if (t0->current_data_flag ==
319 CLASSIFY_FLAG_USE_CURR_DATA)
321 (void *) vlib_buffer_get_current (b0) +
322 t0->current_data_offset;
326 hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
327 e0 = vnet_classify_find_entry
328 (t0, (u8 *) h0, hash0, now);
331 vnet_buffer (b0)->l2_classify.opaque_index
333 vlib_buffer_advance (b0, e0->advance);
334 next0 = (e0->next_index < n_next_nodes) ?
335 e0->next_index : next0;
340 error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
341 IP4_ERROR_INACL_SESSION_DENY : IP4_ERROR_NONE;
343 error0 = (next0 == ACL_NEXT_INDEX_DENY) ?
344 IP6_ERROR_INACL_SESSION_DENY : IP6_ERROR_NONE;
345 b0->error = error_node->errors[error0];
347 if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX
349 CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
350 vnet_buffer (b0)->sw_if_index[VLIB_TX] =
358 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
359 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
361 ip_inacl_trace_t *t =
362 vlib_add_trace (vm, node, b0, sizeof (*t));
363 t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
364 t->next_index = next0;
365 t->table_index = t0 ? t0 - vcm->tables : ~0;
366 t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0) : ~0;
369 /* verify speculative enqueue, maybe switch current next frame */
370 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
371 to_next, n_left_to_next,
375 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
378 vlib_node_increment_counter (vm, node->node_index,
379 IP_INACL_ERROR_MISS, misses);
380 vlib_node_increment_counter (vm, node->node_index,
381 IP_INACL_ERROR_HIT, hits);
382 vlib_node_increment_counter (vm, node->node_index,
383 IP_INACL_ERROR_CHAIN_HIT, chain_hits);
384 return frame->n_vectors;
388 ip4_inacl (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
390 return ip_inacl_inline (vm, node, frame, 1 /* is_ip4 */ );
395 VLIB_REGISTER_NODE (ip4_inacl_node) = {
396 .function = ip4_inacl,
398 .vector_size = sizeof (u32),
399 .format_trace = format_ip_inacl_trace,
400 .n_errors = ARRAY_LEN(ip_inacl_error_strings),
401 .error_strings = ip_inacl_error_strings,
403 .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
405 [ACL_NEXT_INDEX_DENY] = "error-drop",
410 VLIB_NODE_FUNCTION_MULTIARCH (ip4_inacl_node, ip4_inacl);
413 ip6_inacl (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
415 return ip_inacl_inline (vm, node, frame, 0 /* is_ip4 */ );
420 VLIB_REGISTER_NODE (ip6_inacl_node) = {
421 .function = ip6_inacl,
423 .vector_size = sizeof (u32),
424 .format_trace = format_ip_inacl_trace,
425 .n_errors = ARRAY_LEN(ip_inacl_error_strings),
426 .error_strings = ip_inacl_error_strings,
428 .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
430 [ACL_NEXT_INDEX_DENY] = "error-drop",
435 VLIB_NODE_FUNCTION_MULTIARCH (ip6_inacl_node, ip6_inacl);
437 static clib_error_t *
438 ip_inacl_init (vlib_main_t * vm)
443 VLIB_INIT_FUNCTION (ip_inacl_init);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */