2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vnet/ip/ip.h>
16 #include <vnet/classify/vnet_classify.h>
17 #include <vnet/classify/input_acl.h>
26 /* packet trace format function */
/* Formats one ip_inacl_trace_t record for "show trace": the RX interface,
 * the chosen next node, the classify table hit, and the session offset.
 * NOTE(review): listing is elided here -- the opening brace (orig line 28)
 * and the trailing "return s;" / closing brace (orig lines 35+) are not
 * visible in this excerpt. */
27 static u8 * format_ip_inacl_trace (u8 * s, va_list * args)
/* First two va_args are the standard vlib trace-format preamble; unused. */
29 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
30 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
31 ip_inacl_trace_t * t = va_arg (*args, ip_inacl_trace_t *);
33 s = format (s, "INACL: sw_if_index %d, next_index %d, table %d, offset %d",
34 t->sw_if_index, t->next_index, t->table_index, t->offset);
/* Forward declarations of the two graph-node registrations defined below. */
38 vlib_node_registration_t ip4_inacl_node;
39 vlib_node_registration_t ip6_inacl_node;
/* X-macro listing the per-node error counters; expanded once for the enum
 * symbols (IP_INACL_ERROR_*) and once for the display strings. */
41 #define foreach_ip_inacl_error \
42 _(MISS, "input ACL misses") \
43 _(HIT, "input ACL hits") \
44 _(CHAIN_HIT, "input ACL hits after chain walk")
/* Enum expansion.  NOTE(review): listing is elided -- the enclosing
 * "typedef enum {" (orig ~line 46), the "#undef _", the N_ERROR terminator
 * and the closing "} ip_inacl_error_t;" (orig lines 49-52) are not visible
 * in this excerpt. */
47 #define _(sym,str) IP_INACL_ERROR_##sym,
48 foreach_ip_inacl_error
/* String expansion, indexed by the enum above; consumed by .error_strings
 * in the node registrations.  NOTE(review): "#undef _" and the closing
 * "};" (orig lines 56+) are elided from this excerpt. */
53 static char * ip_inacl_error_strings[] = {
54 #define _(sym,string) string,
55 foreach_ip_inacl_error
/*
 * ip_inacl_inline: core of the IPv4/IPv6 input-ACL classify nodes, shared
 * via the is_ip4 flag.  Two-pass design over the frame:
 *   pass 1 - compute the classifier hash for every packet and prefetch the
 *            matching hash bucket;
 *   pass 2 - look up the session, walk chained tables on miss, and choose
 *            next0 (default ACL_NEXT_INDEX_DENY) and the error counter.
 * NOTE(review): this listing is elided -- many original lines (braces,
 * "bi0 = from[0]; from++; n_left_from--;" style bookkeeping, h0/h1 setup,
 * hit/miss/chain-hit tallies, uword return type) fall in the numbering
 * gaps and are not visible here.  Comments below cover only visible code.
 */
60 ip_inacl_inline (vlib_main_t * vm,
61 vlib_node_runtime_t * node,
62 vlib_frame_t * frame, int is_ip4)
64 u32 n_left_from, * from, * to_next;
65 acl_next_index_t next_index;
66 input_acl_main_t * am = &input_acl_main;
67 vnet_classify_main_t * vcm = am->vnet_classify_main;
/* Timestamp passed to vnet_classify_find_entry (session last-heard time). */
68 f64 now = vlib_time_now (vm);
72 input_acl_table_id_t tid;
73 vlib_node_runtime_t * error_node;
76 n_next_nodes = node->n_next_nodes;
/* Select table id and the input node whose error counters we charge.
 * NOTE(review): the "if (is_ip4) ... else ..." wrapper (orig lines 78-84)
 * is elided; the IPv4 arm is lines 80-81, the IPv6 arm lines 85-86. */
80 tid = INPUT_ACL_TABLE_IP4;
81 error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
85 tid = INPUT_ACL_TABLE_IP6;
86 error_node = vlib_node_get_runtime (vm, ip6_input_node.index);
89 from = vlib_frame_vector_args (frame);
90 n_left_from = frame->n_vectors;
92 /* First pass: compute hashes */
/* Dual-packet loop: hash b0/b1, prefetch their buckets, and stash hash +
 * table index in the buffer opaque (l2_classify) for pass 2. */
94 while (n_left_from > 2)
96 vlib_buffer_t * b0, * b1;
99 u32 sw_if_index0, sw_if_index1;
100 u32 table_index0, table_index1;
101 vnet_classify_table_t * t0, * t1;
103 /* prefetch next iteration */
105 vlib_buffer_t * p1, * p2;
107 p1 = vlib_get_buffer (vm, from[1]);
108 p2 = vlib_get_buffer (vm, from[2]);
110 vlib_prefetch_buffer_header (p1, STORE);
111 CLIB_PREFETCH (p1->data, CLIB_CACHE_LINE_BYTES, STORE);
112 vlib_prefetch_buffer_header (p2, STORE);
113 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
/* NOTE(review): bi0/bi1 assignment from from[] (orig lines 115-120) is
 * elided in this excerpt. */
117 b0 = vlib_get_buffer (vm, bi0);
121 b1 = vlib_get_buffer (vm, bi1);
/* Per-RX-interface classify table lookup for this address family. */
124 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
125 table_index0 = am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
127 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
128 table_index1 = am->classify_table_index_by_sw_if_index[tid][sw_if_index1];
130 t0 = pool_elt_at_index (vcm->tables, table_index0);
132 t1 = pool_elt_at_index (vcm->tables, table_index1);
/* NOTE(review): h0/h1 (packet data pointers fed to the hash) are set in
 * elided lines; only their use is visible here. */
134 vnet_buffer(b0)->l2_classify.hash =
135 vnet_classify_hash_packet (t0, (u8 *) h0);
137 vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
139 vnet_buffer(b1)->l2_classify.hash =
140 vnet_classify_hash_packet (t1, (u8 *) h1);
142 vnet_classify_prefetch_bucket (t1, vnet_buffer(b1)->l2_classify.hash);
144 vnet_buffer(b0)->l2_classify.table_index = table_index0;
146 vnet_buffer(b1)->l2_classify.table_index = table_index1;
/* Single-packet cleanup loop for the 1-2 packets left over from the
 * dual loop above: same hash + stash + bucket prefetch. */
152 while (n_left_from > 0)
159 vnet_classify_table_t * t0;
162 b0 = vlib_get_buffer (vm, bi0);
165 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
166 table_index0 = am->classify_table_index_by_sw_if_index[tid][sw_if_index0];
168 t0 = pool_elt_at_index (vcm->tables, table_index0);
169 vnet_buffer(b0)->l2_classify.hash =
170 vnet_classify_hash_packet (t0, (u8 *) h0);
172 vnet_buffer(b0)->l2_classify.table_index = table_index0;
173 vnet_classify_prefetch_bucket (t0, vnet_buffer(b0)->l2_classify.hash);
/* Second pass: rewind to the start of the frame and do the real lookups,
 * enqueueing each buffer to its next node. */
179 next_index = node->cached_next_index;
180 from = vlib_frame_vector_args (frame);
181 n_left_from = frame->n_vectors;
183 while (n_left_from > 0)
187 vlib_get_next_frame (vm, node, next_index,
188 to_next, n_left_to_next);
190 /* Not enough load/store slots to dual loop... */
191 while (n_left_from > 0 && n_left_to_next > 0)
/* Deny by default; only a table/session verdict can open the gate. */
195 u32 next0 = ACL_NEXT_INDEX_DENY;
197 vnet_classify_table_t * t0;
198 vnet_classify_entry_t * e0;
203 /* Stride 3 seems to work best */
204 if (PREDICT_TRUE (n_left_from > 3))
206 vlib_buffer_t * p1 = vlib_get_buffer(vm, from[3]);
207 vnet_classify_table_t * tp1;
211 table_index1 = vnet_buffer(p1)->l2_classify.table_index;
213 if (PREDICT_TRUE (table_index1 != ~0))
215 tp1 = pool_elt_at_index (vcm->tables, table_index1);
216 phash1 = vnet_buffer(p1)->l2_classify.hash;
/* Prefetch the classify entry 3 packets ahead, using the hash
 * computed in pass 1. */
217 vnet_classify_prefetch_entry (tp1, phash1);
221 /* speculatively enqueue b0 to the current next frame */
229 b0 = vlib_get_buffer (vm, bi0);
231 table_index0 = vnet_buffer(b0)->l2_classify.table_index;
/* Pop this feature off the per-interface config chain so the packet
 * continues to the next configured input feature. */
234 vnet_get_config_data (am->vnet_config_main[tid],
235 &b0->current_config_index,
237 /* # bytes of config data */ 0);
239 vnet_buffer(b0)->l2_classify.opaque_index = ~0;
/* ~0 table index means no ACL table is attached on this interface. */
241 if (PREDICT_TRUE(table_index0 != ~0))
243 hash0 = vnet_buffer(b0)->l2_classify.hash;
244 t0 = pool_elt_at_index (vcm->tables, table_index0);
246 e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0,
/* Hit in the first table: record opaque index, apply the entry's
 * rewrite advance, and take its next_index if it is in range. */
250 vnet_buffer(b0)->l2_classify.opaque_index
252 vlib_buffer_advance (b0, e0->advance);
254 next0 = (e0->next_index < n_next_nodes)?
255 e0->next_index:next0;
/* NOTE(review): the is_ip4 selector around these two assignments
 * (orig lines 257-262) is elided in this excerpt. */
260 error0 = (next0 == ACL_NEXT_INDEX_DENY)?
261 IP4_ERROR_INACL_SESSION_DENY:IP4_ERROR_NONE;
263 error0 = (next0 == ACL_NEXT_INDEX_DENY)?
264 IP6_ERROR_INACL_SESSION_DENY:IP6_ERROR_NONE;
265 b0->error = error_node->errors[error0];
/* Miss: walk the table chain.  ~0 next_table_index ends the chain
 * and applies this table's miss_next_index verdict instead. */
271 if (PREDICT_TRUE(t0->next_table_index != ~0))
272 t0 = pool_elt_at_index (vcm->tables,
273 t0->next_table_index);
276 next0 = (t0->miss_next_index < n_next_nodes)?
277 t0->miss_next_index:next0;
282 error0 = (next0 == ACL_NEXT_INDEX_DENY)?
283 IP4_ERROR_INACL_TABLE_MISS:IP4_ERROR_NONE;
285 error0 = (next0 == ACL_NEXT_INDEX_DENY)?
286 IP6_ERROR_INACL_TABLE_MISS:IP6_ERROR_NONE;
287 b0->error = error_node->errors[error0];
/* Chained table: re-hash against the new table's mask and retry. */
291 hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
292 e0 = vnet_classify_find_entry
293 (t0, (u8 *) h0, hash0, now);
/* Hit after chain walk: same treatment as a first-table hit. */
296 vnet_buffer(b0)->l2_classify.opaque_index
298 vlib_buffer_advance (b0, e0->advance);
299 next0 = (e0->next_index < n_next_nodes)?
300 e0->next_index:next0;
305 error0 = (next0 == ACL_NEXT_INDEX_DENY)?
306 IP4_ERROR_INACL_SESSION_DENY:IP4_ERROR_NONE;
308 error0 = (next0 == ACL_NEXT_INDEX_DENY)?
309 IP6_ERROR_INACL_SESSION_DENY:IP6_ERROR_NONE;
310 b0->error = error_node->errors[error0];
317 if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
318 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
320 ip_inacl_trace_t *t =
321 vlib_add_trace (vm, node, b0, sizeof (*t));
322 t->sw_if_index = vnet_buffer(b0)->sw_if_index[VLIB_RX];
323 t->next_index = next0;
/* Pool index of the table (pointer arithmetic), ~0 if none. */
324 t->table_index = t0 ? t0 - vcm->tables : ~0;
325 t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0): ~0;
328 /* verify speculative enqueue, maybe switch current next frame */
329 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
330 to_next, n_left_to_next,
334 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
/* Publish the per-frame hit/miss/chain-hit tallies.
 * NOTE(review): the MISS/HIT error codes and count arguments (orig
 * lines 338-345) are partially elided in this excerpt. */
337 vlib_node_increment_counter (vm, node->node_index,
340 vlib_node_increment_counter (vm, node->node_index,
343 vlib_node_increment_counter (vm, node->node_index,
344 IP_INACL_ERROR_CHAIN_HIT,
346 return frame->n_vectors;
/* IPv4 entry point: thin wrapper invoking the shared inline with is_ip4=1.
 * NOTE(review): the "static uword" return-type line (orig line 349) and
 * the function braces (orig lines 353/355) are elided in this excerpt. */
350 ip4_inacl (vlib_main_t * vm,
351 vlib_node_runtime_t * node,
352 vlib_frame_t * frame)
354 return ip_inacl_inline (vm, node, frame, 1 /* is_ip4 */);
/* Graph-node registration for the IPv4 input ACL.
 * NOTE(review): the .name field (orig line 360) and the .next_nodes
 * opening/closing (orig lines 367/369-371) are elided in this excerpt. */
358 VLIB_REGISTER_NODE (ip4_inacl_node) = {
359 .function = ip4_inacl,
361 .vector_size = sizeof (u32),
362 .format_trace = format_ip_inacl_trace,
363 .n_errors = ARRAY_LEN(ip_inacl_error_strings),
364 .error_strings = ip_inacl_error_strings,
366 .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
/* Deny verdicts are dropped via error-drop. */
368 [ACL_NEXT_INDEX_DENY] = "error-drop",
372 VLIB_NODE_FUNCTION_MULTIARCH (ip4_inacl_node, ip4_inacl)
/* IPv6 entry point: thin wrapper invoking the shared inline with is_ip4=0.
 * NOTE(review): the "static uword" return-type line (orig line 374) and
 * the function braces (orig lines 378/380) are elided in this excerpt. */
375 ip6_inacl (vlib_main_t * vm,
376 vlib_node_runtime_t * node,
377 vlib_frame_t * frame)
379 return ip_inacl_inline (vm, node, frame, 0 /* is_ip4 */);
/* Graph-node registration for the IPv6 input ACL; mirrors the IPv4 one.
 * NOTE(review): the .name field (orig line 385) and the .next_nodes
 * opening/closing (orig lines 392/394-396) are elided in this excerpt. */
383 VLIB_REGISTER_NODE (ip6_inacl_node) = {
384 .function = ip6_inacl,
386 .vector_size = sizeof (u32),
387 .format_trace = format_ip_inacl_trace,
388 .n_errors = ARRAY_LEN(ip_inacl_error_strings),
389 .error_strings = ip_inacl_error_strings,
391 .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
/* Deny verdicts are dropped via error-drop. */
393 [ACL_NEXT_INDEX_DENY] = "error-drop",
397 VLIB_NODE_FUNCTION_MULTIARCH (ip6_inacl_node, ip6_inacl)
/* Init function registered with vlib; runs once at startup.
 * NOTE(review): the function body (orig lines 401-404, presumably
 * "{ return 0; }") is elided in this excerpt -- confirm against the
 * full source. */
399 static clib_error_t *
400 ip_inacl_init (vlib_main_t * vm)
405 VLIB_INIT_FUNCTION (ip_inacl_init);