/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/ip/ip.h>
#include <vnet/classify/vnet_classify.h>
#include <vnet/classify/in_out_acl.h>

typedef struct
{
  u32 sw_if_index;
  u32 next_index;
  u32 table_index;
  u32 offset;
} ip_in_out_acl_trace_t;
/* packet trace format function */
static u8 *
format_ip_in_out_acl_trace (u8 * s, u32 is_output, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip_in_out_acl_trace_t *t = va_arg (*args, ip_in_out_acl_trace_t *);

  s = format (s, "%s: sw_if_index %d, next_index %d, table %d, offset %d",
              is_output ? "OUTACL" : "INACL",
              t->sw_if_index, t->next_index, t->table_index, t->offset);
  return s;
}

static u8 *
format_ip_inacl_trace (u8 * s, va_list * args)
{
  return format_ip_in_out_acl_trace (s, 0 /* is_output */ , args);
}

static u8 *
format_ip_outacl_trace (u8 * s, va_list * args)
{
  return format_ip_in_out_acl_trace (s, 1 /* is_output */ , args);
}
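/*
 * Example trace line, as produced by the format string above
 * (values are illustrative):
 *
 *   INACL: sw_if_index 1, next_index 0, table 5, offset 1776
 */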
extern vlib_node_registration_t ip4_inacl_node;
extern vlib_node_registration_t ip4_outacl_node;
extern vlib_node_registration_t ip6_inacl_node;
extern vlib_node_registration_t ip6_outacl_node;

#define foreach_ip_inacl_error                  \
_(MISS, "input ACL misses")                     \
_(HIT, "input ACL hits")                        \
_(CHAIN_HIT, "input ACL hits after chain walk")

#define foreach_ip_outacl_error                 \
_(MISS, "output ACL misses")                    \
_(HIT, "output ACL hits")                       \
_(CHAIN_HIT, "output ACL hits after chain walk")
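/*
 * The _() X-macro lists above are expanded twice below: once to generate
 * the error enum values and once to generate the matching counter-name
 * strings. The inacl enum expansion, for instance, is equivalent to:
 *
 *   typedef enum {
 *     IP_INACL_ERROR_MISS,
 *     IP_INACL_ERROR_HIT,
 *     IP_INACL_ERROR_CHAIN_HIT,
 *     IP_INACL_N_ERROR,
 *   } ip_inacl_error_t;
 */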
typedef enum
{
#define _(sym,str) IP_INACL_ERROR_##sym,
  foreach_ip_inacl_error
#undef _
    IP_INACL_N_ERROR,
} ip_inacl_error_t;

static char *ip_inacl_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_inacl_error
#undef _
};

typedef enum
{
#define _(sym,str) IP_OUTACL_ERROR_##sym,
  foreach_ip_outacl_error
#undef _
    IP_OUTACL_N_ERROR,
} ip_outacl_error_t;

static char *ip_outacl_error_strings[] = {
#define _(sym,string) string,
  foreach_ip_outacl_error
#undef _
};
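/*
 * Classify each buffer in the frame against the classifier table (chain)
 * configured for its interface and direction. The code below is software
 * pipelined: packets are handled two per iteration while the hashes and
 * hash buckets for the following pair are computed and prefetched one
 * iteration ahead, so table lookups mostly hit warm cache lines.
 */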
static_always_inline void
ip_in_out_acl_inline_trace (
  vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
  vlib_buffer_t **b, u16 *next, u32 n_left, u32 *hits__, u32 *misses__,
  u32 *chain_hits__, const vlib_error_t error_none,
  const vlib_error_t error_deny, const vlib_error_t error_miss,
  vnet_classify_table_t *tables, const u32 *table_index_by_sw_if_index,
  u32 *fib_index_by_sw_if_index, vnet_config_main_t *cm,
  const vlib_rx_or_tx_t way, const int is_output, const int do_trace)
{
  f64 now = vlib_time_now (vm);
  u32 hits = 0;
  u32 misses = 0;
  u32 chain_hits = 0;
  u32 n_next_nodes = node->n_next_nodes;
  u8 *h[4];
  u32 sw_if_index[4];
  u32 table_index[4];
  u64 hash[4];
  vnet_classify_table_t *t[4] = { 0, 0 };
  /* calculate hashes for b[0] & b[1] */
  if (n_left >= 2)
    {
      /* ~0 is used as a wildcard to say 'always use sw_if_index 0'
       * aka local0. It is used when we do not care about the sw_if_index, as
       * when punting */
      sw_if_index[2] = ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
      sw_if_index[3] = ~0 == way ? 0 : vnet_buffer (b[1])->sw_if_index[way];

      table_index[2] = table_index_by_sw_if_index[sw_if_index[2]];
      table_index[3] = table_index_by_sw_if_index[sw_if_index[3]];

      t[2] = pool_elt_at_index (tables, table_index[2]);
      t[3] = pool_elt_at_index (tables, table_index[3]);

      if (t[2]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
        h[2] =
          (void *) vlib_buffer_get_current (b[0]) + t[2]->current_data_offset;
      else
        h[2] = b[0]->data;

      if (t[3]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
        h[3] =
          (void *) vlib_buffer_get_current (b[1]) + t[3]->current_data_offset;
      else
        h[3] = b[1]->data;

      if (is_output)
        {
          /* Save the rewrite length, since we are using the l2_classify struct */
          vnet_buffer (b[0])->l2_classify.pad.l2_len =
            vnet_buffer (b[0])->ip.save_rewrite_length;
          /* advance the match pointer so the matching happens on IP header */
          h[2] += vnet_buffer (b[0])->l2_classify.pad.l2_len;

          /* Save the rewrite length, since we are using the l2_classify struct */
          vnet_buffer (b[1])->l2_classify.pad.l2_len =
            vnet_buffer (b[1])->ip.save_rewrite_length;
          /* advance the match pointer so the matching happens on IP header */
          h[3] += vnet_buffer (b[1])->l2_classify.pad.l2_len;
        }

      hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
      hash[3] = vnet_classify_hash_packet_inline (t[3], (u8 *) h[3]);

      vnet_buffer (b[0])->l2_classify.hash = hash[2];
      vnet_buffer (b[1])->l2_classify.hash = hash[3];

      vnet_buffer (b[0])->l2_classify.table_index = table_index[2];
      vnet_buffer (b[1])->l2_classify.table_index = table_index[3];

      vnet_buffer (b[0])->l2_classify.opaque_index = ~0;
      vnet_buffer (b[1])->l2_classify.opaque_index = ~0;

      vnet_classify_prefetch_bucket (t[2],
                                     vnet_buffer (b[0])->l2_classify.hash);
      vnet_classify_prefetch_bucket (t[3],
                                     vnet_buffer (b[1])->l2_classify.hash);
    }
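  /* Main dual loop: slots 0/1 of the h/t/hash arrays hold the pair being
   * classified now; slots 2/3 hold the pair whose hashes were computed one
   * iteration ahead by the pipelining above. */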
  while (n_left >= 2)
    {
      vnet_classify_entry_t *e[2] = { 0, 0 };
      u32 _next[2] = { ACL_NEXT_INDEX_DENY, ACL_NEXT_INDEX_DENY };

      h[0] = h[2];
      h[1] = h[3];
      t[0] = t[2];
      t[1] = t[3];

      sw_if_index[0] = sw_if_index[2];
      sw_if_index[1] = sw_if_index[3];

      table_index[0] = table_index[2];
      table_index[1] = table_index[3];

      hash[0] = hash[2];
      hash[1] = hash[3];

      /* prefetch next iteration */
      if (n_left >= 6)
        {
          vlib_prefetch_buffer_header (b[4], LOAD);
          vlib_prefetch_buffer_header (b[5], LOAD);

          clib_prefetch_load (b[4]->data);
          clib_prefetch_load (b[5]->data);
        }

      /* calculate hashes for b[2] & b[3] */
      if (n_left >= 4)
        {
          sw_if_index[2] =
            ~0 == way ? 0 : vnet_buffer (b[2])->sw_if_index[way];
          sw_if_index[3] =
            ~0 == way ? 0 : vnet_buffer (b[3])->sw_if_index[way];

          table_index[2] = table_index_by_sw_if_index[sw_if_index[2]];
          table_index[3] = table_index_by_sw_if_index[sw_if_index[3]];

          t[2] = pool_elt_at_index (tables, table_index[2]);
          t[3] = pool_elt_at_index (tables, table_index[3]);

          if (t[2]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
            h[2] =
              (void *) vlib_buffer_get_current (b[2]) +
              t[2]->current_data_offset;
          else
            h[2] = b[2]->data;

          if (t[3]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
            h[3] =
              (void *) vlib_buffer_get_current (b[3]) +
              t[3]->current_data_offset;
          else
            h[3] = b[3]->data;

          if (is_output)
            {
              /* Save the rewrite length, since we are using the l2_classify struct */
              vnet_buffer (b[2])->l2_classify.pad.l2_len =
                vnet_buffer (b[2])->ip.save_rewrite_length;
              /* advance the match pointer so the matching happens on IP header */
              h[2] += vnet_buffer (b[2])->l2_classify.pad.l2_len;

              /* Save the rewrite length, since we are using the l2_classify struct */
              vnet_buffer (b[3])->l2_classify.pad.l2_len =
                vnet_buffer (b[3])->ip.save_rewrite_length;
              /* advance the match pointer so the matching happens on IP header */
              h[3] += vnet_buffer (b[3])->l2_classify.pad.l2_len;
            }

          hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
          hash[3] = vnet_classify_hash_packet_inline (t[3], (u8 *) h[3]);

          vnet_buffer (b[2])->l2_classify.hash = hash[2];
          vnet_buffer (b[3])->l2_classify.hash = hash[3];

          vnet_buffer (b[2])->l2_classify.table_index = table_index[2];
          vnet_buffer (b[3])->l2_classify.table_index = table_index[3];

          vnet_buffer (b[2])->l2_classify.opaque_index = ~0;
          vnet_buffer (b[3])->l2_classify.opaque_index = ~0;

          vnet_classify_prefetch_bucket (t[2],
                                         vnet_buffer (b[2])->l2_classify.hash);
          vnet_classify_prefetch_bucket (t[3],
                                         vnet_buffer (b[3])->l2_classify.hash);
        }

      /* find entry for b[0] & b[1] */
      vnet_get_config_data (cm, &b[0]->current_config_index, &_next[0],
                            /* # bytes of config data */ 0);
      vnet_get_config_data (cm, &b[1]->current_config_index, &_next[1],
                            /* # bytes of config data */ 0);
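      /* The feature arc config supplies the default next node; it is only
       * overridden below by a matching session or a table miss-next. */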
      if (PREDICT_TRUE (table_index[0] != ~0))
        {
          e[0] =
            vnet_classify_find_entry_inline (t[0], (u8 *) h[0], hash[0], now);
          if (e[0])
            {
              vnet_buffer (b[0])->l2_classify.opaque_index
                = e[0]->opaque_index;
              vlib_buffer_advance (b[0], e[0]->advance);
              _next[0] = (e[0]->next_index < n_next_nodes) ?
                e[0]->next_index : _next[0];
              hits++;
              b[0]->error =
                (_next[0] == ACL_NEXT_INDEX_DENY) ? error_deny : error_none;
              if (!is_output)
                {
                  if (e[0]->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
                      e[0]->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
                    vnet_buffer (b[0])->sw_if_index[VLIB_TX] = e[0]->metadata;
                  else if (e[0]->action == CLASSIFY_ACTION_SET_METADATA)
                    {
                      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
                        e[0]->metadata;
                      /* For source check in case we skip the lookup node */
                      ip_lookup_set_buffer_fib_index (fib_index_by_sw_if_index,
                                                      b[0]);
                    }
                }
            }
          else
            {
              while (1)
                {
                  if (PREDICT_TRUE (t[0]->next_table_index != ~0))
                    t[0] = pool_elt_at_index (tables, t[0]->next_table_index);
                  else
                    {
                      _next[0] = (t[0]->miss_next_index < n_next_nodes) ?
                        t[0]->miss_next_index : _next[0];
                      misses++;
                      b[0]->error = (_next[0] == ACL_NEXT_INDEX_DENY) ?
                        error_miss : error_none;
                      break;
                    }

                  if (t[0]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
                    h[0] =
                      (void *) vlib_buffer_get_current (b[0]) +
                      t[0]->current_data_offset;
                  else
                    h[0] = b[0]->data;

                  /* advance the match pointer so the matching happens on IP header */
                  if (is_output)
                    h[0] += vnet_buffer (b[0])->l2_classify.pad.l2_len;

                  hash[0] =
                    vnet_classify_hash_packet_inline (t[0], (u8 *) h[0]);
                  e[0] =
                    vnet_classify_find_entry_inline (t[0], (u8 *) h[0],
                                                     hash[0], now);
                  if (e[0])
                    {
                      vnet_buffer (b[0])->l2_classify.opaque_index
                        = e[0]->opaque_index;
                      vlib_buffer_advance (b[0], e[0]->advance);
                      _next[0] = (e[0]->next_index < n_next_nodes) ?
                        e[0]->next_index : _next[0];
                      hits++;
                      chain_hits++;
                      b[0]->error = (_next[0] == ACL_NEXT_INDEX_DENY) ?
                        error_deny : error_none;
                      if (!is_output)
                        {
                          if (e[0]->action ==
                                CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
                              e[0]->action ==
                                CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
                            vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
                              e[0]->metadata;
                          else if (e[0]->action ==
                                   CLASSIFY_ACTION_SET_METADATA)
                            {
                              vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
                                e[0]->metadata;
                              /* For source check in case we skip the lookup
                               * node */
                              ip_lookup_set_buffer_fib_index (
                                fib_index_by_sw_if_index, b[0]);
                            }
                        }
                      break;
                    }
                }
            }
        }
      if (PREDICT_TRUE (table_index[1] != ~0))
        {
          e[1] =
            vnet_classify_find_entry_inline (t[1], (u8 *) h[1], hash[1], now);
          if (e[1])
            {
              vnet_buffer (b[1])->l2_classify.opaque_index
                = e[1]->opaque_index;
              vlib_buffer_advance (b[1], e[1]->advance);
              _next[1] = (e[1]->next_index < n_next_nodes) ?
                e[1]->next_index : _next[1];
              hits++;
              b[1]->error =
                (_next[1] == ACL_NEXT_INDEX_DENY) ? error_deny : error_none;
              if (!is_output)
                {
                  if (e[1]->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
                      e[1]->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
                    vnet_buffer (b[1])->sw_if_index[VLIB_TX] = e[1]->metadata;
                  else if (e[1]->action == CLASSIFY_ACTION_SET_METADATA)
                    {
                      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] =
                        e[1]->metadata;
                      /* For source check in case we skip the lookup node */
                      ip_lookup_set_buffer_fib_index (fib_index_by_sw_if_index,
                                                      b[1]);
                    }
                }
            }
          else
            {
              while (1)
                {
                  if (PREDICT_TRUE (t[1]->next_table_index != ~0))
                    t[1] = pool_elt_at_index (tables, t[1]->next_table_index);
                  else
                    {
                      _next[1] = (t[1]->miss_next_index < n_next_nodes) ?
                        t[1]->miss_next_index : _next[1];
                      misses++;
                      b[1]->error = (_next[1] == ACL_NEXT_INDEX_DENY) ?
                        error_miss : error_none;
                      break;
                    }

                  if (t[1]->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
                    h[1] =
                      (void *) vlib_buffer_get_current (b[1]) +
                      t[1]->current_data_offset;
                  else
                    h[1] = b[1]->data;

                  /* advance the match pointer so the matching happens on IP header */
                  if (is_output)
                    h[1] += vnet_buffer (b[1])->l2_classify.pad.l2_len;

                  hash[1] =
                    vnet_classify_hash_packet_inline (t[1], (u8 *) h[1]);
                  e[1] =
                    vnet_classify_find_entry_inline (t[1], (u8 *) h[1],
                                                     hash[1], now);
                  if (e[1])
                    {
                      vnet_buffer (b[1])->l2_classify.opaque_index
                        = e[1]->opaque_index;
                      vlib_buffer_advance (b[1], e[1]->advance);
                      _next[1] = (e[1]->next_index < n_next_nodes) ?
                        e[1]->next_index : _next[1];
                      hits++;
                      chain_hits++;
                      b[1]->error = (_next[1] == ACL_NEXT_INDEX_DENY) ?
                        error_deny : error_none;
                      if (!is_output)
                        {
                          if (e[1]->action ==
                                CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
                              e[1]->action ==
                                CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
                            vnet_buffer (b[1])->sw_if_index[VLIB_TX] =
                              e[1]->metadata;
                          else if (e[1]->action ==
                                   CLASSIFY_ACTION_SET_METADATA)
                            {
                              vnet_buffer (b[1])->ip.adj_index[VLIB_TX] =
                                e[1]->metadata;
                              /* For source check in case we skip the lookup
                               * node */
                              ip_lookup_set_buffer_fib_index (
                                fib_index_by_sw_if_index, b[1]);
                            }
                        }
                      break;
                    }
                }
            }
        }
      if (do_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
        {
          ip_in_out_acl_trace_t *_t =
            vlib_add_trace (vm, node, b[0], sizeof (*_t));
          _t->sw_if_index =
            ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
          _t->next_index = _next[0];
          _t->table_index = t[0] ? t[0] - tables : ~0;
          _t->offset = (e[0]
                        && t[0]) ? vnet_classify_get_offset (t[0], e[0]) : ~0;
        }

      if (do_trace && b[1]->flags & VLIB_BUFFER_IS_TRACED)
        {
          ip_in_out_acl_trace_t *_t =
            vlib_add_trace (vm, node, b[1], sizeof (*_t));
          _t->sw_if_index =
            ~0 == way ? 0 : vnet_buffer (b[1])->sw_if_index[way];
          _t->next_index = _next[1];
          _t->table_index = t[1] ? t[1] - tables : ~0;
          _t->offset = (e[1]
                        && t[1]) ? vnet_classify_get_offset (t[1], e[1]) : ~0;
        }
      if ((_next[0] == ACL_NEXT_INDEX_DENY) && is_output)
        {
          /* on output, for the drop node to work properly, go back to ip header */
          vlib_buffer_advance (b[0], vnet_buffer (b[0])->l2.l2_len);
        }

      if ((_next[1] == ACL_NEXT_INDEX_DENY) && is_output)
        {
          /* on output, for the drop node to work properly, go back to ip header */
          vlib_buffer_advance (b[1], vnet_buffer (b[1])->l2.l2_len);
        }

      next[0] = _next[0];
      next[1] = _next[1];

      /* next */
      next += 2;
      b += 2;
      n_left -= 2;
    }
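  /* Scalar tail: classify any remaining packets (odd frame sizes) one at a
   * time, following the same match / chain-walk logic as the dual loop. */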
  while (n_left > 0)
    {
      u8 *h0;
      u32 sw_if_index0;
      u32 table_index0;
      u64 hash0;
      vnet_classify_table_t *t0 = 0;
      vnet_classify_entry_t *e0 = 0;
      u32 next0 = ACL_NEXT_INDEX_DENY;

      sw_if_index0 = ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
      table_index0 = table_index_by_sw_if_index[sw_if_index0];

      t0 = pool_elt_at_index (tables, table_index0);

      if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
        h0 =
          (void *) vlib_buffer_get_current (b[0]) + t0->current_data_offset;
      else
        h0 = b[0]->data;

      if (is_output)
        {
          /* Save the rewrite length, since we are using the l2_classify struct */
          vnet_buffer (b[0])->l2_classify.pad.l2_len =
            vnet_buffer (b[0])->ip.save_rewrite_length;
          /* advance the match pointer so the matching happens on IP header */
          h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;
        }

      vnet_buffer (b[0])->l2_classify.hash =
        vnet_classify_hash_packet (t0, (u8 *) h0);

      vnet_buffer (b[0])->l2_classify.table_index = table_index0;
      vnet_buffer (b[0])->l2_classify.opaque_index = ~0;
      vnet_get_config_data (cm, &b[0]->current_config_index, &next0,
                            /* # bytes of config data */ 0);

      if (PREDICT_TRUE (table_index0 != ~0))
        {
          hash0 = vnet_buffer (b[0])->l2_classify.hash;
          t0 = pool_elt_at_index (tables, table_index0);

          if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
            h0 =
              (void *) vlib_buffer_get_current (b[0]) +
              t0->current_data_offset;
          else
            h0 = b[0]->data;

          /* advance the match pointer so the matching happens on IP header */
          if (is_output)
            h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;

          e0 = vnet_classify_find_entry_inline (t0, (u8 *) h0, hash0, now);
          if (e0)
            {
              vnet_buffer (b[0])->l2_classify.opaque_index = e0->opaque_index;
              vlib_buffer_advance (b[0], e0->advance);
              next0 = (e0->next_index < n_next_nodes) ?
                e0->next_index : next0;
              hits++;
              b[0]->error =
                (next0 == ACL_NEXT_INDEX_DENY) ? error_deny : error_none;
              if (!is_output)
                {
                  if (e0->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
                      e0->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
                    vnet_buffer (b[0])->sw_if_index[VLIB_TX] = e0->metadata;
                  else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
                    {
                      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = e0->metadata;
                      /* For source check in case we skip the lookup node */
                      ip_lookup_set_buffer_fib_index (fib_index_by_sw_if_index,
                                                      b[0]);
                    }
                }
            }
          else
            {
              while (1)
                {
                  if (PREDICT_TRUE (t0->next_table_index != ~0))
                    t0 = pool_elt_at_index (tables, t0->next_table_index);
                  else
                    {
                      next0 = (t0->miss_next_index < n_next_nodes) ?
                        t0->miss_next_index : next0;
                      misses++;
                      b[0]->error = (next0 == ACL_NEXT_INDEX_DENY) ?
                        error_miss : error_none;
                      break;
                    }

                  if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA)
                    h0 =
                      (void *) vlib_buffer_get_current (b[0]) +
                      t0->current_data_offset;
                  else
                    h0 = b[0]->data;

                  /* advance the match pointer so the matching happens on IP header */
                  if (is_output)
                    h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;

                  hash0 = vnet_classify_hash_packet_inline (t0, (u8 *) h0);
                  e0 = vnet_classify_find_entry_inline
                    (t0, (u8 *) h0, hash0, now);
                  if (e0)
                    {
                      vnet_buffer (b[0])->l2_classify.opaque_index
                        = e0->opaque_index;
                      vlib_buffer_advance (b[0], e0->advance);
                      next0 = (e0->next_index < n_next_nodes) ?
                        e0->next_index : next0;
                      hits++;
                      chain_hits++;
                      b[0]->error = (next0 == ACL_NEXT_INDEX_DENY) ?
                        error_deny : error_none;
                      if (!is_output)
                        {
                          if (e0->action ==
                                CLASSIFY_ACTION_SET_IP4_FIB_INDEX ||
                              e0->action ==
                                CLASSIFY_ACTION_SET_IP6_FIB_INDEX)
                            vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
                              e0->metadata;
                          else if (e0->action == CLASSIFY_ACTION_SET_METADATA)
                            {
                              vnet_buffer (b[0])->ip.adj_index[VLIB_TX] =
                                e0->metadata;
                              /* For source check in case we skip the lookup
                               * node */
                              ip_lookup_set_buffer_fib_index (
                                fib_index_by_sw_if_index, b[0]);
                            }
                        }
                      break;
                    }
                }
            }
        }
      if (do_trace && b[0]->flags & VLIB_BUFFER_IS_TRACED)
        {
          ip_in_out_acl_trace_t *t =
            vlib_add_trace (vm, node, b[0], sizeof (*t));
          t->sw_if_index =
            ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
          t->next_index = next0;
          t->table_index = t0 - tables;
          t->offset = (e0 && t0) ? vnet_classify_get_offset (t0, e0) : ~0;
        }

      if ((next0 == ACL_NEXT_INDEX_DENY) && is_output)
        {
          /* on output, for the drop node to work properly, go back to ip header */
          vlib_buffer_advance (b[0], vnet_buffer (b[0])->l2.l2_len);
        }

      next[0] = next0;

      /* next */
      next += 1;
      b += 1;
      n_left -= 1;
    }

  *hits__ = hits;
  *misses__ = misses;
  *chain_hits__ = chain_hits;
}
static_always_inline uword
ip_in_out_acl_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		      vlib_frame_t *frame, const in_out_acl_table_id_t tid,
		      u32 *fib_index_by_sw_if_index,
		      const vlib_node_registration_t *parent_error_node,
		      const u32 error_none_index, const u32 error_deny_index,
		      const u32 error_miss_index, const vlib_rx_or_tx_t way,
		      const int is_output)
{
  const in_out_acl_main_t *am = &in_out_acl_main;
  vnet_classify_table_t *tables = am->vnet_classify_main->tables;
  u32 *from = vlib_frame_vector_args (frame);
  const u32 *table_index_by_sw_if_index =
    am->classify_table_index_by_sw_if_index[is_output][tid];
  vnet_config_main_t *cm = am->vnet_config_main[is_output][tid];
  const vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, parent_error_node->index);
  const vlib_error_t error_none = error_node->errors[error_none_index];
  const vlib_error_t error_deny = error_node->errors[error_deny_index];
  const vlib_error_t error_miss = error_node->errors[error_miss_index];
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  u16 nexts[VLIB_FRAME_SIZE];
  u32 hits, misses, chain_hits;

  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
#define ip_in_out_acl_inline_trace__(do_trace)                                \
  ip_in_out_acl_inline_trace (                                                \
    vm, node, frame, bufs, nexts, frame->n_vectors, &hits, &misses,           \
    &chain_hits, error_none, error_deny, error_miss, tables,                  \
    table_index_by_sw_if_index, fib_index_by_sw_if_index, cm, way, is_output, \
    do_trace)

  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    ip_in_out_acl_inline_trace__ (1 /* do_trace */);
  else
    ip_in_out_acl_inline_trace__ (0 /* do_trace */);
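  /* do_trace is a compile-time constant in each expansion above, so the
   * compiler emits two specializations of the always-inlined worker: a
   * traced variant and an untraced fast path with the trace code elided. */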
  vlib_node_increment_counter (
    vm, node->node_index,
    is_output ? IP_OUTACL_ERROR_MISS : IP_INACL_ERROR_MISS, misses);
  vlib_node_increment_counter (
    vm, node->node_index, is_output ? IP_OUTACL_ERROR_HIT : IP_INACL_ERROR_HIT,
    hits);
  vlib_node_increment_counter (vm, node->node_index,
			       is_output ? IP_OUTACL_ERROR_CHAIN_HIT :
					   IP_INACL_ERROR_CHAIN_HIT,
			       chain_hits);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}
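/*
 * Each node function below binds the shared worker to one direction and
 * address family: the classifier table id, the parent node whose error
 * strings are reused, whether to key on the RX or TX interface (VLIB_RX,
 * VLIB_TX, or ~0 for punted packets, which always use local0), and the
 * is_output flag.
 */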
VLIB_NODE_FN (ip4_inacl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP4, ip4_main.fib_index_by_sw_if_index,
    &ip4_input_node, IP4_ERROR_NONE, IP4_ERROR_INACL_SESSION_DENY,
    IP4_ERROR_INACL_TABLE_MISS, VLIB_RX, 0 /* is_output */);
}
VLIB_NODE_FN (ip4_punt_acl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP4_PUNT,
    ip4_main.fib_index_by_sw_if_index, &ip4_input_node, IP4_ERROR_NONE,
    IP4_ERROR_INACL_SESSION_DENY, IP4_ERROR_INACL_TABLE_MISS, ~0 /* way */,
    0 /* is_output */);
}
VLIB_NODE_FN (ip4_outacl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP4, NULL, &ip4_input_node,
    IP4_ERROR_NONE, IP4_ERROR_INACL_SESSION_DENY, IP4_ERROR_INACL_TABLE_MISS,
    VLIB_TX, 1 /* is_output */);
}
VLIB_REGISTER_NODE (ip4_inacl_node) = {
  .name = "ip4-inacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip4-drop",
  },
};
VLIB_REGISTER_NODE (ip4_punt_acl_node) = {
  .name = "ip4-punt-acl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip4-drop",
  },
};
VLIB_REGISTER_NODE (ip4_outacl_node) = {
  .name = "ip4-outacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_outacl_trace,
  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
  .error_strings = ip_outacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip4-drop",
  },
};
VNET_FEATURE_INIT (ip4_punt_acl_feature) = {
  .arc_name = "ip4-punt",
  .node_name = "ip4-punt-acl",
  .runs_after = VNET_FEATURES ("ip4-punt-policer"),
};
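/*
 * The punt ACL features attach to the ip4-punt / ip6-punt arcs; the
 * per-interface in/out ACL nodes are enabled via the in_out_acl CLI.
 * A usage sketch (interface name and table indices are illustrative; the
 * exact syntax is defined in vnet/classify/in_out_acl.c):
 *
 *   set interface input acl intfc GigabitEthernet0/8/0 ip4-table 0
 *   set interface output acl intfc GigabitEthernet0/8/0 ip4-table 1
 */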
VLIB_NODE_FN (ip6_inacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP6, ip6_main.fib_index_by_sw_if_index,
    &ip6_input_node, IP6_ERROR_NONE, IP6_ERROR_INACL_SESSION_DENY,
    IP6_ERROR_INACL_TABLE_MISS, VLIB_RX, 0 /* is_output */);
}
VLIB_NODE_FN (ip6_punt_acl_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP6_PUNT,
    ip6_main.fib_index_by_sw_if_index, &ip6_input_node, IP6_ERROR_NONE,
    IP6_ERROR_INACL_SESSION_DENY, IP6_ERROR_INACL_TABLE_MISS, ~0 /* way */,
    0 /* is_output */);
}
VLIB_NODE_FN (ip6_outacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * frame)
{
  return ip_in_out_acl_inline (
    vm, node, frame, IN_OUT_ACL_TABLE_IP6, NULL, &ip6_input_node,
    IP6_ERROR_NONE, IP6_ERROR_INACL_SESSION_DENY, IP6_ERROR_INACL_TABLE_MISS,
    VLIB_TX, 1 /* is_output */);
}
VLIB_REGISTER_NODE (ip6_inacl_node) = {
  .name = "ip6-inacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip6-drop",
  },
};
VLIB_REGISTER_NODE (ip6_punt_acl_node) = {
  .name = "ip6-punt-acl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_inacl_trace,
  .n_errors = ARRAY_LEN(ip_inacl_error_strings),
  .error_strings = ip_inacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip6-drop",
  },
};
VLIB_REGISTER_NODE (ip6_outacl_node) = {
  .name = "ip6-outacl",
  .vector_size = sizeof (u32),
  .format_trace = format_ip_outacl_trace,
  .n_errors = ARRAY_LEN(ip_outacl_error_strings),
  .error_strings = ip_outacl_error_strings,

  .n_next_nodes = ACL_NEXT_INDEX_N_NEXT,
  .next_nodes = {
    [ACL_NEXT_INDEX_DENY] = "ip6-drop",
  },
};
VNET_FEATURE_INIT (ip6_punt_acl_feature) = {
  .arc_name = "ip6-punt",
  .node_name = "ip6-punt-acl",
  .runs_after = VNET_FEATURES ("ip6-punt-policer"),
};
#ifndef CLIB_MARCH_VARIANT
static clib_error_t *
ip_in_out_acl_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip_in_out_acl_init);
#endif /* CLIB_MARCH_VARIANT */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */