/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_acl_inlines_h
#define included_acl_inlines_h

#include <plugins/acl/acl.h>
#include <plugins/acl/fa_node.h>
#include <plugins/acl/hash_lookup_private.h>

/* check if a given ACL exists */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS

/*
 * Define a pointer to the acl_main, which will be filled in during initialization.
 */
acl_main_t *p_acl_main = 0;

/*
 * If the file is included more than once, the symbol collision will make the problem obvious.
 * If the include is done only once, it is just a lonely null variable.
 */
void *ERROR_ACL_PLUGIN_EXPORTS_FILE_MUST_BE_INCLUDED_ONLY_IN_ONE_PLACE = 0;

u8 (*acl_plugin_acl_exists) (u32 acl_index);
#else
u8 acl_plugin_acl_exists (u32 acl_index);
#endif

/*
 * If you are using the ACL plugin, get this unique ID first,
 * so you can identify yourself when creating lookup contexts.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
u32 (*acl_plugin_register_user_module) (char *caller_module_string, char *val1_label, char *val2_label);
#else
u32 acl_plugin_register_user_module (char *caller_module_string, char *val1_label, char *val2_label);
#endif
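
/*
 * Illustrative sketch (not part of the exported API): a consumer module
 * might register itself once at init time. The module name and the two
 * label strings below are hypothetical; the labels merely describe the
 * meaning of the two debug values later passed to
 * acl_plugin_get_lookup_context_index().
 *
 *   u32 my_user_id =
 *     acl_plugin_register_user_module ("my-module", "sw_if_index", "is_input");
 */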

/*
 * Allocate a new lookup context index.
 * Supply the id assigned to your module during registration,
 * and two values of your choice identifying instances
 * of use within your module. They are useful for debugging.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
int (*acl_plugin_get_lookup_context_index) (u32 acl_user_id, u32 val1, u32 val2);
#else
int acl_plugin_get_lookup_context_index (u32 acl_user_id, u32 val1, u32 val2);
#endif

/*
 * Release the lookup context index and destroy
 * any associated data structures.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
void (*acl_plugin_put_lookup_context_index) (u32 lc_index);
#else
void acl_plugin_put_lookup_context_index (u32 lc_index);
#endif
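
/*
 * Illustrative lifecycle sketch (hypothetical values; assumes, as is usual
 * in VPP, that a negative return signals an error): a context is typically
 * allocated once per attachment point and released when no longer needed.
 *
 *   int lc_index =
 *     acl_plugin_get_lookup_context_index (my_user_id, sw_if_index, 1);
 *   if (lc_index >= 0)
 *     {
 *       ... use the context for lookups ...
 *       acl_plugin_put_lookup_context_index (lc_index);
 *     }
 */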

/*
 * Prepare the sequential vector of ACL#s to lookup within a given context.
 * Any existing list will be overwritten. acl_list is a vector.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
int (*acl_plugin_set_acl_vec_for_context) (u32 lc_index, u32 *acl_list);
#else
int acl_plugin_set_acl_vec_for_context (u32 lc_index, u32 *acl_list);
#endif
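
/*
 * Illustrative sketch: acl_list is a VPP vector (vppinfra vec.h), not a
 * plain array, so it carries its own length. A hypothetical caller applying
 * ACLs 5 and 7 to a context, assuming the plugin keeps its own copy of the
 * list, could do:
 *
 *   u32 *acl_vec = 0;
 *   vec_add1 (acl_vec, 5);
 *   vec_add1 (acl_vec, 7);
 *   acl_plugin_set_acl_vec_for_context (lc_index, acl_vec);
 *   vec_free (acl_vec);
 */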

/* Fill the 5-tuple from the packet */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
void (*acl_plugin_fill_5tuple) (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                                int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
#else
void acl_plugin_fill_5tuple (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                             int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
#endif

#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
static inline
void acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                                    int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt) {
  /* FIXME: normally the inlined version of filling in the 5-tuple. But for now just call the non-inlined version */
  acl_plugin_fill_5tuple (lc_index, b0, is_ip6, is_input, is_l2_path, p5tuple_pkt);
}
#endif

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
int (*acl_plugin_match_5tuple) (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap);
#else
int acl_plugin_match_5tuple (u32 lc_index,
                             fa_5tuple_opaque_t * pkt_5tuple,
                             int is_ip6, u8 * r_action,
                             u32 * r_acl_pos_p,
                             u32 * r_acl_match_p,
                             u32 * r_rule_match_p,
                             u32 * trace_bitmap);
#endif

#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
static inline int
acl_plugin_match_5tuple_inline (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap) {
  return acl_plugin_match_5tuple (lc_index, pkt_5tuple, is_ip6, r_action,
                                  r_acl_pos_p, r_acl_match_p, r_rule_match_p,
                                  trace_bitmap);
}
#endif
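
/*
 * Illustrative hot-path sketch (hypothetical variables b0, lc_index, etc.):
 * a data-plane node fills the 5-tuple once per packet, then matches it
 * against the lookup context. In this plugin's convention the returned
 * action is 0 = deny, 1 = permit, 2 = permit and create a reflexive session.
 *
 *   fa_5tuple_opaque_t pkt_5tuple;
 *   u8 action;
 *   u32 acl_pos, acl_index, rule_index;
 *   u32 trace_bitmap = 0;
 *
 *   acl_plugin_fill_5tuple_inline (lc_index, b0, is_ip6, is_input,
 *                                  is_l2_path, &pkt_5tuple);
 *   if (acl_plugin_match_5tuple_inline (lc_index, &pkt_5tuple, is_ip6,
 *                                       &action, &acl_pos, &acl_index,
 *                                       &rule_index, &trace_bitmap))
 *     drop = (action == 0);
 */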

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS

#define LOAD_SYMBOL_FROM_PLUGIN_TO(p, s, st)                        \
({                                                                  \
    st = vlib_get_plugin_symbol(p, #s);                             \
    if (!st)                                                        \
        return clib_error_return(0,                                 \
            "Plugin %s and/or symbol %s not found.", p, #s);        \
})

#define LOAD_SYMBOL(s) LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", s, s)

static inline clib_error_t * acl_plugin_exports_init (void)
{
    LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", acl_main, p_acl_main);
    LOAD_SYMBOL(acl_plugin_acl_exists);
    LOAD_SYMBOL(acl_plugin_register_user_module);
    LOAD_SYMBOL(acl_plugin_get_lookup_context_index);
    LOAD_SYMBOL(acl_plugin_put_lookup_context_index);
    LOAD_SYMBOL(acl_plugin_set_acl_vec_for_context);
    LOAD_SYMBOL(acl_plugin_fill_5tuple);
    LOAD_SYMBOL(acl_plugin_match_5tuple);
    return 0;
}

#endif /* ACL_PLUGIN_EXTERNAL_EXPORTS */
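
/*
 * Illustrative sketch: a consumer plugin would typically call this once
 * from its own init function and propagate any error (my_plugin_init is
 * hypothetical):
 *
 *   static clib_error_t *
 *   my_plugin_init (vlib_main_t * vm)
 *   {
 *     clib_error_t *error = acl_plugin_exports_init ();
 *     if (error)
 *       return error;
 *     return 0;
 *   }
 */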

always_inline void *
get_ptr_to_offset (vlib_buffer_t * b0, int offset)
{
  u8 *p = vlib_buffer_get_current (b0) + offset;
  return p;
}

always_inline int
offset_within_packet (vlib_buffer_t * b0, int offset)
{
  /* For the purposes of this code, "within" means we have at least 8 bytes after it */
  return (offset <= (b0->current_length - 8));
}
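
/*
 * Worked example: for an untagged IPv4/TCP frame with current_length 54
 * (14 ethernet + 20 IP + 20 TCP), the L4 offset 34 passes the check only
 * because 34 <= 54 - 8; an offset of 47 or more would be rejected even
 * though it is technically still inside the packet.
 */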

always_inline void
acl_fill_5tuple (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
                 int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
{
  /* IP4 and IP6 protocol numbers of ICMP */
  static u8 icmp_protos_v4v6[] = { IP_PROTOCOL_ICMP, IP_PROTOCOL_ICMP6 };

  int l3_offset;
  int l4_offset;
  u16 ports[2];
  u8 proto;

  if (is_l2_path)
    {
      l3_offset = ethernet_buffer_header_size (b0);
    }
  else
    {
      if (is_input)
        l3_offset = 0;
      else
        l3_offset = vnet_buffer (b0)->ip.save_rewrite_length;
    }

  /* key[0..3] contains src/dst address and is cleared/set below */
  /* Remainder of the key and per-packet non-key data */
  p5tuple_pkt->kv_40_8.key[4] = 0;
  p5tuple_pkt->kv_40_8.value = 0;
  p5tuple_pkt->pkt.is_ip6 = is_ip6;

  if (is_ip6)
    {
      clib_memcpy (&p5tuple_pkt->ip6_addr,
                   get_ptr_to_offset (b0,
                                      offsetof (ip6_header_t,
                                                src_address) + l3_offset),
                   sizeof (p5tuple_pkt->ip6_addr));
      proto =
        *(u8 *) get_ptr_to_offset (b0,
                                   offsetof (ip6_header_t,
                                             protocol) + l3_offset);
      l4_offset = l3_offset + sizeof (ip6_header_t);
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG: proto: %d, l4_offset: %d", proto,
                    l4_offset);
#endif
      /* IP6 EH handling is here: increment l4_offset if needed, update the proto */
      int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
      if (PREDICT_FALSE (need_skip_eh))
        {
          while (need_skip_eh && offset_within_packet (b0, l4_offset))
            {
              /* Fragment header needs special handling */
              if (PREDICT_FALSE (ACL_EH_FRAGMENT == proto))
                {
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  u16 frag_offset;
                  clib_memcpy (&frag_offset, get_ptr_to_offset (b0, 2 + l4_offset), sizeof (frag_offset));
                  frag_offset = clib_net_to_host_u16 (frag_offset) >> 3;
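                  /* The two bytes at offset 2 of the fragment header hold
                   * the 13-bit fragment offset (in 8-octet units) in the
                   * upper bits and the Res/M flags in the low 3 bits; the
                   * shift by 3 discards the flags, leaving just the offset. */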
                  if (frag_offset)
                    {
                      p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
                      /* invalidate L4 offset so we don't try to find L4 info */
                      l4_offset += b0->current_length;
                    }
                  else
                    {
                      /* First fragment: skip the frag header and move on. */
                      l4_offset += 8;
                    }
                }
              else
                {
                  u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  l4_offset += 8 * (1 + (u16) nwords);
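                  /* Per RFC 8200, the Hdr Ext Len byte counts 8-octet units
                   * beyond the first 8 octets, hence 8 * (1 + nwords):
                   * e.g. nwords == 2 means a 24-byte extension header. */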
                }
#ifdef FA_NODE_VERBOSE_DEBUG
              clib_warning ("ACL_FA_NODE_DBG: new proto: %d, new offset: %d",
                            proto, l4_offset);
#endif
              need_skip_eh =
                clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
            }
        }
    }
  else
    {
      memset (p5tuple_pkt->l3_zero_pad, 0, sizeof (p5tuple_pkt->l3_zero_pad));
      clib_memcpy (&p5tuple_pkt->ip4_addr,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                src_address) + l3_offset),
                   sizeof (p5tuple_pkt->ip4_addr));
      proto =
        *(u8 *) get_ptr_to_offset (b0,
                                   offsetof (ip4_header_t,
                                             protocol) + l3_offset);
      l4_offset = l3_offset + sizeof (ip4_header_t);

      u16 flags_and_fragment_offset;
      clib_memcpy (&flags_and_fragment_offset,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                flags_and_fragment_offset) + l3_offset),
                   sizeof (flags_and_fragment_offset));
      flags_and_fragment_offset = clib_net_to_host_u16 (flags_and_fragment_offset);
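      /* In the host-order u16, bits 0..12 are the fragment offset and
       * bits 13..15 are the flags (reserved/DF/MF), so the offset mask
       * is 0x1fff. */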
      /* non-initial fragments have non-zero offset */
      if (PREDICT_FALSE (0x1fff & flags_and_fragment_offset))
        {
          p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
          /* invalidate L4 offset so we don't try to find L4 info */
          l4_offset += b0->current_length;
        }
    }

  p5tuple_pkt->l4.proto = proto;
  p5tuple_pkt->l4.is_input = is_input;

  if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
    {
      p5tuple_pkt->pkt.l4_valid = 1;
      if (icmp_protos_v4v6[is_ip6] == proto)
        {
          p5tuple_pkt->l4.port[0] =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (icmp46_header_t,
                                                             type));
          p5tuple_pkt->l4.port[1] =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (icmp46_header_t,
                                                             code));
          p5tuple_pkt->l4.is_slowpath = 1;
        }
      else if ((IP_PROTOCOL_TCP == proto) || (IP_PROTOCOL_UDP == proto))
        {
          clib_memcpy (&ports,
                       get_ptr_to_offset (b0,
                                          l4_offset + offsetof (tcp_header_t,
                                                                src_port)),
                       sizeof (ports));
          p5tuple_pkt->l4.port[0] = clib_net_to_host_u16 (ports[0]);
          p5tuple_pkt->l4.port[1] = clib_net_to_host_u16 (ports[1]);

          p5tuple_pkt->pkt.tcp_flags =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (tcp_header_t,
                                                             flags));
          p5tuple_pkt->pkt.tcp_flags_valid = (proto == IP_PROTOCOL_TCP);
          p5tuple_pkt->l4.is_slowpath = 0;
        }
      else
        {
          p5tuple_pkt->l4.is_slowpath = 1;
        }
    }
}

always_inline void
acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6,
                               int is_input, int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt)
{
  acl_main_t *am = p_acl_main;
  acl_fill_5tuple (am, b0, is_ip6, is_input, is_l2_path, (fa_5tuple_t *) p5tuple_pkt);
}

always_inline int
fa_acl_match_ip4_addr (ip4_address_t * addr1, ip4_address_t * addr2,
                       int prefixlen)
{
  if (prefixlen == 0)
    {
      /* match any always succeeds */
      return 1;
    }
  uint32_t a1 = clib_net_to_host_u32 (addr1->as_u32);
  uint32_t a2 = clib_net_to_host_u32 (addr2->as_u32);
  uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1);
  return (a1 & mask0) == a2;
}

always_inline int
fa_acl_match_ip6_addr (ip6_address_t * addr1, ip6_address_t * addr2,
                       int prefixlen)
{
  if (prefixlen == 0)
    {
      /* match any always succeeds */
      return 1;
    }
  if (memcmp (addr1, addr2, prefixlen / 8))
    {
      /* If the starting full bytes do not match, no point in bit-twiddling further */
      return 0;
    }
  if (prefixlen % 8)
    {
      /* the partial byte is the first one not covered by the memcmp above */
      u8 b1 = *((u8 *) addr1 + prefixlen / 8);
      u8 b2 = *((u8 *) addr2 + prefixlen / 8);
      u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
      return (b1 & mask0) == b2;
    }
  else
    {
      /* The prefix fits into an integer number of bytes, so nothing left to do */
      return 1;
    }
}
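
/*
 * Worked example: for prefixlen 68, the memcmp covers bytes 0..7, the
 * partial byte is index 8 (68/8), and mask0 is 0xf0 (the top 68%8 = 4
 * bits), so only the high nibble of byte 8 participates in the comparison.
 */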

always_inline int
fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
{
  return ((port >= port_first) && (port <= port_last));
}

always_inline int
single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
                         int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                         u32 * r_rule_match_p, u32 * trace_bitmap)
{
  int i;
  acl_list_t *a;
  acl_rule_t *r;

  if (pool_is_free_index (am->acls, acl_index))
    {
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = -1;
      /* the ACL does not exist but is used for policy. Block traffic. */
      return 0;
    }
  a = am->acls + acl_index;
  for (i = 0; i < a->count; i++)
    {
      r = a->rules + i;
      if (is_ip6 != r->is_ipv6)
        {
          continue;
        }
      if (is_ip6)
        {
          if (!fa_acl_match_ip6_addr
              (&pkt_5tuple->ip6_addr[1], &r->dst.ip6, r->dst_prefixlen))
            continue;
          if (!fa_acl_match_ip6_addr
              (&pkt_5tuple->ip6_addr[0], &r->src.ip6, r->src_prefixlen))
            continue;
        }
      else
        {
          if (!fa_acl_match_ip4_addr
              (&pkt_5tuple->ip4_addr[1], &r->dst.ip4, r->dst_prefixlen))
            continue;
          if (!fa_acl_match_ip4_addr
              (&pkt_5tuple->ip4_addr[0], &r->src.ip4, r->src_prefixlen))
            continue;
        }

      if (r->proto)
        {
          if (pkt_5tuple->l4.proto != r->proto)
            continue;

          if (PREDICT_FALSE (pkt_5tuple->pkt.is_nonfirst_fragment &&
                             am->l4_match_nonfirst_fragment))
            {
              /* non-initial fragment with frag match configured - match this rule */
              *trace_bitmap |= 0x80000000;
              *r_action = r->is_permit;
              if (r_acl_match_p)
                *r_acl_match_p = acl_index;
              if (r_rule_match_p)
                *r_rule_match_p = i;
              return 1;
            }

          /* A sanity check just to ensure we are about to match the ports extracted from the packet */
          if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
             acl_index, i, pkt_5tuple->l4.proto, r->proto);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
               r->src_port_or_type_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
             r->src_port_or_type_last);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
               r->dst_port_or_code_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
             r->dst_port_or_code_last);
#endif
          if (pkt_5tuple->pkt.tcp_flags_valid
              && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
                  r->tcp_flags_value))
            continue;
        }
      /* everything matches! */
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
                    acl_index, i, r->is_permit);
#endif
      *r_action = r->is_permit;
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = i;
      return 1;
    }
  return 0;
}

always_inline int
acl_plugin_single_acl_match_5tuple (u32 acl_index, fa_5tuple_t * pkt_5tuple,
                                    int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                                    u32 * r_rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  return single_acl_match_5tuple (am, acl_index, pkt_5tuple, is_ip6, r_action,
                                  r_acl_match_p, r_rule_match_p, trace_bitmap);
}

always_inline int
linear_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
                               int is_ip6, u8 *r_action, u32 *acl_pos_p, u32 * acl_match_p,
                               u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  int i;
  u32 *acl_vector;
  u8 action = 0;
  acl_lookup_context_t *acontext = pool_elt_at_index (am->acl_lookup_contexts, lc_index);

  acl_vector = acontext->acl_indices;

  for (i = 0; i < vec_len (acl_vector); i++)
    {
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
                    acl_vector[i]);
#endif
      if (single_acl_match_5tuple
          (am, acl_vector[i], pkt_5tuple, is_ip6, &action,
           acl_match_p, rule_match_p, trace_bitmap))
        {
          *r_action = action;
          *acl_pos_p = i;
          return 1;
        }
    }
  if (vec_len (acl_vector) > 0)
    {
      /* ACLs were applied but none matched: deny by default */
      return 0;
    }
#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning ("ACL_FA_NODE_DBG: No ACL on lc_index %d", lc_index);
#endif
  /* If there are no ACLs defined we should not be here. */
  return 0;
}

/*
 * This returns true if there is indeed a match on the portranges.
 * With all these levels of indirection, this is not going to be very fast,
 * so, best use the individual ports or wildcard ports for performance.
 */
always_inline int
match_portranges (acl_main_t * am, fa_5tuple_t * match, u32 index)
{
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index (am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
  applied_hash_ace_entry_t *pae = vec_elt_at_index ((*applied_hash_aces), index);

  acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);

#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning ("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
                r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
                r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
#endif

  return (((r->src_port_or_type_first <= match->l4.port[0]) && (r->src_port_or_type_last >= match->l4.port[0])) &&
          ((r->dst_port_or_code_first <= match->l4.port[1]) && (r->dst_port_or_code_last >= match->l4.port[1])));
}

always_inline u32
multi_acl_match_get_applied_ace_index (acl_main_t * am, fa_5tuple_t * match)
{
  clib_bihash_kv_48_8_t kv;
  clib_bihash_kv_48_8_t result;
  fa_5tuple_t *kv_key = (fa_5tuple_t *) kv.key;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *) &result.value;
  u64 *pmatch = (u64 *) match;
  u64 *pmask;
  u64 *pkey;
  int mask_type_index;
  u32 curr_match_index = ~0;

  u32 lc_index = match->pkt.lc_index;
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index (am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;

  DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
      pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);

  for (mask_type_index = 0; mask_type_index < pool_len (am->ace_mask_type_pool); mask_type_index++) {
    if (!clib_bitmap_get (vec_elt_at_index ((*applied_hash_acls), lc_index)->mask_type_index_bitmap, mask_type_index)) {
      /* This bit is not set. Avoid trying to match */
      continue;
    }
    ace_mask_type_entry_t *mte = vec_elt_at_index (am->ace_mask_type_pool, mask_type_index);
    pmatch = (u64 *) match;
    pmask = (u64 *) &mte->mask;
    pkey = (u64 *) kv.key;
    /*
     * unrolling the below loop results in a noticeable performance increase.
    int i;
    for (i = 0; i < 6; i++) {
      kv.key[i] = pmatch[i] & pmask[i];
    }
    */
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;

    kv_key->pkt.mask_type_index_lsb = mask_type_index;
    DBG("        KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
        kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
    int res = clib_bihash_search_48_8 (&am->acl_lookup_hash, &kv, &result);
    if (res == 0) {
      DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
      if (result_val->applied_entry_index < curr_match_index) {
        if (PREDICT_FALSE (result_val->need_portrange_check)) {
          /*
           * This is going to be slow, since we can have multiple superset
           * entries for narrow-ish portranges, e.g.:
           * 0..42 100..400, 230..60000,
           * so we need to walk linearly and check if they match.
           */
          u32 curr_index = result_val->applied_entry_index;
          while ((curr_index != ~0) && !match_portranges (am, match, curr_index)) {
            /* while no match and there are more entries, walk... */
            applied_hash_ace_entry_t *pae = vec_elt_at_index ((*applied_hash_aces), curr_index);
            DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
            curr_index = pae->next_applied_entry_index;
          }
          if (curr_index < curr_match_index) {
            DBG("The index %d is the new candidate in portrange matches.", curr_index);
            curr_match_index = curr_index;
          } else {
            DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
          }
        } else {
          /* The usual path is here. Found an entry in front of the current candidate - so it's a new one */
          DBG("This match is the new candidate");
          curr_match_index = result_val->applied_entry_index;
          if (!result_val->shadowed) {
            /* new result is known to not be shadowed, so no point to look up further */
            break;
          }
        }
      }
    }
  }
  DBG("MATCH-RESULT: %d", curr_match_index);
  return curr_match_index;
}

always_inline int
hash_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
                             int is_ip6, u8 *action, u32 *acl_pos_p, u32 * acl_match_p,
                             u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index (am->hash_entry_vec_by_lc_index, lc_index);
  u32 match_index = multi_acl_match_get_applied_ace_index (am, pkt_5tuple);
  if (match_index < vec_len ((*applied_hash_aces))) {
    applied_hash_ace_entry_t *pae = vec_elt_at_index ((*applied_hash_aces), match_index);
    pae->hitcount++;
    *acl_pos_p = pae->acl_position;
    *acl_match_p = pae->acl_index;
    *rule_match_p = pae->ace_index;
    *action = pae->action;
    return 1;
  }
  return 0;
}

always_inline int
acl_plugin_match_5tuple_inline (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap)
{
  acl_main_t *am = p_acl_main;
  fa_5tuple_t *pkt_5tuple_internal = (fa_5tuple_t *) pkt_5tuple;
  pkt_5tuple_internal->pkt.lc_index = lc_index;
  if (am->use_hash_acl_matching) {
    return hash_multi_acl_match_5tuple (lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                        r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  } else {
    return linear_multi_acl_match_5tuple (lc_index, pkt_5tuple_internal, is_ip6, r_action,
                                          r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  }
}

#endif /* included_acl_inlines_h */