/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_acl_inlines_h
#define included_acl_inlines_h

#include <plugins/acl/acl.h>
#include <plugins/acl/fa_node.h>
#include <plugins/acl/hash_lookup_private.h>

/* check if a given ACL exists */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
u8 (*acl_plugin_acl_exists) (u32 acl_index);
#else
u8 acl_plugin_acl_exists (u32 acl_index);
#endif

/*
 * If you are using the ACL plugin, get this unique ID first,
 * so you can identify yourself when creating the lookup contexts.
 */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
u32 (*acl_plugin_register_user_module) (char *caller_module_string, char *val1_label, char *val2_label);
#else
u32 acl_plugin_register_user_module (char *caller_module_string, char *val1_label, char *val2_label);
#endif
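/*
 * Illustrative registration call (a sketch only; the module name and the
 * two label strings are hypothetical, chosen by the calling module):
 *
 *   static u32 my_acl_user_id;
 *   ...
 *   my_acl_user_id = acl_plugin_register_user_module ("my-feature",
 *                                                     "sw_if_index",
 *                                                     "is_input");
 */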
/*
 * Allocate a new lookup context index.
 * Supply the id assigned to your module during registration,
 * and two values of your choice identifying instances
 * of use within your module. They are useful for debugging.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
int (*acl_plugin_get_lookup_context_index) (u32 acl_user_id, u32 val1, u32 val2);
#else
int acl_plugin_get_lookup_context_index (u32 acl_user_id, u32 val1, u32 val2);
#endif
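/*
 * Illustrative allocation (a sketch; sw_if_index0 and the constant 1 are
 * hypothetical debug values, and the negative-return check is an
 * assumption - consult the plugin for the exact failure convention):
 *
 *   int lc_index = acl_plugin_get_lookup_context_index (my_acl_user_id,
 *                                                       sw_if_index0, 1);
 *   if (lc_index < 0)
 *     ... handle the allocation failure ...
 */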
/*
 * Release the lookup context index and destroy
 * any associated data structures.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
void (*acl_plugin_put_lookup_context_index) (u32 lc_index);
#else
void acl_plugin_put_lookup_context_index (u32 lc_index);
#endif
/*
 * Prepare the sequential vector of ACL#s to look up within a given context.
 * Any existing list will be overwritten. acl_list is a vector.
 */
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
int (*acl_plugin_set_acl_vec_for_context) (u32 lc_index, u32 *acl_list);
#else
int acl_plugin_set_acl_vec_for_context (u32 lc_index, u32 *acl_list);
#endif
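/*
 * Illustrative use (a sketch; the ACL indices 5 and 7 are arbitrary
 * placeholders, and ownership of acl_list after the call is not covered
 * here):
 *
 *   u32 *acl_list = 0;
 *   vec_add1 (acl_list, 5);
 *   vec_add1 (acl_list, 7);
 *   acl_plugin_set_acl_vec_for_context (lc_index, acl_list);
 */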
/* Fill the 5-tuple from the packet */

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
void (*acl_plugin_fill_5tuple) (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                                int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
#else
void acl_plugin_fill_5tuple (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                             int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
#endif
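/*
 * Illustrative data-path call (a sketch; b0, is_ip6, is_input and
 * is_l2_path are hypothetical locals of the calling node):
 *
 *   fa_5tuple_opaque_t pkt_5tuple;
 *   acl_plugin_fill_5tuple (lc_index, b0, is_ip6, is_input, is_l2_path,
 *                           &pkt_5tuple);
 */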
#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
static inline void
acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                               int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt) {
  /* FIXME: normally the inlined version of filling in the 5-tuple. But for now just call the non-inlined version */
  acl_plugin_fill_5tuple(lc_index, b0, is_ip6, is_input, is_l2_path, p5tuple_pkt);
}
#endif

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
int (*acl_plugin_match_5tuple) (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap);
#else
int acl_plugin_match_5tuple (u32 lc_index,
                             fa_5tuple_opaque_t * pkt_5tuple,
                             int is_ip6, u8 * r_action,
                             u32 * r_acl_pos_p,
                             u32 * r_acl_match_p,
                             u32 * r_rule_match_p,
                             u32 * trace_bitmap);
#endif
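/*
 * Illustrative match call (a sketch continuing the fill example above;
 * the output variables are hypothetical locals, and r_action receives the
 * matched rule's is_permit value):
 *
 *   u8 action = 0;
 *   u32 acl_pos = ~0, acl_index = ~0, rule_index = ~0, trace_bitmap = 0;
 *   if (acl_plugin_match_5tuple (lc_index, &pkt_5tuple, is_ip6, &action,
 *                                &acl_pos, &acl_index, &rule_index,
 *                                &trace_bitmap))
 *     {
 *       ... act on the result ...
 *     }
 */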
#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
static inline int
acl_plugin_match_5tuple_inline (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap) {
  return acl_plugin_match_5tuple(lc_index, pkt_5tuple, is_ip6, r_action, r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
}
#endif

#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS

#define LOAD_SYMBOL_FROM_PLUGIN(p, s)                             \
({                                                                \
    s = vlib_get_plugin_symbol(p, #s);                            \
    if (!s)                                                       \
        return clib_error_return(0,                               \
                "Plugin %s and/or symbol %s not found.", p, #s);  \
})

#define LOAD_SYMBOL(s) LOAD_SYMBOL_FROM_PLUGIN("acl_plugin.so", s)

static inline clib_error_t * acl_plugin_exports_init (void)
{
    LOAD_SYMBOL(acl_plugin_acl_exists);
    LOAD_SYMBOL(acl_plugin_register_user_module);
    LOAD_SYMBOL(acl_plugin_get_lookup_context_index);
    LOAD_SYMBOL(acl_plugin_put_lookup_context_index);
    LOAD_SYMBOL(acl_plugin_set_acl_vec_for_context);
    LOAD_SYMBOL(acl_plugin_fill_5tuple);
    LOAD_SYMBOL(acl_plugin_match_5tuple);
    return 0;
}

#endif

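/*
 * Illustrative consumer-side initialization (a sketch; the init function
 * name is hypothetical):
 *
 *   static clib_error_t *
 *   my_feature_init (vlib_main_t * vm)
 *   {
 *     clib_error_t *error = acl_plugin_exports_init ();
 *     if (error)
 *       return error;
 *     ...
 *     return 0;
 *   }
 *
 *   VLIB_INIT_FUNCTION (my_feature_init);
 */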
always_inline void *
get_ptr_to_offset (vlib_buffer_t * b0, int offset)
{
  u8 *p = vlib_buffer_get_current (b0) + offset;
  return p;
}

always_inline int
offset_within_packet (vlib_buffer_t * b0, int offset)
{
  /* For the purposes of this code, "within" means we have at least 8 bytes after it */
  return (offset <= (b0->current_length - 8));
}

always_inline void
acl_fill_5tuple (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
                 int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
{
  /* IP4 and IP6 protocol numbers of ICMP */
  static u8 icmp_protos_v4v6[] = { IP_PROTOCOL_ICMP, IP_PROTOCOL_ICMP6 };

  int l3_offset;
  int l4_offset;
  u16 ports[2];
  u16 proto;

  if (is_l2_path)
    {
      l3_offset = ethernet_buffer_header_size(b0);
    }
  else
    {
      if (is_input)
        l3_offset = 0;
      else
        l3_offset = vnet_buffer(b0)->ip.save_rewrite_length;
    }

  /* key[0..3] contains src/dst address and is cleared/set below */
  /* Remainder of the key and per-packet non-key data */
  p5tuple_pkt->kv.key[4] = 0;
  p5tuple_pkt->kv.value = 0;
  if (is_ip6)
    {
      clib_memcpy (&p5tuple_pkt->addr,
                   get_ptr_to_offset (b0,
                                      offsetof (ip6_header_t,
                                                src_address) + l3_offset),
                   sizeof (p5tuple_pkt->addr));
      proto =
        *(u8 *) get_ptr_to_offset (b0,
                                   offsetof (ip6_header_t,
                                             protocol) + l3_offset);
      l4_offset = l3_offset + sizeof (ip6_header_t);
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG: proto: %d, l4_offset: %d", proto,
                    l4_offset);
#endif
      /* IP6 EH handling is here: increment l4_offset if needed and update the proto */
      int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
      if (PREDICT_FALSE (need_skip_eh))
        {
          while (need_skip_eh && offset_within_packet (b0, l4_offset))
            {
              /* Fragment header needs special handling */
              if (PREDICT_FALSE (ACL_EH_FRAGMENT == proto))
                {
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  u16 frag_offset;
                  clib_memcpy (&frag_offset, get_ptr_to_offset (b0, 2 + l4_offset), sizeof(frag_offset));
                  frag_offset = clib_net_to_host_u16(frag_offset) >> 3;
                  if (frag_offset)
                    {
                      p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
                      /* invalidate L4 offset so we don't try to find L4 info */
                      l4_offset += b0->current_length;
                    }
                  else
                    {
                      /* First fragment: skip the frag header and move on. */
                      l4_offset += 8;
                    }
                }
              else
                {
                  u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
                  proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
                  l4_offset += 8 * (1 + (u16) nwords);
                }
#ifdef FA_NODE_VERBOSE_DEBUG
              clib_warning ("ACL_FA_NODE_DBG: new proto: %d, new offset: %d",
                            proto, l4_offset);
#endif
              need_skip_eh =
                clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
            }
        }
    }
  else
    {
      p5tuple_pkt->kv.key[0] = 0;
      p5tuple_pkt->kv.key[1] = 0;
      p5tuple_pkt->kv.key[2] = 0;
      p5tuple_pkt->kv.key[3] = 0;
      clib_memcpy (&p5tuple_pkt->addr[0].ip4,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                src_address) + l3_offset),
                   sizeof (p5tuple_pkt->addr[0].ip4));
      clib_memcpy (&p5tuple_pkt->addr[1].ip4,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                dst_address) + l3_offset),
                   sizeof (p5tuple_pkt->addr[1].ip4));
      proto =
        *(u8 *) get_ptr_to_offset (b0,
                                   offsetof (ip4_header_t,
                                             protocol) + l3_offset);
      l4_offset = l3_offset + sizeof (ip4_header_t);
      u16 flags_and_fragment_offset;
      clib_memcpy (&flags_and_fragment_offset,
                   get_ptr_to_offset (b0,
                                      offsetof (ip4_header_t,
                                                flags_and_fragment_offset)) + l3_offset,
                   sizeof(flags_and_fragment_offset));
      flags_and_fragment_offset = clib_net_to_host_u16 (flags_and_fragment_offset);

      /* non-initial fragments have a non-zero offset */
      if ((PREDICT_FALSE(0xfff & flags_and_fragment_offset)))
        {
          p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
          /* invalidate L4 offset so we don't try to find L4 info */
          l4_offset += b0->current_length;
        }
    }
  p5tuple_pkt->l4.proto = proto;
  if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
    {
      p5tuple_pkt->pkt.l4_valid = 1;
      if (icmp_protos_v4v6[is_ip6] == proto)
        {
          /* ICMP type and code are stored in the port fields */
          p5tuple_pkt->l4.port[0] =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (icmp46_header_t,
                                                             type));
          p5tuple_pkt->l4.port[1] =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (icmp46_header_t,
                                                             code));
        }
      else if ((IP_PROTOCOL_TCP == proto) || (IP_PROTOCOL_UDP == proto))
        {
          clib_memcpy (&ports,
                       get_ptr_to_offset (b0,
                                          l4_offset + offsetof (tcp_header_t,
                                                                src_port)),
                       sizeof (ports));
          p5tuple_pkt->l4.port[0] = clib_net_to_host_u16 (ports[0]);
          p5tuple_pkt->l4.port[1] = clib_net_to_host_u16 (ports[1]);

          p5tuple_pkt->pkt.tcp_flags =
            *(u8 *) get_ptr_to_offset (b0,
                                       l4_offset + offsetof (tcp_header_t,
                                                             flags));
          p5tuple_pkt->pkt.tcp_flags_valid = (proto == IP_PROTOCOL_TCP);
        }
      /*
       * FIXME: rather than the above conditional, here could
       * be a nice generic mechanism to extract two L4 values:
       *
       * have a per-protocol array of 4 elements like this:
       *   u8 offset; to take the byte from, off the L4 header
       *   u8 mask; to mask it with, before storing
       *
       * this way we can describe UDP, TCP and ICMP[46] semantics,
       * and add a sort of FPM-type behavior for other protocols.
       *
       * Of course, is it faster ? and is it needed ?
       */
    }
}
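/*
 * One possible shape of the per-protocol extraction table the FIXME above
 * describes (purely illustrative; neither the type nor the table exists in
 * the plugin):
 *
 *   typedef struct {
 *     u8 offset;    offset of the byte to take, from the start of the L4 header
 *     u8 mask;      mask to apply before storing the byte
 *   } l4_extract_t;
 *
 *   static const l4_extract_t l4_extract_by_proto[256][4];
 */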
always_inline void
acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6,
                               int is_input, int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt)
{
  acl_main_t *am = &acl_main;
  acl_fill_5tuple(am, b0, is_ip6, is_input, is_l2_path, (fa_5tuple_t *)p5tuple_pkt);
}

always_inline int
fa_acl_match_addr (ip46_address_t * addr1, ip46_address_t * addr2,
                   int prefixlen, int is_ip6)
{
  if (prefixlen == 0)
    {
      /* match any always succeeds */
      return 1;
    }
  if (is_ip6)
    {
      if (memcmp (addr1, addr2, prefixlen / 8))
        {
          /* If the starting full bytes do not match, no point in bit-twiddling the thumbs further */
          return 0;
        }
      if (prefixlen % 8)
        {
          u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8);
          u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8);
          u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
          return (b1 & mask0) == b2;
        }
      else
        {
          /* The prefix fits into an integer number of bytes, so nothing left to do */
          return 1;
        }
    }
  else
    {
      uint32_t a1 = clib_net_to_host_u32 (addr1->ip4.as_u32);
      uint32_t a2 = clib_net_to_host_u32 (addr2->ip4.as_u32);
      uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1);
      return (a1 & mask0) == a2;
    }
}
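/*
 * Worked example for the IPv4 branch: matching 192.168.1.200 against a
 * rule prefix of 192.168.1.0/24:
 *
 *   a1    = 0xc0a801c8  (192.168.1.200)
 *   a2    = 0xc0a80100  (192.168.1.0)
 *   mask0 = 0xffffffff - ((1 << 8) - 1) = 0xffffff00
 *   (a1 & mask0) = 0xc0a80100 == a2  ->  match
 */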
always_inline int
fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
{
  return ((port >= port_first) && (port <= port_last));
}

always_inline int
single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
                         int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                         u32 * r_rule_match_p, u32 * trace_bitmap)
{
  int i;
  acl_list_t *a;
  acl_rule_t *r;

  if (pool_is_free_index (am->acls, acl_index))
    {
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = -1;
      /* the ACL does not exist but is used for policy. Block traffic. */
      return 0;
    }
  a = am->acls + acl_index;
  for (i = 0; i < a->count; i++)
    {
      r = a->rules + i;
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning("ACL_FA_NODE_DBG acl %d rule %d tag %s", acl_index, i, a->tag);
#endif
      if (is_ip6 != r->is_ipv6)
        {
          continue;
        }
      if (!fa_acl_match_addr
          (&pkt_5tuple->addr[1], &r->dst, r->dst_prefixlen, is_ip6))
        continue;

#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning
        ("ACL_FA_NODE_DBG acl %d rule %d pkt dst addr %U match rule addr %U/%d",
         acl_index, i, format_ip46_address, &pkt_5tuple->addr[1],
         r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
         &r->dst, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
         r->dst_prefixlen);
#endif

      if (!fa_acl_match_addr
          (&pkt_5tuple->addr[0], &r->src, r->src_prefixlen, is_ip6))
        continue;

#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning
        ("ACL_FA_NODE_DBG acl %d rule %d pkt src addr %U match rule addr %U/%d",
         acl_index, i, format_ip46_address, &pkt_5tuple->addr[0],
         r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
         &r->src, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
         r->src_prefixlen);
      clib_warning
        ("ACL_FA_NODE_DBG acl %d rule %d trying to match pkt proto %d with rule %d",
         acl_index, i, pkt_5tuple->l4.proto, r->proto);
#endif
      if (r->proto)
        {
          if (pkt_5tuple->l4.proto != r->proto)
            continue;

          if (PREDICT_FALSE (pkt_5tuple->pkt.is_nonfirst_fragment &&
                             am->l4_match_nonfirst_fragment))
            {
              /* non-initial fragment with frag match configured - match this rule */
              *trace_bitmap |= 0x80000000;
              *r_action = r->is_permit;
              if (r_acl_match_p)
                *r_acl_match_p = acl_index;
              if (r_rule_match_p)
                *r_rule_match_p = i;
              return 1;
            }
          /* A sanity check just to ensure we are about to match the ports extracted from the packet */
          if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
             acl_index, i, pkt_5tuple->l4.proto, r->proto);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
               r->src_port_or_type_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
             r->src_port_or_type_last);
#endif

          if (!fa_acl_match_port
              (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
               r->dst_port_or_code_last, is_ip6))
            continue;

#ifdef FA_NODE_VERBOSE_DEBUG
          clib_warning
            ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
             acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
             r->dst_port_or_code_last);
#endif
          if (pkt_5tuple->pkt.tcp_flags_valid
              && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
                  r->tcp_flags_value))
            continue;
        }
      /* everything matches! */
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
                    acl_index, i, r->is_permit);
#endif
      *r_action = r->is_permit;
      if (r_acl_match_p)
        *r_acl_match_p = acl_index;
      if (r_rule_match_p)
        *r_rule_match_p = i;
      return 1;
    }
  return 0;
}

always_inline int
acl_plugin_single_acl_match_5tuple (u32 acl_index, fa_5tuple_t * pkt_5tuple,
                                    int is_ip6, u8 * r_action, u32 * r_acl_match_p,
                                    u32 * r_rule_match_p, u32 * trace_bitmap)
{
  acl_main_t * am = &acl_main;
  return single_acl_match_5tuple(am, acl_index, pkt_5tuple, is_ip6, r_action,
                                 r_acl_match_p, r_rule_match_p, trace_bitmap);
}

always_inline int
linear_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
                               int is_ip6, u8 *r_action, u32 *acl_pos_p, u32 * acl_match_p,
                               u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = &acl_main;
  int i;
  u32 *acl_vector;
  u8 action = 0;
  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);

  acl_vector = acontext->acl_indices;

  for (i = 0; i < vec_len (acl_vector); i++)
    {
#ifdef FA_NODE_VERBOSE_DEBUG
      clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
                    acl_vector[i]);
#endif
      if (single_acl_match_5tuple
          (am, acl_vector[i], pkt_5tuple, is_ip6, &action,
           acl_match_p, rule_match_p, trace_bitmap))
        {
          *r_action = action;
          *acl_pos_p = i;
          return 1;
        }
    }
  if (vec_len (acl_vector) > 0)
    {
      return 0;
    }
#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning ("ACL_FA_NODE_DBG: No ACL on lc_index %d", lc_index);
#endif
  /* If there are no ACLs defined we should not be here. */
  return 0;
}

/*
 * This returns true if there is indeed a match on the portranges.
 * With all these levels of indirection, this is not going to be very fast,
 * so, best use individual ports or wildcard ports for performance.
 */
static int
match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
{
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), index);

  acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);

#ifdef FA_NODE_VERBOSE_DEBUG
  clib_warning("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
               r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
               r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
#endif

  return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) &&
           ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
}

static u32
multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match)
{
  clib_bihash_kv_48_8_t kv;
  clib_bihash_kv_48_8_t result;
  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
  u64 *pmatch = (u64 *)match;
  u64 *pmask;
  u64 *pkey;
  int mask_type_index;
  u32 curr_match_index = ~0;

  u32 lc_index = match->pkt.lc_index;
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;

  DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
      pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);

  for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) {
    if (!clib_bitmap_get(vec_elt_at_index((*applied_hash_acls), lc_index)->mask_type_index_bitmap, mask_type_index)) {
      /* This bit is not set. Avoid trying to match */
      continue;
    }
    ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, mask_type_index);
    pmatch = (u64 *)match;
    pmask = (u64 *)&mte->mask;
    pkey = (u64 *)kv.key;
    /*
     * Unrolling the below loop results in a noticeable performance increase:
     *
     *   int i;
     *   for (i = 0; i < 6; i++) {
     *     kv.key[i] = pmatch[i] & pmask[i];
     *   }
     */
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;
    *pkey++ = *pmatch++ & *pmask++;

    kv_key->pkt.mask_type_index_lsb = mask_type_index;
    DBG("    KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
        kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
    int res = clib_bihash_search_48_8 (&am->acl_lookup_hash, &kv, &result);
    if (res == 0) {
      DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
      if (result_val->applied_entry_index < curr_match_index) {
        if (PREDICT_FALSE(result_val->need_portrange_check)) {
          /*
           * This is going to be slow, since we can have multiple superset
           * entries for narrow-ish portranges, e.g.:
           * 0..42 100..400, 230..60000,
           * so we need to walk linearly and check if they match.
           */
          u32 curr_index = result_val->applied_entry_index;
          while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) {
            /* while no match and there are more entries, walk... */
            applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), curr_index);
            DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
            curr_index = pae->next_applied_entry_index;
          }
          if (curr_index < curr_match_index) {
            DBG("The index %d is the new candidate in portrange matches.", curr_index);
            curr_match_index = curr_index;
          } else {
            DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
          }
        } else {
          /* The usual path is here. Found an entry in front of the current candidate - so it's a new one */
          DBG("This match is the new candidate");
          curr_match_index = result_val->applied_entry_index;
          if (!result_val->shadowed) {
            /* new result is known to not be shadowed, so no point to look up further */
            break;
          }
        }
      }
    }
  }
  DBG("MATCH-RESULT: %d", curr_match_index);
  return curr_match_index;
}

always_inline int
hash_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
                             int is_ip6, u8 *action, u32 *acl_pos_p, u32 * acl_match_p,
                             u32 * rule_match_p, u32 * trace_bitmap)
{
  acl_main_t *am = &acl_main;
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
  u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
  if (match_index < vec_len((*applied_hash_aces))) {
    applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
    pae->hitcount++;
    *acl_pos_p = pae->acl_position;
    *acl_match_p = pae->acl_index;
    *rule_match_p = pae->ace_index;
    *action = pae->action;
    return 1;
  }
  return 0;
}

always_inline int
acl_plugin_match_5tuple_inline (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap)
{
  acl_main_t *am = &acl_main;
  if (am->use_hash_acl_matching) {
    return hash_multi_acl_match_5tuple(lc_index, (fa_5tuple_t *)pkt_5tuple, is_ip6, r_action,
                                       r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  } else {
    return linear_multi_acl_match_5tuple(lc_index, (fa_5tuple_t *)pkt_5tuple, is_ip6, r_action,
                                         r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
  }
}

#endif /* included_acl_inlines_h */