 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
16 #ifndef included_acl_inlines_h
17 #define included_acl_inlines_h
21 #include <plugins/acl/acl.h>
22 #include <plugins/acl/fa_node.h>
23 #include <plugins/acl/hash_lookup_private.h>
26 /* check if a given ACL exists */
28 #ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
31 * Define a pointer to the acl_main which will be filled during the initialization.
33 acl_main_t *p_acl_main = 0;
36 * If the file is included more than once, the symbol collision will make the problem obvious.
37 * If the include is done only once, it is just a lonely null var
40 void *ERROR_ACL_PLUGIN_EXPORTS_FILE_MUST_BE_INCLUDED_ONLY_IN_ONE_PLACE = 0;
42 u8 (*acl_plugin_acl_exists) (u32 acl_index);
44 u8 acl_plugin_acl_exists (u32 acl_index);
49 * If you are using ACL plugin, get this unique ID first,
50 * so you can identify yourself when creating the lookup contexts.
53 #ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
54 u32 (*acl_plugin_register_user_module) (char *caller_module_string, char *val1_label, char *val2_label);
56 u32 acl_plugin_register_user_module (char *caller_module_string, char *val1_label, char *val2_label);
60 * Allocate a new lookup context index.
61 * Supply the id assigned to your module during registration,
62 * and two values of your choice identifying instances
63 * of use within your module. They are useful for debugging.
65 #ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
66 int (*acl_plugin_get_lookup_context_index) (u32 acl_user_id, u32 val1, u32 val2);
68 int acl_plugin_get_lookup_context_index (u32 acl_user_id, u32 val1, u32 val2);
72 * Release the lookup context index and destroy
73 * any asssociated data structures.
75 #ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
76 void (*acl_plugin_put_lookup_context_index) (u32 lc_index);
78 void acl_plugin_put_lookup_context_index (u32 lc_index);
82 * Prepare the sequential vector of ACL#s to lookup within a given context.
83 * Any existing list will be overwritten. acl_list is a vector.
85 #ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
86 int (*acl_plugin_set_acl_vec_for_context) (u32 lc_index, u32 *acl_list);
88 int acl_plugin_set_acl_vec_for_context (u32 lc_index, u32 *acl_list);
91 /* Fill the 5-tuple from the packet */
93 #ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
94 void (*acl_plugin_fill_5tuple) (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
95 int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
97 void acl_plugin_fill_5tuple (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
98 int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt);
#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
static inline
void acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6, int is_input,
                                int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt) {
    /* FIXME: normally the inlined version of filling in the 5-tuple. But for now just call the non-inlined version */
    acl_plugin_fill_5tuple(lc_index, b0, is_ip6, is_input, is_l2_path, p5tuple_pkt);
}
#endif
111 #ifdef ACL_PLUGIN_EXTERNAL_EXPORTS
112 int (*acl_plugin_match_5tuple) (u32 lc_index,
113 fa_5tuple_opaque_t * pkt_5tuple,
114 int is_ip6, u8 * r_action,
117 u32 * r_rule_match_p,
120 int acl_plugin_match_5tuple (u32 lc_index,
121 fa_5tuple_opaque_t * pkt_5tuple,
122 int is_ip6, u8 * r_action,
125 u32 * r_rule_match_p,
#ifdef ACL_PLUGIN_DEFINED_BELOW_IN_FILE
/* Forwarding stub: the real inline implementation is defined later in this file. */
static inline int
acl_plugin_match_5tuple_inline (u32 lc_index,
                                fa_5tuple_opaque_t * pkt_5tuple,
                                int is_ip6, u8 * r_action,
                                u32 * r_acl_pos_p,
                                u32 * r_acl_match_p,
                                u32 * r_rule_match_p,
                                u32 * trace_bitmap) {
  return acl_plugin_match_5tuple(lc_index, pkt_5tuple, is_ip6, r_action, r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
}
#endif
#ifdef ACL_PLUGIN_EXTERNAL_EXPORTS

/*
 * Resolve symbol s from plugin p and store it into st.
 * NOTE: expands to a statement that does an early `return` with a
 * clib_error_t on failure, so it may only be used inside a function
 * returning clib_error_t *.
 */
#define LOAD_SYMBOL_FROM_PLUGIN_TO(p, s, st)                            \
({                                                                      \
    st = vlib_get_plugin_symbol(p, #s);                                 \
    if (!st)                                                            \
        return clib_error_return(0,                                     \
                "Plugin %s and/or symbol %s not found.", p, #s);        \
})

#define LOAD_SYMBOL(s) LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", s, s)

/*
 * Resolve all of the ACL plugin entry points declared above.
 * Must be called by the user module before using any of the
 * acl_plugin_* function pointers. Returns 0 on success, or a
 * clib_error_t * if the plugin or any symbol is missing.
 */
static inline clib_error_t * acl_plugin_exports_init (void)
{
    LOAD_SYMBOL_FROM_PLUGIN_TO("acl_plugin.so", acl_main, p_acl_main);
    LOAD_SYMBOL(acl_plugin_acl_exists);
    LOAD_SYMBOL(acl_plugin_register_user_module);
    LOAD_SYMBOL(acl_plugin_get_lookup_context_index);
    LOAD_SYMBOL(acl_plugin_put_lookup_context_index);
    LOAD_SYMBOL(acl_plugin_set_acl_vec_for_context);
    LOAD_SYMBOL(acl_plugin_fill_5tuple);
    LOAD_SYMBOL(acl_plugin_match_5tuple);
    return 0;
}

#endif /* ACL_PLUGIN_EXTERNAL_EXPORTS */
172 get_ptr_to_offset (vlib_buffer_t * b0, int offset)
174 u8 *p = vlib_buffer_get_current (b0) + offset;
179 offset_within_packet (vlib_buffer_t * b0, int offset)
181 /* For the purposes of this code, "within" means we have at least 8 bytes after it */
182 return (offset <= (b0->current_length - 8));
186 acl_fill_5tuple (acl_main_t * am, vlib_buffer_t * b0, int is_ip6,
187 int is_input, int is_l2_path, fa_5tuple_t * p5tuple_pkt)
189 /* IP4 and IP6 protocol numbers of ICMP */
190 static u8 icmp_protos_v4v6[] = { IP_PROTOCOL_ICMP, IP_PROTOCOL_ICMP6 };
199 l3_offset = ethernet_buffer_header_size(b0);
206 l3_offset = vnet_buffer(b0)->ip.save_rewrite_length;
209 /* key[0..3] contains src/dst address and is cleared/set below */
210 /* Remainder of the key and per-packet non-key data */
211 p5tuple_pkt->kv.key[4] = 0;
212 p5tuple_pkt->kv.value = 0;
213 p5tuple_pkt->pkt.is_ip6 = is_ip6;
217 clib_memcpy (&p5tuple_pkt->addr,
218 get_ptr_to_offset (b0,
219 offsetof (ip6_header_t,
220 src_address) + l3_offset),
221 sizeof (p5tuple_pkt->addr));
223 *(u8 *) get_ptr_to_offset (b0,
224 offsetof (ip6_header_t,
225 protocol) + l3_offset);
226 l4_offset = l3_offset + sizeof (ip6_header_t);
227 #ifdef FA_NODE_VERBOSE_DEBUG
228 clib_warning ("ACL_FA_NODE_DBG: proto: %d, l4_offset: %d", proto,
231 /* IP6 EH handling is here, increment l4_offset if needs to, update the proto */
232 int need_skip_eh = clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
233 if (PREDICT_FALSE (need_skip_eh))
235 while (need_skip_eh && offset_within_packet (b0, l4_offset))
237 /* Fragment header needs special handling */
238 if (PREDICT_FALSE(ACL_EH_FRAGMENT == proto))
240 proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
242 clib_memcpy (&frag_offset, get_ptr_to_offset (b0, 2 + l4_offset), sizeof(frag_offset));
243 frag_offset = clib_net_to_host_u16(frag_offset) >> 3;
246 p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
247 /* invalidate L4 offset so we don't try to find L4 info */
248 l4_offset += b0->current_length;
252 /* First fragment: skip the frag header and move on. */
258 u8 nwords = *(u8 *) get_ptr_to_offset (b0, 1 + l4_offset);
259 proto = *(u8 *) get_ptr_to_offset (b0, l4_offset);
260 l4_offset += 8 * (1 + (u16) nwords);
262 #ifdef FA_NODE_VERBOSE_DEBUG
263 clib_warning ("ACL_FA_NODE_DBG: new proto: %d, new offset: %d",
267 clib_bitmap_get (am->fa_ipv6_known_eh_bitmap, proto);
273 p5tuple_pkt->kv.key[0] = 0;
274 p5tuple_pkt->kv.key[1] = 0;
275 p5tuple_pkt->kv.key[2] = 0;
276 p5tuple_pkt->kv.key[3] = 0;
277 clib_memcpy (&p5tuple_pkt->addr[0].ip4,
278 get_ptr_to_offset (b0,
279 offsetof (ip4_header_t,
280 src_address) + l3_offset),
281 sizeof (p5tuple_pkt->addr[0].ip4));
282 clib_memcpy (&p5tuple_pkt->addr[1].ip4,
283 get_ptr_to_offset (b0,
284 offsetof (ip4_header_t,
285 dst_address) + l3_offset),
286 sizeof (p5tuple_pkt->addr[1].ip4));
288 *(u8 *) get_ptr_to_offset (b0,
289 offsetof (ip4_header_t,
290 protocol) + l3_offset);
291 l4_offset = l3_offset + sizeof (ip4_header_t);
292 u16 flags_and_fragment_offset;
293 clib_memcpy (&flags_and_fragment_offset,
294 get_ptr_to_offset (b0,
295 offsetof (ip4_header_t,
296 flags_and_fragment_offset)) + l3_offset,
297 sizeof(flags_and_fragment_offset));
298 flags_and_fragment_offset = clib_net_to_host_u16 (flags_and_fragment_offset);
300 /* non-initial fragments have non-zero offset */
301 if ((PREDICT_FALSE(0xfff & flags_and_fragment_offset)))
303 p5tuple_pkt->pkt.is_nonfirst_fragment = 1;
304 /* invalidate L4 offset so we don't try to find L4 info */
305 l4_offset += b0->current_length;
309 p5tuple_pkt->l4.proto = proto;
310 p5tuple_pkt->l4.is_input = is_input;
312 if (PREDICT_TRUE (offset_within_packet (b0, l4_offset)))
314 p5tuple_pkt->pkt.l4_valid = 1;
315 if (icmp_protos_v4v6[is_ip6] == proto)
318 p5tuple_pkt->l4.port[0] =
319 *(u8 *) get_ptr_to_offset (b0,
320 l4_offset + offsetof (icmp46_header_t,
323 p5tuple_pkt->l4.port[1] =
324 *(u8 *) get_ptr_to_offset (b0,
325 l4_offset + offsetof (icmp46_header_t,
327 p5tuple_pkt->l4.is_slowpath = 1;
329 else if ((IP_PROTOCOL_TCP == proto) || (IP_PROTOCOL_UDP == proto))
332 get_ptr_to_offset (b0,
333 l4_offset + offsetof (tcp_header_t,
336 p5tuple_pkt->l4.port[0] = clib_net_to_host_u16 (ports[0]);
337 p5tuple_pkt->l4.port[1] = clib_net_to_host_u16 (ports[1]);
339 p5tuple_pkt->pkt.tcp_flags =
340 *(u8 *) get_ptr_to_offset (b0,
341 l4_offset + offsetof (tcp_header_t,
343 p5tuple_pkt->pkt.tcp_flags_valid = (proto == IP_PROTOCOL_TCP);
344 p5tuple_pkt->l4.is_slowpath = 0;
348 p5tuple_pkt->l4.is_slowpath = 1;
354 acl_plugin_fill_5tuple_inline (u32 lc_index, vlib_buffer_t * b0, int is_ip6,
355 int is_input, int is_l2_path, fa_5tuple_opaque_t * p5tuple_pkt)
357 acl_main_t *am = p_acl_main;
358 acl_fill_5tuple(am, b0, is_ip6, is_input, is_l2_path, (fa_5tuple_t *)p5tuple_pkt);
364 fa_acl_match_addr (ip46_address_t * addr1, ip46_address_t * addr2,
365 int prefixlen, int is_ip6)
369 /* match any always succeeds */
374 if (memcmp (addr1, addr2, prefixlen / 8))
376 /* If the starting full bytes do not match, no point in bittwidling the thumbs further */
381 u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8);
382 u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8);
383 u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1));
384 return (b1 & mask0) == b2;
388 /* The prefix fits into integer number of bytes, so nothing left to do */
394 uint32_t a1 = clib_net_to_host_u32 (addr1->ip4.as_u32);
395 uint32_t a2 = clib_net_to_host_u32 (addr2->ip4.as_u32);
396 uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1);
397 return (a1 & mask0) == a2;
402 fa_acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6)
404 return ((port >= port_first) && (port <= port_last));
408 single_acl_match_5tuple (acl_main_t * am, u32 acl_index, fa_5tuple_t * pkt_5tuple,
409 int is_ip6, u8 * r_action, u32 * r_acl_match_p,
410 u32 * r_rule_match_p, u32 * trace_bitmap)
416 if (pool_is_free_index (am->acls, acl_index))
419 *r_acl_match_p = acl_index;
421 *r_rule_match_p = -1;
422 /* the ACL does not exist but is used for policy. Block traffic. */
425 a = am->acls + acl_index;
426 for (i = 0; i < a->count; i++)
429 #ifdef FA_NODE_VERBOSE_DEBUG
430 clib_warning("ACL_FA_NODE_DBG acl %d rule %d tag %s", acl_index, i, a->tag);
432 if (is_ip6 != r->is_ipv6)
436 if (!fa_acl_match_addr
437 (&pkt_5tuple->addr[1], &r->dst, r->dst_prefixlen, is_ip6))
440 #ifdef FA_NODE_VERBOSE_DEBUG
442 ("ACL_FA_NODE_DBG acl %d rule %d pkt dst addr %U match rule addr %U/%d",
443 acl_index, i, format_ip46_address, &pkt_5tuple->addr[1],
444 r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
445 &r->dst, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
449 if (!fa_acl_match_addr
450 (&pkt_5tuple->addr[0], &r->src, r->src_prefixlen, is_ip6))
453 #ifdef FA_NODE_VERBOSE_DEBUG
455 ("ACL_FA_NODE_DBG acl %d rule %d pkt src addr %U match rule addr %U/%d",
456 acl_index, i, format_ip46_address, &pkt_5tuple->addr[0],
457 r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4, format_ip46_address,
458 &r->src, r->is_ipv6 ? IP46_TYPE_IP6: IP46_TYPE_IP4,
461 ("ACL_FA_NODE_DBG acl %d rule %d trying to match pkt proto %d with rule %d",
462 acl_index, i, pkt_5tuple->l4.proto, r->proto);
466 if (pkt_5tuple->l4.proto != r->proto)
469 if (PREDICT_FALSE (pkt_5tuple->pkt.is_nonfirst_fragment &&
470 am->l4_match_nonfirst_fragment))
472 /* non-initial fragment with frag match configured - match this rule */
473 *trace_bitmap |= 0x80000000;
474 *r_action = r->is_permit;
476 *r_acl_match_p = acl_index;
482 /* A sanity check just to ensure we are about to match the ports extracted from the packet */
483 if (PREDICT_FALSE (!pkt_5tuple->pkt.l4_valid))
486 #ifdef FA_NODE_VERBOSE_DEBUG
488 ("ACL_FA_NODE_DBG acl %d rule %d pkt proto %d match rule %d",
489 acl_index, i, pkt_5tuple->l4.proto, r->proto);
492 if (!fa_acl_match_port
493 (pkt_5tuple->l4.port[0], r->src_port_or_type_first,
494 r->src_port_or_type_last, is_ip6))
497 #ifdef FA_NODE_VERBOSE_DEBUG
499 ("ACL_FA_NODE_DBG acl %d rule %d pkt sport %d match rule [%d..%d]",
500 acl_index, i, pkt_5tuple->l4.port[0], r->src_port_or_type_first,
501 r->src_port_or_type_last);
504 if (!fa_acl_match_port
505 (pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
506 r->dst_port_or_code_last, is_ip6))
509 #ifdef FA_NODE_VERBOSE_DEBUG
511 ("ACL_FA_NODE_DBG acl %d rule %d pkt dport %d match rule [%d..%d]",
512 acl_index, i, pkt_5tuple->l4.port[1], r->dst_port_or_code_first,
513 r->dst_port_or_code_last);
515 if (pkt_5tuple->pkt.tcp_flags_valid
516 && ((pkt_5tuple->pkt.tcp_flags & r->tcp_flags_mask) !=
520 /* everything matches! */
521 #ifdef FA_NODE_VERBOSE_DEBUG
522 clib_warning ("ACL_FA_NODE_DBG acl %d rule %d FULL-MATCH, action %d",
523 acl_index, i, r->is_permit);
525 *r_action = r->is_permit;
527 *r_acl_match_p = acl_index;
536 acl_plugin_single_acl_match_5tuple (u32 acl_index, fa_5tuple_t * pkt_5tuple,
537 int is_ip6, u8 * r_action, u32 * r_acl_match_p,
538 u32 * r_rule_match_p, u32 * trace_bitmap)
540 acl_main_t * am = p_acl_main;
541 return single_acl_match_5tuple(am, acl_index, pkt_5tuple, is_ip6, r_action,
542 r_acl_match_p, r_rule_match_p, trace_bitmap);
546 linear_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
547 int is_ip6, u8 *r_action, u32 *acl_pos_p, u32 * acl_match_p,
548 u32 * rule_match_p, u32 * trace_bitmap)
550 acl_main_t *am = p_acl_main;
554 acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
556 acl_vector = acontext->acl_indices;
558 for (i = 0; i < vec_len (acl_vector); i++)
560 #ifdef FA_NODE_VERBOSE_DEBUG
561 clib_warning ("ACL_FA_NODE_DBG: Trying to match ACL: %d",
564 if (single_acl_match_5tuple
565 (am, acl_vector[i], pkt_5tuple, is_ip6, &action,
566 acl_match_p, rule_match_p, trace_bitmap))
573 if (vec_len (acl_vector) > 0)
577 #ifdef FA_NODE_VERBOSE_DEBUG
578 clib_warning ("ACL_FA_NODE_DBG: No ACL on lc_index %d", lc_index);
580 /* If there are no ACLs defined we should not be here. */
587 * This returns true if there is indeed a match on the portranges.
588 * With all these levels of indirections, this is not going to be very fast,
589 * so, best use the individual ports or wildcard ports for performance.
592 match_portranges(acl_main_t *am, fa_5tuple_t *match, u32 index)
595 applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
596 applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), index);
598 acl_rule_t *r = &(am->acls[pae->acl_index].rules[pae->ace_index]);
600 #ifdef FA_NODE_VERBOSE_DEBUG
601 clib_warning("PORTMATCH: %d <= %d <= %d && %d <= %d <= %d ?",
602 r->src_port_or_type_first, match->l4.port[0], r->src_port_or_type_last,
603 r->dst_port_or_code_first, match->l4.port[1], r->dst_port_or_code_last);
606 return ( ((r->src_port_or_type_first <= match->l4.port[0]) && r->src_port_or_type_last >= match->l4.port[0]) &&
607 ((r->dst_port_or_code_first <= match->l4.port[1]) && r->dst_port_or_code_last >= match->l4.port[1]) );
611 multi_acl_match_get_applied_ace_index(acl_main_t *am, fa_5tuple_t *match)
613 clib_bihash_kv_48_8_t kv;
614 clib_bihash_kv_48_8_t result;
615 fa_5tuple_t *kv_key = (fa_5tuple_t *)kv.key;
616 hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
617 u64 *pmatch = (u64 *)match;
621 u32 curr_match_index = ~0;
623 u32 lc_index = match->pkt.lc_index;
624 applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, match->pkt.lc_index);
625 applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
627 DBG("TRYING TO MATCH: %016llx %016llx %016llx %016llx %016llx %016llx",
628 pmatch[0], pmatch[1], pmatch[2], pmatch[3], pmatch[4], pmatch[5]);
630 for(mask_type_index=0; mask_type_index < pool_len(am->ace_mask_type_pool); mask_type_index++) {
631 if (!clib_bitmap_get(vec_elt_at_index((*applied_hash_acls), lc_index)->mask_type_index_bitmap, mask_type_index)) {
632 /* This bit is not set. Avoid trying to match */
635 ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, mask_type_index);
636 pmatch = (u64 *)match;
637 pmask = (u64 *)&mte->mask;
638 pkey = (u64 *)kv.key;
640 * unrolling the below loop results in a noticeable performance increase.
643 kv.key[i] = pmatch[i] & pmask[i];
647 *pkey++ = *pmatch++ & *pmask++;
648 *pkey++ = *pmatch++ & *pmask++;
649 *pkey++ = *pmatch++ & *pmask++;
650 *pkey++ = *pmatch++ & *pmask++;
651 *pkey++ = *pmatch++ & *pmask++;
652 *pkey++ = *pmatch++ & *pmask++;
654 kv_key->pkt.mask_type_index_lsb = mask_type_index;
655 DBG(" KEY %3d: %016llx %016llx %016llx %016llx %016llx %016llx", mask_type_index,
656 kv.key[0], kv.key[1], kv.key[2], kv.key[3], kv.key[4], kv.key[5]);
657 int res = clib_bihash_search_48_8 (&am->acl_lookup_hash, &kv, &result);
659 DBG("ACL-MATCH! result_val: %016llx", result_val->as_u64);
660 if (result_val->applied_entry_index < curr_match_index) {
661 if (PREDICT_FALSE(result_val->need_portrange_check)) {
663 * This is going to be slow, since we can have multiple superset
664 * entries for narrow-ish portranges, e.g.:
665 * 0..42 100..400, 230..60000,
666 * so we need to walk linearly and check if they match.
669 u32 curr_index = result_val->applied_entry_index;
670 while ((curr_index != ~0) && !match_portranges(am, match, curr_index)) {
671 /* while no match and there are more entries, walk... */
672 applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces),curr_index);
673 DBG("entry %d did not portmatch, advancing to %d", curr_index, pae->next_applied_entry_index);
674 curr_index = pae->next_applied_entry_index;
676 if (curr_index < curr_match_index) {
677 DBG("The index %d is the new candidate in portrange matches.", curr_index);
678 curr_match_index = curr_index;
680 DBG("Curr portmatch index %d is too big vs. current matched one %d", curr_index, curr_match_index);
683 /* The usual path is here. Found an entry in front of the current candiate - so it's a new one */
684 DBG("This match is the new candidate");
685 curr_match_index = result_val->applied_entry_index;
686 if (!result_val->shadowed) {
687 /* new result is known to not be shadowed, so no point to look up further */
694 DBG("MATCH-RESULT: %d", curr_match_index);
695 return curr_match_index;
699 hash_multi_acl_match_5tuple (u32 lc_index, fa_5tuple_t * pkt_5tuple,
700 int is_ip6, u8 *action, u32 *acl_pos_p, u32 * acl_match_p,
701 u32 * rule_match_p, u32 * trace_bitmap)
703 acl_main_t *am = p_acl_main;
704 applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
705 u32 match_index = multi_acl_match_get_applied_ace_index(am, pkt_5tuple);
706 if (match_index < vec_len((*applied_hash_aces))) {
707 applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), match_index);
709 *acl_pos_p = pae->acl_position;
710 *acl_match_p = pae->acl_index;
711 *rule_match_p = pae->ace_index;
712 *action = pae->action;
721 acl_plugin_match_5tuple_inline (u32 lc_index,
722 fa_5tuple_opaque_t * pkt_5tuple,
723 int is_ip6, u8 * r_action,
726 u32 * r_rule_match_p,
729 acl_main_t *am = p_acl_main;
730 fa_5tuple_t * pkt_5tuple_internal = (fa_5tuple_t *)pkt_5tuple;
731 pkt_5tuple_internal->pkt.lc_index = lc_index;
732 if (am->use_hash_acl_matching) {
733 return hash_multi_acl_match_5tuple(lc_index, pkt_5tuple_internal, is_ip6, r_action,
734 r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);
736 return linear_multi_acl_match_5tuple(lc_index, pkt_5tuple_internal, is_ip6, r_action,
737 r_acl_pos_p, r_acl_match_p, r_rule_match_p, trace_bitmap);