#include <vppinfra/bihash_48_8.h>
#include <vppinfra/bihash_40_8.h>
#include <vppinfra/bihash_16_8.h>
+#include <vlib/counter.h>
#include "types.h"
#include "fa_node.h"
#include "lookup_context.h"
#define ACL_PLUGIN_VERSION_MAJOR 1
-#define ACL_PLUGIN_VERSION_MINOR 3
+#define ACL_PLUGIN_VERSION_MINOR 4
#define UDP_SESSION_IDLE_TIMEOUT_SEC 600
#define TCP_SESSION_IDLE_TIMEOUT_SEC (3600*24)
/** Required for pool_get_aligned */
CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
u8 tag[64];
- u32 count;
acl_rule_t *rules;
} acl_list_t;
CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
fa_5tuple_t mask;
u32 refcount;
+ u8 from_tm;
} ace_mask_type_entry_t;
/* Top-level per-plugin state for the ACL plugin: hash-lookup context,
 * flow-aware (FA) session tables, cleaner-process bookkeeping and
 * stats-segment counters. A single instance is expected (see the
 * `acl_main` reference in acl_log_err below). */
typedef struct {
  hash_acl_info_t *hash_acl_infos; /* corresponding hash matching housekeeping info */
  clib_bihash_48_8_t acl_lookup_hash; /* ACL lookup hash table. */
  u32 hash_lookup_hash_buckets;   /* bucket count used to size acl_lookup_hash */
  uword hash_lookup_hash_memory;  /* memory size for acl_lookup_hash (uword so large heaps fit on 64-bit) */
  /* mheap to hold all the miscellaneous allocations related to hash-based lookups */
  void *hash_lookup_mheap;
  /* Do we use hash-based ACL matching or linear */
  int use_hash_acl_matching;
  /* Do we use the TupleMerge for hash ACLs or not */
  int use_tuple_merge;

  /* Max collision vector length before splitting the tuple */
#define TM_SPLIT_THRESHOLD 39
  int tuple_merge_split_threshold;

  /* a pool of all mask types present in all ACEs */
  ace_mask_type_entry_t *ace_mask_type_pool;
  int fa_sessions_hash_is_initialized; /* nonzero once the two session bihashes below are set up */
  clib_bihash_40_8_t fa_ip6_sessions_hash; /* FA session lookup, IPv6 5-tuples */
  clib_bihash_16_8_t fa_ip4_sessions_hash; /* FA session lookup, IPv4 5-tuples */
  /* The process node which orchestrates the cleanup */
  u32 fa_cleaner_node_index;
  /* FA session timeouts, in seconds */
  u32 session_timeout_sec[ACL_N_TIMEOUTS];
  /* how many sessions went into purgatory */
  u64 fa_session_total_deactivations;
  /* EH values that we can skip over */
  uword *fa_ipv6_known_eh_bitmap;
  vnet_main_t * vnet_main; /* convenience back-pointer to the vnet main structure */
  /* logging */
  vlib_log_class_t log_default;
  /* acl counters exposed via stats segment */
  volatile u32 *acl_counter_lock; /* spinlock word guarding combined_acl_counters; may be NULL when counters are unused */
  vlib_combined_counter_main_t *combined_acl_counters;
  /* enable/disable ACL counters for interface processing */
  u32 interface_acl_counters_enabled;
} acl_main_t;
/* Log an error-level message to the plugin's default log class.
 * Fix: this macro previously passed VLIB_LOG_LEVEL_INFO, so anything
 * reported through acl_log_err was classified (and potentially filtered)
 * as informational rather than as an error. */
#define acl_log_err(...) \
  vlib_log(VLIB_LOG_LEVEL_ERR, acl_main.log_default, __VA_ARGS__)
+static inline void
+acl_plugin_counter_lock (acl_main_t * am)
+{
+ if (am->acl_counter_lock)
+ while (clib_atomic_test_and_set (am->acl_counter_lock))
+ /* zzzz */ ;
+}
+
+static inline void
+acl_plugin_counter_unlock (acl_main_t * am)
+{
+ if (am->acl_counter_lock)
+ clib_atomic_release (am->acl_counter_lock);
+}
+
#define foreach_acl_eh \
_(HOPBYHOP , 0 , "IPv6ExtHdrHopByHop") \
void *acl_plugin_set_heap();
/* Kinds of session-change requests that can be posted to another worker
 * thread for asynchronous handling. */
typedef enum {
  ACL_FA_REQ_SESS_RESCHEDULE = 0, /* re-evaluate a session's timeout scheduling — presumably moves it between timeout lists; confirm in the FA node implementation */
  ACL_FA_N_REQ, /* number of request types; must stay last */
} acl_fa_sess_req_t;

/* Post a session-change request of `request_type` for session
 * `target_session` to worker thread `target_thread`. */
void aclp_post_session_change_request(acl_main_t *am, u32 target_thread, u32 target_session, acl_fa_sess_req_t request_type);
/* Swap the work-in-progress and pending request queues for
 * `target_thread` (producer/consumer hand-off for the requests above). */
void aclp_swap_wip_and_pending_session_change_requests(acl_main_t *am, u32 target_thread);
+
#endif