_(MACIP_ACL_DEL, macip_acl_del) \
_(MACIP_ACL_INTERFACE_ADD_DEL, macip_acl_interface_add_del) \
_(MACIP_ACL_DUMP, macip_acl_dump) \
-_(MACIP_ACL_INTERFACE_GET, macip_acl_interface_get)
+_(MACIP_ACL_INTERFACE_GET, macip_acl_interface_get) \
+_(MACIP_ACL_INTERFACE_LIST_DUMP, macip_acl_interface_list_dump)
+
/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () = {
};
/* *INDENT-ON* */
+
+/* Switch the active clib memory heap to the ACL plugin's private mheap,
+   creating that heap on first use (2 << 29 bytes, VM-backed, marked
+   thread-safe). Returns the previously active heap so the caller can
+   restore it with clib_mem_set_heap() when done. */
+static void *
+acl_set_heap(acl_main_t *am)
+{
+ if (0 == am->acl_mheap) {
+ /* Lazy first-use allocation of the plugin heap. */
+ am->acl_mheap = mheap_alloc (0 /* use VM */ , 2 << 29);
+ mheap_t *h = mheap_header (am->acl_mheap);
+ h->flags |= MHEAP_FLAG_THREAD_SAFE;
+ }
+ void *oldheap = clib_mem_set_heap(am->acl_mheap);
+ return oldheap;
+}
+
+/* Toggle mheap validation (MHEAP_FLAG_VALIDATE) on the ACL plugin heap,
+   creating the heap first if needed. While validation is on, the
+   small-object cache flag is cleared (presumably so validation covers all
+   allocations -- confirm against mheap internals), and one validation pass
+   runs immediately. Turning validation off restores the cache flag. */
+void
+acl_plugin_acl_set_validate_heap(acl_main_t *am, int on)
+{
+ /* acl_set_heap() ensures the heap exists; restore the old heap at once. */
+ clib_mem_set_heap(acl_set_heap(am));
+ mheap_t *h = mheap_header (am->acl_mheap);
+ if (on) {
+ h->flags |= MHEAP_FLAG_VALIDATE;
+ h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
+ mheap_validate(h);
+ } else {
+ h->flags &= ~MHEAP_FLAG_VALIDATE;
+ h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
+ }
+}
+
+/* Toggle mheap allocation tracing (MHEAP_FLAG_TRACE) on the ACL plugin
+   heap, creating the heap on first use via acl_set_heap(). */
+void
+acl_plugin_acl_set_trace_heap(acl_main_t *am, int on)
+{
+ /* acl_set_heap() ensures the heap exists; restore the old heap at once. */
+ clib_mem_set_heap(acl_set_heap(am));
+ mheap_t *h = mheap_header (am->acl_mheap);
+ if (on) {
+ h->flags |= MHEAP_FLAG_TRACE;
+ } else {
+ h->flags &= ~MHEAP_FLAG_TRACE;
+ }
+}
+
static void
vl_api_acl_plugin_get_version_t_handler (vl_api_acl_plugin_get_version_t * mp)
{
acl_main_t *am = &acl_main;
acl_list_t *a;
acl_rule_t *r;
- acl_rule_t *acl_new_rules;
+ acl_rule_t *acl_new_rules = 0;
int i;
if (*acl_list_index != ~0)
if (pool_is_free_index (am->acls, *acl_list_index))
{
/* tried to replace a non-existent ACL, no point doing anything */
+ clib_warning("acl-plugin-error: Trying to replace nonexistent ACL %d (tag %s)", *acl_list_index, tag);
return -1;
}
}
+ if (0 == count) {
+ clib_warning("acl-plugin-warning: supplied no rules for ACL %d (tag %s)", *acl_list_index, tag);
+ }
+
+ void *oldheap = acl_set_heap(am);
/* Create and populate the rules */
- acl_new_rules = clib_mem_alloc_aligned (sizeof (acl_rule_t) * count,
- CLIB_CACHE_LINE_BYTES);
- if (!acl_new_rules)
- {
- /* Could not allocate rules. New or existing ACL - bail out regardless */
- return -1;
- }
+ if (count > 0)
+ vec_validate(acl_new_rules, count-1);
for (i = 0; i < count; i++)
{
- r = &acl_new_rules[i];
+ r = vec_elt_at_index(acl_new_rules, i);
memset(r, 0, sizeof(*r));
r->is_permit = rules[i].is_permit;
r->is_ipv6 = rules[i].is_ipv6;
a = am->acls + *acl_list_index;
hash_acl_delete(am, *acl_list_index);
/* Get rid of the old rules */
- clib_mem_free (a->rules);
+ if (a->rules)
+ vec_free (a->rules);
}
a->rules = acl_new_rules;
a->count = count;
memcpy (a->tag, tag, sizeof (a->tag));
hash_acl_add(am, *acl_list_index);
-
+ clib_mem_set_heap (oldheap);
return 0;
}
}
}
+ void *oldheap = acl_set_heap(am);
/* delete any references to the ACL */
for (i = 0; i < vec_len (am->output_acl_vec_by_sw_if_index); i++)
{
/* now we can delete the ACL itself */
a = &am->acls[acl_list_index];
if (a->rules)
- {
- clib_mem_free (a->rules);
- }
+ vec_free (a->rules);
+
pool_put (am->acls, a);
+ clib_mem_set_heap (oldheap);
return 0;
}
if (0 == match)
match = 1;
-
- return vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets,
+ void *oldheap = clib_mem_set_heap (cm->vlib_main->heap_base);
+ int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets,
memory_size, skip, match,
next_table_index, miss_next_index,
table_index, current_data_flag,
current_data_offset, is_add,
1 /* delete_chain */);
+ clib_mem_set_heap (oldheap);
+ return ret;
}
static int
if (0 == match)
match = 1;
- return vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets,
+ void *oldheap = clib_mem_set_heap (cm->vlib_main->heap_base);
+ int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets,
memory_size, skip, match,
next_table_index, miss_next_index,
table_index, current_data_flag,
current_data_offset, is_add,
1 /* delete_chain */);
+ clib_mem_set_heap (oldheap);
+ return ret;
}
vnet_classify_main_t *cm = &vnet_classify_main;
u32 ip4_table_index = ~0;
u32 ip6_table_index = ~0;
+ void *oldheap = acl_set_heap(am);
vec_validate_init_empty (am->acl_ip4_input_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty (am->acl_ip6_input_classify_table_by_sw_if_index,
sw_if_index, ~0);
+ /* switch to global heap while calling vnet_* functions */
+ clib_mem_set_heap (cm->vlib_main->heap_base);
vnet_l2_input_classify_enable_disable (sw_if_index, 0);
if (am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] != ~0)
am->l2_input_classify_next_acl_ip6,
&ip6_table_index, 0);
}
-
+ clib_mem_set_heap (oldheap);
return 0;
}
vnet_classify_main_t *cm = &vnet_classify_main;
u32 ip4_table_index = ~0;
u32 ip6_table_index = ~0;
+ void *oldheap = acl_set_heap(am);
vec_validate_init_empty (am->acl_ip4_output_classify_table_by_sw_if_index,
sw_if_index, ~0);
vec_validate_init_empty (am->acl_ip6_output_classify_table_by_sw_if_index,
sw_if_index, ~0);
+ /* switch to global heap while calling vnet_* functions */
+ clib_mem_set_heap (cm->vlib_main->heap_base);
+
vnet_l2_output_classify_enable_disable (sw_if_index, 0);
if (am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] != ~0)
am->l2_output_classify_next_acl_ip6,
&ip6_table_index, 0);
}
-
+ clib_mem_set_heap (oldheap);
return 0;
}
u32 ip6_table_index = ~0;
int rv;
+ void *prevheap = clib_mem_set_heap (cm->vlib_main->heap_base);
+
/* in case there were previous tables attached */
acl_unhook_l2_input_classify (am, sw_if_index);
rv =
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 1);
if (rv)
- return rv;
+ goto done;
rv =
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
- return rv;
+ goto done;
}
rv =
vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_input_classify_next_acl_ip4,
&ip4_table_index, 0);
- return rv;
+ goto done;
}
am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] =
ip6_table_index;
vnet_l2_input_classify_enable_disable (sw_if_index, 1);
+done:
+ clib_mem_set_heap (prevheap);
return rv;
}
u32 ip6_table_index = ~0;
int rv;
+ void *prevheap = clib_mem_set_heap (cm->vlib_main->heap_base);
+
/* in case there were previous tables attached */
acl_unhook_l2_output_classify (am, sw_if_index);
rv =
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 1);
if (rv)
- return rv;
+ goto done;
rv =
acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask,
sizeof (ip6_5tuple_mask) - 1, ~0,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
- return rv;
+ goto done;
}
rv =
vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index,
sizeof (ip4_5tuple_mask) - 1, ~0,
am->l2_output_classify_next_acl_ip4,
&ip4_table_index, 0);
- return rv;
+ goto done;
}
am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] =
ip6_table_index;
vnet_l2_output_classify_enable_disable (sw_if_index, 1);
+done:
+ clib_mem_set_heap (prevheap);
return rv;
}
/* ACL is not defined. Can not apply */
return -1;
}
+ void *oldheap = acl_set_heap(am);
if (is_input)
{
clib_warning("ACL %d is already applied inbound on sw_if_index %d (index %d)",
acl_list_index, sw_if_index, index);
/* the entry is already there */
+ clib_mem_set_heap (oldheap);
return -1;
}
/* if there was no ACL applied before, enable the ACL processing */
clib_warning("ACL %d is already applied outbound on sw_if_index %d (index %d)",
acl_list_index, sw_if_index, index);
/* the entry is already there */
+ clib_mem_set_heap (oldheap);
return -1;
}
/* if there was no ACL applied before, enable the ACL processing */
vec_add (am->output_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index,
1);
}
+ clib_mem_set_heap (oldheap);
return 0;
}
acl_main_t *am = &acl_main;
int i;
int rv = -1;
+ void *oldheap = acl_set_heap(am);
if (is_input)
{
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
acl_interface_out_enable_disable (am, sw_if_index, 0);
}
}
+ clib_mem_set_heap (oldheap);
return rv;
}
{
acl_main_t *am = &acl_main;
int i;
+ void *oldheap = acl_set_heap(am);
if (is_input)
{
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
vec_reset_length (am->output_acl_vec_by_sw_if_index[sw_if_index]);
}
+ clib_mem_set_heap (oldheap);
}
static int
{
int rv = -1;
acl_main_t *am = &acl_main;
+ void *oldheap = acl_set_heap(am);
if (is_add)
{
rv =
rv =
acl_interface_del_inout_acl (sw_if_index, is_input, acl_list_index);
}
+ clib_mem_set_heap (oldheap);
return rv;
}
match_type_index = vec_len (mvec);
vec_validate (mvec, match_type_index);
memcpy (mvec[match_type_index].mac_mask,
- a->rules[match_type_index].src_mac_mask, 6);
+ a->rules[i].src_mac_mask, 6);
mvec[match_type_index].prefix_len = a->rules[i].src_prefixlen;
mvec[match_type_index].is_ipv6 = a->rules[i].is_ipv6;
mvec[match_type_index].table_index = ~0;
macip_find_match_type (mvec, a->rules[i].src_mac_mask,
a->rules[i].src_prefixlen,
a->rules[i].is_ipv6);
+ ASSERT(match_type_index != ~0);
/* add session to table mvec[match_type_index].table_index; */
vnet_classify_add_del_session (cm, mvec[match_type_index].table_index,
mask, a->rules[i].is_permit ? ~0 : 0, i,
acl_main_t *am = &acl_main;
macip_acl_list_t *a;
macip_acl_rule_t *r;
- macip_acl_rule_t *acl_new_rules;
+ macip_acl_rule_t *acl_new_rules = 0;
int i;
-
+ if (0 == count) {
+ clib_warning("acl-plugin-warning: Trying to create empty MACIP ACL (tag %s)", tag);
+ }
+ void *oldheap = acl_set_heap(am);
/* Create and populate the rules */
- acl_new_rules = clib_mem_alloc_aligned (sizeof (macip_acl_rule_t) * count,
- CLIB_CACHE_LINE_BYTES);
- if (!acl_new_rules)
- {
- /* Could not allocate rules. New or existing ACL - bail out regardless */
- return -1;
- }
+ if (count > 0)
+ vec_validate(acl_new_rules, count-1);
for (i = 0; i < count; i++)
{
/* Create and populate the classifer tables */
macip_create_classify_tables (am, *acl_list_index);
-
+ clib_mem_set_heap (oldheap);
return 0;
}
int rv;
u32 macip_acl_index;
macip_acl_list_t *a;
+ void *oldheap = acl_set_heap(am);
vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0);
+ clib_mem_set_heap (oldheap);
macip_acl_index = am->macip_acl_by_sw_if_index[sw_if_index];
/* No point in deleting MACIP ACL which is not applied */
if (~0 == macip_acl_index)
{
return -1;
}
+ void *oldheap = acl_set_heap(am);
a = &am->macip_acls[macip_acl_index];
vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0);
/* If there already a MACIP ACL applied, unapply it */
if (~0 != am->macip_acl_by_sw_if_index[sw_if_index])
macip_acl_interface_del_acl(am, sw_if_index);
am->macip_acl_by_sw_if_index[sw_if_index] = macip_acl_index;
+ clib_mem_set_heap (oldheap);
+
/* Apply the classifier tables for L2 ACLs */
rv =
vnet_set_input_acl_intfc (am->vlib_main, sw_if_index, a->ip4_table_index,
macip_acl_del_list (u32 acl_list_index)
{
acl_main_t *am = &acl_main;
+ void *oldheap = acl_set_heap(am);
macip_acl_list_t *a;
int i;
if (pool_is_free_index (am->macip_acls, acl_list_index))
a = &am->macip_acls[acl_list_index];
if (a->rules)
{
- clib_mem_free (a->rules);
+ vec_free (a->rules);
}
pool_put (am->macip_acls, a);
+ clib_mem_set_heap (oldheap);
return 0;
}
u32 acl_list_index)
{
acl_main_t *am = &acl_main;
+ void *oldheap = acl_set_heap(am);
int rv = -1;
if (is_add)
{
{
rv = macip_acl_interface_del_acl (am, sw_if_index);
}
+ clib_mem_set_heap (oldheap);
return rv;
}
vl_api_acl_rule_t *rules;
int i;
int msg_size = sizeof (*mp) + sizeof (mp->r[0]) * acl->count;
+ void *oldheap = acl_set_heap(am);
mp = vl_msg_api_alloc (msg_size);
memset (mp, 0, msg_size);
}
clib_warning("Sending acl details for ACL index %d", ntohl(mp->acl_index));
+ clib_mem_set_heap (oldheap);
vl_msg_api_send_shmem (q, (u8 *) & mp);
}
int n_output;
int count;
int i = 0;
+ void *oldheap = acl_set_heap(am);
vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index);
vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index);
mp->acls[n_input + i] =
htonl (am->output_acl_vec_by_sw_if_index[sw_if_index][i]);
}
-
+ clib_mem_set_heap (oldheap);
vl_msg_api_send_shmem (q, (u8 *) & mp);
}
vl_msg_api_send_shmem (q, (u8 *) & rmp);
}
+/* Send a single MACIP_ACL_INTERFACE_LIST_DETAILS message for sw_if_index
+   to the client queue q. Since at most one MACIP ACL can be applied per
+   interface, the reply always carries exactly one acl index (count == 1). */
+static void
+send_macip_acl_interface_list_details (acl_main_t * am,
+                                       unix_shared_memory_queue_t * q,
+                                       u32 sw_if_index,
+                                       u32 acl_index,
+                                       u32 context)
+{
+  vl_api_macip_acl_interface_list_details_t *rmp;
+  /* at this time there is only ever 1 mac ip acl per interface */
+  int msg_size = sizeof (*rmp) + sizeof (rmp->acls[0]);
+
+  rmp = vl_msg_api_alloc (msg_size);
+  memset (rmp, 0, msg_size);
+  rmp->_vl_msg_id = ntohs (VL_API_MACIP_ACL_INTERFACE_LIST_DETAILS + am->msg_id_base);
+
+  /* fill in the message */
+  rmp->context = context;
+  rmp->count = 1;
+  rmp->sw_if_index = htonl (sw_if_index);
+  rmp->acls[0] = htonl (acl_index);
+
+  vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+/* Handler for MACIP_ACL_INTERFACE_LIST_DUMP. If the request's sw_if_index
+   is ~0, reply with one details message per interface that has a MACIP ACL
+   applied; otherwise reply only for the requested interface. Interfaces
+   outside the applied-ACL vector get no reply at all. */
+static void
+vl_api_macip_acl_interface_list_dump_t_handler (vl_api_macip_acl_interface_list_dump_t *mp)
+{
+  unix_shared_memory_queue_t *q;
+  acl_main_t *am = &acl_main;
+  u32 sw_if_index = ntohl (mp->sw_if_index);
+
+  q = vl_api_client_index_to_input_queue (mp->client_index);
+  if (q == 0)
+    {
+      /* client queue is gone; nothing to reply to */
+      return;
+    }
+
+  if (sw_if_index == ~0)
+    {
+      /* wildcard: walk every interface with an applied MACIP ACL */
+      vec_foreach_index(sw_if_index, am->macip_acl_by_sw_if_index)
+      {
+        if (~0 != am->macip_acl_by_sw_if_index[sw_if_index])
+          {
+            send_macip_acl_interface_list_details(am, q,  sw_if_index,
+                                                  am->macip_acl_by_sw_if_index[sw_if_index],
+                                                  mp->context);
+          }
+      }
+    }
+  else
+    {
+      /* single interface: only reply if the index is within the vector */
+      if (vec_len(am->macip_acl_by_sw_if_index) > sw_if_index)
+        {
+          send_macip_acl_interface_list_details(am, q, sw_if_index,
+                                                am->macip_acl_by_sw_if_index[sw_if_index],
+                                                mp->context);
+        }
+    }
+}
+
/* Set up the API message handling tables */
static clib_error_t *
acl_plugin_api_hookup (vlib_main_t * vm)
acl_set_skip_ipv6_eh(u32 eh, u32 value)
{
acl_main_t *am = &acl_main;
+
if ((eh < 256) && (value < 2))
{
am->fa_ipv6_known_eh_bitmap = clib_bitmap_set(am->fa_ipv6_known_eh_bitmap, eh, value);
acl_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
{
acl_main_t *am = &acl_main;
+ if (0 == am->acl_mheap) {
+ /* ACL heap is not initialized, so definitely nothing to do. */
+ return 0;
+ }
if (0 == is_add) {
vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index,
ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, sw_if_index);
VNET_SW_INTERFACE_ADD_DEL_FUNCTION (acl_sw_interface_add_del);
+
+
static clib_error_t *
acl_set_aclplugin_fn (vlib_main_t * vm,
unformat_input_t * input,
am->l4_match_nonfirst_fragment = (val != 0);
goto done;
}
+ if (unformat (input, "heap"))
+ {
+ if (unformat(input, "main"))
+ {
+ if (unformat(input, "validate %u", &val))
+ acl_plugin_acl_set_validate_heap(am, val);
+ else if (unformat(input, "trace %u", &val))
+ acl_plugin_acl_set_trace_heap(am, val);
+ goto done;
+ }
+ else if (unformat(input, "hash"))
+ {
+ if (unformat(input, "validate %u", &val))
+ acl_plugin_hash_acl_set_validate_heap(am, val);
+ else if (unformat(input, "trace %u", &val))
+ acl_plugin_hash_acl_set_trace_heap(am, val);
+ goto done;
+ }
+ goto done;
+ }
if (unformat (input, "session")) {
if (unformat (input, "table")) {
/* The commands here are for tuning/testing. No user-serviceable parts inside */
return error;
}
+/* format() callback: render a 6-byte MAC address as colon-separated hex. */
+static u8 *
+my_format_mac_address (u8 * s, va_list * args)
+{
+  u8 *a = va_arg (*args, u8 *);
+  return format (s, "%02x:%02x:%02x:%02x:%02x:%02x",
+		 a[0], a[1], a[2], a[3], a[4], a[5]);
+}
+
+/* format() callback: render one macip_acl_rule_t as a human-readable
+   one-liner (address family, permit/deny action, source IP prefix,
+   source MAC and MAC mask). */
+static inline u8 *
+my_macip_acl_rule_t_pretty_format (u8 *out, va_list *args)
+{
+  macip_acl_rule_t *a = va_arg (*args, macip_acl_rule_t *);
+
+  out = format(out, "%s action %d ip %U/%d mac %U mask %U",
+                     a->is_ipv6 ? "ipv6" : "ipv4", a->is_permit,
+                     format_ip46_address, &a->src_ip_addr, IP46_TYPE_ANY,
+                     a->src_prefixlen,
+                     my_format_mac_address, a->src_mac,
+                     my_format_mac_address, a->src_mac_mask);
+  return(out);
+}
+
+/* Print one MACIP ACL (pool slot state, classifier table indices, and all
+   rules) to the CLI. The index is bounds-checked against the pool vector
+   before any dereference. */
+static void
+macip_acl_print(acl_main_t *am, u32 macip_acl_index)
+{
+  vlib_main_t * vm = am->vlib_main;
+  int i;
+
+  /* Don't try to print someone else's memory.
+     index == vec_len() is already one past the end, so reject with >=
+     (the previous '>' comparison let that off-by-one reach
+     vec_elt_at_index below). */
+  if (macip_acl_index >= vec_len(am->macip_acls))
+    return;
+
+  macip_acl_list_t *a = vec_elt_at_index(am->macip_acls, macip_acl_index);
+  int free_pool_slot = pool_is_free_index(am->macip_acls, macip_acl_index);
+
+  vlib_cli_output(vm, "MACIP acl_index: %d, count: %d (true len %d) tag {%s} is free pool slot: %d\n",
+                  macip_acl_index, a->count, vec_len(a->rules), a->tag, free_pool_slot);
+  vlib_cli_output(vm, "  ip4_table_index %d, ip6_table_index %d, l2_table_index %d\n",
+                  a->ip4_table_index, a->ip6_table_index, a->l2_table_index);
+  for(i=0; i<vec_len(a->rules); i++)
+    vlib_cli_output(vm, "    rule %d: %U\n", i, my_macip_acl_rule_t_pretty_format,
+                    vec_elt_at_index(a->rules, i));
+
+}
+
+/* CLI handler for "show acl-plugin macip {acl|interface}".
+   "interface" lists the MACIP ACL index applied on each sw_if_index
+   (~0 means none); "acl" pretty-prints every MACIP ACL in the pool. */
+static clib_error_t *
+acl_show_aclplugin_macip_fn (vlib_main_t * vm,
+                              unformat_input_t * input,
+                              vlib_cli_command_t * cmd)
+{
+  clib_error_t *error = 0;
+  acl_main_t *am = &acl_main;
+  int i;
+  if (unformat (input, "interface"))
+    {
+      for(i=0; i < vec_len(am->macip_acl_by_sw_if_index); i++)
+        {
+          vlib_cli_output(vm, "  sw_if_index %d: %d\n", i, vec_elt(am->macip_acl_by_sw_if_index, i));
+        }
+    }
+  else if (unformat (input, "acl"))
+    {
+      for(i=0; i < vec_len(am->macip_acls); i++)
+        macip_acl_print(am, i);
+    }
+  return error;
+}
+
+
static clib_error_t *
acl_show_aclplugin_fn (vlib_main_t * vm,
unformat_input_t * input,
out0 = format(out0, " interrupt is pending: %d\n", pw->interrupt_is_pending);
out0 = format(out0, " interrupt is needed: %d\n", pw->interrupt_is_needed);
out0 = format(out0, " interrupt is unwanted: %d\n", pw->interrupt_is_unwanted);
+ out0 = format(out0, " interrupt generation: %d\n", pw->interrupt_generation);
}
out0 = format(out0, "\n\nConn cleaner thread counters:\n");
#define _(cnt, desc) out0 = format(out0, " %20lu: %s\n", am->cnt, desc);
#undef _
vec_terminate_c_string(out0);
vlib_cli_output(vm, "\n\n%s\n\n", out0);
+ vlib_cli_output(vm, "Interrupt generation: %d\n", am->fa_interrupt_generation);
vlib_cli_output(vm, "Sessions per interval: min %lu max %lu increment: %f ms current: %f ms",
am->fa_min_deleted_sessions_per_interval, am->fa_max_deleted_sessions_per_interval,
am->fa_cleaner_wait_time_increment * 1000.0, ((f64)am->fa_current_cleaner_timer_wait_interval) * 1000.0/(f64)vm->clib_time.clocks_per_second);
vlib_cli_output(vm, "\n%s\n", out0);
vec_free(out0);
}
+ else if (unformat (input, "memory"))
+ {
+ vlib_cli_output (vm, "ACL plugin main heap statistics:\n");
+ if (am->acl_mheap) {
+ vlib_cli_output (vm, " %U\n", format_mheap, am->acl_mheap, 1);
+ } else {
+ vlib_cli_output (vm, " Not initialized\n");
+ }
+ vlib_cli_output (vm, "ACL hash lookup support heap statistics:\n");
+ if (am->hash_lookup_mheap) {
+ vlib_cli_output (vm, " %U\n", format_mheap, am->hash_lookup_mheap, 1);
+ } else {
+ vlib_cli_output (vm, " Not initialized\n");
+ }
+ }
else if (unformat (input, "tables"))
{
ace_mask_type_entry_t *mte;
if (swi < vec_len(am->input_applied_hash_acl_info_by_sw_if_index)) {
applied_hash_acl_info_t *pal = &am->input_applied_hash_acl_info_by_sw_if_index[swi];
out0 = format(out0, " input lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap);
+ out0 = format(out0, " input applied acls: %U\n", format_vec32, pal->applied_acls, "%d");
}
if (swi < vec_len(am->input_hash_entry_vec_by_sw_if_index)) {
out0 = format(out0, " input lookup applied entries:\n");
for(j=0; j<vec_len(am->input_hash_entry_vec_by_sw_if_index[swi]); j++) {
applied_hash_ace_entry_t *pae = &am->input_hash_entry_vec_by_sw_if_index[swi][j];
- out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d\n",
+ out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d\n",
j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index,
- pae->next_applied_entry_index, pae->prev_applied_entry_index);
+ pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index);
}
}
if (swi < vec_len(am->output_applied_hash_acl_info_by_sw_if_index)) {
applied_hash_acl_info_t *pal = &am->output_applied_hash_acl_info_by_sw_if_index[swi];
out0 = format(out0, " output lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap);
+ out0 = format(out0, " output applied acls: %U\n", format_vec32, pal->applied_acls, "%d");
}
if (swi < vec_len(am->output_hash_entry_vec_by_sw_if_index)) {
out0 = format(out0, " output lookup applied entries:\n");
for(j=0; j<vec_len(am->output_hash_entry_vec_by_sw_if_index[swi]); j++) {
applied_hash_ace_entry_t *pae = &am->output_hash_entry_vec_by_sw_if_index[swi][j];
- out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d\n",
+ out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d\n",
j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index,
- pae->next_applied_entry_index, pae->prev_applied_entry_index);
+ pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index);
}
}
.function = acl_show_aclplugin_fn,
};
+/* Register the "show acl-plugin macip" debug CLI command. */
+VLIB_CLI_COMMAND (aclplugin_show_macip_command, static) = {
+    .path = "show acl-plugin macip",
+    .short_help = "show acl-plugin macip {acl|interface}",
+    .function = acl_show_aclplugin_macip_fn,
+};
+
+
VLIB_CLI_COMMAND (aclplugin_clear_command, static) = {
.path = "clear acl-plugin sessions",
.short_help = "clear acl-plugin sessions",
vec_free (name);
acl_setup_fa_nodes();
+
am->session_timeout_sec[ACL_TIMEOUT_TCP_TRANSIENT] = TCP_SESSION_TRANSIENT_TIMEOUT_SEC;
am->session_timeout_sec[ACL_TIMEOUT_TCP_IDLE] = TCP_SESSION_IDLE_TIMEOUT_SEC;
am->session_timeout_sec[ACL_TIMEOUT_UDP_IDLE] = UDP_SESSION_IDLE_TIMEOUT_SEC;
am->l4_match_nonfirst_fragment = 1;
/* use the new fancy hash-based matching */
- // NOT IMMEDIATELY
am->use_hash_acl_matching = 1;
return error;