X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Facl%2Facl.c;h=611efbb7be6550afb359dcb2c6393c1355c04c60;hb=c29940c58de3e44c0c1dd5c4eda5e0268d963b14;hp=6657d370e63c383b959df91e5dce5ad8fc7a8f7f;hpb=cbbd08e1d41deae5745a83d1f941d4133e110826;p=vpp.git diff --git a/src/plugins/acl/acl.c b/src/plugins/acl/acl.c index 6657d370e63..611efbb7be6 100644 --- a/src/plugins/acl/acl.c +++ b/src/plugins/acl/acl.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include @@ -52,9 +51,8 @@ #include #undef vl_api_version -#include "node_in.h" -#include "node_out.h" #include "fa_node.h" +#include "hash_lookup.h" acl_main_t acl_main; @@ -65,6 +63,7 @@ acl_main_t acl_main; #define foreach_acl_plugin_api_msg \ _(ACL_PLUGIN_GET_VERSION, acl_plugin_get_version) \ +_(ACL_PLUGIN_CONTROL_PING, acl_plugin_control_ping) \ _(ACL_ADD_REPLACE, acl_add_replace) \ _(ACL_DEL, acl_del) \ _(ACL_INTERFACE_ADD_DEL, acl_interface_add_del) \ @@ -72,10 +71,13 @@ _(ACL_INTERFACE_SET_ACL_LIST, acl_interface_set_acl_list) \ _(ACL_DUMP, acl_dump) \ _(ACL_INTERFACE_LIST_DUMP, acl_interface_list_dump) \ _(MACIP_ACL_ADD, macip_acl_add) \ +_(MACIP_ACL_ADD_REPLACE, macip_acl_add_replace) \ _(MACIP_ACL_DEL, macip_acl_del) \ _(MACIP_ACL_INTERFACE_ADD_DEL, macip_acl_interface_add_del) \ _(MACIP_ACL_DUMP, macip_acl_dump) \ -_(MACIP_ACL_INTERFACE_GET, macip_acl_interface_get) +_(MACIP_ACL_INTERFACE_GET, macip_acl_interface_get) \ +_(MACIP_ACL_INTERFACE_LIST_DUMP, macip_acl_interface_list_dump) + /* *INDENT-OFF* */ VLIB_PLUGIN_REGISTER () = { @@ -84,6 +86,46 @@ VLIB_PLUGIN_REGISTER () = { }; /* *INDENT-ON* */ + +static void * +acl_set_heap(acl_main_t *am) +{ + if (0 == am->acl_mheap) { + am->acl_mheap = mheap_alloc (0 /* use VM */ , 2 << 29); + mheap_t *h = mheap_header (am->acl_mheap); + h->flags |= MHEAP_FLAG_THREAD_SAFE; + } + void *oldheap = clib_mem_set_heap(am->acl_mheap); + return oldheap; +} + +void +acl_plugin_acl_set_validate_heap(acl_main_t *am, int on) +{ + clib_mem_set_heap(acl_set_heap(am)); + mheap_t *h = mheap_header (am->acl_mheap); + if (on) { + h->flags |= MHEAP_FLAG_VALIDATE; + h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE; + mheap_validate(h); + } else { + h->flags &= ~MHEAP_FLAG_VALIDATE; + h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE; + } +} + +void +acl_plugin_acl_set_trace_heap(acl_main_t *am, int on) +{ + clib_mem_set_heap(acl_set_heap(am)); + mheap_t *h = mheap_header (am->acl_mheap); + if (on) { + h->flags |= MHEAP_FLAG_TRACE; + } else { + h->flags &= ~MHEAP_FLAG_TRACE; + } +} + static void vl_api_acl_plugin_get_version_t_handler (vl_api_acl_plugin_get_version_t * mp) { @@ -109,6 +151,20 @@ vl_api_acl_plugin_get_version_t_handler (vl_api_acl_plugin_get_version_t * mp) vl_msg_api_send_shmem (q, (u8 *) & rmp); } +static void +vl_api_acl_plugin_control_ping_t_handler (vl_api_acl_plugin_control_ping_t * mp) +{ + vl_api_acl_plugin_control_ping_reply_t *rmp; + acl_main_t *am = &acl_main; + int rv = 0; + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_ACL_PLUGIN_CONTROL_PING_REPLY, + ({ + rmp->vpe_pid = ntohl (getpid ()); + })); + /* *INDENT-ON* */ +} static int acl_add_list (u32 count, vl_api_acl_rule_t rules[], @@ -117,7 +173,7 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[], acl_main_t *am = &acl_main; acl_list_t *a; acl_rule_t *r; - acl_rule_t *acl_new_rules; + acl_rule_t *acl_new_rules = 0; int i; if (*acl_list_index != ~0) @@ -126,22 +182,24 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[], if (pool_is_free_index (am->acls, *acl_list_index)) { /* tried to replace a non-existent 
ACL, no point doing anything */ + clib_warning("acl-plugin-error: Trying to replace nonexistent ACL %d (tag %s)", *acl_list_index, tag); return -1; } } + if (0 == count) { + clib_warning("acl-plugin-warning: supplied no rules for ACL %d (tag %s)", *acl_list_index, tag); + } + + void *oldheap = acl_set_heap(am); /* Create and populate the rules */ - acl_new_rules = clib_mem_alloc_aligned (sizeof (acl_rule_t) * count, - CLIB_CACHE_LINE_BYTES); - if (!acl_new_rules) - { - /* Could not allocate rules. New or existing ACL - bail out regardless */ - return -1; - } + if (count > 0) + vec_validate(acl_new_rules, count-1); for (i = 0; i < count; i++) { - r = &acl_new_rules[i]; + r = vec_elt_at_index(acl_new_rules, i); + memset(r, 0, sizeof(*r)); r->is_permit = rules[i].is_permit; r->is_ipv6 = rules[i].is_ipv6; if (r->is_ipv6) @@ -176,13 +234,16 @@ acl_add_list (u32 count, vl_api_acl_rule_t rules[], else { a = am->acls + *acl_list_index; + hash_acl_delete(am, *acl_list_index); /* Get rid of the old rules */ - clib_mem_free (a->rules); + if (a->rules) + vec_free (a->rules); } a->rules = acl_new_rules; a->count = count; memcpy (a->tag, tag, sizeof (a->tag)); - + hash_acl_add(am, *acl_list_index); + clib_mem_set_heap (oldheap); return 0; } @@ -197,6 +258,20 @@ acl_del_list (u32 acl_list_index) return -1; } + if (acl_list_index < vec_len(am->input_sw_if_index_vec_by_acl)) { + if (vec_len(am->input_sw_if_index_vec_by_acl[acl_list_index]) > 0) { + /* ACL is applied somewhere inbound. Refuse to delete */ + return -1; + } + } + if (acl_list_index < vec_len(am->output_sw_if_index_vec_by_acl)) { + if (vec_len(am->output_sw_if_index_vec_by_acl[acl_list_index]) > 0) { + /* ACL is applied somewhere outbound. Refuse to delete */ + return -1; + } + } + + void *oldheap = acl_set_heap(am); /* delete any references to the ACL */ for (i = 0; i < vec_len (am->output_acl_vec_by_sw_if_index); i++) { @@ -228,14 +303,16 @@ acl_del_list (u32 acl_list_index) } } } + /* delete the hash table data */ + hash_acl_delete(am, acl_list_index); /* now we can delete the ACL itself */ a = &am->acls[acl_list_index]; if (a->rules) - { - clib_mem_free (a->rules); - } + vec_free (a->rules); + pool_put (am->acls, a); + clib_mem_set_heap (oldheap); return 0; } @@ -313,13 +390,13 @@ _(ether) __ __ __ __ __ __ v __ __ __ __ __ __ v __ __ v } static int -acl_classify_add_del_table_big (vnet_classify_main_t * cm, u8 * mask, +acl_classify_add_del_table_tiny (vnet_classify_main_t * cm, u8 * mask, u32 mask_len, u32 next_table_index, u32 miss_next_index, u32 * table_index, int is_add) { - u32 nbuckets = 65536; - u32 memory_size = 2 << 30; + u32 nbuckets = 1; + u32 memory_size = 2 << 13; u32 skip = count_skip (mask, mask_len); u32 match = (mask_len / 16) - skip; u8 *skip_mask_ptr = mask + 16 * skip; @@ -328,13 +405,15 @@ acl_classify_add_del_table_big (vnet_classify_main_t * cm, u8 * mask, if (0 == match) match = 1; - - return vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, + void *oldheap = clib_mem_set_heap (cm->vlib_main->heap_base); + int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, memory_size, skip, match, next_table_index, miss_next_index, table_index, current_data_flag, current_data_offset, is_add, 1 /* delete_chain */); + clib_mem_set_heap (oldheap); + return ret; } static int @@ -354,12 +433,15 @@ acl_classify_add_del_table_small (vnet_classify_main_t * cm, u8 * mask, if (0 == match) match = 1; - return vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, + void *oldheap = clib_mem_set_heap 
(cm->vlib_main->heap_base); + int ret = vnet_classify_add_del_table (cm, skip_mask_ptr, nbuckets, memory_size, skip, match, next_table_index, miss_next_index, table_index, current_data_flag, current_data_offset, is_add, 1 /* delete_chain */); + clib_mem_set_heap (oldheap); + return ret; } @@ -369,12 +451,15 @@ acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index) vnet_classify_main_t *cm = &vnet_classify_main; u32 ip4_table_index = ~0; u32 ip6_table_index = ~0; + void *oldheap = acl_set_heap(am); vec_validate_init_empty (am->acl_ip4_input_classify_table_by_sw_if_index, sw_if_index, ~0); vec_validate_init_empty (am->acl_ip6_input_classify_table_by_sw_if_index, sw_if_index, ~0); + /* switch to global heap while calling vnet_* functions */ + clib_mem_set_heap (cm->vlib_main->heap_base); vnet_l2_input_classify_enable_disable (sw_if_index, 0); if (am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] != ~0) @@ -382,7 +467,7 @@ acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index) ip4_table_index = am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index]; am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] = ~0; - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip4, &ip4_table_index, 0); @@ -392,12 +477,12 @@ acl_unhook_l2_input_classify (acl_main_t * am, u32 sw_if_index) ip6_table_index = am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index]; am->acl_ip6_input_classify_table_by_sw_if_index[sw_if_index] = ~0; - acl_classify_add_del_table_big (cm, ip6_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip6, &ip6_table_index, 0); } - + clib_mem_set_heap (oldheap); return 0; } @@ -407,12 +492,16 @@ acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index) vnet_classify_main_t *cm = &vnet_classify_main; u32 ip4_table_index = ~0; u32 ip6_table_index = ~0; + void *oldheap = acl_set_heap(am); vec_validate_init_empty (am->acl_ip4_output_classify_table_by_sw_if_index, sw_if_index, ~0); vec_validate_init_empty (am->acl_ip6_output_classify_table_by_sw_if_index, sw_if_index, ~0); + /* switch to global heap while calling vnet_* functions */ + clib_mem_set_heap (cm->vlib_main->heap_base); + vnet_l2_output_classify_enable_disable (sw_if_index, 0); if (am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] != ~0) @@ -420,7 +509,7 @@ acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index) ip4_table_index = am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index]; am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] = ~0; - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip4, &ip4_table_index, 0); @@ -430,12 +519,12 @@ acl_unhook_l2_output_classify (acl_main_t * am, u32 sw_if_index) ip6_table_index = am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index]; am->acl_ip6_output_classify_table_by_sw_if_index[sw_if_index] = ~0; - acl_classify_add_del_table_big (cm, ip6_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip6, &ip6_table_index, 0); } - + clib_mem_set_heap (oldheap); return 0; } @@ -447,27 +536,29 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) u32 ip6_table_index = ~0; int rv; + void 
*prevheap = clib_mem_set_heap (cm->vlib_main->heap_base); + /* in case there were previous tables attached */ acl_unhook_l2_input_classify (am, sw_if_index); rv = - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip4, &ip4_table_index, 1); if (rv) - return rv; + goto done; rv = - acl_classify_add_del_table_big (cm, ip6_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip6, &ip6_table_index, 1); if (rv) { - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } rv = vnet_l2_input_classify_set_tables (sw_if_index, ip4_table_index, @@ -477,15 +568,15 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) sw_if_index, ip4_table_index, ip6_table_index); if (rv) { - acl_classify_add_del_table_big (cm, ip6_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip6, &ip6_table_index, 0); - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_input_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } am->acl_ip4_input_classify_table_by_sw_if_index[sw_if_index] = @@ -494,6 +585,8 @@ acl_hook_l2_input_classify (acl_main_t * am, u32 sw_if_index) ip6_table_index; vnet_l2_input_classify_enable_disable (sw_if_index, 1); +done: + clib_mem_set_heap (prevheap); return rv; } @@ -505,27 +598,29 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) u32 ip6_table_index = ~0; int rv; + void *prevheap = clib_mem_set_heap (cm->vlib_main->heap_base); + /* in case there were previous tables attached */ acl_unhook_l2_output_classify (am, sw_if_index); rv = - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip4, &ip4_table_index, 1); if (rv) - return rv; + goto done; rv = - acl_classify_add_del_table_big (cm, ip6_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip6, &ip6_table_index, 1); if (rv) { - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } rv = vnet_l2_output_classify_set_tables (sw_if_index, ip4_table_index, @@ -535,15 +630,15 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) sw_if_index, ip4_table_index, ip6_table_index); if (rv) { - acl_classify_add_del_table_big (cm, ip6_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip6_5tuple_mask, sizeof (ip6_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip6, &ip6_table_index, 0); - acl_classify_add_del_table_big (cm, ip4_5tuple_mask, + acl_classify_add_del_table_tiny (cm, ip4_5tuple_mask, sizeof (ip4_5tuple_mask) - 1, ~0, am->l2_output_classify_next_acl_ip4, &ip4_table_index, 0); - return rv; + goto done; } am->acl_ip4_output_classify_table_by_sw_if_index[sw_if_index] = @@ -552,6 +647,8 @@ acl_hook_l2_output_classify (acl_main_t * am, u32 sw_if_index) 
ip6_table_index; vnet_l2_output_classify_enable_disable (sw_if_index, 1); +done: + clib_mem_set_heap (prevheap); return rv; } @@ -607,34 +704,79 @@ acl_interface_out_enable_disable (acl_main_t * am, u32 sw_if_index, return rv; } +static int +acl_is_not_defined(acl_main_t *am, u32 acl_list_index) +{ + return (pool_is_free_index (am->acls, acl_list_index)); +} + static int acl_interface_add_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) { acl_main_t *am = &acl_main; + if (acl_is_not_defined(am, acl_list_index)) { + /* ACL is not defined. Can not apply */ + return -1; + } + void *oldheap = acl_set_heap(am); + if (is_input) { vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); + + u32 index = vec_search(am->input_acl_vec_by_sw_if_index[sw_if_index], acl_list_index); + if (index < vec_len(am->input_acl_vec_by_sw_if_index[sw_if_index])) { + clib_warning("ACL %d is already applied inbound on sw_if_index %d (index %d)", + acl_list_index, sw_if_index, index); + /* the entry is already there */ + clib_mem_set_heap (oldheap); + return -1; + } + /* if there was no ACL applied before, enable the ACL processing */ + if (vec_len(am->input_acl_vec_by_sw_if_index[sw_if_index]) == 0) { + acl_interface_in_enable_disable (am, sw_if_index, 1); + } vec_add (am->input_acl_vec_by_sw_if_index[sw_if_index], &acl_list_index, 1); - acl_interface_in_enable_disable (am, sw_if_index, 1); + vec_validate (am->input_sw_if_index_vec_by_acl, acl_list_index); + vec_add (am->input_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index, + 1); } else { vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index); + + u32 index = vec_search(am->output_acl_vec_by_sw_if_index[sw_if_index], acl_list_index); + if (index < vec_len(am->output_acl_vec_by_sw_if_index[sw_if_index])) { + clib_warning("ACL %d is already applied outbound on sw_if_index %d (index %d)", + acl_list_index, sw_if_index, index); + /* the entry is already there */ + clib_mem_set_heap (oldheap); + return -1; + } + /* if there was no ACL applied before, enable the ACL processing */ + if (vec_len(am->output_acl_vec_by_sw_if_index[sw_if_index]) == 0) { + acl_interface_out_enable_disable (am, sw_if_index, 1); + } vec_add (am->output_acl_vec_by_sw_if_index[sw_if_index], &acl_list_index, 1); - acl_interface_out_enable_disable (am, sw_if_index, 1); + vec_validate (am->output_sw_if_index_vec_by_acl, acl_list_index); + vec_add (am->output_sw_if_index_vec_by_acl[acl_list_index], &sw_if_index, + 1); } + clib_mem_set_heap (oldheap); return 0; } + static int acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) { acl_main_t *am = &acl_main; int i; int rv = -1; + void *oldheap = acl_set_heap(am); if (is_input) { vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); @@ -649,6 +791,16 @@ acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, u32 acl_list_index) break; } } + + if (acl_list_index < vec_len(am->input_sw_if_index_vec_by_acl)) { + u32 index = vec_search(am->input_sw_if_index_vec_by_acl[acl_list_index], sw_if_index); + if (index < vec_len(am->input_sw_if_index_vec_by_acl[acl_list_index])) { + hash_acl_unapply(am, sw_if_index, is_input, acl_list_index); + vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index], index); + } + } + + /* If there is no more ACLs applied on an interface, disable ACL processing */ if (0 == vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index])) { acl_interface_in_enable_disable (am, sw_if_index, 0); @@ -668,11 +820,22 @@ acl_interface_del_inout_acl (u32 sw_if_index, u8 is_input, 
u32 acl_list_index) break; } } + + if (acl_list_index < vec_len(am->output_sw_if_index_vec_by_acl)) { + u32 index = vec_search(am->output_sw_if_index_vec_by_acl[acl_list_index], sw_if_index); + if (index < vec_len(am->output_sw_if_index_vec_by_acl[acl_list_index])) { + hash_acl_unapply(am, sw_if_index, is_input, acl_list_index); + vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index], index); + } + } + + /* If there is no more ACLs applied on an interface, disable ACL processing */ if (0 == vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index])) { acl_interface_out_enable_disable (am, sw_if_index, 0); } } + clib_mem_set_heap (oldheap); return rv; } @@ -680,18 +843,49 @@ static void acl_interface_reset_inout_acls (u32 sw_if_index, u8 is_input) { acl_main_t *am = &acl_main; + int i; + void *oldheap = acl_set_heap(am); if (is_input) { - acl_interface_in_enable_disable (am, sw_if_index, 0); vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); + if (vec_len(am->input_acl_vec_by_sw_if_index[sw_if_index]) > 0) { + acl_interface_in_enable_disable (am, sw_if_index, 0); + } + + for(i = vec_len(am->input_acl_vec_by_sw_if_index[sw_if_index])-1; i>=0; i--) { + u32 acl_list_index = am->input_acl_vec_by_sw_if_index[sw_if_index][i]; + hash_acl_unapply(am, sw_if_index, is_input, acl_list_index); + if (acl_list_index < vec_len(am->input_sw_if_index_vec_by_acl)) { + u32 index = vec_search(am->input_sw_if_index_vec_by_acl[acl_list_index], sw_if_index); + if (index < vec_len(am->input_sw_if_index_vec_by_acl[acl_list_index])) { + vec_del1 (am->input_sw_if_index_vec_by_acl[acl_list_index], index); + } + } + } + vec_reset_length (am->input_acl_vec_by_sw_if_index[sw_if_index]); } else { - acl_interface_out_enable_disable (am, sw_if_index, 0); vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index); + if (vec_len(am->output_acl_vec_by_sw_if_index[sw_if_index]) > 0) { + acl_interface_out_enable_disable (am, sw_if_index, 0); + } + + for(i = vec_len(am->output_acl_vec_by_sw_if_index[sw_if_index])-1; i>=0; i--) { + u32 acl_list_index = am->output_acl_vec_by_sw_if_index[sw_if_index][i]; + hash_acl_unapply(am, sw_if_index, is_input, acl_list_index); + if (acl_list_index < vec_len(am->output_sw_if_index_vec_by_acl)) { + u32 index = vec_search(am->output_sw_if_index_vec_by_acl[acl_list_index], sw_if_index); + if (index < vec_len(am->output_sw_if_index_vec_by_acl[acl_list_index])) { + vec_del1 (am->output_sw_if_index_vec_by_acl[acl_list_index], index); + } + } + } + vec_reset_length (am->output_acl_vec_by_sw_if_index[sw_if_index]); } + clib_mem_set_heap (oldheap); } static int @@ -699,279 +893,28 @@ acl_interface_add_del_inout_acl (u32 sw_if_index, u8 is_add, u8 is_input, u32 acl_list_index) { int rv = -1; + acl_main_t *am = &acl_main; + void *oldheap = acl_set_heap(am); if (is_add) { rv = acl_interface_add_inout_acl (sw_if_index, is_input, acl_list_index); + if (rv == 0) + { + hash_acl_apply(am, sw_if_index, is_input, acl_list_index); + } } else { + hash_acl_unapply(am, sw_if_index, is_input, acl_list_index); rv = acl_interface_del_inout_acl (sw_if_index, is_input, acl_list_index); } + clib_mem_set_heap (oldheap); return rv; } -static void * -get_ptr_to_offset (vlib_buffer_t * b0, int offset) -{ - u8 *p = vlib_buffer_get_current (b0) + offset; - return p; -} - -static u8 -acl_get_l4_proto (vlib_buffer_t * b0, int node_is_ip6) -{ - u8 proto; - int proto_offset; - if (node_is_ip6) - { - proto_offset = 20; - } - else - { - proto_offset = 23; - } - proto = *((u8 *) vlib_buffer_get_current (b0) + 
proto_offset); - return proto; -} - -static int -acl_match_addr (ip46_address_t * addr1, ip46_address_t * addr2, int prefixlen, - int is_ip6) -{ - if (prefixlen == 0) - { - /* match any always succeeds */ - return 1; - } - if (is_ip6) - { - if (memcmp (addr1, addr2, prefixlen / 8)) - { - /* If the starting full bytes do not match, no point in bittwidling the thumbs further */ - return 0; - } - if (prefixlen % 8) - { - u8 b1 = *((u8 *) addr1 + 1 + prefixlen / 8); - u8 b2 = *((u8 *) addr2 + 1 + prefixlen / 8); - u8 mask0 = (0xff - ((1 << (8 - (prefixlen % 8))) - 1)); - return (b1 & mask0) == b2; - } - else - { - /* The prefix fits into integer number of bytes, so nothing left to do */ - return 1; - } - } - else - { - uint32_t a1 = ntohl (addr1->ip4.as_u32); - uint32_t a2 = ntohl (addr2->ip4.as_u32); - uint32_t mask0 = 0xffffffff - ((1 << (32 - prefixlen)) - 1); - return (a1 & mask0) == a2; - } -} - -static int -acl_match_port (u16 port, u16 port_first, u16 port_last, int is_ip6) -{ - return ((port >= port_first) && (port <= port_last)); -} - -static int -acl_packet_match (acl_main_t * am, u32 acl_index, vlib_buffer_t * b0, - u8 * r_action, int *r_is_ip6, u32 * r_acl_match_p, - u32 * r_rule_match_p, u32 * trace_bitmap) -{ - ethernet_header_t *h0; - u16 type0; - - ip46_address_t src, dst; - int is_ip6; - int is_ip4; - u8 proto; - u16 src_port = 0; - u16 dst_port = 0; - u8 tcp_flags = 0; - int i; - acl_list_t *a; - acl_rule_t *r; - - h0 = vlib_buffer_get_current (b0); - type0 = clib_net_to_host_u16 (h0->type); - is_ip4 = (type0 == ETHERNET_TYPE_IP4); - is_ip6 = (type0 == ETHERNET_TYPE_IP6); - - if (!(is_ip4 || is_ip6)) - { - return 0; - } - /* The bunch of hardcoded offsets here is intentional to get rid of them - ASAP, when getting to a faster matching code */ - if (is_ip4) - { - clib_memcpy (&src.ip4, get_ptr_to_offset (b0, 26), 4); - clib_memcpy (&dst.ip4, get_ptr_to_offset (b0, 30), 4); - proto = acl_get_l4_proto (b0, 0); - if (1 == proto) - { - *trace_bitmap |= 0x00000001; - /* type */ - src_port = ((u16) (*(u8 *) get_ptr_to_offset (b0, 34))); - /* code */ - dst_port = ((u16) (*(u8 *) get_ptr_to_offset (b0, 35))); - } else { - /* assume TCP/UDP */ - src_port = ntohs ((u16) (*(u16 *) get_ptr_to_offset (b0, 34))); - dst_port = ntohs ((u16) (*(u16 *) get_ptr_to_offset (b0, 36))); - /* UDP gets ability to check on an oddball data byte as a bonus */ - tcp_flags = *(u8 *) get_ptr_to_offset (b0, 14 + 20 + 13); - } - } - else /* is_ipv6 implicitly */ - { - clib_memcpy (&src, get_ptr_to_offset (b0, 22), 16); - clib_memcpy (&dst, get_ptr_to_offset (b0, 38), 16); - proto = acl_get_l4_proto (b0, 1); - if (58 == proto) - { - *trace_bitmap |= 0x00000002; - /* type */ - src_port = (u16) (*(u8 *) get_ptr_to_offset (b0, 54)); - /* code */ - dst_port = (u16) (*(u8 *) get_ptr_to_offset (b0, 55)); - } - else - { - /* assume TCP/UDP */ - src_port = ntohs ((u16) (*(u16 *) get_ptr_to_offset (b0, 54))); - dst_port = ntohs ((u16) (*(u16 *) get_ptr_to_offset (b0, 56))); - tcp_flags = *(u8 *) get_ptr_to_offset (b0, 14 + 40 + 13); - } - } - if (pool_is_free_index (am->acls, acl_index)) - { - if (r_acl_match_p) - *r_acl_match_p = acl_index; - if (r_rule_match_p) - *r_rule_match_p = -1; - /* the ACL does not exist but is used for policy. Block traffic. 
*/ - return 0; - } - a = am->acls + acl_index; - for (i = 0; i < a->count; i++) - { - r = a->rules + i; - if (is_ip6 != r->is_ipv6) - { - continue; - } - if (!acl_match_addr (&dst, &r->dst, r->dst_prefixlen, is_ip6)) - continue; - if (!acl_match_addr (&src, &r->src, r->src_prefixlen, is_ip6)) - continue; - if (r->proto) - { - if (proto != r->proto) - continue; - if (!acl_match_port - (src_port, r->src_port_or_type_first, r->src_port_or_type_last, - is_ip6)) - continue; - if (!acl_match_port - (dst_port, r->dst_port_or_code_first, r->dst_port_or_code_last, - is_ip6)) - continue; - /* No need for check of proto == TCP, since in other rules both fields should be zero, so this match will succeed */ - if ((tcp_flags & r->tcp_flags_mask) != r->tcp_flags_value) - continue; - } - /* everything matches! */ - *r_action = r->is_permit; - *r_is_ip6 = is_ip6; - if (r_acl_match_p) - *r_acl_match_p = acl_index; - if (r_rule_match_p) - *r_rule_match_p = i; - return 1; - } - return 0; -} - -void -input_acl_packet_match (u32 sw_if_index, vlib_buffer_t * b0, u32 * nextp, - u32 * acl_match_p, u32 * rule_match_p, - u32 * trace_bitmap) -{ - acl_main_t *am = &acl_main; - uint8_t action = 0; - int is_ip6 = 0; - int i; - vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); - for (i = 0; i < vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]); - i++) - { - if (acl_packet_match - (am, am->input_acl_vec_by_sw_if_index[sw_if_index][i], b0, &action, - &is_ip6, acl_match_p, rule_match_p, trace_bitmap)) - { - if (is_ip6) - { - *nextp = am->acl_in_ip6_match_next[action]; - } - else - { - *nextp = am->acl_in_ip4_match_next[action]; - } - return; - } - } - if (vec_len (am->input_acl_vec_by_sw_if_index[sw_if_index]) > 0) - { - /* If there are ACLs and none matched, deny by default */ - *nextp = 0; - } - -} - -void -output_acl_packet_match (u32 sw_if_index, vlib_buffer_t * b0, u32 * nextp, - u32 * acl_match_p, u32 * rule_match_p, - u32 * trace_bitmap) -{ - acl_main_t *am = &acl_main; - uint8_t action = 0; - int is_ip6 = 0; - int i; - vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index); - for (i = 0; i < vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]); - i++) - { - if (acl_packet_match - (am, am->output_acl_vec_by_sw_if_index[sw_if_index][i], b0, &action, - &is_ip6, acl_match_p, rule_match_p, trace_bitmap)) - { - if (is_ip6) - { - *nextp = am->acl_out_ip6_match_next[action]; - } - else - { - *nextp = am->acl_out_ip4_match_next[action]; - } - return; - } - } - if (vec_len (am->output_acl_vec_by_sw_if_index[sw_if_index]) > 0) - { - /* If there are ACLs and none matched, deny by default */ - *nextp = 0; - } -} - typedef struct { u8 is_ipv6; @@ -1007,8 +950,22 @@ macip_find_match_type (macip_match_type_t * mv, u8 * mac_mask, u8 prefix_len, static int match_type_metric (macip_match_type_t * m) { - /* FIXME: count the ones in the MAC mask as well, check how well this heuristic works in real life */ - return m->prefix_len + m->is_ipv6 + 10 * m->count; + unsigned int mac_bits_set = 0; + unsigned int mac_byte; + int i; + for (i=0; i<6; i++) + { + mac_byte = m->mac_mask[i]; + for (; mac_byte; mac_byte >>= 1) + mac_bits_set += mac_byte & 1; + } + /* + * Attempt to place the more specific and the more used rules on top. + * There are obvious caveat corner cases to this, but they do not + * seem to be sensible in real world (e.g. specific IPv4 with wildcard MAC + * going with a wildcard IPv4 with a specific MAC). 
+ */ + return m->prefix_len + mac_bits_set + m->is_ipv6 + 10 * m->count; } static int @@ -1052,7 +1009,7 @@ macip_create_classify_tables (acl_main_t * am, u32 macip_acl_index) match_type_index = vec_len (mvec); vec_validate (mvec, match_type_index); memcpy (mvec[match_type_index].mac_mask, - a->rules[match_type_index].src_mac_mask, 6); + a->rules[i].src_mac_mask, 6); mvec[match_type_index].prefix_len = a->rules[i].src_prefixlen; mvec[match_type_index].is_ipv6 = a->rules[i].is_ipv6; mvec[match_type_index].table_index = ~0; @@ -1148,6 +1105,7 @@ macip_create_classify_tables (acl_main_t * am, u32 macip_acl_index) macip_find_match_type (mvec, a->rules[i].src_mac_mask, a->rules[i].src_prefixlen, a->rules[i].is_ipv6); + ASSERT(match_type_index != ~0); /* add session to table mvec[match_type_index].table_index; */ vnet_classify_add_del_session (cm, mvec[match_type_index].table_index, mask, a->rules[i].is_permit ? ~0 : 0, i, @@ -1199,18 +1157,28 @@ macip_acl_add_list (u32 count, vl_api_macip_acl_rule_t rules[], acl_main_t *am = &acl_main; macip_acl_list_t *a; macip_acl_rule_t *r; - macip_acl_rule_t *acl_new_rules; + macip_acl_rule_t *acl_new_rules = 0; int i; - /* Create and populate the rules */ - acl_new_rules = clib_mem_alloc_aligned (sizeof (macip_acl_rule_t) * count, - CLIB_CACHE_LINE_BYTES); - if (!acl_new_rules) + if (*acl_list_index != ~0) { - /* Could not allocate rules. New or existing ACL - bail out regardless */ - return -1; + /* They supplied some number, let's see if this MACIP ACL exists */ + if (pool_is_free_index (am->macip_acls, *acl_list_index)) + { + /* tried to replace a non-existent ACL, no point doing anything */ + clib_warning("acl-plugin-error: Trying to replace nonexistent MACIP ACL %d (tag %s)", *acl_list_index, tag); + return -1; + } } + if (0 == count) { + clib_warning("acl-plugin-warning: Trying to create empty MACIP ACL (tag %s)", tag); + } + void *oldheap = acl_set_heap(am); + /* Create and populate the rules */ + if (count > 0) + vec_validate(acl_new_rules, count-1); + for (i = 0; i < count; i++) { r = &acl_new_rules[i]; @@ -1225,11 +1193,23 @@ macip_acl_add_list (u32 count, vl_api_macip_acl_rule_t rules[], r->src_prefixlen = rules[i].src_ip_prefix_len; } - /* Get ACL index */ - pool_get_aligned (am->macip_acls, a, CLIB_CACHE_LINE_BYTES); - memset (a, 0, sizeof (*a)); - /* Will return the newly allocated ACL index */ - *acl_list_index = a - am->macip_acls; + if (~0 == *acl_list_index) + { + /* Get ACL index */ + pool_get_aligned (am->macip_acls, a, CLIB_CACHE_LINE_BYTES); + memset (a, 0, sizeof (*a)); + /* Will return the newly allocated ACL index */ + *acl_list_index = a - am->macip_acls; + } + else + { + a = &am->macip_acls[*acl_list_index]; + if (a->rules) + { + vec_free (a->rules); + } + macip_destroy_classify_tables (am, *acl_list_index); + } a->rules = acl_new_rules; a->count = count; @@ -1237,7 +1217,7 @@ macip_acl_add_list (u32 count, vl_api_macip_acl_rule_t rules[], /* Create and populate the classifer tables */ macip_create_classify_tables (am, *acl_list_index); - + clib_mem_set_heap (oldheap); return 0; } @@ -1250,7 +1230,9 @@ macip_acl_interface_del_acl (acl_main_t * am, u32 sw_if_index) int rv; u32 macip_acl_index; macip_acl_list_t *a; + void *oldheap = acl_set_heap(am); vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0); + clib_mem_set_heap (oldheap); macip_acl_index = am->macip_acl_by_sw_if_index[sw_if_index]; /* No point in deleting MACIP ACL which is not applied */ if (~0 == macip_acl_index) @@ -1277,12 +1259,15 @@ 
macip_acl_interface_add_acl (acl_main_t * am, u32 sw_if_index, { return -1; } + void *oldheap = acl_set_heap(am); a = &am->macip_acls[macip_acl_index]; vec_validate_init_empty (am->macip_acl_by_sw_if_index, sw_if_index, ~0); /* If there already a MACIP ACL applied, unapply it */ if (~0 != am->macip_acl_by_sw_if_index[sw_if_index]) macip_acl_interface_del_acl(am, sw_if_index); am->macip_acl_by_sw_if_index[sw_if_index] = macip_acl_index; + clib_mem_set_heap (oldheap); + /* Apply the classifier tables for L2 ACLs */ rv = vnet_set_input_acl_intfc (am->vlib_main, sw_if_index, a->ip4_table_index, @@ -1294,6 +1279,7 @@ static int macip_acl_del_list (u32 acl_list_index) { acl_main_t *am = &acl_main; + void *oldheap = acl_set_heap(am); macip_acl_list_t *a; int i; if (pool_is_free_index (am->macip_acls, acl_list_index)) @@ -1317,9 +1303,10 @@ macip_acl_del_list (u32 acl_list_index) a = &am->macip_acls[acl_list_index]; if (a->rules) { - clib_mem_free (a->rules); + vec_free (a->rules); } pool_put (am->macip_acls, a); + clib_mem_set_heap (oldheap); return 0; } @@ -1329,6 +1316,7 @@ macip_acl_interface_add_del_acl (u32 sw_if_index, u8 is_add, u32 acl_list_index) { acl_main_t *am = &acl_main; + void *oldheap = acl_set_heap(am); int rv = -1; if (is_add) { @@ -1338,9 +1326,38 @@ macip_acl_interface_add_del_acl (u32 sw_if_index, u8 is_add, { rv = macip_acl_interface_del_acl (am, sw_if_index); } + clib_mem_set_heap (oldheap); return rv; } +/* + * If the client does not allocate enough memory for a variable-length + * message, and then proceed to use it as if the full memory allocated, + * absent the check we happily consume that on the VPP side, and go + * along as if nothing happened. However, the resulting + * effects range from just garbage in the API decode + * (because the decoder snoops too far), to potential memory + * corruptions. + * + * This verifies that the actual length of the message is + * at least expected_len, and complains loudly if it is not. + * + * A failing check here is 100% a software bug on the API user side, + * so we might as well yell. 
+ * + */ +static int verify_message_len(void *mp, u32 expected_len, char *where) +{ + u32 supplied_len = vl_msg_api_get_msg_length (mp); + if (supplied_len < expected_len) { + clib_warning("%s: Supplied message length %d is less than expected %d", + where, supplied_len, expected_len); + return 0; + } else { + return 1; + } +} + /* API message handler */ static void vl_api_acl_add_replace_t_handler (vl_api_acl_add_replace_t * mp) @@ -1349,8 +1366,14 @@ vl_api_acl_add_replace_t_handler (vl_api_acl_add_replace_t * mp) acl_main_t *am = &acl_main; int rv; u32 acl_list_index = ntohl (mp->acl_index); + u32 acl_count = ntohl (mp->count); + u32 expected_len = sizeof(*mp) + acl_count*sizeof(mp->r[0]); - rv = acl_add_list (ntohl (mp->count), mp->r, &acl_list_index, mp->tag); + if (verify_message_len(mp, expected_len, "acl_add_replace")) { + rv = acl_add_list (acl_count, mp->r, &acl_list_index, mp->tag); + } else { + rv = VNET_API_ERROR_INVALID_VALUE; + } /* *INDENT-OFF* */ REPLY_MACRO2(VL_API_ACL_ADD_REPLACE_REPLY, @@ -1411,9 +1434,18 @@ vl_api_acl_interface_set_acl_list_t_handler for (i = 0; i < mp->count; i++) { - acl_interface_add_del_inout_acl (sw_if_index, 1, (i < mp->n_input), - ntohl (mp->acls[i])); + if(acl_is_not_defined(am, ntohl (mp->acls[i]))) { + /* ACL does not exist, so we can not apply it */ + rv = -1; + } } + if (0 == rv) { + for (i = 0; i < mp->count; i++) + { + acl_interface_add_del_inout_acl (sw_if_index, 1, (i < mp->n_input), + ntohl (mp->acls[i])); + } + } } REPLY_MACRO (VL_API_ACL_INTERFACE_SET_ACL_LIST_REPLY); @@ -1453,6 +1485,7 @@ send_acl_details (acl_main_t * am, unix_shared_memory_queue_t * q, vl_api_acl_rule_t *rules; int i; int msg_size = sizeof (*mp) + sizeof (mp->r[0]) * acl->count; + void *oldheap = acl_set_heap(am); mp = vl_msg_api_alloc (msg_size); memset (mp, 0, msg_size); @@ -1471,6 +1504,7 @@ send_acl_details (acl_main_t * am, unix_shared_memory_queue_t * q, } clib_warning("Sending acl details for ACL index %d", ntohl(mp->acl_index)); + clib_mem_set_heap (oldheap); vl_msg_api_send_shmem (q, (u8 *) & mp); } @@ -1529,6 +1563,7 @@ send_acl_interface_list_details (acl_main_t * am, int n_output; int count; int i = 0; + void *oldheap = acl_set_heap(am); vec_validate (am->input_acl_vec_by_sw_if_index, sw_if_index); vec_validate (am->output_acl_vec_by_sw_if_index, sw_if_index); @@ -1559,7 +1594,7 @@ send_acl_interface_list_details (acl_main_t * am, mp->acls[n_input + i] = htonl (am->output_acl_vec_by_sw_if_index[sw_if_index][i]); } - + clib_mem_set_heap (oldheap); vl_msg_api_send_shmem (q, (u8 *) & mp); } @@ -1606,9 +1641,14 @@ vl_api_macip_acl_add_t_handler (vl_api_macip_acl_add_t * mp) acl_main_t *am = &acl_main; int rv; u32 acl_list_index = ~0; + u32 acl_count = ntohl (mp->count); + u32 expected_len = sizeof(*mp) + acl_count*sizeof(mp->r[0]); - rv = - macip_acl_add_list (ntohl (mp->count), mp->r, &acl_list_index, mp->tag); + if (verify_message_len(mp, expected_len, "macip_acl_add")) { + rv = macip_acl_add_list (acl_count, mp->r, &acl_list_index, mp->tag); + } else { + rv = VNET_API_ERROR_INVALID_VALUE; + } /* *INDENT-OFF* */ REPLY_MACRO2(VL_API_MACIP_ACL_ADD_REPLY, @@ -1618,6 +1658,30 @@ vl_api_macip_acl_add_t_handler (vl_api_macip_acl_add_t * mp) /* *INDENT-ON* */ } +static void +vl_api_macip_acl_add_replace_t_handler (vl_api_macip_acl_add_replace_t * mp) +{ + vl_api_macip_acl_add_replace_reply_t *rmp; + acl_main_t *am = &acl_main; + int rv; + u32 acl_list_index = ntohl (mp->acl_index); + u32 acl_count = ntohl (mp->count); + u32 expected_len = sizeof(*mp) + 
acl_count*sizeof(mp->r[0]); + + if (verify_message_len(mp, expected_len, "macip_acl_add_replace")) { + rv = macip_acl_add_list (acl_count, mp->r, &acl_list_index, mp->tag); + } else { + rv = VNET_API_ERROR_INVALID_VALUE; + } + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_MACIP_ACL_ADD_REPLACE_REPLY, + ({ + rmp->acl_index = htonl(acl_list_index); + })); + /* *INDENT-ON* */ +} + static void vl_api_macip_acl_del_t_handler (vl_api_macip_acl_del_t * mp) { @@ -1767,6 +1831,66 @@ vl_api_macip_acl_interface_get_t_handler (vl_api_macip_acl_interface_get_t * vl_msg_api_send_shmem (q, (u8 *) & rmp); } +static void +send_macip_acl_interface_list_details (acl_main_t * am, + unix_shared_memory_queue_t * q, + u32 sw_if_index, + u32 acl_index, + u32 context) +{ + vl_api_macip_acl_interface_list_details_t *rmp; + /* at this time there is only ever 1 mac ip acl per interface */ + int msg_size = sizeof (*rmp) + sizeof (rmp->acls[0]); + + rmp = vl_msg_api_alloc (msg_size); + memset (rmp, 0, msg_size); + rmp->_vl_msg_id = ntohs (VL_API_MACIP_ACL_INTERFACE_LIST_DETAILS + am->msg_id_base); + + /* fill in the message */ + rmp->context = context; + rmp->count = 1; + rmp->sw_if_index = htonl (sw_if_index); + rmp->acls[0] = htonl (acl_index); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +static void +vl_api_macip_acl_interface_list_dump_t_handler (vl_api_macip_acl_interface_list_dump_t *mp) +{ + unix_shared_memory_queue_t *q; + acl_main_t *am = &acl_main; + u32 sw_if_index = ntohl (mp->sw_if_index); + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + if (sw_if_index == ~0) + { + vec_foreach_index(sw_if_index, am->macip_acl_by_sw_if_index) + { + if (~0 != am->macip_acl_by_sw_if_index[sw_if_index]) + { + send_macip_acl_interface_list_details(am, q, sw_if_index, + am->macip_acl_by_sw_if_index[sw_if_index], + mp->context); + } + } + } + else + { + if (vec_len(am->macip_acl_by_sw_if_index) > sw_if_index) + { + send_macip_acl_interface_list_details(am, q, sw_if_index, + am->macip_acl_by_sw_if_index[sw_if_index], + mp->context); + } + } +} + /* Set up the API message handling tables */ static clib_error_t * acl_plugin_api_hookup (vlib_main_t * vm) @@ -1799,67 +1923,7 @@ setup_message_id_table (acl_main_t * am, api_main_t * apim) #undef _ } -u32 -register_match_action_nexts (u32 next_in_ip4, u32 next_in_ip6, - u32 next_out_ip4, u32 next_out_ip6) -{ - acl_main_t *am = &acl_main; - if (am->n_match_actions == 255) - { - return ~0; - } - u32 act = am->n_match_actions; - am->n_match_actions++; - am->acl_in_ip4_match_next[act] = next_in_ip4; - am->acl_in_ip6_match_next[act] = next_in_ip6; - am->acl_out_ip4_match_next[act] = next_out_ip4; - am->acl_out_ip6_match_next[act] = next_out_ip6; - return act; -} - -void -acl_setup_nodes (void) -{ - vlib_main_t *vm = vlib_get_main (); - acl_main_t *am = &acl_main; - vlib_node_t *n; - - n = vlib_get_node_by_name (vm, (u8 *) "l2-input-classify"); - am->l2_input_classify_next_acl_old = - vlib_node_add_next_with_slot (vm, n->index, acl_in_node.index, ~0); - n = vlib_get_node_by_name (vm, (u8 *) "l2-output-classify"); - am->l2_output_classify_next_acl_old = - vlib_node_add_next_with_slot (vm, n->index, acl_out_node.index, ~0); - - feat_bitmap_init_next_nodes (vm, acl_in_node.index, L2INPUT_N_FEAT, - l2input_get_feat_names (), - am->acl_in_node_feat_next_node_index); - - feat_bitmap_init_next_nodes (vm, acl_out_node.index, L2OUTPUT_N_FEAT, - l2output_get_feat_names (), - am->acl_out_node_feat_next_node_index); - - memset 
(&am->acl_in_ip4_match_next[0], 0, - sizeof (am->acl_in_ip4_match_next)); - memset (&am->acl_in_ip6_match_next[0], 0, - sizeof (am->acl_in_ip6_match_next)); - memset (&am->acl_out_ip4_match_next[0], 0, - sizeof (am->acl_out_ip4_match_next)); - memset (&am->acl_out_ip6_match_next[0], 0, - sizeof (am->acl_out_ip6_match_next)); - am->n_match_actions = 0; - - am->l2_input_classify_next_acl_ip4 = am->l2_input_classify_next_acl_old; - am->l2_input_classify_next_acl_ip6 = am->l2_input_classify_next_acl_old; - am->l2_output_classify_next_acl_ip4 = am->l2_output_classify_next_acl_old; - am->l2_output_classify_next_acl_ip6 = am->l2_output_classify_next_acl_old; - - register_match_action_nexts (0, 0, 0, 0); /* drop */ - register_match_action_nexts (~0, ~0, ~0, ~0); /* permit */ - register_match_action_nexts (ACL_IN_L2S_INPUT_IP4_ADD, ACL_IN_L2S_INPUT_IP6_ADD, ACL_OUT_L2S_OUTPUT_IP4_ADD, ACL_OUT_L2S_OUTPUT_IP6_ADD); /* permit + create session */ -} - -void +static void acl_setup_fa_nodes (void) { vlib_main_t *vm = vlib_get_main (); @@ -1871,9 +1935,9 @@ acl_setup_fa_nodes (void) n6 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-in-ip6-l2"); - am->fa_l2_input_classify_next_acl_ip4 = + am->l2_input_classify_next_acl_ip4 = vlib_node_add_next_with_slot (vm, n->index, n4->index, ~0); - am->fa_l2_input_classify_next_acl_ip6 = + am->l2_input_classify_next_acl_ip6 = vlib_node_add_next_with_slot (vm, n->index, n6->index, ~0); feat_bitmap_init_next_nodes (vm, n4->index, L2INPUT_N_FEAT, @@ -1889,9 +1953,9 @@ acl_setup_fa_nodes (void) n4 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-out-ip4-l2"); n6 = vlib_get_node_by_name (vm, (u8 *) "acl-plugin-out-ip6-l2"); - am->fa_l2_output_classify_next_acl_ip4 = + am->l2_output_classify_next_acl_ip4 = vlib_node_add_next_with_slot (vm, n->index, n4->index, ~0); - am->fa_l2_output_classify_next_acl_ip6 = + am->l2_output_classify_next_acl_ip6 = vlib_node_add_next_with_slot (vm, n->index, n6->index, ~0); feat_bitmap_init_next_nodes (vm, n4->index, L2OUTPUT_N_FEAT, @@ -1901,19 +1965,12 @@ acl_setup_fa_nodes (void) feat_bitmap_init_next_nodes (vm, n6->index, L2OUTPUT_N_FEAT, l2output_get_feat_names (), am->fa_acl_out_ip6_l2_node_feat_next_node_index); - - am->l2_input_classify_next_acl_ip4 = am->fa_l2_input_classify_next_acl_ip4; - am->l2_input_classify_next_acl_ip6 = am->fa_l2_input_classify_next_acl_ip6; - am->l2_output_classify_next_acl_ip4 = am->fa_l2_output_classify_next_acl_ip4; - am->l2_output_classify_next_acl_ip6 = am->fa_l2_output_classify_next_acl_ip6; - } -void +static void acl_set_timeout_sec(int timeout_type, u32 value) { acl_main_t *am = &acl_main; - l2sess_main_t *sm = &l2sess_main; clib_time_t *ct = &am->vlib_main->clib_time; if (timeout_type < ACL_N_TIMEOUTS) { @@ -1922,33 +1979,21 @@ acl_set_timeout_sec(int timeout_type, u32 value) clib_warning("Unknown timeout type %d", timeout_type); return; } - - switch(timeout_type) { - case ACL_TIMEOUT_UDP_IDLE: - sm->udp_session_idle_timeout = (u64)(((f64)value)/ct->seconds_per_clock); - break; - case ACL_TIMEOUT_TCP_IDLE: - sm->tcp_session_idle_timeout = (u64)(((f64)value)/ct->seconds_per_clock); - break; - case ACL_TIMEOUT_TCP_TRANSIENT: - sm->tcp_session_transient_timeout = (u64)(((f64)value)/ct->seconds_per_clock); - break; - default: - clib_warning("Unknown timeout type %d", timeout_type); - } + am->session_timeout[timeout_type] = (u64)(((f64)value)/ct->seconds_per_clock); } -void +static void acl_set_session_max_entries(u32 value) { acl_main_t *am = &acl_main; am->fa_conn_table_max_entries = value; } -int 
+static int acl_set_skip_ipv6_eh(u32 eh, u32 value) { acl_main_t *am = &acl_main; + if ((eh < 256) && (value < 2)) { am->fa_ipv6_known_eh_bitmap = clib_bitmap_set(am->fa_ipv6_known_eh_bitmap, eh, value); @@ -1963,15 +2008,25 @@ static clib_error_t * acl_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add) { acl_main_t *am = &acl_main; + if (0 == am->acl_mheap) { + /* ACL heap is not initialized, so definitely nothing to do. */ + return 0; + } if (0 == is_add) { vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index, ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, sw_if_index); + /* also unapply any ACLs in case the users did not do so. */ + macip_acl_interface_del_acl(am, sw_if_index); + acl_interface_reset_inout_acls (sw_if_index, 0); + acl_interface_reset_inout_acls (sw_if_index, 1); } return 0; } VNET_SW_INTERFACE_ADD_DEL_FUNCTION (acl_sw_interface_add_del); + + static clib_error_t * acl_set_aclplugin_fn (vlib_main_t * vm, unformat_input_t * input, @@ -1984,37 +2039,43 @@ acl_set_aclplugin_fn (vlib_main_t * vm, uword memory_size = 0; acl_main_t *am = &acl_main; - /* The new datapath is the default. This command exists out of precaution and for comparing the two */ - if (unformat (input, "l2-datapath")) { - if (unformat(input, "old")) { - am->l2_input_classify_next_acl_ip4 = am->l2_input_classify_next_acl_old; - am->l2_input_classify_next_acl_ip6 = am->l2_input_classify_next_acl_old; - am->l2_output_classify_next_acl_ip4 = am->l2_output_classify_next_acl_old; - am->l2_output_classify_next_acl_ip6 = am->l2_output_classify_next_acl_old; - goto done; - } - if (unformat(input, "new")) { - am->l2_input_classify_next_acl_ip4 = am->fa_l2_input_classify_next_acl_ip4; - am->l2_input_classify_next_acl_ip6 = am->fa_l2_input_classify_next_acl_ip6; - am->l2_output_classify_next_acl_ip4 = am->fa_l2_output_classify_next_acl_ip4; - am->l2_output_classify_next_acl_ip6 = am->fa_l2_output_classify_next_acl_ip6; - goto done; - } - goto done; - } if (unformat (input, "skip-ipv6-extension-header %u %u", &eh_val, &val)) { if(!acl_set_skip_ipv6_eh(eh_val, val)) { error = clib_error_return(0, "expecting eh=0..255, value=0..1"); } goto done; } - if (unformat (input, "session")) { - if (unformat (input, "clear")) { - acl_main_t *am = &acl_main; - vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index, - ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, ~0); - goto done; + if (unformat (input, "use-hash-acl-matching %u", &val)) + { + am->use_hash_acl_matching = (val !=0); + goto done; } + if (unformat (input, "l4-match-nonfirst-fragment %u", &val)) + { + am->l4_match_nonfirst_fragment = (val != 0); + goto done; + } + if (unformat (input, "heap")) + { + if (unformat(input, "main")) + { + if (unformat(input, "validate %u", &val)) + acl_plugin_acl_set_validate_heap(am, val); + else if (unformat(input, "trace %u", &val)) + acl_plugin_acl_set_trace_heap(am, val); + goto done; + } + else if (unformat(input, "hash")) + { + if (unformat(input, "validate %u", &val)) + acl_plugin_hash_acl_set_validate_heap(am, val); + else if (unformat(input, "trace %u", &val)) + acl_plugin_hash_acl_set_trace_heap(am, val); + goto done; + } + goto done; + } + if (unformat (input, "session")) { if (unformat (input, "table")) { /* The commands here are for tuning/testing. 
No user-serviceable parts inside */ if (unformat (input, "max-entries")) { @@ -2097,6 +2158,75 @@ done: return error; } +static u8 * +my_format_mac_address (u8 * s, va_list * args) +{ + u8 *a = va_arg (*args, u8 *); + return format (s, "%02x:%02x:%02x:%02x:%02x:%02x", + a[0], a[1], a[2], a[3], a[4], a[5]); +} + +static inline u8 * +my_macip_acl_rule_t_pretty_format (u8 *out, va_list *args) +{ + macip_acl_rule_t *a = va_arg (*args, macip_acl_rule_t *); + + out = format(out, "%s action %d ip %U/%d mac %U mask %U", + a->is_ipv6 ? "ipv6" : "ipv4", a->is_permit, + format_ip46_address, &a->src_ip_addr, IP46_TYPE_ANY, + a->src_prefixlen, + my_format_mac_address, a->src_mac, + my_format_mac_address, a->src_mac_mask); + return(out); +} + +static void +macip_acl_print(acl_main_t *am, u32 macip_acl_index) +{ + vlib_main_t * vm = am->vlib_main; + int i; + + /* Don't try to print someone else's memory */ + if (macip_acl_index > vec_len(am->macip_acls)) + return; + + macip_acl_list_t *a = vec_elt_at_index(am->macip_acls, macip_acl_index); + int free_pool_slot = pool_is_free_index(am->macip_acls, macip_acl_index); + + vlib_cli_output(vm, "MACIP acl_index: %d, count: %d (true len %d) tag {%s} is free pool slot: %d\n", + macip_acl_index, a->count, vec_len(a->rules), a->tag, free_pool_slot); + vlib_cli_output(vm, " ip4_table_index %d, ip6_table_index %d, l2_table_index %d\n", + a->ip4_table_index, a->ip6_table_index, a->l2_table_index); + for(i=0; irules); i++) + vlib_cli_output(vm, " rule %d: %U\n", i, my_macip_acl_rule_t_pretty_format, + vec_elt_at_index(a->rules, i)); + +} + +static clib_error_t * +acl_show_aclplugin_macip_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + clib_error_t *error = 0; + acl_main_t *am = &acl_main; + int i; + if (unformat (input, "interface")) + { + for(i=0; i < vec_len(am->macip_acl_by_sw_if_index); i++) + { + vlib_cli_output(vm, " sw_if_index %d: %d\n", i, vec_elt(am->macip_acl_by_sw_if_index, i)); + } + } + else if (unformat (input, "acl")) + { + for(i=0; i < vec_len(am->macip_acls); i++) + macip_acl_print(am, i); + } + return error; +} + + static clib_error_t * acl_show_aclplugin_fn (vlib_main_t * vm, unformat_input_t * input, @@ -2105,33 +2235,339 @@ acl_show_aclplugin_fn (vlib_main_t * vm, clib_error_t *error = 0; acl_main_t *am = &acl_main; vnet_interface_main_t *im = &am->vnet_main->interface_main; + u32 *pj; vnet_sw_interface_t *swif; if (unformat (input, "sessions")) { - u8 * out0 = 0; - pool_foreach (swif, im->sw_interfaces, - ({ - u32 sw_if_index = swif->sw_if_index; - u64 n_adds = sw_if_index < vec_len(am->fa_session_adds_by_sw_if_index) ? am->fa_session_adds_by_sw_if_index[sw_if_index] : 0; - u64 n_dels = sw_if_index < vec_len(am->fa_session_dels_by_sw_if_index) ? 
am->fa_session_dels_by_sw_if_index[sw_if_index] : 0; - out0 = format(out0, "sw_if_index %d: add %lu - del %lu = %lu\n", sw_if_index, n_adds, n_dels, n_adds - n_dels); - })); + u8 * out0 = format(0, ""); + u16 wk; + u32 show_bihash_verbose = 0; + u32 show_session_thread_id = ~0; + u32 show_session_session_index = ~0; + unformat (input, "thread %u index %u", &show_session_thread_id, &show_session_session_index); + unformat (input, "verbose %u", &show_bihash_verbose); + { + u64 n_adds = am->fa_session_total_adds; + u64 n_dels = am->fa_session_total_dels; + out0 = format(out0, "Sessions total: add %lu - del %lu = %lu\n", n_adds, n_dels, n_adds - n_dels); + } + out0 = format(out0, "\n\nPer-thread data:\n"); + for (wk = 0; wk < vec_len (am->per_worker_data); wk++) { + acl_fa_per_worker_data_t *pw = &am->per_worker_data[wk]; + out0 = format(out0, "Thread #%d:\n", wk); + if (show_session_thread_id == wk && show_session_session_index < pool_len(pw->fa_sessions_pool)) { + out0 = format(out0, " session index %u:\n", show_session_session_index); + fa_session_t *sess = pw->fa_sessions_pool + show_session_session_index; + u64 *m = (u64 *)&sess->info; + out0 = format(out0, " info: %016llx %016llx %016llx %016llx %016llx %016llx\n", m[0], m[1], m[2], m[3], m[4], m[5]); + out0 = format(out0, " sw_if_index: %u\n", sess->sw_if_index); + out0 = format(out0, " tcp_flags_seen: %x\n", sess->tcp_flags_seen.as_u16); + out0 = format(out0, " last active time: %lu\n", sess->last_active_time); + out0 = format(out0, " thread index: %u\n", sess->thread_index); + out0 = format(out0, " link enqueue time: %lu\n", sess->link_enqueue_time); + out0 = format(out0, " link next index: %u\n", sess->link_next_idx); + out0 = format(out0, " link prev index: %u\n", sess->link_prev_idx); + out0 = format(out0, " link list id: %u\n", sess->link_list_id); + } + out0 = format(out0, " connection add/del stats:\n", wk); + pool_foreach (swif, im->sw_interfaces, + ({ + u32 sw_if_index = swif->sw_if_index; + u64 n_adds = sw_if_index < vec_len(pw->fa_session_adds_by_sw_if_index) ? pw->fa_session_adds_by_sw_if_index[sw_if_index] : 0; + u64 n_dels = sw_if_index < vec_len(pw->fa_session_dels_by_sw_if_index) ? 
pw->fa_session_dels_by_sw_if_index[sw_if_index] : 0; + out0 = format(out0, " sw_if_index %d: add %lu - del %lu = %lu\n", sw_if_index, n_adds, n_dels, n_adds - n_dels); + })); + + out0 = format(out0, " connection timeout type lists:\n", wk); + u8 tt = 0; + for(tt = 0; tt < ACL_N_TIMEOUTS; tt++) { + u32 head_session_index = pw->fa_conn_list_head[tt]; + out0 = format(out0, " fa_conn_list_head[%d]: %d\n", tt, head_session_index); + if (~0 != head_session_index) { + fa_session_t *sess = pw->fa_sessions_pool + head_session_index; + out0 = format(out0, " last active time: %lu\n", sess->last_active_time); + out0 = format(out0, " link enqueue time: %lu\n", sess->link_enqueue_time); + } + } + + out0 = format(out0, " Next expiry time: %lu\n", pw->next_expiry_time); + out0 = format(out0, " Requeue until time: %lu\n", pw->requeue_until_time); + out0 = format(out0, " Current time wait interval: %lu\n", pw->current_time_wait_interval); + out0 = format(out0, " Count of deleted sessions: %lu\n", pw->cnt_deleted_sessions); + out0 = format(out0, " Delete already deleted: %lu\n", pw->cnt_already_deleted_sessions); + out0 = format(out0, " Session timers restarted: %lu\n", pw->cnt_session_timer_restarted); + out0 = format(out0, " Swipe until this time: %lu\n", pw->swipe_end_time); + out0 = format(out0, " sw_if_index serviced bitmap: %U\n", format_bitmap_hex, pw->serviced_sw_if_index_bitmap); + out0 = format(out0, " pending clear intfc bitmap : %U\n", format_bitmap_hex, pw->pending_clear_sw_if_index_bitmap); + out0 = format(out0, " clear in progress: %u\n", pw->clear_in_process); + out0 = format(out0, " interrupt is pending: %d\n", pw->interrupt_is_pending); + out0 = format(out0, " interrupt is needed: %d\n", pw->interrupt_is_needed); + out0 = format(out0, " interrupt is unwanted: %d\n", pw->interrupt_is_unwanted); + out0 = format(out0, " interrupt generation: %d\n", pw->interrupt_generation); + } out0 = format(out0, "\n\nConn cleaner thread counters:\n"); #define _(cnt, desc) out0 = format(out0, " %20lu: %s\n", am->cnt, desc); foreach_fa_cleaner_counter; #undef _ + vec_terminate_c_string(out0); vlib_cli_output(vm, "\n\n%s\n\n", out0); + vlib_cli_output(vm, "Interrupt generation: %d\n", am->fa_interrupt_generation); vlib_cli_output(vm, "Sessions per interval: min %lu max %lu increment: %f ms current: %f ms", am->fa_min_deleted_sessions_per_interval, am->fa_max_deleted_sessions_per_interval, am->fa_cleaner_wait_time_increment * 1000.0, ((f64)am->fa_current_cleaner_timer_wait_interval) * 1000.0/(f64)vm->clib_time.clocks_per_second); vec_free(out0); + show_fa_sessions_hash(vm, show_bihash_verbose); + } + else if (unformat (input, "interface")) + { + u32 sw_if_index = ~0; + u32 swi; + u8 * out0 = format(0, ""); + unformat (input, "sw_if_index %u", &sw_if_index); + for(swi = 0; (swi < vec_len(am->input_acl_vec_by_sw_if_index)) || + (swi < vec_len(am->output_acl_vec_by_sw_if_index)); swi++) { + out0 = format(out0, "sw_if_index %d:\n", swi); + + if ((swi < vec_len(am->input_acl_vec_by_sw_if_index)) && + (vec_len(am->input_acl_vec_by_sw_if_index[swi]) > 0)) { + out0 = format(out0, " input acl(s): "); + vec_foreach(pj, am->input_acl_vec_by_sw_if_index[swi]) { + out0 = format(out0, "%d ", *pj); + } + out0 = format(out0, "\n"); + } + + if ((swi < vec_len(am->output_acl_vec_by_sw_if_index)) && + (vec_len(am->output_acl_vec_by_sw_if_index[swi]) > 0)) { + out0 = format(out0, " output acl(s): "); + vec_foreach(pj, am->output_acl_vec_by_sw_if_index[swi]) { + out0 = format(out0, "%d ", *pj); + } + out0 = format(out0, "\n"); + } 
+ + } + vec_terminate_c_string(out0); + vlib_cli_output(vm, "\n%s\n", out0); + vec_free(out0); + } + else if (unformat (input, "acl")) + { + u32 acl_index = ~0; + u32 i; + u8 * out0 = format(0, ""); + unformat (input, "index %u", &acl_index); + for(i=0; iacls); i++) { + if (acl_is_not_defined(am, i)) { + /* don't attempt to show the ACLs that do not exist */ + continue; + } + if ((acl_index != ~0) && (acl_index != i)) { + continue; + } + out0 = format(out0, "acl-index %u count %u tag {%s}\n", i, am->acls[i].count, am->acls[i].tag); + acl_rule_t *r; + int j; + for(j=0; jacls[i].count; j++) { + r = &am->acls[i].rules[j]; + out0 = format(out0, " %4d: %s ", j, r->is_ipv6 ? "ipv6" : "ipv4"); + out0 = format_acl_action(out0, r->is_permit); + out0 = format(out0, " src %U/%d", format_ip46_address, &r->src, IP46_TYPE_ANY, r->src_prefixlen); + out0 = format(out0, " dst %U/%d", format_ip46_address, &r->dst, IP46_TYPE_ANY, r->dst_prefixlen); + out0 = format(out0, " proto %d", r->proto); + out0 = format(out0, " sport %d", r->src_port_or_type_first); + if (r->src_port_or_type_first != r->src_port_or_type_last) { + out0 = format(out0, "-%d", r->src_port_or_type_last); + } + out0 = format(out0, " dport %d", r->dst_port_or_code_first); + if (r->dst_port_or_code_first != r->dst_port_or_code_last) { + out0 = format(out0, "-%d", r->dst_port_or_code_last); + } + if (r->tcp_flags_mask || r->tcp_flags_value) { + out0 = format(out0, " tcpflags %d mask %d", r->tcp_flags_value, r->tcp_flags_mask); + } + out0 = format(out0, "\n"); + } + + if (iinput_sw_if_index_vec_by_acl)) { + out0 = format(out0, " applied inbound on sw_if_index: "); + vec_foreach(pj, am->input_sw_if_index_vec_by_acl[i]) { + out0 = format(out0, "%d ", *pj); + } + out0 = format(out0, "\n"); + } + if (ioutput_sw_if_index_vec_by_acl)) { + out0 = format(out0, " applied outbound on sw_if_index: "); + vec_foreach(pj, am->output_sw_if_index_vec_by_acl[i]) { + out0 = format(out0, "%d ", *pj); + } + out0 = format(out0, "\n"); + } + } + vec_terminate_c_string(out0); + vlib_cli_output(vm, "\n%s\n", out0); + vec_free(out0); + } + else if (unformat (input, "memory")) + { + vlib_cli_output (vm, "ACL plugin main heap statistics:\n"); + if (am->acl_mheap) { + vlib_cli_output (vm, " %U\n", format_mheap, am->acl_mheap, 1); + } else { + vlib_cli_output (vm, " Not initialized\n"); + } + vlib_cli_output (vm, "ACL hash lookup support heap statistics:\n"); + if (am->hash_lookup_mheap) { + vlib_cli_output (vm, " %U\n", format_mheap, am->hash_lookup_mheap, 1); + } else { + vlib_cli_output (vm, " Not initialized\n"); + } + } + else if (unformat (input, "tables")) + { + ace_mask_type_entry_t *mte; + u32 acl_index = ~0; + u32 sw_if_index = ~0; + int show_acl_hash_info = 0; + int show_applied_info = 0; + int show_mask_type = 0; + int show_bihash = 0; + u32 show_bihash_verbose = 0; + + if (unformat (input, "acl")) { + show_acl_hash_info = 1; + /* mask-type is handy to see as well right there */ + show_mask_type = 1; + unformat (input, "index %u", &acl_index); + } else if (unformat (input, "applied")) { + show_applied_info = 1; + unformat (input, "sw_if_index %u", &sw_if_index); + } else if (unformat (input, "mask")) { + show_mask_type = 1; + } else if (unformat (input, "hash")) { + show_bihash = 1; + unformat (input, "verbose %u", &show_bihash_verbose); + } + + if ( ! 
+      if ( ! (show_mask_type || show_acl_hash_info || show_applied_info || show_bihash) ) {
+        /* if no qualifiers specified, show all */
+        show_mask_type = 1;
+        show_acl_hash_info = 1;
+        show_applied_info = 1;
+        show_bihash = 1;
+      }
+
+      if (show_mask_type) {
+        vlib_cli_output(vm, "Mask-type entries:");
+        /* *INDENT-OFF* */
+        pool_foreach(mte, am->ace_mask_type_pool,
+        ({
+          vlib_cli_output(vm, " %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
+                          mte - am->ace_mask_type_pool,
+                          mte->mask.kv.key[0], mte->mask.kv.key[1], mte->mask.kv.key[2],
+                          mte->mask.kv.key[3], mte->mask.kv.key[4], mte->mask.kv.value, mte->refcount);
+        }));
+        /* *INDENT-ON* */
+      }
+
+      if (show_acl_hash_info) {
+        u32 i,j;
+        u8 * out0 = format(0, "");
+        u64 *m;
+        out0 = format(out0, "Mask-ready ACL representations\n");
+        for (i=0; i< vec_len(am->hash_acl_infos); i++) {
+          if ((acl_index != ~0) && (acl_index != i)) {
+            continue;
+          }
+          hash_acl_info_t *ha = &am->hash_acl_infos[i];
+          out0 = format(out0, "acl-index %u bitmask-ready layout\n", i);
+          out0 = format(out0, " applied inbound on sw_if_index list: %U\n", format_vec32, ha->inbound_sw_if_index_list, "%d");
+          out0 = format(out0, " applied outbound on sw_if_index list: %U\n", format_vec32, ha->outbound_sw_if_index_list, "%d");
+          out0 = format(out0, " mask type index bitmap: %U\n", format_bitmap_hex, ha->mask_type_index_bitmap);
+          for(j=0; j<vec_len(ha->rules); j++) {
+            hash_ace_info_t *pa = &ha->rules[j];
+            m = (u64 *)&pa->match;
+            out0 = format(out0, " %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
+                          j, m[0], m[1], m[2], m[3], m[4], m[5], pa->mask_type_index,
+                          pa->acl_index, pa->ace_index, pa->action,
+                          pa->src_portrange_not_powerof2, pa->dst_portrange_not_powerof2);
+          }
+        }
+        vec_terminate_c_string(out0);
+        vlib_cli_output(vm, "\n%s\n", out0);
+        vec_free(out0);
+      }
+
+      if (show_applied_info) {
+        u32 swi, j;
+        u8 * out0 = format(0, "");
+        out0 = format(out0, "Applied lookup entries for interfaces\n");
+
+        for(swi = 0; (swi < vec_len(am->input_applied_hash_acl_info_by_sw_if_index)) ||
+                     (swi < vec_len(am->output_applied_hash_acl_info_by_sw_if_index)) ||
+                     (swi < vec_len(am->input_hash_entry_vec_by_sw_if_index)) ||
+                     (swi < vec_len(am->output_hash_entry_vec_by_sw_if_index)); swi++) {
+          if ((sw_if_index != ~0) && (sw_if_index != swi)) {
+            continue;
+          }
+          out0 = format(out0, "sw_if_index %d:\n", swi);
+          if (swi < vec_len(am->input_applied_hash_acl_info_by_sw_if_index)) {
+            applied_hash_acl_info_t *pal = &am->input_applied_hash_acl_info_by_sw_if_index[swi];
+            out0 = format(out0, " input lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap);
+            out0 = format(out0, " input applied acls: %U\n", format_vec32, pal->applied_acls, "%d");
+          }
+          if (swi < vec_len(am->input_hash_entry_vec_by_sw_if_index)) {
+            out0 = format(out0, " input lookup applied entries:\n");
+            for(j=0; j<vec_len(am->input_hash_entry_vec_by_sw_if_index[swi]); j++) {
+              applied_hash_ace_entry_t *pae = &am->input_hash_entry_vec_by_sw_if_index[swi][j];
+              out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld\n",
+                            j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index,
+                            pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index, pae->hitcount);
+            }
+          }
+
+          if (swi < vec_len(am->output_applied_hash_acl_info_by_sw_if_index)) {
+            applied_hash_acl_info_t *pal = &am->output_applied_hash_acl_info_by_sw_if_index[swi];
format(out0, " output lookup mask_type_index_bitmap: %U\n", format_bitmap_hex, pal->mask_type_index_bitmap); + out0 = format(out0, " output applied acls: %U\n", format_vec32, pal->applied_acls, "%d"); + } + if (swi < vec_len(am->output_hash_entry_vec_by_sw_if_index)) { + out0 = format(out0, " output lookup applied entries:\n"); + for(j=0; joutput_hash_entry_vec_by_sw_if_index[swi]); j++) { + applied_hash_ace_entry_t *pae = &am->output_hash_entry_vec_by_sw_if_index[swi][j]; + out0 = format(out0, " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld\n", + j, pae->acl_index, pae->ace_index, pae->action, pae->hash_ace_info_index, + pae->next_applied_entry_index, pae->prev_applied_entry_index, pae->tail_applied_entry_index, pae->hitcount); + } + } + + } + vec_terminate_c_string(out0); + vlib_cli_output(vm, "\n%s\n", out0); + vec_free(out0); + } + + if (show_bihash) { + show_hash_acl_hash(vm, am, show_bihash_verbose); + } } return error; } +static clib_error_t * +acl_clear_aclplugin_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + clib_error_t *error = 0; + acl_main_t *am = &acl_main; + vlib_process_signal_event (am->vlib_main, am->fa_cleaner_node_index, + ACL_FA_CLEANER_DELETE_BY_SW_IF_INDEX, ~0); + return error; +} /* *INDENT-OFF* */ VLIB_CLI_COMMAND (aclplugin_set_command, static) = { @@ -2142,9 +2578,22 @@ VLIB_CLI_COMMAND (aclplugin_set_command, static) = { VLIB_CLI_COMMAND (aclplugin_show_command, static) = { .path = "show acl-plugin", - .short_help = "show acl-plugin sessions", + .short_help = "show acl-plugin {sessions|acl|interface|tables}", .function = acl_show_aclplugin_fn, }; + +VLIB_CLI_COMMAND (aclplugin_show_macip_command, static) = { + .path = "show acl-plugin macip", + .short_help = "show acl-plugin macip {acl|interface}", + .function = acl_show_aclplugin_macip_fn, +}; + + +VLIB_CLI_COMMAND (aclplugin_clear_command, static) = { + .path = "clear acl-plugin sessions", + .short_help = "clear acl-plugin sessions", + .function = acl_clear_aclplugin_fn, +}; /* *INDENT-ON* */ @@ -2165,7 +2614,6 @@ acl_init (vlib_main_t * vm) VL_MSG_FIRST_AVAILABLE); error = acl_plugin_api_hookup (vm); - acl_setup_nodes (); /* Add our API messages to the global name_crc hash table */ setup_message_id_table (am, &api_main); @@ -2173,6 +2621,7 @@ acl_init (vlib_main_t * vm) vec_free (name); acl_setup_fa_nodes(); + am->session_timeout_sec[ACL_TIMEOUT_TCP_TRANSIENT] = TCP_SESSION_TRANSIENT_TIMEOUT_SEC; am->session_timeout_sec[ACL_TIMEOUT_TCP_IDLE] = TCP_SESSION_IDLE_TIMEOUT_SEC; am->session_timeout_sec[ACL_TIMEOUT_UDP_IDLE] = UDP_SESSION_IDLE_TIMEOUT_SEC; @@ -2180,12 +2629,19 @@ acl_init (vlib_main_t * vm) am->fa_conn_table_hash_num_buckets = ACL_FA_CONN_TABLE_DEFAULT_HASH_NUM_BUCKETS; am->fa_conn_table_hash_memory_size = ACL_FA_CONN_TABLE_DEFAULT_HASH_MEMORY_SIZE; am->fa_conn_table_max_entries = ACL_FA_CONN_TABLE_DEFAULT_MAX_ENTRIES; - + vlib_thread_main_t *tm = vlib_get_thread_main (); + vec_validate(am->per_worker_data, tm->n_vlib_mains-1); { + u16 wk; u8 tt; - for(tt = 0; tt < ACL_N_TIMEOUTS; tt++) { - am->fa_conn_list_head[tt] = ~0; - am->fa_conn_list_tail[tt] = ~0; + for (wk = 0; wk < vec_len (am->per_worker_data); wk++) { + acl_fa_per_worker_data_t *pw = &am->per_worker_data[wk]; + vec_validate(pw->fa_conn_list_head, ACL_N_TIMEOUTS-1); + vec_validate(pw->fa_conn_list_tail, ACL_N_TIMEOUTS-1); + for(tt = 0; tt < ACL_N_TIMEOUTS; tt++) { + pw->fa_conn_list_head[tt] = ~0; + pw->fa_conn_list_tail[tt] = ~0; + } } } @@ -2196,7 +2652,6 @@ 
@@ -2196,7 +2652,6 @@ acl_init (vlib_main_t * vm)
   am->fa_cleaner_cnt_delete_by_sw_index = 0;
   am->fa_cleaner_cnt_delete_by_sw_index_ok = 0;
   am->fa_cleaner_cnt_unknown_event = 0;
-  am->fa_cleaner_cnt_deleted_sessions = 0;
   am->fa_cleaner_cnt_timer_restarted = 0;
   am->fa_cleaner_cnt_wait_with_timeout = 0;
 
@@ -2205,6 +2660,11 @@ acl_init (vlib_main_t * vm)
   foreach_acl_eh
 #undef _
 
+  am->l4_match_nonfirst_fragment = 1;
+
+  /* use the new fancy hash-based matching */
+  am->use_hash_acl_matching = 1;
+
   return error;
 }
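
For readers unfamiliar with the pattern, below is a minimal standalone sketch (plain C, no VPP headers; all names and sizes here are illustrative, not the plugin's own) of what the per-worker initialization added to acl_init() above establishes: one connection list per timeout type per worker, with ~0 used as the "empty list" sentinel that the session-walking code and the debug CLI check against.

/*
 * Sketch only: mirrors the wk/tt double loop in acl_init() above,
 * using libc in place of the VPP vec_* allocators.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define N_TIMEOUTS 3            /* stand-in for ACL_N_TIMEOUTS */
#define EMPTY_INDEX (~0u)       /* same sentinel the plugin uses */

typedef struct {
  uint32_t conn_list_head[N_TIMEOUTS];
  uint32_t conn_list_tail[N_TIMEOUTS];
} worker_data_t;

int
main (void)
{
  int n_workers = 4;            /* analogue of vec_len (am->per_worker_data) */
  worker_data_t *per_worker = calloc (n_workers, sizeof (worker_data_t));
  if (!per_worker)
    return 1;

  /* Every worker starts with every timeout-type list empty. */
  for (int wk = 0; wk < n_workers; wk++)
    for (int tt = 0; tt < N_TIMEOUTS; tt++)
      {
        per_worker[wk].conn_list_head[tt] = EMPTY_INDEX;
        per_worker[wk].conn_list_tail[tt] = EMPTY_INDEX;
      }

  /* A list is considered empty while its head is still ~0. */
  printf ("worker 0, timeout 0 empty: %s\n",
          per_worker[0].conn_list_head[0] == EMPTY_INDEX ? "yes" : "no");

  free (per_worker);
  return 0;
}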