* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
#include <vnet/classify/vnet_classify.h>
#include <vnet/classify/in_out_acl.h>
#include <vnet/ip/ip.h>
#include <vnet/api_errno.h> /* for API error numbers */
#include <vnet/l2/l2_classify.h> /* for L2_INPUT_CLASSIFY_NEXT_xxx */
#include <vnet/fib/fib_table.h>
+#include <vppinfra/lock.h>
+#include <vnet/classify/trace_classify.h>
+
+
+
+/**
+ * @file
+ * @brief N-tuple classifier
+ */
vnet_classify_main_t vnet_classify_main;
}
+/**
+ * @brief Allocate and initialise a new classify table.
+ *
+ * @param cm classify main; owns the pool the table is allocated from
+ * @param mask match mask, match_n_vectors * sizeof (u32x4) bytes copied
+ * @param nbuckets requested bucket count, rounded up to a power of two
+ * @param memory_size size in bytes of the table's private locked heap
+ * @param skip_n_vectors 16-octet vectors to skip before matching
+ * @param match_n_vectors 16-octet vectors to match
+ * @return pointer to the newly created table
+ */
vnet_classify_table_t *
-vnet_classify_new_table (vnet_classify_main_t * cm,
- u8 * mask, u32 nbuckets, u32 memory_size,
- u32 skip_n_vectors, u32 match_n_vectors)
+vnet_classify_new_table (vnet_classify_main_t *cm, const u8 *mask,
+ u32 nbuckets, u32 memory_size, u32 skip_n_vectors,
+ u32 match_n_vectors)
{
vnet_classify_table_t *t;
void *oldheap;
+ /* Power-of-two bucket count; hashing masks with (nbuckets - 1) */
nbuckets = 1 << (max_log2 (nbuckets));
- pool_get_aligned (cm->tables, t, CLIB_CACHE_LINE_BYTES);
- clib_memset (t, 0, sizeof (*t));
+ pool_get_aligned_zero (cm->tables, t, CLIB_CACHE_LINE_BYTES);
+ /* t->mask is a fixed-size in-struct array (see ARRAY_LEN use below):
+ zero it fully, then copy only the caller-supplied vectors */
- vec_validate_aligned (t->mask, match_n_vectors - 1, sizeof (u32x4));
- clib_memcpy (t->mask, mask, match_n_vectors * sizeof (u32x4));
+ clib_memset_u32 (t->mask, 0, 4 * ARRAY_LEN (t->mask));
+ clib_memcpy_fast (t->mask, mask, match_n_vectors * sizeof (u32x4));
t->next_table_index = ~0;
t->nbuckets = nbuckets;
t->match_n_vectors = match_n_vectors;
t->skip_n_vectors = skip_n_vectors;
t->entries_per_page = 2;
+ t->load_mask = pow2_mask (match_n_vectors * 2);
+ /* Each table gets its own named, locked heap */
-#if USE_DLMALLOC == 0
- t->mheap = mheap_alloc (0 /* use VM */ , memory_size);
-#else
- t->mheap = create_mspace (memory_size, 1 /* locked */ );
-#endif
+ t->mheap = clib_mem_create_heap (0, memory_size, 1 /* locked */ ,
+ "classify");
vec_validate_aligned (t->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
+ /* Initialise the writer lock while the table heap is current, then
+ restore the caller's heap */
oldheap = clib_mem_set_heap (t->mheap);
- t->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- t->writer_lock[0] = 0;
-
+ clib_spinlock_init (&t->writer_lock);
clib_mem_set_heap (oldheap);
return (t);
}
/* Recursively delete the entire chain */
vnet_classify_delete_table_index (cm, t->next_table_index, del_chain);
- vec_free (t->mask);
vec_free (t->buckets);
-#if USE_DLMALLOC == 0
- mheap_free (t->mheap);
-#else
- destroy_mspace (t->mheap);
-#endif
-
+ clib_mem_destroy_heap (t->mheap);
pool_put (cm->tables, t);
}
u32 required_length;
void *oldheap;
- ASSERT (t->writer_lock[0]);
+ CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
required_length =
(sizeof (vnet_classify_entry_t) + (t->match_n_vectors * sizeof (u32x4)))
* t->entries_per_page * (1 << log2_pages);
vnet_classify_entry_free (vnet_classify_table_t * t,
vnet_classify_entry_t * v, u32 log2_pages)
{
- ASSERT (t->writer_lock[0]);
+ CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
ASSERT (vec_len (t->freelists) > log2_pages);
v = vnet_classify_get_entry (t, b->offset);
- clib_memcpy (working_copy, v, required_length);
+ clib_memcpy_fast (working_copy, v, required_length);
working_bucket.as_u64 = b->as_u64;
working_bucket.offset = vnet_classify_get_offset (t, working_copy);
for (i = 0; i < length_in_entries; i++)
{
- u64 new_hash;
+ u32 new_hash;
v = vnet_classify_entry_at_index (t, old_values, i);
if (vnet_classify_entry_is_free (new_v))
{
- clib_memcpy (new_v, v, sizeof (vnet_classify_entry_t)
- + (t->match_n_vectors * sizeof (u32x4)));
+ clib_memcpy_fast (new_v, v, sizeof (vnet_classify_entry_t)
+ + (t->match_n_vectors * sizeof (u32x4)));
new_v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
goto doublebreak;
}
clib_warning ("BUG: linear rehash new entry not free!");
continue;
}
- clib_memcpy (new_v, v, sizeof (vnet_classify_entry_t)
- + (t->match_n_vectors * sizeof (u32x4)));
+ clib_memcpy_fast (new_v, v, sizeof (vnet_classify_entry_t)
+ + (t->match_n_vectors * sizeof (u32x4)));
new_v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
j++;
goto doublebreak;
fib_table_lock (e->metadata, FIB_PROTOCOL_IP6, FIB_SOURCE_CLASSIFY);
break;
case CLASSIFY_ACTION_SET_METADATA:
+ case CLASSIFY_ACTION_NONE:
break;
}
}
fib_table_unlock (e->metadata, FIB_PROTOCOL_IP6, FIB_SOURCE_CLASSIFY);
break;
case CLASSIFY_ACTION_SET_METADATA:
+ case CLASSIFY_ACTION_NONE:
break;
}
}
-int
-vnet_classify_add_del (vnet_classify_table_t * t,
- vnet_classify_entry_t * add_v, int is_add)
+static int
+vnet_classify_add_del (vnet_classify_table_t *t, vnet_classify_entry_t *add_v,
+ int is_add)
{
u32 bucket_index;
vnet_classify_bucket_t *b, tmp_b;
u32 value_index;
int rv = 0;
int i;
- u64 hash, new_hash;
+ u32 hash, new_hash;
u32 limit;
u32 old_log2_pages, new_log2_pages;
u32 thread_index = vlib_get_thread_index ();
hash >>= t->log2_nbuckets;
- while (clib_atomic_test_and_set (t->writer_lock))
- ;
+ clib_spinlock_lock (&t->writer_lock);
/* First elt in the bucket? */
if (b->offset == 0)
}
v = vnet_classify_entry_alloc (t, 0 /* new_log2_pages */ );
- clib_memcpy (v, add_v, sizeof (vnet_classify_entry_t) +
- t->match_n_vectors * sizeof (u32x4));
+ clib_memcpy_fast (v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof (u32x4));
v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
vnet_classify_entry_claim_resource (v);
if (!memcmp
(v->key, add_v->key, t->match_n_vectors * sizeof (u32x4)))
{
- clib_memcpy (v, add_v, sizeof (vnet_classify_entry_t) +
- t->match_n_vectors * sizeof (u32x4));
+ clib_memcpy_fast (v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof (u32x4));
v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
vnet_classify_entry_claim_resource (v);
if (vnet_classify_entry_is_free (v))
{
- clib_memcpy (v, add_v, sizeof (vnet_classify_entry_t) +
- t->match_n_vectors * sizeof (u32x4));
+ clib_memcpy_fast (v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof (u32x4));
v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
vnet_classify_entry_claim_resource (v);
if (vnet_classify_entry_is_free (new_v))
{
- clib_memcpy (new_v, add_v, sizeof (vnet_classify_entry_t) +
- t->match_n_vectors * sizeof (u32x4));
+ clib_memcpy_fast (new_v, add_v, sizeof (vnet_classify_entry_t) +
+ t->match_n_vectors * sizeof (u32x4));
new_v->flags &= ~(VNET_CLASSIFY_ENTRY_FREE);
vnet_classify_entry_claim_resource (new_v);
}
/* Crap. Try again */
vnet_classify_entry_free (t, save_new_v, new_log2_pages);
- new_log2_pages++;
if (resplit_once)
goto mark_linear;
vnet_classify_entry_free (t, v, old_log2_pages);
unlock:
- CLIB_MEMORY_BARRIER ();
- t->writer_lock[0] = 0;
+ clib_spinlock_unlock (&t->writer_lock);
return rv;
}
}) classify_data_or_mask_t;
/* *INDENT-ON* */
-u64
-vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h)
+u32
+vnet_classify_hash_packet (const vnet_classify_table_t *t, u8 *h)
{
return vnet_classify_hash_packet_inline (t, h);
}
vnet_classify_entry_t *
-vnet_classify_find_entry (vnet_classify_table_t * t,
- u8 * h, u64 hash, f64 now)
+vnet_classify_find_entry (const vnet_classify_table_t *t, u8 *h, u32 hash,
+ f64 now)
{
return vnet_classify_find_entry_inline (t, h, hash, now);
}
-static u8 *
-format_classify_entry (u8 * s, va_list * args)
+u8 *
+format_classify_entry (u8 *s, va_list *args)
{
vnet_classify_table_t *t = va_arg (*args, vnet_classify_table_t *);
vnet_classify_entry_t *e = va_arg (*args, vnet_classify_entry_t *);
}
int
-vnet_classify_add_del_table (vnet_classify_main_t * cm,
- u8 * mask,
- u32 nbuckets,
- u32 memory_size,
- u32 skip,
- u32 match,
- u32 next_table_index,
- u32 miss_next_index,
- u32 * table_index,
- u8 current_data_flag,
- i16 current_data_offset,
+vnet_classify_add_del_table (vnet_classify_main_t *cm, const u8 *mask,
+ u32 nbuckets, u32 memory_size, u32 skip,
+ u32 match, u32 next_table_index,
+ u32 miss_next_index, u32 *table_index,
+ u8 current_data_flag, i16 current_data_offset,
int is_add, int del_chain)
{
vnet_classify_table_t *t;
if (nbuckets == 0)
return VNET_API_ERROR_INVALID_VALUE;
+ if (match < 1 || match > 5)
+ return VNET_API_ERROR_INVALID_VALUE;
+
t = vnet_classify_new_table (cm, mask, nbuckets, memory_size,
skip, match);
t->next_table_index = next_table_index;
else /* update */
{
vnet_classify_main_t *cm = &vnet_classify_main;
- t = pool_elt_at_index (cm->tables, *table_index);
+ if (pool_is_free_index (cm->tables, *table_index))
+ return VNET_API_ERROR_CLASSIFY_TABLE_NOT_FOUND;
+ t = pool_elt_at_index (cm->tables, *table_index);
t->next_table_index = next_table_index;
}
return 0;
else if (unformat (input, "dst_port"))
dst_port = 0xFFFF;
else
- return 0;
+ break;
}
if (!src_port && !dst_port)
u8 *mask = 0;
u8 found_something = 0;
ip4_header_t *ip;
+ u32 src_prefix_len = 32;
+ u32 src_prefix_mask = ~0;
+ u32 dst_prefix_len = 32;
+ u32 dst_prefix_mask = ~0;
#define _(a) u8 a=0;
foreach_ip4_proto_field;
version = 1;
else if (unformat (input, "hdr_length"))
hdr_length = 1;
+ else if (unformat (input, "src/%d", &src_prefix_len))
+ {
+ src_address = 1;
+ src_prefix_mask &= ~((1 << (32 - src_prefix_len)) - 1);
+ src_prefix_mask = clib_host_to_net_u32 (src_prefix_mask);
+ }
+ else if (unformat (input, "dst/%d", &dst_prefix_len))
+ {
+ dst_address = 1;
+ dst_prefix_mask &= ~((1 << (32 - dst_prefix_len)) - 1);
+ dst_prefix_mask = clib_host_to_net_u32 (dst_prefix_mask);
+ }
else if (unformat (input, "src"))
src_address = 1;
else if (unformat (input, "dst"))
break;
}
+ found_something = version + hdr_length;
#define _(a) found_something += a;
foreach_ip4_proto_field;
#undef _
foreach_ip4_proto_field;
#undef _
+ if (src_address)
+ ip->src_address.as_u32 = src_prefix_mask;
+
+ if (dst_address)
+ ip->dst_address.as_u32 = dst_prefix_mask;
+
ip->ip_version_and_header_length = 0;
if (version)
{
u8 **maskp = va_arg (*args, u8 **);
u8 *mask = 0;
- u8 found_something = 0;
+ u8 found_something;
ip6_header_t *ip;
u32 ip_version_traffic_class_and_flow_label;
break;
}
+ /* Account for "special" field names */
+ found_something = version + traffic_class + flow_label
+ + src_address + dst_address + protocol;
+
#define _(a) found_something += a;
foreach_ip6_proto_field;
#undef _
u8 *l2 = 0;
u8 *l3 = 0;
u8 *l4 = 0;
+ u8 add_l2 = 1;
int i;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "hex %U", unformat_hex_string, &mask))
;
+ else if (unformat (input, "l2 none"))
+ /* Don't add the l2 header in the mask */
+ add_l2 = 0;
else if (unformat (input, "l2 %U", unformat_l2_mask, &l2))
;
else if (unformat (input, "l3 %U", unformat_l3_mask, &l3))
break;
}
+ if (l2 && !add_l2)
+ {
+ vec_free (mask);
+ vec_free (l2);
+ vec_free (l3);
+ vec_free (l4);
+ return 0;
+ }
+
if (l4 && !l3)
{
vec_free (mask);
{
if (l2 || l3 || l4)
{
- /* "With a free Ethernet header in every package" */
- if (l2 == 0)
- vec_validate (l2, 13);
- mask = l2;
- if (l3)
+ if (add_l2)
{
- vec_append (mask, l3);
- vec_free (l3);
+ /* "With a free Ethernet header in every package" */
+ if (l2 == 0)
+ vec_validate (l2, 13);
+ mask = l2;
+ if (l3)
+ {
+ vec_append (mask, l3);
+ vec_free (l3);
+ }
}
+ else
+ mask = l3;
if (l4)
{
vec_append (mask, l4);
if (match == 0)
clib_warning ("BUG: match 0");
- _vec_len (mask) = match * sizeof (u32x4);
+ vec_set_len (mask, match * sizeof (u32x4));
*matchp = match;
*maskp = mask;
if (!is_add && table_index == ~0)
return clib_error_return (0, "table index required for delete");
- rv = vnet_classify_add_del_table (cm, mask, nbuckets, memory_size,
+ rv = vnet_classify_add_del_table (cm, mask, nbuckets, (u32) memory_size,
skip, match, next_table_index,
miss_next_index, &table_index,
current_data_flag, current_data_offset,
}
/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (classify_table, static) = {
+VLIB_CLI_COMMAND (classify_table, static) =
+{
.path = "classify table",
.short_help =
"classify table [miss-next|l2-miss_next|acl-miss-next <next_index>]"
};
/* *INDENT-ON* */
-static u8 *
-format_vnet_classify_table (u8 * s, va_list * args)
+/*
+ * vec_sort_with_function() comparator: order two classify-table indices
+ * by descending number of set bits in their masks, so that
+ * more-specific masks sort ahead of less-specific ones.
+ */
+static int
+filter_table_mask_compare (void *a1, void *a2)
+{
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ u32 *ti1 = a1;
+ u32 *ti2 = a2;
+ u32 n1 = 0, n2 = 0;
+ vnet_classify_table_t *t1, *t2;
+ u8 *m1, *m2;
+ int i;
+
+ t1 = pool_elt_at_index (cm->tables, *ti1);
+ t2 = pool_elt_at_index (cm->tables, *ti2);
+
+ m1 = (u8 *) (t1->mask);
+ m2 = (u8 *) (t2->mask);
+
+ /* Population count over each table's mask, byte at a time */
+ for (i = 0; i < t1->match_n_vectors * sizeof (u32x4); i++)
+ {
+ n1 += count_set_bits (m1[0]);
+ m1++;
+ }
+
+ for (i = 0; i < t2->match_n_vectors * sizeof (u32x4); i++)
+ {
+ n2 += count_set_bits (m2[0]);
+ m2++;
+ }
+
+ /* Reverse sort: descending number of set bits */
+ if (n1 < n2)
+ return 1;
+ else if (n1 > n2)
+ return -1;
+ else
+ return 0;
+}
+
+
+/*
+ * Reorder the chain of tables starting with table_index such
+ * that more-specific masks come before less-specific masks.
+ * Return the new head of the table chain.
+ */
+/**
+ * @brief Sort the table chain headed by table_index so that tables with
+ * more-specific masks (more set bits) come first, relinking
+ * next_table_index fields to match the new order.
+ * @note assumes table_index refers to a valid table (tables[0] is
+ * dereferenced unconditionally)
+ * @return index of the new head of the chain
+ */
+u32
+classify_sort_table_chain (vnet_classify_main_t * cm, u32 table_index)
+{
+ /*
+ * Form a vector of all classifier tables in this chain.
+ */
+ u32 *tables = 0;
+ vnet_classify_table_t *t;
+ u32 cti;
+ for (cti = table_index; cti != ~0; cti = t->next_table_index)
+ {
+ vec_add1 (tables, cti);
+ t = pool_elt_at_index (cm->tables, cti);
+ }
+
+ /*
+ * Sort filter tables from most-specific mask to least-specific mask.
+ */
+ vec_sort_with_function (tables, filter_table_mask_compare);
+
+ /*
+ * Relink tables via next_table_index fields.
+ */
+ int i;
+ for (i = 0; i < vec_len (tables); i++)
+ {
+ t = pool_elt_at_index (cm->tables, tables[i]);
+
+ if ((i + 1) < vec_len (tables))
+ t->next_table_index = tables[i + 1];
+ else
+ t->next_table_index = ~0;
+ }
+
+ table_index = tables[0];
+ vec_free (tables);
+
+ return table_index;
+}
+
+
+/**
+ * @brief Return the head table index of the packet-trace classify chain,
+ * ~0 if no trace filter chain is configured.
+ */
+u32
+classify_get_trace_chain (void)
+{
+ u32 table_index;
+
+ table_index = vlib_global_main.trace_filter.classify_table_index;
+
+ return table_index;
+}
+
+/*
+ * Setting the trace chain to ~0 is a request to delete and clear it.
+ */
+/**
+ * @brief Install table_index as the head of the packet-trace classify chain.
+ *
+ * Passing ~0 first deletes the entire existing chain (del_chain = 1),
+ * then records ~0 to mark the chain as empty.
+ */
+void
+classify_set_trace_chain (vnet_classify_main_t * cm, u32 table_index)
+{
+ if (table_index == ~0)
+ {
+ u32 old_table_index;
+
+ old_table_index = vlib_global_main.trace_filter.classify_table_index;
+ /* NOTE(review): presumably a no-op when old_table_index is already
+ ~0 — confirm vnet_classify_delete_table_index tolerates ~0 */
+ vnet_classify_delete_table_index (cm, old_table_index, 1);
+ }
+
+ vlib_global_main.trace_filter.classify_table_index = table_index;
+}
+
+
+/**
+ * @brief Return the head pcap classify table index for an interface.
+ * @return table index, or ~0 when sw_if_index is ~0 or out of range of
+ * the per-interface table-index vector
+ */
+u32
+classify_get_pcap_chain (vnet_classify_main_t * cm, u32 sw_if_index)
+{
+ u32 table_index = ~0;
+
+ if (sw_if_index != ~0
+ && (sw_if_index < vec_len (cm->classify_table_index_by_sw_if_index)))
+ table_index = cm->classify_table_index_by_sw_if_index[sw_if_index];
+
+ return table_index;
+}
+
+/**
+ * @brief Install (or clear, with table_index == ~0) the pcap classify
+ * chain head for an interface; ~0 deletes the previous chain.
+ */
+void
+classify_set_pcap_chain (vnet_classify_main_t * cm,
+ u32 sw_if_index, u32 table_index)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+
+ /* Grow the per-interface vector (filled with ~0) only when installing
+ a real table on a real interface */
+ if (sw_if_index != ~0 && table_index != ~0)
+ vec_validate_init_empty (cm->classify_table_index_by_sw_if_index,
+ sw_if_index, ~0);
+
+ if (table_index == ~0)
+ {
+ u32 old_table_index = ~0;
+
+ if (sw_if_index < vec_len (cm->classify_table_index_by_sw_if_index))
+ old_table_index =
+ cm->classify_table_index_by_sw_if_index[sw_if_index];
+
+ vnet_classify_delete_table_index (cm, old_table_index, 1);
+ }
+
+ /*
+ * Put the table index where device drivers can find them.
+ * This table index will be either a valid table or a ~0 to clear it.
+ */
+ if (vec_len (cm->classify_table_index_by_sw_if_index) > sw_if_index)
+ cm->classify_table_index_by_sw_if_index[sw_if_index] = table_index;
+ /* NOTE(review): sw_if_index is unsigned, so ~0 also satisfies
+ (sw_if_index > 0); callers appear to pass a valid interface or 0
+ here — confirm ~0 cannot reach vnet_get_sup_hw_interface() */
+ if (sw_if_index > 0)
+ {
+ vnet_hw_interface_t *hi;
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ hi->trace_classify_table_index = table_index;
+ }
+}
+
+
+/*
+ * Search for a mask-compatible Classify table within the given table chain.
+ */
+/**
+ * @brief Search for a mask-compatible classify table within a table chain.
+ *
+ * A table matches when its skip/match geometry equals (n_skip, n_match),
+ * its mask length equals vec_len (mask), and the mask bytes are identical.
+ *
+ * @return index of the first compatible table, or ~0 if none
+ */
+u32
+classify_lookup_chain (u32 table_index, u8 * mask, u32 n_skip, u32 n_match)
+{
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ vnet_classify_table_t *t;
+ u32 cti;
+
+ if (table_index == ~0)
+ return ~0;
+
+ for (cti = table_index; cti != ~0; cti = t->next_table_index)
+ {
+ t = pool_elt_at_index (cm->tables, cti);
+
+ /* Classifier geometry mismatch, can't use this table. */
+ if (t->match_n_vectors != n_match || t->skip_n_vectors != n_skip)
+ continue;
+
+ /* Masks aren't congruent, can't use this table. */
+ if (t->match_n_vectors * sizeof (u32x4) != vec_len (mask))
+ continue;
+
+ /* Masks aren't bit-for-bit identical, can't use this table. */
+ if (memcmp (t->mask, mask, t->match_n_vectors * sizeof (u32x4)))
+ continue;
+
+ /* Winner... */
+ return cti;
+ }
+
+ return ~0;
+}
+
+
+/**
+ * @brief CLI handler for "classify filter": build / extend / delete the
+ * packet-trace or pcap classify filter chains.
+ *
+ * Parses [del] [pcap|trace|<interface>] [buckets n] [mask ...]
+ * [memory-size n], locates or creates a mask-compatible table, re-sorts
+ * the chain most-specific-first, then adds a match session.
+ */
+static clib_error_t *
+classify_filter_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u32 nbuckets = 8;
+ vnet_main_t *vnm = vnet_get_main ();
+ uword memory_size = (uword) (128 << 10);
+ u32 skip = ~0;
+ u32 match = ~0;
+ u8 *match_vector;
+ int is_add = 1;
+ u32 table_index = ~0;
+ u32 next_table_index = ~0;
+ u32 miss_next_index = ~0;
+ u32 current_data_flag = 0;
+ int current_data_offset = 0;
+ u32 sw_if_index = ~0;
+ int pkt_trace = 0;
+ int pcap = 0;
+ u8 *mask = 0;
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ int rv = 0;
+ clib_error_t *err = 0;
+
+ unformat_input_t _line_input, *line_input = &_line_input;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "pcap %=", &pcap, 1))
+ sw_if_index = 0;
+ else if (unformat (line_input, "trace"))
+ pkt_trace = 1;
+ else if (unformat (line_input, "%U",
+ unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ if (sw_if_index == 0)
+ /* NOTE(review): line_input is not unformat_free'd on this
+ path — possible leak; confirm */
+ return clib_error_return (0, "Local interface not supported...");
+ }
+ else if (unformat (line_input, "buckets %d", &nbuckets))
+ ;
+ else if (unformat (line_input, "mask %U", unformat_classify_mask,
+ &mask, &skip, &match))
+ ;
+ else if (unformat (line_input, "memory-size %U", unformat_memory_size,
+ &memory_size))
+ ;
+ else
+ break;
+ }
+
+ /* Argument validation; first failure wins */
+ if (is_add && mask == 0)
+ err = clib_error_return (0, "Mask required");
+
+ else if (is_add && skip == ~0)
+ err = clib_error_return (0, "skip count required");
+
+ else if (is_add && match == ~0)
+ err = clib_error_return (0, "match count required");
+
+ else if (sw_if_index == ~0 && pkt_trace == 0 && pcap == 0)
+ err = clib_error_return (0, "Must specify trace, pcap or interface...");
+
+ else if (pkt_trace && pcap)
+ err = clib_error_return
+ (0, "Packet trace and pcap are mutually exclusive...");
+
+ else if (pkt_trace && sw_if_index != ~0)
+ err = clib_error_return (0, "Packet trace filter is per-system");
+
+ if (err)
+ {
+ unformat_free (line_input);
+ return err;
+ }
+
+ if (!is_add)
+ {
+ /*
+ * Delete an existing PCAP or trace classify table.
+ */
+ if (pkt_trace)
+ classify_set_trace_chain (cm, ~0);
+ else
+ classify_set_pcap_chain (cm, sw_if_index, ~0);
+
+ vec_free (mask);
+ unformat_free (line_input);
+
+ return 0;
+ }
+
+ /*
+ * Find an existing compatible table or else make a new one.
+ */
+ if (pkt_trace)
+ table_index = classify_get_trace_chain ();
+ else
+ table_index = classify_get_pcap_chain (cm, sw_if_index);
+
+ if (table_index != ~0)
+ {
+ /*
+ * look for a compatible table in the existing chain
+ * - if a compatible table is found, table_index is updated with it
+ * - if not, table_index is updated to ~0 (aka nil) and because of that
+ * we are going to create one (see below). We save the original head
+ * in next_table_index so we can chain it with the newly created
+ * table
+ */
+ next_table_index = table_index;
+ table_index = classify_lookup_chain (table_index, mask, skip, match);
+ }
+
+ /*
+ * When no table is found, make one.
+ */
+ if (table_index == ~0)
+ {
+ u32 new_head_index;
+
+ /*
+ * Matching table wasn't found, so create a new one at the
+ * head of the next_table_index chain.
+ */
+ rv = vnet_classify_add_del_table (cm, mask, nbuckets, memory_size,
+ skip, match, next_table_index,
+ miss_next_index, &table_index,
+ current_data_flag,
+ current_data_offset, 1, 0);
+
+ if (rv != 0)
+ {
+ vec_free (mask);
+ unformat_free (line_input);
+ return clib_error_return (0,
+ "vnet_classify_add_del_table returned %d",
+ rv);
+ }
+
+ /*
+ * Reorder tables such that masks are most-specific to least-specific.
+ */
+ new_head_index = classify_sort_table_chain (cm, table_index);
+
+ /*
+ * Put first classifier table in chain in a place where
+ * other data structures expect to find and use it.
+ */
+ if (pkt_trace)
+ classify_set_trace_chain (cm, new_head_index);
+ else
+ classify_set_pcap_chain (cm, sw_if_index, new_head_index);
+ }
+
+ vec_free (mask);
+
+ /*
+ * Now try to parse and add a filter-match session.
+ */
+ if (unformat (line_input, "match %U", unformat_classify_match,
+ cm, &match_vector, table_index) == 0)
+ /* NOTE(review): line_input is not unformat_free'd on this return
+ path (nor below) — possible leak; confirm */
+ return 0;
+
+ /*
+ * We use hit or miss to determine whether to trace or pcap pkts
+ * so the session setup is very limited
+ */
+ rv = vnet_classify_add_del_session (cm, table_index,
+ match_vector, 0 /* hit_next_index */ ,
+ 0 /* opaque_index */ ,
+ 0 /* advance */ ,
+ 0 /* action */ ,
+ 0 /* metadata */ ,
+ 1 /* is_add */ );
+
+ vec_free (match_vector);
+
+ return 0;
+}
+
+/**
+ * @brief Enable or disable the packet trace filter.
+ * @param enable non-zero enables the filter, zero disables it
+ * @return 0 always
+ */
+int
+vlib_enable_disable_pkt_trace_filter (int enable)
+{
+ if (enable)
+ {
+ vlib_global_main.trace_filter.trace_filter_enable = 1;
+ }
+ else
+ {
+ vlib_global_main.trace_filter.trace_filter_enable = 0;
+ }
+ return 0;
+}
+
+/*?
+ * Construct an arbitrary set of packet classifier tables for use with
+ * "pcap trace rx | tx," and with the vpp packet tracer
+ *
+ * Packets which match a rule in the classifier table chain
+ * will be traced. The tables are automatically ordered so that
+ * matches in the most specific table are tried first.
+ *
+ * It's reasonably likely that folks will configure a single
+ * table with one or two matches. As a result, we configure
+ * 8 hash buckets and 128K of match rule space. One can override
+ * the defaults by specifying "buckets <nnn>" and "memory-size <xxx>"
+ * as desired.
+ *
+ * To build up complex filter chains, repeatedly issue the
+ * classify filter debug CLI command. Each command must specify the desired
+ * mask and match values. If a classifier table with a suitable mask
+ * already exists, the CLI command adds a match rule to the existing table.
+ * If not, the CLI command adds a new table and the indicated match rule.
+ *
+ * Here is a terse description of the "mask <xxx>" syntax:
+ *
+ * l2 src dst proto tag1 tag2 ignore-tag1 ignore-tag2 cos1 cos2 dot1q dot1ad
+ *
+ * l3 ip4 <ip4-mask> ip6 <ip6-mask>
+ *
+ * <ip4-mask> version hdr_length src[/width] dst[/width]
+ * tos length fragment_id ttl protocol checksum
+ *
+ * <ip6-mask> version traffic-class flow-label src dst proto
+ * payload_length hop_limit protocol
+ *
+ * l4 tcp <tcp-mask> udp <udp_mask> src_port dst_port
+ *
+ * <tcp-mask> src dst # ports
+ *
+ * <udp-mask> src_port dst_port
+ *
+ * To construct matches, add the values to match after the indicated keywords:
+ * in the match syntax. For example:
+ * mask l3 ip4 src -> match l3 ip4 src 192.168.1.11
+ *
+ * @cliexpar
+ * Configuring the classify filter
+ *
+ * Configure a simple classify filter, and configure pcap trace rx to use it:
+ *
+ * @cliexcmd{classify filter rx mask l3 ip4 src match l3 ip4 src 192.168.1.11}
+ * <b><em>pcap trace rx max 100 filter</em></b>
+ *
+ * Configure another fairly simple filter
+ *
+ * @cliexcmd{classify filter mask l3 ip4 src dst match l3 ip4 src 192.168.1.10
+ * dst 192.168.2.10}
+ *
+ *
+ * Configure a filter for use with the vpp packet tracer:
+ * @cliexcmd{classify filter trace mask l3 ip4 src dst match l3 ip4 src
+ * 192.168.1.10 dst 192.168.2.10}
+ * <b><em>trace add dpdk-input 100 filter</em></b>
+ *
+ * Clear classifier filters
+ *
+ * <b><em>classify filter [trace | rx | tx | <intfc>] del</em></b>
+ *
+ * To display the top-level classifier tables for each use case:
+ * <b><em>show classify filter</em></b>
+ *
+ * To inspect the classifier tables, use
+ *
+ * <b><em>show classify table [verbose]</em></b>
+ * The verbose form displays all of the match rules, with hit-counters
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (classify_filter, static) =
+{
+ .path = "classify filter",
+ .short_help =
+ "classify filter <intfc> | pcap mask <mask-value> match <match-value>\n"
+ " | trace mask <mask-value> match <match-value> [del]\n"
+ " [buckets <nn>] [memory-size <n>]",
+ .function = classify_filter_command_fn,
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief CLI handler for "show classify filter": list the classify
+ * filter table(s) used by the packet tracer, pcap, and each
+ * interface; with "verbose", walk and print each full chain.
+ */
+static clib_error_t *
+show_classify_filter_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_classify_main_t *cm = &vnet_classify_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ u8 *name = 0;
+ u8 *s = 0;
+ u32 table_index;
+ int verbose = 0;
+ int i, j, limit;
+
+ (void) unformat (input, "verbose %=", &verbose, 1);
+
+ vlib_cli_output (vm, "%-30s%s", "Filter Used By", " Table(s)");
+ vlib_cli_output (vm, "%-30s%s", "--------------", " --------");
+
+ limit = vec_len (cm->classify_table_index_by_sw_if_index);
+
+ /* i == -1 is a synthetic row for the packet tracer; i == 0 is the
+ pcap slot; i > 0 are real interfaces */
+ for (i = -1; i < limit; i++)
+ {
+ switch (i)
+ {
+ case -1:
+ table_index = vlib_global_main.trace_filter.classify_table_index;
+ name = format (0, "packet tracer:");
+ break;
+
+ case 0:
+ table_index = cm->classify_table_index_by_sw_if_index[i];
+ name = format (0, "pcap rx/tx/drop:");
+ break;
+
+ default:
+ table_index = cm->classify_table_index_by_sw_if_index[i];
+ name = format (0, "%U:", format_vnet_sw_if_index_name, vnm, i);
+ break;
+ }
+
+ if (verbose)
+ {
+ vnet_classify_table_t *t;
+ j = table_index;
+ /* Walk the chain via next_table_index until ~0 */
+ do
+ {
+ if (j == ~0)
+ s = format (s, " none");
+ else
+ {
+ s = format (s, " %u", j);
+ t = pool_elt_at_index (cm->tables, j);
+ j = t->next_table_index;
+ }
+ }
+ while (j != ~0);
+
+ vlib_cli_output (vm, "%-30v table(s)%v", name, s);
+ vec_reset_length (s);
+ }
+ else
+ {
+ if (table_index != ~0)
+ s = format (s, " %u", table_index);
+ else
+ s = format (s, " none");
+
+ vlib_cli_output (vm, "%-30v first table%v", name, s);
+ vec_reset_length (s);
+ }
+ vec_reset_length (name);
+ }
+ vec_free (s);
+ vec_free (name);
+ return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_classify_filter, static) =
+{
+ .path = "show classify filter",
+ .short_help = "show classify filter [verbose [nn]]",
+ .function = show_classify_filter_command_fn,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_vnet_classify_table (u8 *s, va_list *args)
{
vnet_classify_main_t *cm = va_arg (*args, vnet_classify_main_t *);
int verbose = va_arg (*args, int);
if (index == ~0)
{
- s = format (s, "%10s%10s%10s%10s", "TableIdx", "Sessions", "NextTbl",
+ s = format (s, "\n%10s%10s%10s%10s", "TableIdx", "Sessions", "NextTbl",
"NextNode", verbose ? "Details" : "");
return s;
}
s = format (s, "%10u%10d%10d%10d", index, t->active_elements,
t->next_table_index, t->miss_next_index);
- s = format (s, "\n Heap: %U", format_mheap, t->mheap, 0 /*verbose */ );
+ s = format (s, "\n Heap: %U", format_clib_mem_heap, t->mheap,
+ 0 /*verbose */ );
s = format (s, "\n nbuckets %d, skip %d match %d flag %d offset %d",
t->nbuckets, t->skip_n_vectors, t->match_n_vectors,
}
/* *INDENT-OFF* */
- pool_foreach (t, cm->tables,
- ({
+ pool_foreach (t, cm->tables)
+ {
if (match_index == ~0 || (match_index == t - cm->tables))
vec_add1 (indices, t - cm->tables);
- }));
+ }
/* *INDENT-ON* */
if (vec_len (indices))
{
- vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, verbose,
- ~0 /* hdr */ );
for (i = 0; i < vec_len (indices); i++)
- vlib_cli_output (vm, "%U", format_vnet_classify_table, cm,
- verbose, indices[i]);
+ {
+ vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, verbose,
+ ~0 /* hdr */);
+ vlib_cli_output (vm, "%U", format_vnet_classify_table, cm, verbose,
+ indices[i]);
+ }
}
else
vlib_cli_output (vm, "No classifier tables configured");
else if (unformat (input, "dst_port %d", &dst_port))
;
else
- return 0;
+ break;
}
h.src_port = clib_host_to_net_u16 (src_port);
ip = (ip6_header_t *) match;
if (src)
- clib_memcpy (&ip->src_address, &src_val, sizeof (ip->src_address));
+ clib_memcpy_fast (&ip->src_address, &src_val, sizeof (ip->src_address));
if (dst)
- clib_memcpy (&ip->dst_address, &dst_val, sizeof (ip->dst_address));
+ clib_memcpy_fast (&ip->dst_address, &dst_val, sizeof (ip->dst_address));
if (proto)
ip->protocol = proto_val;
vec_validate_aligned (match, len - 1, sizeof (u32x4));
if (dst)
- clib_memcpy (match, dst_val, 6);
+ clib_memcpy_fast (match, dst_val, 6);
if (src)
- clib_memcpy (match + 6, src_val, 6);
+ clib_memcpy_fast (match + 6, src_val, 6);
if (tag2)
{
u8 *l2 = 0;
u8 *l3 = 0;
u8 *l4 = 0;
+ u8 add_l2 = 1;
if (pool_is_free_index (cm->tables, table_index))
return 0;
{
if (unformat (input, "hex %U", unformat_hex_string, &match))
;
+ else if (unformat (input, "l2 none"))
+ /* Don't add the l2 header in the mask */
+ add_l2 = 0;
else if (unformat (input, "l2 %U", unformat_l2_match, &l2))
;
else if (unformat (input, "l3 %U", unformat_l3_match, &l3))
break;
}
+ if (l2 && !add_l2)
+ {
+ vec_free (match);
+ vec_free (l2);
+ vec_free (l3);
+ vec_free (l4);
+ return 0;
+ }
+
if (l4 && !l3)
{
vec_free (match);
{
if (l2 || l3 || l4)
{
- /* "Win a free Ethernet header in every packet" */
- if (l2 == 0)
- vec_validate_aligned (l2, 13, sizeof (u32x4));
- match = l2;
- if (l3)
+ if (add_l2)
{
- vec_append_aligned (match, l3, sizeof (u32x4));
- vec_free (l3);
+ /* "Win a free Ethernet header in every packet" */
+ if (l2 == 0)
+ vec_validate_aligned (l2, 13, sizeof (u32x4));
+ match = l2;
+ if (l3)
+ {
+ vec_append_aligned (match, l3, sizeof (u32x4));
+ vec_free (l3);
+ }
}
+ else
+ match = l3;
if (l4)
{
vec_append_aligned (match, l4, sizeof (u32x4));
sizeof (u32x4));
/* Set size, include skipped vectors */
- _vec_len (match) =
- (t->match_n_vectors + t->skip_n_vectors) * sizeof (u32x4);
+ vec_set_len (match,
+ (t->match_n_vectors + t->skip_n_vectors) * sizeof (u32x4));
*matchp = match;
}
int
-vnet_classify_add_del_session (vnet_classify_main_t * cm,
- u32 table_index,
- u8 * match,
- u32 hit_next_index,
- u32 opaque_index,
- i32 advance,
- u8 action, u32 metadata, int is_add)
+vnet_classify_add_del_session (vnet_classify_main_t *cm, u32 table_index,
+ const u8 *match, u16 hit_next_index,
+ u32 opaque_index, i32 advance, u8 action,
+ u32 metadata, int is_add)
{
vnet_classify_table_t *t;
vnet_classify_entry_5_t _max_e __attribute__ ((aligned (16)));
e->metadata = 0;
/* Copy key data, honoring skip_n_vectors */
- clib_memcpy (&e->key, match + t->skip_n_vectors * sizeof (u32x4),
- t->match_n_vectors * sizeof (u32x4));
+ clib_memcpy_fast (&e->key, match + t->skip_n_vectors * sizeof (u32x4),
+ t->match_n_vectors * sizeof (u32x4));
/* Clear don't-care bits; likely when dynamically creating sessions */
for (i = 0; i < t->match_n_vectors; i++)
VLIB_CLI_COMMAND (classify_session_command, static) = {
.path = "classify session",
.short_help =
- "classify session [hit-next|l2-hit-next|"
+ "classify session [hit-next|l2-input-hit-next|l2-output-hit-next|"
"acl-hit-next <next_index>|policer-hit-next <policer_name>]"
"\n table-index <nn> match [hex] [l2] [l3 ip4] [opaque-index <index>]"
"\n [action set-ip4-fib-id|set-ip6-fib-id|set-sr-policy-index <n>] [del]",
vnet_classify_register_unformat_acl_next_index_fn (unformat_acl_next_node);
+ vlib_global_main.trace_filter.classify_table_index = ~0;
+
return 0;
}
VLIB_INIT_FUNCTION (vnet_classify_init);
-#define TEST_CODE 1
+/**
+ * @brief Out-of-line wrapper for vnet_is_packet_traced_inline(); used as
+ * the function pointer in the trace-filter registration below.
+ */
+int
+vnet_is_packet_traced (vlib_buffer_t * b, u32 classify_table_index, int func)
+{
+ return vnet_is_packet_traced_inline (b, classify_table_index, func);
+}
+VLIB_REGISTER_TRACE_FILTER_FUNCTION (vnet_is_packet_traced_fn, static) = {
+ .name = "vnet_is_packet_traced",
+ .description = "classifier based filter",
+ .priority = 50,
+ .function = vnet_is_packet_traced
+};
+
+#define TEST_CODE 0
#if TEST_CODE > 0
for (i = 0; i < tm->sessions; i++)
{
u8 *key_minus_skip;
- u64 hash;
+ u32 hash;
vnet_classify_entry_t *e;
ep = tm->entries + i;