#undef HAVE_MEMFD_CREATE
#include <vppinfra/linux/syscall.h>
#include <vpp-api/client/stat_client.h>
+#include <vppinfra/mheap.h>
stat_segment_main_t stat_segment_main;
return clib_mem_set_heap (sm->heap);
}
-/* Name to vector index hash */
static u32
-lookup_or_create_hash_index (void *oldheap, char *name, u32 next_vector_index)
+lookup_or_create_hash_index (u8 * name, u32 next_vector_index)
{
stat_segment_main_t *sm = &stat_segment_main;
u32 index;
hash_pair_t *hp;
+ /* Must be called in the context of the main heap */
+  ASSERT (clib_mem_get_heap () != sm->heap);
+
hp = hash_get_pair (sm->directory_vector_by_name, name);
if (!hp)
{
cm->stat_segment_name ? cm->stat_segment_name : cm->name;
u32 next_vector_index = vec_len (sm->directory_vector);
clib_mem_set_heap (oldheap); /* Exit stats segment */
- u32 vector_index = lookup_or_create_hash_index (oldheap, stat_segment_name,
+ u32 vector_index = lookup_or_create_hash_index ((u8 *) stat_segment_name,
next_vector_index);
/* Back to stats segment */
clib_mem_set_heap (sm->heap); /* Re-enter stat segment */
}
void
-vlib_stats_register_error_index (u8 * name, u64 * em_vec, u64 index)
+vlib_stats_register_error_index (void *oldheap, u8 * name, u64 * em_vec,
+ u64 index)
{
stat_segment_main_t *sm = &stat_segment_main;
stat_segment_shared_header_t *shared_header = sm->shared_header;
ASSERT (shared_header);
vlib_stat_segment_lock ();
+ u32 next_vector_index = vec_len (sm->directory_vector);
+ clib_mem_set_heap (oldheap); /* Exit stats segment */
- memcpy (e.name, name, vec_len (name));
- e.name[vec_len (name)] = '\0';
- e.type = STAT_DIR_TYPE_ERROR_INDEX;
- e.offset = index;
- e.offset_vector = 0;
- vec_add1 (sm->directory_vector, e);
+ u32 vector_index = lookup_or_create_hash_index (name,
+ next_vector_index);
- /* Warn clients to refresh any pointers they might be holding */
- shared_header->directory_offset =
- stat_segment_offset (shared_header, sm->directory_vector);
+ /* Back to stats segment */
+ clib_mem_set_heap (sm->heap); /* Re-enter stat segment */
+
+ if (next_vector_index == vector_index)
+ {
+ memcpy (e.name, name, vec_len (name));
+ e.name[vec_len (name)] = '\0';
+ e.type = STAT_DIR_TYPE_ERROR_INDEX;
+ e.offset = index;
+ e.offset_vector = 0;
+ vec_add1 (sm->directory_vector, e);
+
+ /* Warn clients to refresh any pointers they might be holding */
+ shared_header->directory_offset =
+ stat_segment_offset (shared_header, sm->directory_vector);
+ }
+ else
+ {
+ vec_free (name);
+ }
vlib_stat_segment_unlock ();
}
clib_mem_set_heap (oldheap);
+ /* Total shared memory size */
+ clib_mem_usage_t usage;
+ mheap_usage (sm->heap, &usage);
+ sm->directory_vector[STAT_COUNTER_MEM_STATSEG_TOTAL].value =
+ usage.bytes_total;
+
return 0;
}
sm->directory_vector[STAT_COUNTER_LAST_STATS_CLEAR].value =
vm->node_main.time_last_runtime_stats_clear;
+ /* Stats segment memory heap counter */
+ clib_mem_usage_t usage;
+ mheap_usage (sm->heap, &usage);
+ sm->directory_vector[STAT_COUNTER_MEM_STATSEG_USED].value =
+ usage.bytes_used;
+
if (sm->node_counters_enabled)
update_node_counters (sm);
stat_segment_shared_header_t *shared_header = sm->shared_header;
void *oldheap;
stat_segment_directory_entry_t e;
- u32 index;
stat_segment_gauges_pool_t *gauge;
ASSERT (shared_header);
+ u32 next_vector_index = vec_len (sm->directory_vector);
+ u32 vector_index = lookup_or_create_hash_index (name,
+ next_vector_index);
+
+  if (vector_index < next_vector_index)	/* Already registered */
+    return clib_error_return (0, "%v is already registered", name);
+
oldheap = vlib_stats_push_heap (NULL);
vlib_stat_segment_lock ();
e.type = STAT_DIR_TYPE_SCALAR_INDEX;
memcpy (e.name, name, vec_len (name));
- index = vec_len (sm->directory_vector);
vec_add1 (sm->directory_vector, e);
shared_header->directory_offset =
pool_get (sm->gauges, gauge);
gauge->fn = update_fn;
gauge->caller_index = caller_index;
- gauge->directory_index = index;
+ gauge->directory_index = next_vector_index;
return NULL;
}
STAT_COUNTER_NODE_SUSPENDS,
STAT_COUNTER_INTERFACE_NAMES,
STAT_COUNTER_NODE_NAMES,
+ STAT_COUNTER_MEM_STATSEG_TOTAL,
+ STAT_COUNTER_MEM_STATSEG_USED,
STAT_COUNTERS
} stat_segment_counter_t;
_(NODE_CALLS, COUNTER_VECTOR_SIMPLE, calls, /sys/node) \
_(NODE_SUSPENDS, COUNTER_VECTOR_SIMPLE, suspends, /sys/node) \
_(INTERFACE_NAMES, NAME_VECTOR, names, /if) \
- _(NODE_NAMES, NAME_VECTOR, names, /sys/node)
+ _(NODE_NAMES, NAME_VECTOR, names, /sys/node) \
+ _(MEM_STATSEG_TOTAL, SCALAR_INDEX, total, /mem/statseg) \
+ _(MEM_STATSEG_USED, SCALAR_INDEX, used, /mem/statseg)
typedef struct
{
#!/usr/bin/env python2.7
import unittest
-
+import time
import psutil
from vpp_papi.vpp_stats import VPPStats
"is not equal to "
"ending client side file descriptor count: %s" % (
initial_fds, ending_fds))
+ @unittest.skip("Manual only")
+ def test_mem_leak(self):
+ def loop():
+ print('Running loop')
+ for i in range(50):
+ rv = self.vapi.papi.tap_create_v2(id=i, use_random_mac=1)
+ self.assertEqual(rv.retval, 0)
+ rv = self.vapi.papi.tap_delete_v2(sw_if_index=rv.sw_if_index)
+ self.assertEqual(rv.retval, 0)
+
+ before = self.statistics.get_counter('/mem/statseg/used')
+ loop()
+ self.vapi.cli("memory-trace on stats-segment")
+ for j in range(100):
+ loop()
+ print(self.vapi.cli("show memory stats-segment verbose"))
+ print('AFTER', before, self.statistics.get_counter('/mem/statseg/used'))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)