-#!/usr/bin/env python
+#!/usr/bin/env python3
from __future__ import print_function
from cffi import FFI
STAT_DIR_TYPE_COUNTER_VECTOR_SIMPLE,
STAT_DIR_TYPE_COUNTER_VECTOR_COMBINED,
STAT_DIR_TYPE_ERROR_INDEX,
+ STAT_DIR_TYPE_NAME_VECTOR,
} stat_directory_type_t;
typedef struct
union
{
double scalar_value;
- uint64_t error_value;
+ counter_t *error_vector;
counter_t **simple_counter_vec;
vlib_counter_t **combined_counter_vec;
+ uint8_t **name_vector;
};
} stat_segment_data_t;
typedef struct
{
+ uint64_t version;
uint64_t epoch;
uint64_t in_progress;
uint64_t directory_offset;
uint32_t *stat_segment_ls_r (uint8_t ** patterns, stat_client_main_t * sm);
uint32_t *stat_segment_ls (uint8_t ** pattern);
-stat_segment_data_t *stat_segment_dump_r (uint32_t * stats, stat_client_main_t * sm);
+stat_segment_data_t *stat_segment_dump_r (uint32_t * stats,
+ stat_client_main_t * sm);
stat_segment_data_t *stat_segment_dump (uint32_t * counter_vec);
void stat_segment_data_free (stat_segment_data_t * res);
double stat_segment_heartbeat_r (stat_client_main_t * sm);
-double stat_segment_heartbeat (void);
int stat_segment_vec_len(void *vec);
uint8_t **stat_segment_string_vector(uint8_t **string_vector, char *string);
-""")
+char *stat_segment_index_to_name_r (uint32_t index, stat_client_main_t * sm);
+uint64_t stat_segment_version(void);
+uint64_t stat_segment_version_r(stat_client_main_t *sm);
+void free(void *ptr);
+""") # noqa: E501
# Utility functions
strings = [strings]
for s in strings:
vec = api.stat_segment_string_vector(vec, ffi.new("char []",
- s.encode()))
+ s.encode('utf-8')))
return vec
return vec
+def error_vec_list(api, e):
+    """Convert a stat-segment error vector into a plain Python list.
+
+    One element per worker thread — presumably counter_t values; the
+    vector length comes from the C helper stat_segment_vec_len().
+    api is the dlopen'ed stat client library handle (cffi).
+    """
+    vec = []
+    for thread in range(api.stat_segment_vec_len(e)):
+        vec.append(e[thread])
+    return vec
+
+
+def name_vec_list(api, e):
+    """Convert a stat-segment name vector (uint8_t **) into a list of
+    Python strings, skipping NULL entries (unset slots in the vector).
+    """
+    return [ffi.string(e[i]).decode('utf-8') for i in
+            range(api.stat_segment_vec_len(e)) if e[i] != ffi.NULL]
+
+
def stat_entry_to_python(api, e):
# Scalar index
if e.type == 1:
if e.type == 3:
return combined_counter_vec_list(api, e.combined_counter_vec)
if e.type == 4:
- return e.error_value
- return None
+ return error_vec_list(api, e.error_vector)
+ if e.type == 5:
+ return name_vec_list(api, e.name_vector)
+ raise NotImplementedError()
class VPPStatsIOError(IOError):
if not message:
try:
message = self.message % kwargs
- except Exception as e:
+ except Exception:
message = self.message
else:
message = message % kwargs
class VPPStats(object):
VPPStatsIOError = VPPStatsIOError
- default_socketname = '/var/run/stats.sock'
+ default_socketname = '/run/vpp/stats.sock'
sharedlib_name = 'libvppapiclient.so'
def __init__(self, socketname=default_socketname, timeout=10):
+ self.socketname = socketname
+ self.timeout = timeout
+ self.connected = False
try:
self.api = ffi.dlopen(VPPStats.sharedlib_name)
except Exception:
raise VPPStatsClientLoadError("Could not open: %s" %
VPPStats.sharedlib_name)
+
+ def connect(self):
self.client = self.api.stat_client_get()
- poll_end_time = time.time() + timeout
+ poll_end_time = time.time() + self.timeout
while time.time() < poll_end_time:
- rv = self.api.stat_segment_connect_r(socketname.encode(),
- self.client)
- if rv == 0:
+ rv = self.api.stat_segment_connect_r(
+ self.socketname.encode('utf-8'), self.client)
+ # Break out if success or any other error than "no such file"
+ # (indicating that VPP hasn't started yet)
+ if rv == 0 or ffi.errno != 2:
+ self.connected = True
break
if rv != 0:
raise VPPStatsIOError(retval=rv)
def heartbeat(self):
+ if not self.connected:
+ self.connect()
return self.api.stat_segment_heartbeat_r(self.client)
def ls(self, patterns):
+ if not self.connected:
+ self.connect()
return self.api.stat_segment_ls_r(make_string_vector(self.api,
patterns),
self.client)
+    def lsstr(self, patterns):
+        """Like ls(), but resolve the returned stat directory indices
+        to their human-readable names.
+
+        patterns -- list of pattern strings (or a single string) to
+        match against stat segment directory entries.
+        Raises VPPStatsIOError if the C-side listing fails.
+        """
+        if not self.connected:
+            self.connect()
+        rv = self.api.stat_segment_ls_r(make_string_vector(self.api,
+                                                           patterns),
+                                        self.client)
+
+        if rv == ffi.NULL:
+            raise VPPStatsIOError()
+        # Map each returned index to its name via the C helper, decoding
+        # the C string to a Python str.
+        return [ffi.string(self.api.stat_segment_index_to_name_r(
+            rv[i], self.client)).decode('utf-8')
+            for i in range(self.api.stat_segment_vec_len(rv))]
+
def dump(self, counters):
+ if not self.connected:
+ self.connect()
stats = {}
rv = self.api.stat_segment_dump_r(counters, self.client)
# Raise exception and retry
if rv == ffi.NULL:
raise VPPStatsIOError()
rv_len = self.api.stat_segment_vec_len(rv)
+
for i in range(rv_len):
- n = ffi.string(rv[i].name).decode()
+ n = ffi.string(rv[i].name).decode('utf-8')
e = stat_entry_to_python(self.api, rv[i])
if e is not None:
stats[n] = e
.format(name))
k, v = s.popitem()
return v
- except VPPStatsIOError as e:
+ except VPPStatsIOError:
if retries > 10:
return None
retries += 1
+    def get_err_counter(self, name):
+        """Get an error counter. The errors from each worker thread
+        are summed"""
+        # get_counter() presumably yields one value per worker thread
+        # (cf. error_vec_list); collapse them into a single total.
+        return sum(self.get_counter(name))
+
def disconnect(self):
- self.api.stat_segment_disconnect_r(self.client)
- self.api.stat_client_free(self.client)
+ try:
+ self.api.stat_segment_disconnect_r(self.client)
+ self.api.stat_client_free(self.client)
+ self.connected = False
+ del self.client
+ except AttributeError:
+ # no need to disconnect if we're not connected
+ pass
def set_errors(self):
'''Return all errors counters > 0'''
error_names = self.ls(['/err/'])
error_counters = self.dump(error_names)
break
- except VPPStatsIOError as e:
+ except VPPStatsIOError:
if retries > 10:
return None
retries += 1
- return {k: error_counters[k]
- for k in error_counters.keys() if error_counters[k]}
+ return {k: sum(error_counters[k])
+ for k in error_counters.keys() if sum(error_counters[k])}
def set_errors_str(self):
'''Return all errors counters > 0 pretty printed'''
- s = 'ERRORS:\n'
+ s = ['ERRORS:']
error_counters = self.set_errors()
for k in sorted(error_counters):
- s += '{:<60}{:>10}\n'.format(k, error_counters[k])
- return s
+ s.append('{:<60}{:>10}'.format(k, error_counters[k]))
+ return '%s\n' % '\n'.join(s)