#include <vnet/fib/fib_entry.h>
#include <vnet/mfib/mfib_entry.h>
#include <vnet/dpo/load_balance.h>
+#include <vnet/udp/udp_encap.h>
#define STATS_DEBUG 0
_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
-_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
-
+_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats) \
+_(STATS_GET_POLLER_DELAY, stats_get_poller_delay) \
+_(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)
#define vl_msg_name_crc_list
#include <vpp/stats/stats.api.h>
#define IP6_FIB_COUNTER_BATCH_SIZE 30
#define IP4_MFIB_COUNTER_BATCH_SIZE 24
#define IP6_MFIB_COUNTER_BATCH_SIZE 15
+#define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))
/* 5ms */
#define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
vl_api_want_interface_combined_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 swif;
swif = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
{
vpe_client_registration_t *clients, client;
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
u32 mp_size;
int i;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
}
#if STATS_DEBUG > 0
fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
#endif
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vlib_combined_counter_main_t *cm;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
- int i;
- u32 swif;
+ vl_api_registration_t *reg;
+ u32 i, swif, num = 0;
+
+ num = ntohl (mp->num);
- // Validate we have good sw_if_indexes before registering
- for (i = 0; i < mp->num; i++)
+ /*
+ * Validate sw_if_indexes before registering
+ */
+ for (i = 0; i < num; i++)
{
- swif = mp->sw_ifs[i];
+ swif = ntohl (mp->sw_ifs[i]);
- /* Check its a real sw_if_index that the client is allowed to see */
+ /*
+ * Check its a real sw_if_index that the client is allowed to see
+ */
if (swif != ~0)
{
if (pool_is_free_index (sm->interface_main->sw_interfaces, swif))
}
}
- for (i = 0; i < mp->num; i++)
+ for (i = 0; i < num; i++)
{
- swif = mp->sw_ifs[i];
+ swif = ntohl (mp->sw_ifs[i]);
rp.client_index = mp->client_index;
rp.client_pid = mp->pid;
handle_client_registration (&rp, IDX_PER_INTERFACE_COMBINED_COUNTERS,
- swif, mp->enable_disable);
+ swif, ntohl (mp->enable_disable));
}
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
- for (i = 0; i < mp->num; i++)
+ for (i = 0; i < num; i++)
{
- swif = mp->sw_ifs[i];
+ swif = ntohl (mp->sw_ifs[i]);
+
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
/* Per Interface Combined distribution to client */
vnet_interface_main_t *im = sm->interface_main;
api_main_t *am = sm->api_main;
vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = NULL;
+ vl_api_registration_t *vl_reg;
vlib_combined_counter_main_t *cm;
- /*
- * items_this_message will eventually be used to optimise the batching
- * of per client messages for each stat. For now setting this to 1 then
- * iterate. This will not affect API.
- *
- * FIXME instead of enqueueing here, this should be sent to a batch
- * storer for per-client transmission. Each "mp" sent would be a single entry
- * and if a client is listening to other sw_if_indexes for same, it would be
- * appended to that *mp
- */
- u32 items_this_message = 1;
- vnet_combined_counter_t *vp = 0;
+ vl_api_vnet_combined_counter_t *vp = 0;
vlib_counter_t v;
- int i, j;
- u32 timestamp;
+ u32 i, j;
vpe_client_stats_registration_t *reg;
vpe_client_registration_t *client;
u32 *sw_if_index = 0;
- /*
- FIXME(s):
- - capturing the timestamp of the counters "when VPP knew them" is important.
- Less so is that the timing of the delivery to the control plane be in the same
- timescale.
-
- i.e. As long as the control plane can delta messages from VPP and work out
- velocity etc based on the timestamp, it can do so in a more "batch mode".
-
- It would be beneficial to keep a "per-client" message queue, and then
- batch all the stat messages for a client into one message, with
- discrete timestamps.
-
- Given this particular API is for "per interface" one assumes that the scale
- is less than the ~0 case, which the prior API is suited for.
- */
vnet_interface_counter_lock (im);
- timestamp = vlib_time_now (sm->vlib_main);
-
vec_reset_length (sm->regs_tmp);
+
+ /* *INDENT-OFF* */
pool_foreach (reg,
- sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
- (
- {
- vec_add1 (sm->regs_tmp, reg);}));
+ sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
+ ({ vec_add1 (sm->regs_tmp, reg); }));
+ /* *INDENT-ON* */
for (i = 0; i < vec_len (sm->regs_tmp); i++)
{
continue;
}
vec_reset_length (sm->clients_tmp);
- pool_foreach (client, reg->clients, (
- {
- vec_add1 (sm->clients_tmp,
- client);}
- ));
-
- //FIXME - should be doing non-variant part of mp here and managing
- // any alloc per client in that vec_foreach
+
+ /* *INDENT-OFF* */
+ pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
+ client);}));
+ /* *INDENT-ON* */
+
for (j = 0; j < vec_len (sm->clients_tmp); j++)
{
client = sm->clients_tmp[j];
- q = vl_api_client_index_to_input_queue (client->client_index);
+
+ vl_reg = vl_api_client_index_to_registration (client->client_index);
//Client may have disconnected abrubtly, clean up so we don't poll nothing.
- if (!q)
+ if (!vl_reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
reg->item, client->client_index);
continue;
}
+ mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
+ memset (mp, 0, sizeof (*mp));
- mp = vl_msg_api_alloc (sizeof (*mp) +
- (items_this_message *
- (sizeof (*vp) /* rx */ )));
-
- // FIXME when optimising for items_this_message > 1 need to include a
- // SIMPLE_INTERFACE_BATCH_SIZE check.
mp->_vl_msg_id =
ntohs (VL_API_VNET_PER_INTERFACE_COMBINED_COUNTERS);
- mp->count = items_this_message;
- mp->timestamp = timestamp;
- vp = (vnet_combined_counter_t *) mp->data;
-
+ /*
+ * count will eventually be used to optimise the batching
+ * of per client messages for each stat. For now setting this to 1 then
+ * iterate. This will not affect API.
+ *
+ * FIXME instead of enqueueing here, this should be sent to a batch
+ * storer for per-client transmission. Each "mp" sent would be a single entry
+ * and if a client is listening to other sw_if_indexes for same, it would be
+ * appended to that *mp
+ *
+ *
+ * FIXME(s):
+ * - capturing the timestamp of the counters "when VPP knew them" is important.
+ * Less so is that the timing of the delivery to the control plane be in the same
+ * timescale.
+
+ * i.e. As long as the control plane can delta messages from VPP and work out
+ * velocity etc based on the timestamp, it can do so in a more "batch mode".
+
+ * It would be beneficial to keep a "per-client" message queue, and then
+ * batch all the stat messages for a client into one message, with
+ * discrete timestamps.
+
+ * Given this particular API is for "per interface" one assumes that the scale
+ * is less than the ~0 case, which the prior API is suited for.
+ */
+
+ /*
+ * 1 message per api call for now
+ */
+ mp->count = htonl (1);
+ mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
+
+ vp = (vl_api_vnet_combined_counter_t *) mp->data;
vp->sw_if_index = htonl (reg->item);
+ im = &vnet_get_main ()->interface_main;
cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
vlib_get_combined_counter (cm, reg->item, &v);
- clib_mem_unaligned (&vp->rx_packets, u64)
- = clib_host_to_net_u64 (v.packets);
+ clib_mem_unaligned (&vp->rx_packets, u64) =
+ clib_host_to_net_u64 (v.packets);
clib_mem_unaligned (&vp->rx_bytes, u64) =
clib_host_to_net_u64 (v.bytes);
-
-
- /* TX vlib_counter_t packets/bytes */
cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
vlib_get_combined_counter (cm, reg->item, &v);
- clib_mem_unaligned (&vp->tx_packets, u64)
- = clib_host_to_net_u64 (v.packets);
+ clib_mem_unaligned (&vp->tx_packets, u64) =
+ clib_host_to_net_u64 (v.packets);
clib_mem_unaligned (&vp->tx_bytes, u64) =
clib_host_to_net_u64 (v.bytes);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
+ vl_api_send_msg (vl_reg, (u8 *) mp);
}
}
vlib_simple_counter_main_t *cm;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
- int i;
- u32 swif;
+ vl_api_registration_t *reg;
+ u32 i, swif, num = 0;
+
+ num = ntohl (mp->num);
- for (i = 0; i < mp->num; i++)
+ for (i = 0; i < num; i++)
{
- swif = mp->sw_ifs[i];
+ swif = ntohl (mp->sw_ifs[i]);
/* Check its a real sw_if_index that the client is allowed to see */
if (swif != ~0)
}
}
- for (i = 0; i < mp->num; i++)
+ for (i = 0; i < num; i++)
{
- swif = mp->sw_ifs[i];
+ swif = ntohl (mp->sw_ifs[i]);
rp.client_index = mp->client_index;
rp.client_pid = mp->pid;
handle_client_registration (&rp, IDX_PER_INTERFACE_SIMPLE_COUNTERS,
- swif, mp->enable_disable);
+ swif, ntohl (mp->enable_disable));
}
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
+ reg = vl_api_client_index_to_registration (mp->client_index);
- //Client may have disconnected abrubtly, clean up so we don't poll nothing.
- if (!q)
+ /* Client may have disconnected abruptly, clean up */
+ if (!reg)
{
- for (i = 0; i < mp->num; i++)
+ for (i = 0; i < num; i++)
{
- swif = mp->sw_ifs[i];
+ swif = ntohl (mp->sw_ifs[i]);
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
/* Per Interface Simple distribution to client */
vnet_interface_main_t *im = sm->interface_main;
api_main_t *am = sm->api_main;
vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = NULL;
+ vl_api_registration_t *vl_reg;
vlib_simple_counter_main_t *cm;
- /*
- * items_this_message will eventually be used to optimise the batching
- * of per client messages for each stat. For now setting this to 1 then
- * iterate. This will not affect API.
- *
- * FIXME instead of enqueueing here, this should be sent to a batch
- * storer for per-client transmission. Each "mp" sent would be a single entry
- * and if a client is listening to other sw_if_indexes for same, it would be
- * appended to that *mp
- */
- u32 items_this_message = 1;
- int i, j, size;
+ u32 i, j, size;
vpe_client_stats_registration_t *reg;
vpe_client_registration_t *client;
- u32 timestamp;
- u32 count;
- vnet_simple_counter_t *vp = 0;
+ u32 timestamp, count;
+ vl_api_vnet_simple_counter_t *vp = 0;
counter_t v;
- /*
- FIXME(s):
- - capturing the timestamp of the counters "when VPP knew them" is important.
- Less so is that the timing of the delivery to the control plane be in the same
- timescale.
-
- i.e. As long as the control plane can delta messages from VPP and work out
- velocity etc based on the timestamp, it can do so in a more "batch mode".
-
- It would be beneficial to keep a "per-client" message queue, and then
- batch all the stat messages for a client into one message, with
- discrete timestamps.
-
- Given this particular API is for "per interface" one assumes that the scale
- is less than the ~0 case, which the prior API is suited for.
- */
vnet_interface_counter_lock (im);
- timestamp = vlib_time_now (sm->vlib_main);
-
vec_reset_length (sm->regs_tmp);
+
+ /* *INDENT-OFF* */
pool_foreach (reg,
- sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], (
- {
- vec_add1
- (sm->regs_tmp,
- reg);}));
+ sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS],
+ ({ vec_add1 (sm->regs_tmp, reg); }));
+ /* *INDENT-ON* */
for (i = 0; i < vec_len (sm->regs_tmp); i++)
{
continue;
}
vec_reset_length (sm->clients_tmp);
- pool_foreach (client, reg->clients, (
- {
- vec_add1 (sm->clients_tmp,
- client);}
- ));
-
- //FIXME - should be doing non-variant part of mp here and managing
- // any alloc per client in that vec_foreach
+
+ /* *INDENT-OFF* */
+ pool_foreach (client, reg->clients, ({ vec_add1 (sm->clients_tmp,
+ client);}));
+ /* *INDENT-ON* */
+
for (j = 0; j < vec_len (sm->clients_tmp); j++)
{
client = sm->clients_tmp[j];
- q = vl_api_client_index_to_input_queue (client->client_index);
+ vl_reg = vl_api_client_index_to_registration (client->client_index);
- //Client may have disconnected abrubtly, clean up so we don't poll nothing.
- if (!q)
+ /* Client may have disconnected abrubtly, clean up */
+ if (!vl_reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
continue;
}
- size = (sizeof (*mp) + (items_this_message * (sizeof (u64) * 10)));
- mp = vl_msg_api_alloc (size);
- // FIXME when optimising for items_this_message > 1 need to include a
- // SIMPLE_INTERFACE_BATCH_SIZE check.
+ mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + sizeof (*vp));
+ memset (mp, 0, sizeof (*mp));
mp->_vl_msg_id = ntohs (VL_API_VNET_PER_INTERFACE_SIMPLE_COUNTERS);
- mp->count = items_this_message;
- mp->timestamp = timestamp;
- vp = (vnet_simple_counter_t *) mp->data;
+ /*
+ * count will eventually be used to optimise the batching
+ * of per client messages for each stat. For now setting this to 1 then
+ * iterate. This will not affect API.
+ *
+ * FIXME instead of enqueueing here, this should be sent to a batch
+ * storer for per-client transmission. Each "mp" sent would be a single entry
+ * and if a client is listening to other sw_if_indexes for same, it would be
+ * appended to that *mp
+ *
+ *
+ * FIXME(s):
+ * - capturing the timestamp of the counters "when VPP knew them" is important.
+ * Less so is that the timing of the delivery to the control plane be in the same
+ * timescale.
+
+ * i.e. As long as the control plane can delta messages from VPP and work out
+ * velocity etc based on the timestamp, it can do so in a more "batch mode".
+
+ * It would be beneficial to keep a "per-client" message queue, and then
+ * batch all the stat messages for a client into one message, with
+ * discrete timestamps.
+
+ * Given this particular API is for "per interface" one assumes that the scale
+ * is less than the ~0 case, which the prior API is suited for.
+ */
+
+ /*
+ * 1 message per api call for now
+ */
+ mp->count = htonl (1);
+ mp->timestamp = htonl (vlib_time_now (sm->vlib_main));
+ vp = (vl_api_vnet_simple_counter_t *) mp->data;
vp->sw_if_index = htonl (reg->item);
- //FIXME will be simpler with a preprocessor macro
// VNET_INTERFACE_COUNTER_DROP
cm = im->sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
v = vlib_get_simple_counter (cm, reg->item);
v = vlib_get_simple_counter (cm, reg->item);
clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
+ vl_api_send_msg (vl_reg, (u8 *) mp);
}
}
vl_msg_api_free (mp);
}
+/*
+ * Snapshot of one UDP encap object's counters: the encap object id plus
+ * a two-element array holding the packet/byte pair in the order filled
+ * by udp_encap_get_stats() below.
+ */
+typedef struct udp_encap_stat_t_
+{
+  u32 ue_id;
+  u64 stats[2];
+} udp_encap_stat_t;
+
+/*
+ * Walk context: vector of per-object stats accumulated by
+ * udp_encap_stats_walk_cb() and drained in batches by udp_encap_ship().
+ */
+typedef struct udp_encap_stats_walk_t_
+{
+  udp_encap_stat_t *stats;
+} udp_encap_stats_walk_t;
+
+/*
+ * Per-object callback for udp_encap_walk(): append one udp_encap_stat_t
+ * for encap object 'uei' to the walk context's stats vector.
+ * 'arg' is the udp_encap_stats_walk_t supplied by do_udp_encap_counters().
+ */
+static int
+udp_encap_stats_walk_cb (index_t uei, void *arg)
+{
+  udp_encap_stats_walk_t *ctx = arg;
+  udp_encap_stat_t *stat;
+  udp_encap_t *ue;
+
+  ue = udp_encap_get (uei);
+  vec_add2 (ctx->stats, stat, 1);
+
+  /* record the walked index; stats are fetched via the object's own id
+   * (presumably ue->ue_id == uei - TODO confirm against udp_encap.h) */
+  stat->ue_id = uei;
+  udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
+
+  /* non-zero return: continue the walk - TODO confirm walk convention */
+  return (1);
+}
+
+/*
+ * Drain the walk context's accumulated UDP-encap counters to the stats
+ * process' own shmem input queue, in batches of at most
+ * UDP_ENCAP_COUNTER_BATCH_SIZE entries per message.  Counters left in the
+ * context (e.g. after a suspend) are picked up on the next call.
+ */
+static void
+udp_encap_ship (udp_encap_stats_walk_t * ctx)
+{
+  vl_api_vnet_udp_encap_counters_t *mp;
+  vl_shmem_hdr_t *shmem_hdr;
+  stats_main_t *sm;
+  api_main_t *am;
+  svm_queue_t *q;
+
+  mp = NULL;
+  sm = &stats_main;
+  am = sm->api_main;
+  shmem_hdr = am->shmem_hdr;
+  q = shmem_hdr->vl_input_queue;
+
+  /*
+   * If the walk context has counters, which may be left over from the last
+   * suspend, then we continue from there.
+   */
+  while (0 != vec_len (ctx->stats))
+    {
+      u32 n_items = MIN (vec_len (ctx->stats),
+			 UDP_ENCAP_COUNTER_BATCH_SIZE);
+      u8 pause = 0;
+
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      /* message sized for n_items wire-format counter entries */
+      mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+					  (n_items *
+					   sizeof
+					   (vl_api_udp_encap_counter_t)));
+      /* NOTE(review): ntohl used as host-to-net here; numerically the same
+       * swap as htonl on all supported platforms, but htonl is the intent */
+      mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
+      mp->count = ntohl (n_items);
+
+      /*
+       * copy the counters from the back of the context, then we can easily
+       * 'erase' them by resetting the vector length.
+       * The order we push the stats to the caller is not important.
+       * NOTE(review): this copies n_items * sizeof(udp_encap_stat_t) of
+       * host-byte-order data into an array sized by
+       * vl_api_udp_encap_counter_t - confirm the two layouts/sizes match,
+       * otherwise this can overrun mp->c and/or ship unswapped counters.
+       */
+      clib_memcpy (mp->c,
+		   &ctx->stats[vec_len (ctx->stats) - n_items],
+		   n_items * sizeof (*ctx->stats));
+
+      _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
+
+      /*
+       * send to the shm q
+       */
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
+
+      vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+      svm_queue_unlock (q);
+      dsunlock (sm);
+
+      /* queue was full when we enqueued: back off briefly so the consumer
+       * can drain before the next batch */
+      if (pause)
+	ip46_fib_stats_delay (sm, 0 /* sec */ ,
+			      STATS_RELEASE_DELAY_NS);
+    }
+}
+
+/*
+ * Poller entry point for UDP-encap counters: walk all udp_encap objects
+ * under the data-structure lock, collecting their stats into a local
+ * vector, then ship the collected batch to registered clients.
+ */
+static void
+do_udp_encap_counters (stats_main_t * sm)
+{
+  udp_encap_stat_t *stat;
+
+  udp_encap_stats_walk_t ctx = {
+    .stats = NULL,
+  };
+
+  /* hold dslock only for the walk; shipping manages its own locking */
+  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+  udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
+  dsunlock (sm);
+
+  udp_encap_ship (&ctx);
+}
+
+/*
+ * Set the stats thread's poll interval.
+ *
+ * @param poller_delay_sec  new interval in seconds; must be non-zero
+ * @return 0 on success, VNET_API_ERROR_INVALID_ARGUMENT if zero
+ */
+int
+stats_set_poller_delay (u32 poller_delay_sec)
+{
+  stats_main_t *sm = &stats_main;
+  if (!poller_delay_sec)
+    {
+      return VNET_API_ERROR_INVALID_ARGUMENT;
+    }
+  else
+    {
+      sm->stats_poll_interval_in_seconds = poller_delay_sec;
+      return 0;
+    }
+}
+
+/*
+ * Startup-config handler for the "stats { ... }" stanza.
+ * Currently understands a single keyword:
+ *   interval <seconds> - stats poller delay (must be non-zero)
+ * Returns NULL on success, a clib error on bad/unknown input.
+ */
+static clib_error_t *
+stats_config (vlib_main_t * vm, unformat_input_t * input)
+{
+  u32 sec;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "interval %u", &sec))
+	{
+	  int rv = stats_set_poller_delay (sec);
+	  if (rv)
+	    {
+	      return clib_error_return (0,
+					"`stats_set_poller_delay' API call failed, rv=%d:%U",
+					(int) rv, format_vnet_api_errno, rv);
+	    }
+	  /* first "interval" wins; remaining input is not consumed */
+	  return 0;
+	}
+      else
+	{
+	  return clib_error_return (0, "unknown input '%U'",
+				    format_unformat_error, input);
+	}
+    }
+  return 0;
+}
+
+/* stats { ... } configuration. */
+/*?
+ *
+ * @cfgcmd{interval, <seconds>}
+ * Configure stats poller delay to be @c seconds.
+ *
+?*/
+VLIB_CONFIG_FUNCTION (stats_config, "stats");
+
+/*
+ * Handler for STATS_GET_POLLER_DELAY: reply with the currently
+ * configured stats poll interval (seconds, network byte order).
+ */
+static void
+  vl_api_stats_get_poller_delay_t_handler
+  (vl_api_stats_get_poller_delay_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vl_api_stats_get_poller_delay_reply_t *rmp;
+  vl_api_registration_t *reg;
+
+  /* client may have disconnected abruptly - nothing to reply to */
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  /* stamp the reply with our own message id, not the (wrong)
+   * per-interface-simple-stats reply id */
+  rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = 0;
+  rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+
+}
+
static void
stats_thread_fn (void *arg)
{
while (1)
{
- /* 10 second poll interval */
- ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
+ ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
+ 0 /* nsec */ );
if (!(sm->enable_poller))
{
if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
do_ip6_nbr_counters (sm);
+
+ if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
+ do_udp_encap_counters (sm);
}
}
{
vpe_client_registration_t *clients, client;
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
u32 mp_size;
int i;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
#endif
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
}
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
/* *INDENT-ON* */
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
}
/* *INDENT-ON* */
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
}
/* *INDENT-ON* */
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
}
}
+/*
+ * Handler for WANT_UDP_ENCAP_STATS: (de)register the calling client for
+ * periodic UDP-encap counter delivery, then send a reply.
+ */
+static void
+vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_udp_encap_stats_reply_t *rmp;
+  uword *p;			/* NOTE(review): unused - candidate for removal */
+  i32 retval = 0;		/* registration path sets no error today */
+  vl_api_registration_t *reg;
+  u32 fib;
+
+  fib = ~0;			//Using same mechanism as _per_interface_
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  /* NOTE(review): mp->enable passed without byte-swap - presumably a u8
+   * in the API definition; confirm against stats.api */
+  handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
+
+/* label kept for symmetry with sibling handlers; no goto targets it here */
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+
+  /* client disconnected abruptly: drop its registration so the poller
+   * does not keep publishing to a dead queue */
+  if (!reg)
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
+						 fib, mp->client_index);
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
static void
vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
{
uword *p;
i32 retval = 0;
u32 item;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
item = ~0; //"ALL THE THINGS IN THE THINGS
rp.client_index = mp->client_index;
item, mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
return;
rmp = vl_msg_api_alloc (sizeof (*rmp));
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
uword *p;
i32 retval = 0;
u32 swif;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
swif = ~0; //Using same mechanism as _per_interface_
rp.client_index = mp->client_index;
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
+ reg = vl_api_client_index_to_registration (mp->client_index);
- if (!q)
+ if (!reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
vl_api_want_ip4_fib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 fib;
fib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
+ reg = vl_api_client_index_to_registration (mp->client_index);
- if (!q)
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
fib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_want_ip4_mfib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 mfib;
mfib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
mfib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_want_ip4_fib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 fib;
fib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
fib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_want_ip4_mfib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 mfib;
mfib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
mfib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
/* FIXME - NBR stats broken - this will be fixed in subsequent patch */
int i, which;
u64 total_pkts[VLIB_N_RX_TX];
u64 total_bytes[VLIB_N_RX_TX];
+ vl_api_registration_t *reg;
- svm_queue_t *q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
- {
- return;
- }
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
rmp = vl_msg_api_alloc (sizeof (*rmp));
rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
rmp->vector_rate =
clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
int