vl_api_want_interface_combined_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 swif;
swif = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS, swif,
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
{
vpe_client_registration_t *clients, client;
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
u32 mp_size;
int i;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
}
#if STATS_DEBUG > 0
fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
#endif
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vlib_combined_counter_main_t *cm;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
int i;
u32 swif;
}
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
for (i = 0; i < mp->num; i++)
{
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
/* Per Interface Combined distribution to client */
vnet_interface_main_t *im = sm->interface_main;
api_main_t *am = sm->api_main;
vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = NULL;
+ vl_api_registration_t *vl_reg;
vlib_combined_counter_main_t *cm;
/*
* items_this_message will eventually be used to optimise the batching
timestamp = vlib_time_now (sm->vlib_main);
vec_reset_length (sm->regs_tmp);
+
+ /* *INDENT-OFF* */
pool_foreach (reg,
- sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
- (
- {
- vec_add1 (sm->regs_tmp, reg);}));
+ sm->stats_registrations[IDX_PER_INTERFACE_COMBINED_COUNTERS],
+ ({
+ vec_add1 (sm->regs_tmp, reg);
+ }));
+ /* *INDENT-ON* */
for (i = 0; i < vec_len (sm->regs_tmp); i++)
{
continue;
}
vec_reset_length (sm->clients_tmp);
- pool_foreach (client, reg->clients, (
- {
- vec_add1 (sm->clients_tmp,
- client);}
- ));
+
+ /* *INDENT-OFF* */
+ pool_foreach (client, reg->clients, ({
+ vec_add1 (sm->clients_tmp, client);
+ }));
+ /* *INDENT-ON* */
//FIXME - should be doing non-variant part of mp here and managing
// any alloc per client in that vec_foreach
for (j = 0; j < vec_len (sm->clients_tmp); j++)
{
client = sm->clients_tmp[j];
- q = vl_api_client_index_to_input_queue (client->client_index);
+
+ vl_reg = vl_api_client_index_to_registration (client->client_index);
//Client may have disconnected abrubtly, clean up so we don't poll nothing.
- if (!q)
+ if (!vl_reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_COMBINED_COUNTERS,
clib_mem_unaligned (&vp->tx_bytes, u64) =
clib_host_to_net_u64 (v.bytes);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
+ vl_api_send_msg (vl_reg, (u8 *) mp);
}
}
vlib_simple_counter_main_t *cm;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
int i;
u32 swif;
}
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
+ reg = vl_api_client_index_to_registration (mp->client_index);
- //Client may have disconnected abrubtly, clean up so we don't poll nothing.
- if (!q)
+ /* Client may have disconnected abruptly, clean up */
+ if (!reg)
{
for (i = 0; i < mp->num; i++)
{
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
/* Per Interface Simple distribution to client */
vnet_interface_main_t *im = sm->interface_main;
api_main_t *am = sm->api_main;
vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
- svm_queue_t *q = NULL;
+ vl_api_registration_t *vl_reg;
vlib_simple_counter_main_t *cm;
/*
* items_this_message will eventually be used to optimise the batching
timestamp = vlib_time_now (sm->vlib_main);
vec_reset_length (sm->regs_tmp);
+
+ /* *INDENT-OFF* */
pool_foreach (reg,
- sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], (
- {
- vec_add1
- (sm->regs_tmp,
- reg);}));
+ sm->stats_registrations[IDX_PER_INTERFACE_SIMPLE_COUNTERS], ({
+ vec_add1 (sm->regs_tmp, reg);
+ }));
+ /* *INDENT-ON* */
for (i = 0; i < vec_len (sm->regs_tmp); i++)
{
continue;
}
vec_reset_length (sm->clients_tmp);
- pool_foreach (client, reg->clients, (
- {
- vec_add1 (sm->clients_tmp,
- client);}
- ));
+
+ /* *INDENT-OFF* */
+ pool_foreach (client, reg->clients, ({
+ vec_add1 (sm->clients_tmp, client);
+ }));
+ /* *INDENT-ON* */
//FIXME - should be doing non-variant part of mp here and managing
// any alloc per client in that vec_foreach
for (j = 0; j < vec_len (sm->clients_tmp); j++)
{
client = sm->clients_tmp[j];
- q = vl_api_client_index_to_input_queue (client->client_index);
+ vl_reg = vl_api_client_index_to_registration (client->client_index);
- //Client may have disconnected abrubtly, clean up so we don't poll nothing.
- if (!q)
+ /* Client may have disconnected abruptly, clean up */
+ if (!vl_reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS,
v = vlib_get_simple_counter (cm, reg->item);
clib_mem_unaligned (&vp->rx_mpls, u64) = clib_host_to_net_u64 (v);
- vl_msg_api_send_shmem (q, (u8 *) & mp);
+ vl_api_send_msg (vl_reg, (u8 *) mp);
}
}
{
vpe_client_registration_t *clients, client;
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
u32 mp_size;
int i;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
#endif
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
}
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
/* *INDENT-ON* */
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
}
/* *INDENT-ON* */
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
{
stats_main_t *sm = &stats_main;
- svm_queue_t *q, *q_prev = NULL;
+ vl_api_registration_t *reg, *reg_prev = NULL;
vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
u32 mp_size;
vpe_client_registration_t *clients, client;
for (i = 0; i < vec_len (clients); i++)
{
client = clients[i];
- q = vl_api_client_index_to_input_queue (client.client_index);
- if (q)
+ reg = vl_api_client_index_to_registration (client.client_index);
+ if (reg)
{
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
mp_copy = vl_msg_api_alloc_as_if_client (mp_size);
clib_memcpy (mp_copy, mp, mp_size);
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
mp = mp_copy;
}
- q_prev = q;
+ reg_prev = reg;
}
else
{
}
}
/* *INDENT-ON* */
- if (q_prev && (q_prev->cursize < q_prev->maxsize))
+ if (reg_prev && vl_api_can_send_msg (reg_prev))
{
- vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
+ vl_api_send_msg (reg_prev, (u8 *) mp);
}
else
{
uword *p;
i32 retval = 0;
u32 item;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
item = ~0; //"ALL THE THINGS IN THE THINGS
rp.client_index = mp->client_index;
item, mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
return;
rmp = vl_msg_api_alloc (sizeof (*rmp));
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
uword *p;
i32 retval = 0;
u32 swif;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
swif = ~0; //Using same mechanism as _per_interface_
rp.client_index = mp->client_index;
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
+ reg = vl_api_client_index_to_registration (mp->client_index);
- if (!q)
+ if (!reg)
{
sm->enable_poller =
clear_client_for_stat (IDX_PER_INTERFACE_SIMPLE_COUNTERS, swif,
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
vl_api_want_ip4_fib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 fib;
fib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
+ reg = vl_api_client_index_to_registration (mp->client_index);
- if (!q)
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP4_FIB_COUNTERS,
fib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_want_ip4_mfib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 mfib;
mfib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP4_MFIB_COUNTERS,
mfib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_want_ip4_fib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 fib;
fib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP6_FIB_COUNTERS,
fib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
static void
vl_api_want_ip4_mfib_stats_reply_t *rmp;
uword *p;
i32 retval = 0;
- svm_queue_t *q;
+ vl_api_registration_t *reg;
u32 mfib;
mfib = ~0; //Using same mechanism as _per_interface_
mp->enable_disable);
reply:
- q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
{
sm->enable_poller = clear_client_for_stat (IDX_IP6_MFIB_COUNTERS,
mfib, mp->client_index);
rmp->context = mp->context;
rmp->retval = retval;
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
/* FIXME - NBR stats broken - this will be fixed in subsequent patch */
int i, which;
u64 total_pkts[VLIB_N_RX_TX];
u64 total_bytes[VLIB_N_RX_TX];
+ vl_api_registration_t *reg;
- svm_queue_t *q = vl_api_client_index_to_input_queue (mp->client_index);
-
- if (!q)
- {
- return;
- }
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
rmp = vl_msg_api_alloc (sizeof (*rmp));
rmp->_vl_msg_id = ntohs (VL_API_VNET_GET_SUMMARY_STATS_REPLY);
rmp->vector_rate =
clib_host_to_net_u64 (vlib_last_vector_length_per_node (sm->vlib_main));
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ vl_api_send_msg (reg, (u8 *) rmp);
}
int