X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvpp%2Fstats%2Fstats.c;h=bf0bc50cafc46e2371f8e8bd4d7b62d7ccd693fc;hb=048a4e5a000017d0d632ebf02dcc23d9bf9ccf72;hp=ef9f61bbf607f228f49e3fb798091b51742c4136;hpb=919f377efefb9c97a5206923f41651da559ad3fa;p=vpp.git

diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c
index ef9f61bbf60..bf0bc50cafc 100644
--- a/src/vpp/stats/stats.c
+++ b/src/vpp/stats/stats.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <vnet/udp/udp_encap.h>

 #define STATS_DEBUG 0
@@ -47,23 +48,25 @@ stats_main_t stats_main;

 #define foreach_stats_msg                                               \
 _(WANT_STATS, want_stats)                                               \
 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters)       \
-_(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats) \
+_(WANT_INTERFACE_SIMPLE_STATS, want_interface_simple_stats)             \
 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters)   \
-_(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats) \
+_(WANT_INTERFACE_COMBINED_STATS, want_interface_combined_stats)         \
 _(WANT_PER_INTERFACE_COMBINED_STATS, want_per_interface_combined_stats) \
-_(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats) \
+_(WANT_PER_INTERFACE_SIMPLE_STATS, want_per_interface_simple_stats)     \
 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters)                         \
-_(WANT_IP4_FIB_STATS, want_ip4_fib_stats) \
+_(WANT_IP4_FIB_STATS, want_ip4_fib_stats)                               \
 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)                         \
-_(WANT_IP6_FIB_STATS, want_ip6_fib_stats) \
+_(WANT_IP6_FIB_STATS, want_ip6_fib_stats)                               \
 _(WANT_IP4_MFIB_STATS, want_ip4_mfib_stats)                             \
 _(WANT_IP6_MFIB_STATS, want_ip6_mfib_stats)                             \
 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters)                         \
-_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats) \
-_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) \
-_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats) \
-_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)
-
+_(WANT_IP4_NBR_STATS, want_ip4_nbr_stats)                               \
+_(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)                         \
+_(WANT_IP6_NBR_STATS, want_ip6_nbr_stats)                               \
+_(VNET_GET_SUMMARY_STATS, vnet_get_summary_stats)                       \
+_(STATS_GET_POLLER_DELAY, stats_get_poller_delay)                       \
+_(WANT_UDP_ENCAP_STATS, want_udp_encap_stats)                           \
+_(MAP_STATS_SEGMENT, map_stats_segment)

 #define vl_msg_name_crc_list
 #include
@@ -85,6 +88,7 @@ setup_message_id_table (api_main_t * am)
 #define IP6_FIB_COUNTER_BATCH_SIZE 30
 #define IP4_MFIB_COUNTER_BATCH_SIZE 24
 #define IP6_MFIB_COUNTER_BATCH_SIZE 15
+#define UDP_ENCAP_COUNTER_BATCH_SIZE (1024 / sizeof(vl_api_udp_encap_counter_t))

 /* 5ms */
 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
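Note (not part of the patch): UDP_ENCAP_COUNTER_BATCH_SIZE bounds the
variable-length tail of each VNET_UDP_ENCAP_COUNTERS message to roughly
1 KiB. A minimal sketch of the arithmetic, assuming the generated counter
record packs an id plus two u64 counters (the real type comes from the
.api definition):

    /* Assumed layout, packed per binary-API convention: 4 + 8 + 8 = 20 bytes */
    typedef struct __attribute__ ((packed))
    {
      u32 id;			/* udp encap object id */
      u64 packets;
      u64 bytes;
    } vl_api_udp_encap_counter_t;

    /* 1024 / 20 = 51 counter records per message */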
@@ -360,6 +364,9 @@ exit:
   return elts;
 }

+/*
+ * Return a copy of the clients list.
+ */
 vpe_client_registration_t *
 get_clients_for_stat (u32 reg, u32 item)
 {
@@ -378,10 +385,13 @@ get_clients_for_stat (u32 reg, u32 item)
   registration = pool_elt_at_index (sm->stats_registrations[reg], p[0]);

   vec_reset_length (clients);
-  pool_foreach (client, registration->clients, (
-						{
-						vec_add1 (clients, *client);}
-		));
+
+  /* *INDENT-OFF* */
+  pool_foreach (client, registration->clients,
+		({
+		  vec_add1 (clients, *client);}
+		));
+  /* *INDENT-ON* */

   return clients;
 }
@@ -605,6 +615,7 @@ static void
 	  reg_prev = reg;
 	}
     }
+  vec_free (clients);
 #if STATS_DEBUG > 0
   fformat (stdout, "%U\n", format_vnet_combined_counters, mp);
 #endif
@@ -845,18 +856,26 @@ do_combined_per_interface_counters (stats_main_t * sm)
 	  vp->sw_if_index = htonl (reg->item);

 	  im = &vnet_get_main ()->interface_main;
-	  cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
-	  vlib_get_combined_counter (cm, reg->item, &v);
-	  clib_mem_unaligned (&vp->rx_packets, u64) =
-	    clib_host_to_net_u64 (v.packets);
-	  clib_mem_unaligned (&vp->rx_bytes, u64) =
-	    clib_host_to_net_u64 (v.bytes);
-	  cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
-	  vlib_get_combined_counter (cm, reg->item, &v);
-	  clib_mem_unaligned (&vp->tx_packets, u64) =
-	    clib_host_to_net_u64 (v.packets);
-	  clib_mem_unaligned (&vp->tx_bytes, u64) =
-	    clib_host_to_net_u64 (v.bytes);
+
+#define _(X, x)                                          \
+	  cm = im->combined_sw_if_counters + X;          \
+	  vlib_get_combined_counter (cm, reg->item, &v); \
+	  clib_mem_unaligned (&vp->x##_packets, u64) =   \
+	    clib_host_to_net_u64 (v.packets);            \
+	  clib_mem_unaligned (&vp->x##_bytes, u64) =     \
+	    clib_host_to_net_u64 (v.bytes);
+
+
+	  _(VNET_INTERFACE_COUNTER_RX, rx);
+	  _(VNET_INTERFACE_COUNTER_TX, tx);
+	  _(VNET_INTERFACE_COUNTER_RX_UNICAST, rx_unicast);
+	  _(VNET_INTERFACE_COUNTER_TX_UNICAST, tx_unicast);
+	  _(VNET_INTERFACE_COUNTER_RX_MULTICAST, rx_multicast);
+	  _(VNET_INTERFACE_COUNTER_TX_MULTICAST, tx_multicast);
+	  _(VNET_INTERFACE_COUNTER_RX_BROADCAST, rx_broadcast);
+	  _(VNET_INTERFACE_COUNTER_TX_BROADCAST, tx_broadcast);
+
+#undef _

 	  vl_api_send_msg (vl_reg, (u8 *) mp);
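Note (not part of the patch): each _(X, x) invocation above generates the
same counter-copy sequence the removed lines spelled out by hand for RX
and TX. For example, _(VNET_INTERFACE_COUNTER_RX, rx) expands to:

    cm = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
    vlib_get_combined_counter (cm, reg->item, &v);
    clib_mem_unaligned (&vp->rx_packets, u64) =
      clib_host_to_net_u64 (v.packets);
    clib_mem_unaligned (&vp->rx_bytes, u64) =
      clib_host_to_net_u64 (v.bytes);

so coverage grows from two counters to all eight combined RX/TX counter
types with no further duplication.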
@@ -2068,6 +2087,182 @@ again:
   vl_msg_api_free (mp);
 }

+typedef struct udp_encap_stat_t_
+{
+  u32 ue_id;
+  u64 stats[2];
+} udp_encap_stat_t;
+
+typedef struct udp_encap_stats_walk_t_
+{
+  udp_encap_stat_t *stats;
+} udp_encap_stats_walk_t;
+
+static int
+udp_encap_stats_walk_cb (index_t uei, void *arg)
+{
+  udp_encap_stats_walk_t *ctx = arg;
+  udp_encap_stat_t *stat;
+  udp_encap_t *ue;
+
+  ue = udp_encap_get (uei);
+  vec_add2 (ctx->stats, stat, 1);
+
+  stat->ue_id = uei;
+  udp_encap_get_stats (ue->ue_id, &stat->stats[0], &stat->stats[1]);
+
+  return (1);
+}
+
+static void
+udp_encap_ship (udp_encap_stats_walk_t * ctx)
+{
+  vl_api_vnet_udp_encap_counters_t *mp;
+  vl_shmem_hdr_t *shmem_hdr;
+  stats_main_t *sm;
+  api_main_t *am;
+  svm_queue_t *q;
+
+  mp = NULL;
+  sm = &stats_main;
+  am = sm->api_main;
+  shmem_hdr = am->shmem_hdr;
+  q = shmem_hdr->vl_input_queue;
+
+  /*
+   * If the walk context has counters, which may be left over from the last
+   * suspend, then we continue from there.
+   */
+  while (0 != vec_len (ctx->stats))
+    {
+      u32 n_items = MIN (vec_len (ctx->stats),
+			 UDP_ENCAP_COUNTER_BATCH_SIZE);
+      u8 pause = 0;
+
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+					  (n_items *
+					   sizeof
+					   (vl_api_udp_encap_counter_t)));
+      mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
+      mp->count = ntohl (n_items);
+
+      /*
+       * copy the counters from the back of the context, then we can easily
+       * 'erase' them by resetting the vector length.
+       * The order we push the stats to the caller is not important.
+       */
+      clib_memcpy (mp->c,
+		   &ctx->stats[vec_len (ctx->stats) - n_items],
+		   n_items * sizeof (*ctx->stats));
+
+      _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
+
+      /*
+       * send to the shm q
+       */
+      svm_queue_lock (q);
+      pause = svm_queue_is_full (q);
+
+      vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+      svm_queue_unlock (q);
+      dsunlock (sm);
+
+      if (pause)
+	ip46_fib_stats_delay (sm, 0 /* sec */ ,
+			      STATS_RELEASE_DELAY_NS);
+    }
+}
+
+static void
+do_udp_encap_counters (stats_main_t * sm)
+{
+  udp_encap_stat_t *stat;
+
+  udp_encap_stats_walk_t ctx = {
+    .stats = NULL,
+  };
+
+  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+  udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
+  dsunlock (sm);
+
+  udp_encap_ship (&ctx);
+}
+
+int
+stats_set_poller_delay (u32 poller_delay_sec)
+{
+  stats_main_t *sm = &stats_main;
+  if (!poller_delay_sec)
+    {
+      return VNET_API_ERROR_INVALID_ARGUMENT;
+    }
+  else
+    {
+      sm->stats_poll_interval_in_seconds = poller_delay_sec;
+      return 0;
+    }
+}
+
+static clib_error_t *
+stats_config (vlib_main_t * vm, unformat_input_t * input)
+{
+  u32 sec;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "interval %u", &sec))
+	{
+	  int rv = stats_set_poller_delay (sec);
+	  if (rv)
+	    {
+	      return clib_error_return (0,
+					"`stats_set_poller_delay' API call failed, rv=%d:%U",
+					(int) rv, format_vnet_api_errno, rv);
+	    }
+	  return 0;
+	}
+      else
+	{
+	  return clib_error_return (0, "unknown input '%U'",
+				    format_unformat_error, input);
+	}
+    }
+  return 0;
+}
+
+/* stats { ... } configuration. */
+/*?
+ *
+ * @cfgcmd{interval, <seconds>}
+ * Configure stats poller delay to be @c seconds.
+ *
+?*/
+VLIB_CONFIG_FUNCTION (stats_config, "stats");
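Note (not part of the patch): with stats_config registered via
VLIB_CONFIG_FUNCTION, the poll interval is now settable from the startup
configuration instead of being hard-coded; for example:

    stats {
      interval 30
    }

A zero interval is rejected by stats_set_poller_delay with
VNET_API_ERROR_INVALID_ARGUMENT; when the stanza is absent,
sm->stats_poll_interval_in_seconds presumably keeps its initialized
default (the former hard-coded value was 10 seconds, per the
stats_thread_fn hunk below).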
+
+static void
+  vl_api_stats_get_poller_delay_t_handler
+  (vl_api_stats_get_poller_delay_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vl_api_registration_t *reg;
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (!reg)
+    return;
+  vl_api_stats_get_poller_delay_reply_t *rmp;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = 0;
+  rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
 static void
 stats_thread_fn (void *arg)
 {
@@ -2090,8 +2285,8 @@ stats_thread_fn (void *arg)

   while (1)
     {
-      /* 10 second poll interval */
-      ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
+      ip46_fib_stats_delay (sm, sm->stats_poll_interval_in_seconds,
+			    0 /* nsec */ );

       if (!(sm->enable_poller))
	{
@@ -2122,6 +2317,9 @@ stats_thread_fn (void *arg)

       if (pool_elts (sm->stats_registrations[IDX_IP6_NBR_COUNTERS]))
	do_ip6_nbr_counters (sm);
+
+      if (pool_elts (sm->stats_registrations[IDX_UDP_ENCAP_COUNTERS]))
+	do_udp_encap_counters (sm);
     }
 }
@@ -2165,6 +2363,7 @@ static void
	  continue;
	}
     }
+  vec_free (clients);

 #if STATS_DEBUG > 0
   fformat (stdout, "%U\n", format_vnet_simple_counters, mp);
@@ -2218,6 +2417,7 @@ vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
	  continue;
	}
     }
+  vec_free (clients);

   if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
@@ -2267,6 +2467,7 @@ vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
	  continue;
	}
     }
+  vec_free (clients);
   /* *INDENT-ON* */

   if (reg_prev && vl_api_can_send_msg (reg_prev))
@@ -2317,6 +2518,8 @@ vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
	  continue;
	}
     }
+  vec_free (clients);
+
   /* *INDENT-ON* */

   if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
@@ -2366,6 +2569,8 @@ vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
	  continue;
	}
     }
+  vec_free (clients);
+
   /* *INDENT-ON* */

   if (reg_prev && vl_api_can_send_msg (reg_prev))
     {
@@ -2377,6 +2582,41 @@ vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
     }
 }

+static void
+vl_api_want_udp_encap_stats_t_handler (vl_api_want_udp_encap_stats_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vpe_client_registration_t rp;
+  vl_api_want_udp_encap_stats_reply_t *rmp;
+  uword *p;
+  i32 retval = 0;
+  vl_api_registration_t *reg;
+  u32 fib;
+
+  fib = ~0;			/* Use the same mechanism as _per_interface_ */
+  rp.client_index = mp->client_index;
+  rp.client_pid = mp->pid;
+
+  handle_client_registration (&rp, IDX_UDP_ENCAP_COUNTERS, fib, mp->enable);
+
+reply:
+  reg = vl_api_client_index_to_registration (mp->client_index);
+
+  if (!reg)
+    {
+      sm->enable_poller = clear_client_for_stat (IDX_UDP_ENCAP_COUNTERS,
+						 fib, mp->client_index);
+      return;
+    }
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = retval;
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
 static void
 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
 {
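Note (not part of the patch): a hypothetical client-side sketch of
subscribing to these counters over the shared-memory binary API, using
only the message fields the handler above reads (enable, pid) and the
standard vlibmemory allocation/send helpers; names and conventions are
assumptions, not taken from this patch:

    #include <unistd.h>		/* getpid */

    vl_api_want_udp_encap_stats_t *mp;
    api_main_t *am = &api_main;

    mp = vl_msg_api_alloc (sizeof (*mp));
    memset (mp, 0, sizeof (*mp));
    mp->_vl_msg_id = ntohs (VL_API_WANT_UDP_ENCAP_STATS);
    mp->client_index = am->my_client_index;
    mp->context = 0;
    mp->pid = getpid ();
    mp->enable = 1;		/* 0 unsubscribes */
    vl_msg_api_send_shmem (am->shmem_hdr->vl_input_queue, (u8 *) & mp);

Once registered, the stats thread ships VNET_UDP_ENCAP_COUNTERS messages
at each poll interval (see do_udp_encap_counters above).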
@@ -2695,6 +2935,50 @@ stats_memclnt_delete_callback (u32 client_index)
 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
+#define vl_api_map_stats_segment_t_print vl_noop_handler
+
+static void
+vl_api_map_stats_segment_t_handler (vl_api_map_stats_segment_t * mp)
+{
+  vl_api_map_stats_segment_reply_t *rmp;
+  stats_main_t *sm = &stats_main;
+  ssvm_private_t *ssvmp = &sm->stat_segment;
+  vl_api_registration_t *regp;
+  api_main_t *am = &api_main;
+  clib_file_t *cf;
+  vl_api_shm_elem_config_t *config = 0;
+  vl_shmem_hdr_t *shmem_hdr;
+  int rv = 0;
+
+  regp = vl_api_client_index_to_registration (mp->client_index);
+  if (regp == 0)
+    {
+      clib_warning ("API client disconnected");
+      return;
+    }
+  if (regp->registration_type != REGISTRATION_TYPE_SOCKET_SERVER)
+    rv = VNET_API_ERROR_INVALID_REGISTRATION;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = htons (VL_API_MAP_STATS_SEGMENT_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = htonl (rv);
+
+  vl_api_send_msg (regp, (u8 *) rmp);
+
+  if (rv != 0)
+    return;
+
+  /*
+   * We need the reply message to make it out the back door
+   * before we send the magic fd message, so force a flush.
+   */
+  cf = vl_api_registration_file (regp);
+  cf->write_function (cf);
+
+  /* Send the magic "here's your sign (aka fd)" socket message */
+  vl_sock_api_send_fd_msg (cf->file_descriptor, ssvmp->fd);
+}

 static clib_error_t *
 stats_init (vlib_main_t * vm)