+ do_fibs = &sm->do_ip46_fibs;
+
+ vec_reset_length (do_fibs->mfibs);
+ /* *INDENT-OFF* */
+ pool_foreach (mfib, im6->mfibs, ({vec_add1(do_fibs->mfibs, mfib);}));
+ /* *INDENT-ON* */
+
+ for (j = 0; j < vec_len (do_fibs->mfibs); j++)
+ {
+ mfib = do_fibs->mfibs[j];
+ /* We may have bailed out due to control-plane activity */
+ while ((mfib - im6->mfibs) < start_at_mfib_index)
+ continue;
+
+ if (mp == 0)
+ {
+ items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) +
+ items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
+ mp->count = 0;
+ mp->vrf_id = ntohl (mfib->mft_table_id);
+ ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
+ }
+ else
+ {
+ /* happens if the last MFIB was empty... */
+ ASSERT (mp->count == 0);
+ mp->vrf_id = ntohl (mfib->mft_table_id);
+ }
+
+ vec_reset_length (do_fibs->mroutes);
+
+ /*
+ * walk the table with table updates blocked
+ */
+ dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+ mfib_table_walk (mfib->mft_index,
+ FIB_PROTOCOL_IP6, mfib_table_stats_walk_cb, sm);
+ dsunlock (sm);
+
+ vec_foreach (pfx, do_fibs->mroutes)
+ {
+ const dpo_id_t *dpo_id;
+ fib_node_index_t mfei;
+ vlib_counter_t c;
+ u32 index;
+
+ /*
+ * re-lookup the entry, since we suspend during the collection
+ */
+ mfei = mfib_table_lookup (mfib->mft_index, pfx);
+
+ if (FIB_NODE_INDEX_INVALID == mfei)
+ continue;
+
+ dpo_id = mfib_entry_contribute_ip_forwarding (mfei);
+ index = (u32) dpo_id->dpoi_index;
+
+ vlib_get_combined_counter (&replicate_main.repm_counters,
+ dpo_id->dpoi_index, &c);
+ /*
+ * If it has seen at least one packet, send it.
+ */
+ if (c.packets > 0)
+ {
+ /* already in net byte order */
+ memcpy (ctrp->group, &pfx->fp_grp_addr.ip6, 16);
+ memcpy (ctrp->source, &pfx->fp_src_addr.ip6, 16);
+ ctrp->group_length = pfx->fp_len;
+ ctrp->packets = clib_host_to_net_u64 (c.packets);
+ ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+ mp->count++;
+ ctrp++;
+
+ if (mp->count == items_this_message)
+ {
+ mp->count = htonl (items_this_message);
+ /*
+ * If the main thread's input queue is stuffed,
+ * drop the data structure lock (which the main thread
+ * may want), and take a pause.
+ */
+ svm_queue_lock (q);
+
+ while (svm_queue_is_full (q))
+ {
+ svm_queue_unlock (q);
+ ip46_fib_stats_delay (sm, 0 /* sec */ ,
+ STATS_RELEASE_DELAY_NS);
+ svm_queue_lock (q);
+ }
+ vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+ svm_queue_unlock (q);
+
+ items_this_message = IP6_MFIB_COUNTER_BATCH_SIZE;
+ mp = vl_msg_api_alloc_as_if_client
+ (sizeof (*mp) +
+ items_this_message * sizeof (vl_api_ip6_mfib_counter_t));
+ mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_MFIB_COUNTERS);
+ mp->count = 0;
+ mp->vrf_id = ntohl (mfib->mft_table_id);
+ ctrp = (vl_api_ip6_mfib_counter_t *) mp->c;
+ }
+ }
+ }
+
+ /* Flush any data from this mfib */
+ if (mp->count)
+ {
+ mp->count = htonl (mp->count);
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+ mp = 0;
+ }
+ }
+
+ /* If e.g. the last FIB had no reportable routes, free the buffer */
+ if (mp)
+ vl_msg_api_free (mp);
+}
+
+/* Argument block for the add_routes_in_fib bihash-walk callback */
+typedef struct
+{
+  u32 fib_index;		/* only routes in this FIB are collected */
+  ip6_route_t **routep;		/* vector to append matching routes to */
+  stats_main_t *sm;		/* stats main; checked for lock release hints */
+} add_routes_in_fib_arg_t;
+
+/*
+ * Bihash walk callback: collect ip6 routes belonging to the FIB index
+ * given in the argument block.
+ *
+ * NOTE(review): if the control plane has signalled for the
+ * data-structure lock (release_hint), we longjmp out of the walk
+ * entirely; the caller is expected to unlock, pause, and restart.
+ */
+static void
+add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
+{
+  add_routes_in_fib_arg_t *ap = arg;
+  stats_main_t *sm = ap->sm;
+
+  /* Bail out non-locally if the control plane wants the lock */
+  if (sm->data_structure_lock->release_hint)
+    clib_longjmp (&sm->jmp_buf, 1);
+
+  /* upper 32 bits of key[2] hold the fib index */
+  if (kvp->key[2] >> 32 == ap->fib_index)
+    {
+      ip6_address_t *addr;
+      ip6_route_t *r;
+      /* the kv pair begins with the ip6 address (key[0]/key[1]) */
+      addr = (ip6_address_t *) kvp;
+      vec_add2 (*ap->routep, r, 1);
+      r->address = addr[0];
+      /* low byte of key[2] is the prefix length */
+      r->address_length = kvp->key[2] & 0xFF;
+      r->index = kvp->value;
+    }
+}
+
+/*
+ * Push ip6 FIB combined counters to the main thread's API input queue,
+ * batched IP6_FIB_COUNTER_BATCH_SIZE routes per message.
+ *
+ * Route collection runs under the data-structure lock but bails out
+ * (longjmp from the bihash walk, or the release_hint check below)
+ * whenever the control plane wants the lock, then restarts from the
+ * FIB it was working on (start_at_fib_index).
+ */
+static void
+do_ip6_fib_counters (stats_main_t * sm)
+{
+  ip6_main_t *im6 = &ip6_main;
+  api_main_t *am = sm->api_main;
+  vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
+  svm_queue_t *q = shmem_hdr->vl_input_queue;
+  ip6_route_t *r;
+  fib_table_t *fib;
+  do_ip46_fibs_t *do_fibs;
+  vl_api_vnet_ip6_fib_counters_t *mp = 0;
+  u32 items_this_message;
+  vl_api_ip6_fib_counter_t *ctrp = 0;
+  u32 start_at_fib_index = 0;
+  BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
+  add_routes_in_fib_arg_t _a, *a = &_a;
+  int i;
+
+  do_fibs = &sm->do_ip46_fibs;
+again:
+  vec_reset_length (do_fibs->fibs);
+  /* *INDENT-OFF* */
+  pool_foreach (fib, im6->fibs,
+                ({vec_add1(do_fibs->fibs,fib);}));
+  /* *INDENT-ON* */
+
+  for (i = 0; i < vec_len (do_fibs->fibs); i++)
+    {
+      fib = do_fibs->fibs[i];
+      /*
+       * We may have bailed out due to control-plane activity: skip
+       * FIBs already reported.  This must be "if", not "while":
+       * "continue" inside a while loop re-tests the same unchanging
+       * condition and spins forever.
+       */
+      if ((fib - im6->fibs) < start_at_fib_index)
+        continue;
+
+      if (mp == 0)
+        {
+          items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
+          mp = vl_msg_api_alloc_as_if_client
+            (sizeof (*mp) +
+             items_this_message * sizeof (vl_api_ip6_fib_counter_t));
+          mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
+          mp->count = 0;
+          mp->vrf_id = ntohl (fib->ft_table_id);
+          ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+        }
+      else
+        {
+          /*
+           * happens if the last FIB was empty: reuse the buffer, but
+           * re-stamp the vrf so this FIB's counters aren't attributed
+           * to the previous table (matches the ip6 mfib variant).
+           */
+          ASSERT (mp->count == 0);
+          mp->vrf_id = ntohl (fib->ft_table_id);
+        }
+
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      vec_reset_length (do_fibs->ip6routes);
+      vec_reset_length (do_fibs->results);
+
+      a->fib_index = fib - im6->fibs;
+      a->routep = &do_fibs->ip6routes;
+      a->sm = sm;
+
+      if (clib_setjmp (&sm->jmp_buf, 0) == 0)
+        {
+          start_at_fib_index = fib - im6->fibs;
+          BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
+        }
+      else
+        {
+          /* the walk longjmp'd out: drop the lock, nap, restart */
+          dsunlock (sm);
+          ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                                STATS_RELEASE_DELAY_NS);
+          mp->count = 0;
+          ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+          goto again;
+        }
+
+      vec_foreach (r, do_fibs->ip6routes)
+      {
+        vlib_counter_t c;
+
+        vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
+                                   r->index, &c);
+        /*
+         * If it has actually
+         * seen at least one packet, send it.
+         */
+        if (c.packets > 0)
+          {
+            /* already in net byte order */
+            ctrp->address[0] = r->address.as_u64[0];
+            ctrp->address[1] = r->address.as_u64[1];
+            ctrp->address_length = (u8) r->address_length;
+            ctrp->packets = clib_host_to_net_u64 (c.packets);
+            ctrp->bytes = clib_host_to_net_u64 (c.bytes);
+            mp->count++;
+            ctrp++;
+
+            if (mp->count == items_this_message)
+              {
+                mp->count = htonl (items_this_message);
+                /*
+                 * If the main thread's input queue is stuffed,
+                 * drop the data structure lock (which the main thread
+                 * may want), and take a pause.
+                 */
+                svm_queue_lock (q);
+                if (svm_queue_is_full (q))
+                  {
+                    dsunlock (sm);
+                    vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+                    svm_queue_unlock (q);
+                    mp = 0;
+                    ip46_fib_stats_delay (sm, 0 /* sec */ ,
+                                          STATS_RELEASE_DELAY_NS);
+                    goto again;
+                  }
+                vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
+                svm_queue_unlock (q);
+
+                items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
+                mp = vl_msg_api_alloc_as_if_client
+                  (sizeof (*mp) +
+                   items_this_message * sizeof (vl_api_ip6_fib_counter_t));
+                mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
+                mp->count = 0;
+                mp->vrf_id = ntohl (fib->ft_table_id);
+                ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+              }
+          }
+
+        if (sm->data_structure_lock->release_hint)
+          {
+            /* control plane wants the lock: restart from this FIB */
+            start_at_fib_index = fib - im6->fibs;
+            dsunlock (sm);
+            ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
+            mp->count = 0;
+            ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
+            goto again;
+          }
+      }				/* vec_foreach (routes) */
+
+      dsunlock (sm);
+
+      /* Flush any data from this fib */
+      if (mp->count)
+        {
+          mp->count = htonl (mp->count);
+          vl_msg_api_send_shmem (q, (u8 *) & mp);
+          mp = 0;
+        }
+    }
+
+  /* If e.g. the last FIB had no reportable routes, free the buffer */
+  if (mp)
+    vl_msg_api_free (mp);
+}
+
+/* Walk context: vector of udp-encap counters accumulated during a walk */
+typedef struct udp_encap_stats_walk_t_
+{
+  vl_api_udp_encap_counter_t *stats;
+} udp_encap_stats_walk_t;
+
+/*
+ * Walk callback for udp-encap objects: append one counter record for
+ * the given encap to the context's stats vector.
+ */
+static walk_rc_t
+udp_encap_stats_walk_cb (index_t uei, void *arg)
+{
+  udp_encap_stats_walk_t *walk_ctx = arg;
+  vl_api_udp_encap_counter_t *ctr;
+  udp_encap_t *ue = udp_encap_get (uei);
+
+  /* grow the vector by one and fill in the new record */
+  vec_add2 (walk_ctx->stats, ctr, 1);
+  ctr->id = ue->ue_id;
+  udp_encap_get_stats (ue->ue_id, &ctr->packets, &ctr->bytes);
+
+  return (WALK_CONTINUE);
+}
+
+/*
+ * Ship the accumulated udp-encap counters to the main thread's API
+ * input queue, in batches of at most UDP_ENCAP_COUNTER_BATCH_SIZE.
+ * Consumes ctx->stats from the back as it goes.
+ */
+static void
+udp_encap_ship (udp_encap_stats_walk_t * ctx)
+{
+  vl_api_vnet_udp_encap_counters_t *mp;
+  vl_shmem_hdr_t *shmem_hdr;
+  stats_main_t *sm;
+  api_main_t *am;
+  svm_queue_t *q;
+
+  mp = NULL;
+  sm = &stats_main;
+  am = sm->api_main;
+  shmem_hdr = am->shmem_hdr;
+  q = shmem_hdr->vl_input_queue;
+
+  /*
+   * If the walk context has counters, which may be left over from the last
+   * suspend, then we continue from there.
+   */
+  while (0 != vec_len (ctx->stats))
+    {
+      u32 n_items = MIN (vec_len (ctx->stats),
+                         UDP_ENCAP_COUNTER_BATCH_SIZE);
+
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+                                          (n_items *
+                                           sizeof
+                                           (vl_api_udp_encap_counter_t)));
+      mp->_vl_msg_id = ntohs (VL_API_VNET_UDP_ENCAP_COUNTERS);
+      mp->count = ntohl (n_items);
+
+      /*
+       * copy the counters from the back of the context, then we can easily
+       * 'erase' them by resetting the vector length.
+       * The order we push the stats to the caller is not important.
+       */
+      clib_memcpy (mp->c,
+                   &ctx->stats[vec_len (ctx->stats) - n_items],
+                   n_items * sizeof (*ctx->stats));
+
+      _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
+
+      /*
+       * send to the shm q
+       * (NOTE(review): send_and_pause is assumed to drop the dslock
+       * taken above if the queue backs up -- confirm)
+       */
+      send_and_pause (sm, q, (u8 *) & mp);
+    }
+}
+
+/*
+ * Collect counters for all udp-encap objects under the data-structure
+ * lock, then ship them to the main thread without holding it.
+ */
+static void
+do_udp_encap_counters (stats_main_t * sm)
+{
+  udp_encap_stats_walk_t ctx = {
+    .stats = NULL,
+  };
+
+  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+  udp_encap_walk (udp_encap_stats_walk_cb, &ctx);
+  dsunlock (sm);
+
+  udp_encap_ship (&ctx);
+}
+
+/* Walk context: vector of BIER neighbor counters built during a walk */
+typedef struct bier_neighbor_stats_walk_t_
+{
+  vl_api_bier_neighbor_counter_t *stats;
+} bier_neighbor_stats_walk_t;
+
+/*
+ * Walk callback for the BIER fmask DB: append one counter record for
+ * the given fmask to the context's stats vector.
+ */
+static walk_rc_t
+bier_neighbor_stats_walk_cb (index_t bfmi, void *arg)
+{
+  bier_neighbor_stats_walk_t *walk_ctx = arg;
+  vl_api_bier_neighbor_counter_t *ctr;
+  fib_route_path_encode_t rpath;
+  bier_table_id_t btid;
+
+  /* resolve the fmask's table id and forwarding path first... */
+  bier_fmask_encode (bfmi, &btid, &rpath);
+
+  /* ...then fill in a fresh record at the back of the vector */
+  vec_add2 (walk_ctx->stats, ctr, 1);
+  ctr->tbl_id.bt_set = btid.bti_set;
+  ctr->tbl_id.bt_sub_domain = btid.bti_sub_domain;
+  ctr->tbl_id.bt_hdr_len_id = btid.bti_hdr_len;
+  fib_api_path_encode (&rpath, &ctr->path);
+  bier_fmask_get_stats (bfmi, &ctr->packets, &ctr->bytes);
+
+  return (WALK_CONTINUE);
+}
+
+/*
+ * Ship the accumulated BIER neighbor counters to the main thread's API
+ * input queue, in batches of at most BIER_NEIGHBOR_COUNTER_BATCH_SIZE.
+ * Consumes ctx->stats from the back as it goes.
+ */
+static void
+bier_neighbor_ship (bier_neighbor_stats_walk_t * ctx)
+{
+  vl_api_vnet_bier_neighbor_counters_t *mp;
+  vl_shmem_hdr_t *shmem_hdr;
+  stats_main_t *sm;
+  api_main_t *am;
+  svm_queue_t *q;
+
+  mp = NULL;
+  sm = &stats_main;
+  am = sm->api_main;
+  shmem_hdr = am->shmem_hdr;
+  q = shmem_hdr->vl_input_queue;
+
+  /*
+   * If the walk context has counters, which may be left over from the last
+   * suspend, then we continue from there.
+   */
+  while (0 != vec_len (ctx->stats))
+    {
+      u32 n_items = MIN (vec_len (ctx->stats),
+                         BIER_NEIGHBOR_COUNTER_BATCH_SIZE);
+
+      dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+
+      mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
+                                          (n_items *
+                                           sizeof
+                                           (vl_api_bier_neighbor_counter_t)));
+      mp->_vl_msg_id = ntohs (VL_API_VNET_BIER_NEIGHBOR_COUNTERS);
+      mp->count = ntohl (n_items);
+
+      /*
+       * copy the counters from the back of the context, then we can easily
+       * 'erase' them by resetting the vector length.
+       * The order we push the stats to the caller is not important.
+       */
+      clib_memcpy (mp->c,
+                   &ctx->stats[vec_len (ctx->stats) - n_items],
+                   n_items * sizeof (*ctx->stats));
+
+      _vec_len (ctx->stats) = vec_len (ctx->stats) - n_items;
+
+      /*
+       * send to the shm q
+       * (NOTE(review): send_and_pause is assumed to drop the dslock
+       * taken above if the queue backs up -- confirm)
+       */
+      send_and_pause (sm, q, (u8 *) & mp);
+    }
+}
+
+/*
+ * Collect counters for all BIER neighbors under the data-structure
+ * lock, then ship them to the main thread without holding it.
+ */
+static void
+do_bier_neighbor_counters (stats_main_t * sm)
+{
+  bier_neighbor_stats_walk_t ctx = {
+    .stats = NULL,
+  };
+
+  dslock (sm, 0 /* release hint */ , 1 /* tag */ );
+  bier_fmask_db_walk (bier_neighbor_stats_walk_cb, &ctx);
+  dsunlock (sm);
+
+  bier_neighbor_ship (&ctx);
+}
+
+/*
+ * Set the stats poller interval, in seconds.
+ * Returns 0 on success, VNET_API_ERROR_INVALID_ARGUMENT for a zero delay.
+ */
+int
+stats_set_poller_delay (u32 poller_delay_sec)
+{
+  stats_main_t *sm = &stats_main;
+
+  /* a zero interval is rejected */
+  if (poller_delay_sec == 0)
+    return VNET_API_ERROR_INVALID_ARGUMENT;
+
+  sm->stats_poll_interval_in_seconds = poller_delay_sec;
+  return 0;
+}
+
+/*
+ * Accept connection on the socket and exchange the fd for the shared
+ * memory segment.
+ */
+static clib_error_t *
+stats_socket_accept_ready (clib_file_t * uf)
+{
+  stats_main_t *sm = &stats_main;
+  clib_error_t *err;
+  clib_socket_t client = { 0 };
+
+  err = clib_socket_accept (sm->socket, &client);
+  if (err)
+    {
+      /* NOTE(review): clib_error_report may consume/free the error;
+       * returning it afterwards should be verified against clib
+       * error-ownership semantics. */
+      clib_error_report (err);
+      return err;
+    }
+
+  /* Hand the stats segment's memfd to the client, then drop the
+   * connection: the fd exchange is the whole protocol. */
+  err = clib_socket_sendmsg (&client, 0, 0, &sm->memfd, 1);
+  if (err)
+    clib_error_report (err);
+  clib_socket_close (&client);
+
+  return 0;
+}
+
+/*
+ * Create the stats segment listener socket and register it with the
+ * file poller; stats_socket_accept_ready handles incoming connections.
+ */
+static void
+stats_segment_socket_init (void)
+{
+  stats_main_t *sm = &stats_main;
+  clib_error_t *error;
+  clib_socket_t *s = clib_mem_alloc (sizeof (clib_socket_t));
+
+  /* clib_mem_alloc does not zero memory; clear the struct so the
+   * clib_socket_t fields we don't set below aren't garbage */
+  memset (s, 0, sizeof (*s));
+
+  s->config = (char *) sm->socket_name;
+  s->flags = CLIB_SOCKET_F_IS_SERVER | CLIB_SOCKET_F_SEQPACKET |
+    CLIB_SOCKET_F_ALLOW_GROUP_WRITE | CLIB_SOCKET_F_PASSCRED;
+  if ((error = clib_socket_init (s)))
+    {
+      clib_error_report (error);
+      /* don't leak the socket struct on failure */
+      clib_mem_free (s);
+      return;
+    }
+
+  clib_file_t template = { 0 };
+  clib_file_main_t *fm = &file_main;
+  template.read_function = stats_socket_accept_ready;
+  template.file_descriptor = s->fd;
+  /* describe the listener by the socket actually configured, not the
+   * compile-time default path */
+  template.description = format (0, "stats segment listener %s", s->config);
+  clib_file_add (fm, &template);
+
+  sm->socket = s;
+}
+
+/*
+ * Parse the "stats { ... }" startup configuration: polling interval
+ * and stats segment socket name.
+ */
+static clib_error_t *
+stats_config (vlib_main_t * vm, unformat_input_t * input)
+{
+  stats_main_t *sm = &stats_main;
+  u32 sec;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "interval %u", &sec))
+	{
+	  int rv = stats_set_poller_delay (sec);
+	  if (rv)
+	    return clib_error_return (0,
+				      "`stats_set_poller_delay' API call failed, rv=%d:%U",
+				      (int) rv, format_vnet_api_errno, rv);
+	}
+      else if (unformat (input, "socket-name %s", &sm->socket_name))
+	;
+      else if (unformat (input, "default"))
+	sm->socket_name = format (0, "%s", STAT_SEGMENT_SOCKET_FILE);
+      else
+	return clib_error_return (0, "unknown input '%U'",
+				  format_unformat_error, input);
+    }
+
+  /* a socket name (explicit or default) enables the segment listener */
+  if (sm->socket_name)
+    stats_segment_socket_init ();
+
+  return 0;
+}
+
+/* stats { ... } configuration. */
+/*?
+ *
+ * @cfgcmd{interval, <seconds>}
+ * Configure stats poller delay to be @c seconds.
+ *
+ * @cfgcmd{socket-name, <path>}
+ * Configure the stats segment socket path; the @c default keyword
+ * selects the built-in @c STAT_SEGMENT_SOCKET_FILE location.
+ *
+?*/
+VLIB_CONFIG_FUNCTION (stats_config, "stats");
+
+/*
+ * API handler: reply with the currently-configured stats poller delay.
+ */
+static void
+  vl_api_stats_get_poller_delay_t_handler
+  (vl_api_stats_get_poller_delay_t * mp)
+{
+  stats_main_t *sm = &stats_main;
+  vl_api_stats_get_poller_delay_reply_t *rmp;
+  vl_api_registration_t *reg;
+
+  /* drop the request if the client has disappeared */
+  reg = vl_api_client_index_to_registration (mp->client_index);
+  if (reg == 0)
+    return;
+
+  rmp = vl_msg_api_alloc (sizeof (*rmp));
+  rmp->_vl_msg_id = ntohs (VL_API_STATS_GET_POLLER_DELAY_REPLY);
+  rmp->context = mp->context;
+  rmp->retval = 0;
+  rmp->delay = clib_host_to_net_u32 (sm->stats_poll_interval_in_seconds);
+
+  vl_api_send_msg (reg, (u8 *) rmp);
+}
+
+static void
+stats_thread_fn (void *arg)
+{
+ stats_main_t *sm = &stats_main;
+ vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ /* stats thread wants no signals. */
+ {
+ sigset_t s;
+ sigfillset (&s);
+ pthread_sigmask (SIG_SETMASK, &s, 0);
+ }
+
+ clib_mem_set_heap (w->thread_mheap);