2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <stats/stats.h>
17 #include <vlib/threads.h>
/* Global singleton holding all stats-collection state for this file
   (lock, registrations, pointers into vnet/api mains). */
21 stats_main_t stats_main;
23 #include <vnet/ip/ip.h>
25 #include <api/vpe_msg_enum.h>
28 #define f64_print(a,b)
30 #define vl_typedefs /* define message structures */
31 #include <api/vpe_all_api_h.h>
34 #define vl_endianfun /* define message structures */
35 #include <api/vpe_all_api_h.h>
38 /* instantiate all the print functions we know about */
39 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
41 #include <api/vpe_all_api_h.h>
/* X-macro list of (MESSAGE_ENUM_SUFFIX, handler_suffix) pairs; expanded
   in stats_init to register each vl_api_*_t_handler with the API layer. */
44 #define foreach_stats_msg \
45 _(WANT_STATS, want_stats) \
46 _(WANT_STATS_REPLY, want_stats_reply) \
47 _(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
48 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
49 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)
51 /* These constants ensure msg sizes <= 1024, aka ring allocation */
52 #define SIMPLE_COUNTER_BATCH_SIZE 126
53 #define COMBINED_COUNTER_BATCH_SIZE 63
54 #define IP4_FIB_COUNTER_BATCH_SIZE 48
55 #define IP6_FIB_COUNTER_BATCH_SIZE 30
/* Pause (5 ms, in nanoseconds) used when yielding the data-structure
   lock or backing off from a full shared-memory queue. */
58 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/* Acquire the stats data-structure spinlock.
   release_hint: nonzero asks the current holder to yield soon.
   tag: caller identifier recorded for debugging.
   NOTE(review): several interior lines (early return, recursion count,
   release_hint bump, tag store) appear elided in this excerpt. */
61 void dslock (stats_main_t *sm, int release_hint, int tag)
64 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock object not allocated yet (early startup): nothing to take. */
66 if(PREDICT_FALSE(l == 0))
69 thread_id = os_get_cpu_number();
/* Same thread already holds the lock: recursive acquisition path. */
70 if (l->lock && l->thread_id == thread_id) {
/* Spin until the atomic test-and-set wins the lock. */
78 while (__sync_lock_test_and_set (&l->lock, 1))
/* Record the new owner. */
81 l->thread_id = thread_id;
/* Release the stats data-structure lock taken by dslock.
   NOTE(review): interior lines (recursion-count decrement, clearing
   l->lock after the barrier) appear elided in this excerpt. */
85 void dsunlock (stats_main_t *sm)
88 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock object not allocated: nothing to release. */
90 if(PREDICT_FALSE(l == 0))
93 thread_id = os_get_cpu_number();
/* Only the thread that owns the lock may unlock it. */
94 ASSERT (l->lock && l->thread_id == thread_id);
/* Make all prior stores visible before the lock word is cleared. */
99 CLIB_MEMORY_BARRIER();
/* Batch all simple (single-u64) per-interface counters into
   VNET_INTERFACE_COUNTERS messages and post them to the main thread's
   shared-memory input queue. Batch size keeps each message <= 1024 B.
   NOTE(review): interior lines (loop braces, vp advance, count++) are
   elided in this excerpt. */
104 static void do_simple_interface_counters (stats_main_t * sm)
106 vl_api_vnet_interface_counters_t * mp = 0;
107 vnet_interface_main_t * im = sm->interface_main;
108 api_main_t * am = sm->api_main;
109 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
110 unix_shared_memory_queue_t * q = shmem_hdr->vl_input_queue;
111 vlib_simple_counter_main_t * cm;
112 u32 items_this_message = 0;
117 * Prevent interface registration from expanding / moving the vectors...
118 * That tends never to happen, so we can hold this lock for a while.
120 vnet_interface_counter_lock (im);
/* One pass per simple-counter type (drop, punt, ip4, ...). */
122 vec_foreach (cm, im->sw_if_counters) {
124 for (i = 0; i < vec_len (cm->maxi); i++) {
/* Start a new message whenever the previous one was sent/none exists. */
126 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
127 vec_len (cm->maxi) - i);
129 mp = vl_msg_api_alloc_as_if_client
130 (sizeof (*mp) + items_this_message * sizeof (v));
131 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
/* Counter type is the index of cm within the sw_if_counters vector. */
132 mp->vnet_counter_type = cm - im->sw_if_counters;
134 mp->first_sw_if_index = htonl (i);
136 vp = (u64 *) mp->data;
/* Counter values are serialized in network byte order. */
138 v = vlib_get_simple_counter (cm, i);
139 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
142 if (mp->count == items_this_message) {
143 mp->count = htonl (items_this_message);
144 /* Send to the main thread... */
145 vl_msg_api_send_shmem (q, (u8 *)&mp);
151 vnet_interface_counter_unlock (im);
/* Batch all combined (packets+bytes) per-interface counters into
   VNET_INTERFACE_COUNTERS messages, mirroring
   do_simple_interface_counters but with vlib_counter_t payload items.
   NOTE(review): interior lines (loop braces, vp advance, count++,
   is_combined flag set) are elided in this excerpt. */
154 static void do_combined_interface_counters (stats_main_t * sm)
156 vl_api_vnet_interface_counters_t * mp = 0;
157 vnet_interface_main_t * im = sm->interface_main;
158 api_main_t * am = sm->api_main;
159 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
160 unix_shared_memory_queue_t * q = shmem_hdr->vl_input_queue;
161 vlib_combined_counter_main_t * cm;
162 u32 items_this_message = 0;
163 vlib_counter_t v, *vp = 0;
/* Hold off interface registration while we walk the counter vectors. */
166 vnet_interface_counter_lock (im);
168 vec_foreach (cm, im->combined_sw_if_counters) {
170 for (i = 0; i < vec_len (cm->maxi); i++) {
172 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
173 vec_len (cm->maxi) - i);
175 mp = vl_msg_api_alloc_as_if_client
176 (sizeof (*mp) + items_this_message * sizeof (v));
177 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
178 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
180 mp->first_sw_if_index = htonl (i);
182 vp = (vlib_counter_t *)mp->data;
/* packets and bytes serialized separately, both network byte order. */
184 vlib_get_combined_counter (cm, i, &v);
185 clib_mem_unaligned (&vp->packets, u64)
186 = clib_host_to_net_u64 (v.packets);
187 clib_mem_unaligned (&vp->bytes, u64)
188 = clib_host_to_net_u64 (v.bytes);
191 if (mp->count == items_this_message) {
192 mp->count = htonl (items_this_message);
193 /* Send to the main thread... */
194 vl_msg_api_send_shmem (q, (u8 *)&mp);
200 vnet_interface_counter_unlock (im);
203 /* from .../vnet/vnet/ip/lookup.c. Yuck */
/* Packed local mirror of the ip4 route record used while snapshotting
   FIB entries. NOTE(review): trailing members (index, ...) and the
   closing "}) ip4_route_t;" are elided in this excerpt. */
204 typedef CLIB_PACKED (struct {
205 ip4_address_t address;
207 u32 address_length : 6;
/* Sleep for sec seconds plus nsec nanoseconds; warns via
   clib_unix_warning if nanosleep fails.
   NOTE(review): req initialization and the EINTR retry/exchange loop
   appear elided in this excerpt. */
212 static void ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
214 struct timespec _req, *req = &_req;
215 struct timespec _rem, *rem = &_rem;
220 if (nanosleep (req, rem) == 0)
225 clib_unix_warning ("nanosleep");
/* Walk every IP4 FIB, snapshot its routes under the data-structure
   lock, sum each route's adjacency counters, and stream
   VNET_IP4_FIB_COUNTERS messages (IP4_FIB_COUNTER_BATCH_SIZE entries
   each) to the main thread. Yields the lock and pauses whenever the
   main thread sets release_hint or its input queue is full, then
   restarts from the interrupted FIB.
   NOTE(review): many interior lines (loop/brace closers, "again:"
   style restart targets, count increments, dsunlock calls) are elided
   in this excerpt; comments below are hedged accordingly. */
230 static void do_ip4_fibs (stats_main_t * sm)
232 ip4_main_t * im4 = &ip4_main;
233 api_main_t * am = sm->api_main;
234 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
235 unix_shared_memory_queue_t * q = shmem_hdr->vl_input_queue;
/* static: snapshot vectors persist across calls to avoid realloc churn. */
236 static ip4_route_t * routes;
239 ip_lookup_main_t * lm = &im4->lookup_main;
240 static uword * results;
241 vl_api_vnet_ip4_fib_counters_t * mp = 0;
242 u32 items_this_message;
243 vl_api_ip4_fib_counter_t *ctrp = 0;
244 u32 start_at_fib_index = 0;
248 vec_foreach (fib, im4->fibs) {
249 /* We may have bailed out due to control-plane activity */
250 while ((fib - im4->fibs) < start_at_fib_index)
/* Allocate the first batch message for this FIB. */
254 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
255 mp = vl_msg_api_alloc_as_if_client
257 items_this_message*sizeof(vl_api_ip4_fib_counter_t));
258 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
260 mp->vrf_id = ntohl(fib->table_id);
261 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
263 /* happens if the last FIB was empty... */
264 ASSERT(mp->count == 0);
265 mp->vrf_id = ntohl(fib->table_id);
/* Freeze the FIB data structures while snapshotting routes. */
268 dslock (sm, 0 /* release hint */, 1 /* tag */);
270 vec_reset_length (routes);
271 vec_reset_length (results);
/* One mtrie hash table per prefix length. */
273 for (i = 0; i < ARRAY_LEN (fib->adj_index_by_dst_address); i++) {
274 uword * hash = fib->adj_index_by_dst_address[i];
278 x.address_length = i;
280 hash_foreach_pair (p, hash,
282 x.address.data_u32 = p->key;
/* Multi-word lookup results are copied aside; x.index points there. */
283 if (lm->fib_result_n_words > 1) {
284 x.index = vec_len (results);
285 vec_add (results, p->value, lm->fib_result_n_words);
288 x.index = p->value[0];
290 vec_add1 (routes, x);
/* Control plane wants the lock: remember where we were and back off. */
291 if (sm->data_structure_lock->release_hint) {
292 start_at_fib_index = fib - im4->fibs;
294 ip46_fib_stats_delay (sm, 0 /* sec */,
295 STATS_RELEASE_DELAY_NS);
/* Restart filling the (reused) message from the top. */
297 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
/* Second pass: resolve each snapshotted route's counters. */
303 vec_foreach (r, routes) {
304 vlib_counter_t c, sum;
305 uword i, j, n_left, n_nhs, adj_index, * result = 0;
306 ip_adjacency_t * adj;
307 ip_multipath_next_hop_t * nhs, tmp_nhs[1];
309 adj_index = r->index;
310 if (lm->fib_result_n_words > 1) {
311 result = vec_elt_at_index (results, adj_index);
312 adj_index = result[0];
315 adj = ip_get_adjacency (lm, adj_index);
/* Single adjacency: fake a one-element next-hop list. */
316 if (adj->n_adj == 1) {
318 nhs[0].next_hop_adj_index = ~0; /* not used */
/* Multipath adjacency: use its normalized next-hop heap block. */
322 ip_multipath_adjacency_t * madj;
323 madj = vec_elt_at_index (lm->multipath_adjacencies,
325 nhs = heap_elt_at_index
327 madj->normalized_next_hops.heap_offset);
328 n_nhs = madj->normalized_next_hops.count;
/* Sum counters across each next-hop's weighted share of adjacencies. */
331 n_left = nhs[0].weight;
332 vlib_counter_zero (&sum);
333 for (i = j = 0; i < adj->n_adj; i++) {
335 vlib_get_combined_counter (&lm->adjacency_counters,
337 vlib_counter_add (&sum, &c);
339 * If we're done with this adj and it has actually
340 * seen at least one packet, send it.
342 if (n_left == 0 && sum.packets > 0) {
344 /* already in net byte order */
345 ctrp->address = r->address.as_u32;
346 ctrp->address_length = r->address_length;
347 ctrp->packets = clib_host_to_net_u64 (sum.packets);
348 ctrp->bytes = clib_host_to_net_u64(sum.bytes);
/* Batch full: ship it, with backpressure if the queue is stuffed. */
352 if (mp->count == items_this_message) {
353 mp->count = htonl (items_this_message);
355 * If the main thread's input queue is stuffed,
356 * drop the data structure lock (which the main thread
357 * may want), and take a pause.
359 unix_shared_memory_queue_lock (q);
360 if (unix_shared_memory_queue_is_full (q)) {
362 vl_msg_api_send_shmem_nolock (q, (u8 *)&mp);
363 unix_shared_memory_queue_unlock (q);
365 ip46_fib_stats_delay (sm, 0 /* sec */,
366 STATS_RELEASE_DELAY_NS);
369 vl_msg_api_send_shmem_nolock (q, (u8 *)&mp);
370 unix_shared_memory_queue_unlock (q);
/* Start the next batch message. */
372 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
373 mp = vl_msg_api_alloc_as_if_client
376 sizeof(vl_api_ip4_fib_counter_t));
377 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
379 mp->vrf_id = ntohl(fib->table_id);
380 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
/* Advance to the next next-hop's weighted run of adjacencies. */
385 n_left = nhs[j].weight;
386 vlib_counter_zero (&sum);
389 } /* for each (mp or single) adj */
390 if (sm->data_structure_lock->release_hint) {
391 start_at_fib_index = fib - im4->fibs;
393 ip46_fib_stats_delay (sm, 0 /* sec */, STATS_RELEASE_DELAY_NS);
395 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
398 } /* vec_foreach (routes) */
402 /* Flush any data from this fib */
404 mp->count = htonl (mp->count);
405 vl_msg_api_send_shmem (q, (u8 *)&mp);
408 } /* vec_foreach (fib) */
409 /* If e.g. the last FIB had no reportable routes, free the buffer */
411 vl_msg_api_free (mp);
/* NOTE(review): this excerpt fuses two truncated typedefs — the tail
   of an ip6 route record (address/length/index fields) and the
   argument struct passed to the bihash walker add_routes_in_fib
   (fib_index, route accumulator, back-pointer to stats_main). */
415 ip6_address_t address;
422 ip6_route_t ** routep;
424 } add_routes_in_fib_arg_t;
/* Bihash walker callback: append every key/value pair belonging to
   ap->fib_index to the *ap->routep vector. Bails out via longjmp to
   sm->jmp_buf when the control plane requests the lock, so the caller
   can pause and restart the walk.
   NOTE(review): interior lines (local decls, closing braces) elided. */
426 static void add_routes_in_fib (BVT(clib_bihash_kv) * kvp, void *arg)
428 add_routes_in_fib_arg_t * ap = arg;
429 stats_main_t * sm = ap->sm;
/* Non-local exit: control plane wants the data-structure lock. */
431 if (sm->data_structure_lock->release_hint)
432 clib_longjmp (&sm->jmp_buf, 1);
/* High 32 bits of key[2] hold the fib index for this entry. */
434 if (kvp->key[2]>>32 == ap->fib_index)
438 addr = (ip6_address_t *) kvp;
439 vec_add2 (*ap->routep, r, 1);
440 r->address = addr[0];
/* Low byte of key[2] is the prefix length. */
441 r->address_length = kvp->key[2] & 0xFF;
442 r->index = kvp->value;
/* IP6 analogue of do_ip4_fibs: snapshot each FIB's routes by walking
   the ip6 bihash lookup table (add_routes_in_fib, with setjmp/longjmp
   bailout), sum per-route adjacency counters, and stream
   VNET_IP6_FIB_COUNTERS messages to the main thread with the same
   release-hint / queue-full backpressure scheme.
   NOTE(review): many interior lines (restart targets, count
   increments, dsunlock, brace closers) are elided in this excerpt. */
446 static void do_ip6_fibs (stats_main_t * sm)
448 ip6_main_t * im6 = &ip6_main;
449 api_main_t * am = sm->api_main;
450 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
451 unix_shared_memory_queue_t * q = shmem_hdr->vl_input_queue;
/* static: snapshot vectors persist across calls to avoid realloc churn. */
452 static ip6_route_t * routes;
455 ip_lookup_main_t * lm = &im6->lookup_main;
456 static uword * results;
457 vl_api_vnet_ip6_fib_counters_t * mp = 0;
458 u32 items_this_message;
459 vl_api_ip6_fib_counter_t *ctrp = 0;
460 u32 start_at_fib_index = 0;
461 BVT(clib_bihash) * h = &im6->ip6_lookup_table;
462 add_routes_in_fib_arg_t _a, *a=&_a;
465 vec_foreach (fib, im6->fibs) {
466 /* We may have bailed out due to control-plane activity */
467 while ((fib - im6->fibs) < start_at_fib_index)
/* Allocate the first batch message for this FIB. */
471 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
472 mp = vl_msg_api_alloc_as_if_client
474 items_this_message*sizeof(vl_api_ip6_fib_counter_t));
475 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
477 mp->vrf_id = ntohl(fib->table_id);
478 ctrp = (vl_api_ip6_fib_counter_t *)mp->c;
/* Freeze the FIB data structures while snapshotting routes. */
481 dslock (sm, 0 /* release hint */, 1 /* tag */);
483 vec_reset_length (routes);
484 vec_reset_length (results);
486 a->fib_index = fib - im6->fibs;
/* setjmp returns nonzero when add_routes_in_fib longjmp'd out; then
   we pause and retry this FIB from scratch. */
490 if (clib_setjmp(&sm->jmp_buf, 0) == 0)
492 start_at_fib_index = fib - im6->fibs;
493 BV(clib_bihash_foreach_key_value_pair)(h, add_routes_in_fib, a);
498 ip46_fib_stats_delay (sm, 0 /* sec */,
499 STATS_RELEASE_DELAY_NS);
501 ctrp = (vl_api_ip6_fib_counter_t *)mp->c;
/* Second pass: resolve each snapshotted route's counters. */
505 vec_foreach (r, routes) {
506 vlib_counter_t c, sum;
507 uword i, j, n_left, n_nhs, adj_index, * result = 0;
508 ip_adjacency_t * adj;
509 ip_multipath_next_hop_t * nhs, tmp_nhs[1];
511 adj_index = r->index;
512 if (lm->fib_result_n_words > 1) {
513 result = vec_elt_at_index (results, adj_index);
514 adj_index = result[0];
517 adj = ip_get_adjacency (lm, adj_index);
/* Single adjacency: fake a one-element next-hop list. */
518 if (adj->n_adj == 1) {
520 nhs[0].next_hop_adj_index = ~0; /* not used */
/* Multipath adjacency: use its normalized next-hop heap block. */
524 ip_multipath_adjacency_t * madj;
525 madj = vec_elt_at_index (lm->multipath_adjacencies,
527 nhs = heap_elt_at_index
529 madj->normalized_next_hops.heap_offset);
530 n_nhs = madj->normalized_next_hops.count;
/* Sum counters across each next-hop's weighted share of adjacencies. */
533 n_left = nhs[0].weight;
534 vlib_counter_zero (&sum);
535 for (i = j = 0; i < adj->n_adj; i++) {
537 vlib_get_combined_counter (&lm->adjacency_counters,
539 vlib_counter_add (&sum, &c);
/* Emit only adjacencies that actually carried traffic. */
540 if (n_left == 0 && sum.packets > 0) {
542 /* already in net byte order */
543 ctrp->address[0] = r->address.as_u64[0];
544 ctrp->address[1] = r->address.as_u64[1];
545 ctrp->address_length = (u8) r->address_length;
546 ctrp->packets = clib_host_to_net_u64 (sum.packets);
547 ctrp->bytes = clib_host_to_net_u64(sum.bytes);
/* Batch full: ship it, with backpressure if the queue is stuffed. */
551 if (mp->count == items_this_message) {
552 mp->count = htonl (items_this_message);
554 * If the main thread's input queue is stuffed,
555 * drop the data structure lock (which the main thread
556 * may want), and take a pause.
558 unix_shared_memory_queue_lock (q);
559 if (unix_shared_memory_queue_is_full (q)) {
561 vl_msg_api_send_shmem_nolock (q, (u8 *)&mp);
562 unix_shared_memory_queue_unlock (q);
564 ip46_fib_stats_delay (sm, 0 /* sec */,
565 STATS_RELEASE_DELAY_NS);
568 vl_msg_api_send_shmem_nolock (q, (u8 *)&mp);
569 unix_shared_memory_queue_unlock (q);
/* Start the next batch message. */
571 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
572 mp = vl_msg_api_alloc_as_if_client
575 sizeof(vl_api_ip6_fib_counter_t));
576 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
578 mp->vrf_id = ntohl(fib->table_id);
579 ctrp = (vl_api_ip6_fib_counter_t *)mp->c;
/* Advance to the next next-hop's weighted run of adjacencies. */
584 n_left = nhs[j].weight;
585 vlib_counter_zero (&sum);
588 } /* for each (mp or single) adj */
589 if (sm->data_structure_lock->release_hint) {
590 start_at_fib_index = fib - im6->fibs;
592 ip46_fib_stats_delay (sm, 0 /* sec */, STATS_RELEASE_DELAY_NS);
594 ctrp = (vl_api_ip6_fib_counter_t *)mp->c;
597 } /* vec_foreach (routes) */
601 /* Flush any data from this fib */
603 mp->count = htonl (mp->count);
604 vl_msg_api_send_shmem (q, (u8 *)&mp);
607 } /* vec_foreach (fib) */
608 /* If e.g. the last FIB had no reportable routes, free the buffer */
610 vl_msg_api_free (mp);
/* Entry point for the dedicated stats pthread (see
   VLIB_REGISTER_THREAD below): blocks all signals, switches to the
   worker's heap, then loops forever collecting counters every 10 s
   while at least one client is registered (sm->enable_poller).
   NOTE(review): sigfillset, the while(1) loop header, the "continue
   when poller disabled" line, and the FIB collection calls appear
   elided in this excerpt. */
613 static void stats_thread_fn (void *arg)
615 stats_main_t *sm = &stats_main;
616 vlib_worker_thread_t *w = (vlib_worker_thread_t *)arg;
618 /* stats thread wants no signals. */
622 pthread_sigmask (SIG_SETMASK, &s, 0);
/* Allocate from this worker's heap, not the main heap. */
625 clib_mem_set_heap (w->thread_mheap);
628 /* 10 second poll interval */
629 ip46_fib_stats_delay (sm, 10 /* secs */, 0 /* nsec */);
/* Skip collection entirely when no client has asked for stats. */
631 if (! (sm->enable_poller))
633 do_simple_interface_counters (sm);
634 do_combined_interface_counters (sm);
/* Main-thread handler for VNET_INTERFACE_COUNTERS messages produced by
   the stats thread: relays a copy of the message to every registered
   client's input queue (reusing the original message for the last
   client), and also decodes the counters to stdout via fformat.
   Frees the message only when no client queue could accept it.
   NOTE(review): interior lines (pool_foreach closers, break statements,
   q_prev updates, message-reuse bookkeeping) are elided in this
   excerpt. */
640 static void vl_api_vnet_interface_counters_t_handler (
641 vl_api_vnet_interface_counters_t *mp)
643 vpe_client_registration_t *reg;
644 stats_main_t * sm = &stats_main;
645 unix_shared_memory_queue_t *q, *q_prev = NULL;
646 vl_api_vnet_interface_counters_t *mp_copy = NULL;
651 u32 count, sw_if_index;
/* Payload size depends on whether items are vlib_counter_t or u64. */
655 mp_size = sizeof (*mp) + (ntohl(mp->count) *
656 (mp->is_combined ? sizeof (vlib_counter_t) : sizeof (u64)));
/* Fan the message out: send the previous client's copy whenever its
   queue has room, cloning the original for each additional client. */
658 pool_foreach(reg, sm->stats_registrations,
660 q = vl_api_client_index_to_input_queue (reg->client_index);
662 if (q_prev && (q_prev->cursize < q_prev->maxsize)) {
663 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
664 memcpy(mp_copy, mp, mp_size);
665 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Local decode-and-print path (simple counters). */
673 count = ntohl (mp->count);
674 sw_if_index = ntohl (mp->first_sw_if_index);
675 if (mp->is_combined == 0) {
677 vp = (u64 *) mp->data;
679 switch (mp->vnet_counter_type) {
680 case VNET_INTERFACE_COUNTER_DROP:
681 counter_name = "drop";
683 case VNET_INTERFACE_COUNTER_PUNT:
684 counter_name = "punt";
686 case VNET_INTERFACE_COUNTER_IP4:
687 counter_name = "ip4";
689 case VNET_INTERFACE_COUNTER_IP6:
690 counter_name = "ip6";
692 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
693 counter_name = "rx-no-buff";
/* NOTE(review): the ',' after the next three case labels is invalid C
   and is almost certainly a transcription artifact — confirm against
   the canonical source, which has a bare "case LABEL:". */
695 case VNET_INTERFACE_COUNTER_RX_MISS:,
696 counter_name = "rx-miss";
698 case VNET_INTERFACE_COUNTER_RX_ERROR:,
699 counter_name = "rx-error (fifo-full)";
701 case VNET_INTERFACE_COUNTER_TX_ERROR:,
702 counter_name = "tx-error (fifo-full)";
705 counter_name = "bogus";
/* Values arrive in network byte order; convert before printing. */
708 for (i = 0; i < count; i++) {
709 v = clib_mem_unaligned (vp, u64);
710 v = clib_net_to_host_u64 (v);
712 fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
713 sm->vnet_main, sw_if_index, counter_name, v);
/* Combined (packets+bytes) decode-and-print path. */
719 vp = (vlib_counter_t *) mp->data;
721 switch (mp->vnet_counter_type) {
722 case VNET_INTERFACE_COUNTER_RX:
725 case VNET_INTERFACE_COUNTER_TX:
729 counter_name = "bogus";
732 for (i = 0; i < count; i++) {
733 packets = clib_mem_unaligned (&vp->packets, u64);
734 packets = clib_net_to_host_u64 (packets);
735 bytes = clib_mem_unaligned (&vp->bytes, u64);
736 bytes = clib_net_to_host_u64 (bytes);
738 fformat (stdout, "%U.%s.packets %lld\n",
739 format_vnet_sw_if_index_name,
740 sm->vnet_main, sw_if_index, counter_name, packets);
741 fformat (stdout, "%U.%s.bytes %lld\n",
742 format_vnet_sw_if_index_name,
743 sm->vnet_main, sw_if_index, counter_name, bytes);
/* Hand the original message to the final client queue if it has room;
   otherwise free it ourselves (message_bounce keeps the API layer
   from freeing it automatically). */
749 (q_prev->cursize < q_prev->maxsize)) {
750 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
752 vl_msg_api_free (mp);
/* Main-thread handler for VNET_IP4_FIB_COUNTERS: relay the message to
   every registered client queue, duplicating it for all but the last
   client; free the original only if no queue could take it.
   NOTE(review): pool_foreach closers and q_prev bookkeeping elided. */
756 static void vl_api_vnet_ip4_fib_counters_t_handler (
757 vl_api_vnet_ip4_fib_counters_t *mp)
759 vpe_client_registration_t *reg;
760 stats_main_t * sm = &stats_main;
761 unix_shared_memory_queue_t *q, *q_prev = NULL;
762 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
/* Total wire size: header plus count ip4 counter records. */
765 mp_size = sizeof(*mp_copy) +
766 ntohl(mp->count) * sizeof(vl_api_ip4_fib_counter_t);
768 pool_foreach(reg, sm->stats_registrations,
770 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Clone for the previous client only when its queue has room. */
772 if (q_prev && (q_prev->cursize < q_prev->maxsize)) {
773 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
774 memcpy(mp_copy, mp, mp_size);
775 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last client gets the original message if its queue has room. */
782 (q_prev->cursize < q_prev->maxsize)) {
783 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
785 vl_msg_api_free (mp);
/* Main-thread handler for VNET_IP6_FIB_COUNTERS: identical fan-out
   scheme to the ip4 handler above, with ip6 counter record sizing.
   NOTE(review): pool_foreach closers and q_prev bookkeeping elided. */
789 static void vl_api_vnet_ip6_fib_counters_t_handler (
790 vl_api_vnet_ip6_fib_counters_t *mp)
792 vpe_client_registration_t *reg;
793 stats_main_t * sm = &stats_main;
794 unix_shared_memory_queue_t *q, *q_prev = NULL;
795 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
/* Total wire size: header plus count ip6 counter records. */
798 mp_size = sizeof(*mp_copy) +
799 ntohl(mp->count) * sizeof(vl_api_ip6_fib_counter_t);
801 pool_foreach(reg, sm->stats_registrations,
803 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Clone for the previous client only when its queue has room. */
805 if (q_prev && (q_prev->cursize < q_prev->maxsize)) {
806 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
807 memcpy(mp_copy, mp, mp_size);
808 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last client gets the original message if its queue has room. */
815 (q_prev->cursize < q_prev->maxsize)) {
816 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
818 vl_msg_api_free (mp);
822 static void vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t *mp)
823 { clib_warning ("BUG"); }
/* Handle a client's WANT_STATS enable/disable request: add or remove
   the client's registration, recompute whether the poller thread
   should collect (enable_poller), and send a WANT_STATS_REPLY.
   NOTE(review): interior lines (retval setup, goto reply labels,
   existing-registration branch structure, q null check) are elided in
   this excerpt. */
825 static void vl_api_want_stats_t_handler (
826 vl_api_want_stats_t *mp)
828 stats_main_t *sm = &stats_main;
829 vpe_client_registration_t *rp;
830 vl_api_want_stats_reply_t *rmp;
833 unix_shared_memory_queue_t *q;
/* Look up any existing registration for this client. */
835 p = hash_get (sm->stats_registration_hash, mp->client_index);
/* Client already registered... */
837 if (mp->enable_disable) {
838 clib_warning ("pid %d: already enabled...", mp->pid);
/* ...and asked to disable: drop the registration. */
842 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
843 pool_put (sm->stats_registrations, rp);
844 hash_unset (sm->stats_registration_hash, mp->client_index);
/* Client not registered and asked to disable: nothing to do. */
848 if (mp->enable_disable == 0) {
849 clib_warning ("pid %d: already disabled...", mp->pid);
/* New registration: record client index/pid, index it by client. */
853 pool_get (sm->stats_registrations, rp);
854 rp->client_index = mp->client_index;
855 rp->client_pid = mp->pid;
856 hash_set (sm->stats_registration_hash, rp->client_index,
857 rp - sm->stats_registrations);
/* Poller runs iff at least one client remains registered. */
860 if (pool_elts(sm->stats_registrations))
861 sm->enable_poller = 1;
863 sm->enable_poller = 0;
865 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Acknowledge with a WANT_STATS_REPLY echoing the caller's context. */
870 rmp = vl_msg_api_alloc (sizeof (*rmp));
871 rmp->_vl_msg_id = ntohs(VL_API_WANT_STATS_REPLY);
872 rmp->context = mp->context;
873 rmp->retval = retval;
875 vl_msg_api_send_shmem (q, (u8 *)&rmp);
/* API-client disconnect callback: remove any stats registration the
   departing client left behind so the poller stops addressing it.
   NOTE(review): the "if (p)" guard around the removal, enable_poller
   recompute, and return value are elided in this excerpt. */
878 int stats_memclnt_delete_callback (u32 client_index)
880 vpe_client_registration_t *rp;
881 stats_main_t *sm = &stats_main;
884 p = hash_get (sm->stats_registration_hash, client_index);
886 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
887 pool_put (sm->stats_registrations, rp);
888 hash_unset (sm->stats_registration_hash, client_index);
/* FIB counter messages are consumed in-process only; skip the
   generated endian-swap and print handlers for them. */
894 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
895 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
896 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
897 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
/* One-time init: wire sm's pointers into the vlib/vnet/api mains,
   allocate the data-structure lock, register all handlers from
   foreach_stats_msg, and mark counter messages as "bounce" so the API
   layer does not free them before the handlers relay them.
   NOTE(review): sm->vlib_main / sm->api_main assignments, the #define
   _ / foreach_stats_msg / #undef _ expansion lines, and the return
   statement are partially elided in this excerpt. */
899 static clib_error_t * stats_init (vlib_main_t * vm)
901 stats_main_t * sm = &stats_main;
902 api_main_t * am = &api_main;
903 void *vlib_worker_thread_bootstrap_fn (void *arg);
906 sm->vnet_main = vnet_get_main();
907 sm->interface_main = &vnet_get_main()->interface_main;
909 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line-aligned lock, zeroed so no one appears to hold it. */
910 sm->data_structure_lock =
911 clib_mem_alloc_aligned (sizeof(data_structure_lock_t),
912 CLIB_CACHE_LINE_BYTES);
913 memset(sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Handler registration body expanded once per foreach_stats_msg row. */
916 vl_msg_api_set_handlers(VL_API_##N, #n, \
917 vl_api_##n##_t_handler, \
919 vl_api_##n##_t_endian, \
920 vl_api_##n##_t_print, \
921 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
925 /* tell the msg infra not to free these messages... */
926 am->message_bounce [VL_API_VNET_INTERFACE_COUNTERS] = 1;
927 am->message_bounce [VL_API_VNET_IP4_FIB_COUNTERS] = 1;
928 am->message_bounce [VL_API_VNET_IP6_FIB_COUNTERS] = 1;
933 VLIB_INIT_FUNCTION (stats_init);
/* Register the dedicated stats collection pthread; its body is
   stats_thread_fn above. NOTE(review): remaining initializers
   (.name, .mheap_size, closing "};") are elided in this excerpt. */
935 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
937 .function = stats_thread_fn,
940 .no_data_structure_clone = 1,