2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <stats/stats.h>
17 #include <vlib/threads.h>
21 stats_main_t stats_main;
23 #include <vnet/ip/ip.h>
25 #include <vpp-api/vpe_msg_enum.h>
28 #define f64_print(a,b)
30 #define vl_typedefs /* define message structures */
31 #include <vpp-api/vpe_all_api_h.h>
34 #define vl_endianfun /* define message structures */
35 #include <vpp-api/vpe_all_api_h.h>
38 /* instantiate all the print functions we know about */
39 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
41 #include <vpp-api/vpe_all_api_h.h>
/*
 * List of binary-API messages this stats process handles; expanded with
 * the _(UPPER, lower) convention by vl_msg_api_set_handlers in stats_init.
 */
44 #define foreach_stats_msg \
45 _(WANT_STATS, want_stats) \
46 _(WANT_STATS_REPLY, want_stats_reply) \
47 _(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
48 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
49 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)
51 /* These constants ensure msg sizes <= 1024, aka ring allocation */
52 #define SIMPLE_COUNTER_BATCH_SIZE 126
53 #define COMBINED_COUNTER_BATCH_SIZE 63
54 #define IP4_FIB_COUNTER_BATCH_SIZE 48
55 #define IP6_FIB_COUNTER_BATCH_SIZE 30
/* Pause length (5 ms) used when yielding the data-structure lock */
58 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/*
 * Acquire the data-structure lock that keeps the main thread from
 * resizing/moving structures while the stats scan walks them.
 * Recursion-safe within one thread: if this thread already owns the
 * lock, the ownership check below short-circuits (body elided in this
 * view — NOTE(review): intervening lines are not visible here).
 */
62 dslock (stats_main_t * sm, int release_hint, int tag)
65 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock object not allocated yet: nothing to take */
67 if (PREDICT_FALSE (l == 0))
70 thread_id = os_get_cpu_number ();
/* Already held by this calling thread -> treat as recursive acquire */
71 if (l->lock && l->thread_id == thread_id)
/* Spin on the gcc atomic test-and-set until we win the lock */
80 while (__sync_lock_test_and_set (&l->lock, 1))
/* Record owner for the recursion check and dsunlock's ASSERT */
83 l->thread_id = thread_id;
/*
 * Release the data-structure lock taken by dslock.  Must be called by
 * the owning thread (enforced by the ASSERT).  The memory barrier
 * orders prior writes before the lock word is cleared (clearing line
 * elided in this view).
 */
88 dsunlock (stats_main_t * sm)
91 data_structure_lock_t *l = sm->data_structure_lock;
/* No lock object allocated: nothing to release */
93 if (PREDICT_FALSE (l == 0))
96 thread_id = os_get_cpu_number ();
/* Only the thread that took the lock may drop it */
97 ASSERT (l->lock && l->thread_id == thread_id);
103 CLIB_MEMORY_BARRIER ();
/*
 * Scan all simple (single-u64) per-interface counters and ship them to
 * the main thread in VNET_INTERFACE_COUNTERS messages of at most
 * SIMPLE_COUNTER_BATCH_SIZE entries each (keeps msg size <= 1024).
 * Counter values are sent in network byte order.
 */
109 do_simple_interface_counters (stats_main_t * sm)
111 vl_api_vnet_interface_counters_t *mp = 0;
112 vnet_interface_main_t *im = sm->interface_main;
113 api_main_t *am = sm->api_main;
114 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
115 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
116 vlib_simple_counter_main_t *cm;
117 u32 items_this_message = 0;
122 * Prevent interface registration from expanding / moving the vectors...
123 * That tends never to happen, so we can hold this lock for a while.
125 vnet_interface_counter_lock (im);
/* One pass per counter type (drop, punt, ip4, ...) */
127 vec_foreach (cm, im->sw_if_counters)
130 for (i = 0; i < vec_len (cm->maxi); i++)
/* Start a new batch: size = remaining counters, capped at batch max */
134 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
135 vec_len (cm->maxi) - i);
137 mp = vl_msg_api_alloc_as_if_client
138 (sizeof (*mp) + items_this_message * sizeof (v));
139 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
/* Counter type index = offset of cm within the sw_if_counters vector */
140 mp->vnet_counter_type = cm - im->sw_if_counters;
142 mp->first_sw_if_index = htonl (i);
144 vp = (u64 *) mp->data;
146 v = vlib_get_simple_counter (cm, i);
/* Unaligned store: mp->data is a packed byte array */
147 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
/* Batch full -> finalize count and post to the main thread's queue */
150 if (mp->count == items_this_message)
152 mp->count = htonl (items_this_message);
153 /* Send to the main thread... */
154 vl_msg_api_send_shmem (q, (u8 *) & mp);
160 vnet_interface_counter_unlock (im);
/*
 * Same as do_simple_interface_counters, but for combined
 * (packets+bytes, vlib_counter_t) counters; batch size is halved
 * (COMBINED_COUNTER_BATCH_SIZE) because each entry is twice as large.
 */
164 do_combined_interface_counters (stats_main_t * sm)
166 vl_api_vnet_interface_counters_t *mp = 0;
167 vnet_interface_main_t *im = sm->interface_main;
168 api_main_t *am = sm->api_main;
169 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
170 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
171 vlib_combined_counter_main_t *cm;
172 u32 items_this_message = 0;
173 vlib_counter_t v, *vp = 0;
/* Hold off interface registration while we walk the counter vectors */
176 vnet_interface_counter_lock (im);
178 vec_foreach (cm, im->combined_sw_if_counters)
181 for (i = 0; i < vec_len (cm->maxi); i++)
/* Start a new batch: remaining counters, capped at batch max */
185 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
186 vec_len (cm->maxi) - i);
188 mp = vl_msg_api_alloc_as_if_client
189 (sizeof (*mp) + items_this_message * sizeof (v));
190 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
191 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
193 mp->first_sw_if_index = htonl (i);
195 vp = (vlib_counter_t *) mp->data;
197 vlib_get_combined_counter (cm, i, &v);
/* Packets and bytes stored unaligned, in network byte order */
198 clib_mem_unaligned (&vp->packets, u64)
199 = clib_host_to_net_u64 (v.packets);
200 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
/* Batch full -> finalize count and post to the main thread's queue */
203 if (mp->count == items_this_message)
205 mp->count = htonl (items_this_message);
206 /* Send to the main thread... */
207 vl_msg_api_send_shmem (q, (u8 *) & mp);
213 vnet_interface_counter_unlock (im);
216 /* from .../vnet/vnet/ip/lookup.c. Yuck */
/*
 * Packed snapshot of one IPv4 route (prefix + lookup result index)
 * collected under dslock; remaining fields elided in this view.
 */
217 typedef CLIB_PACKED (struct
219 ip4_address_t address;
220 u32 address_length: 6;
/*
 * Sleep for sec seconds + nsec nanoseconds, giving the main thread a
 * chance to grab the data-structure lock.  Warns (rather than retrying
 * indefinitely — retry handling elided in this view) on nanosleep
 * failure.
 */
225 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
227 struct timespec _req, *req = &_req;
228 struct timespec _rem, *rem = &_rem;
234 if (nanosleep (req, rem) == 0)
239 clib_unix_warning ("nanosleep");
/*
 * Walk every IPv4 FIB, sum per-adjacency combined counters for each
 * route (spreading multipath adjacencies across their next hops), and
 * ship the results to the main thread in VNET_IP4_FIB_COUNTERS
 * batches.  Cooperates with the control plane: whenever
 * release_hint is set, it drops the data-structure lock, sleeps
 * STATS_RELEASE_DELAY_NS, and restarts the current FIB.
 * NOTE(review): many intervening lines are elided in this view; the
 * restart/continue control flow is only partially visible.
 */
245 do_ip4_fibs (stats_main_t * sm)
247 ip4_main_t *im4 = &ip4_main;
248 api_main_t *am = sm->api_main;
249 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
250 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
/* static: route/result scratch vectors persist across calls */
251 static ip4_route_t *routes;
254 ip_lookup_main_t *lm = &im4->lookup_main;
255 static uword *results;
256 vl_api_vnet_ip4_fib_counters_t *mp = 0;
257 u32 items_this_message;
258 vl_api_ip4_fib_counter_t *ctrp = 0;
259 u32 start_at_fib_index = 0;
263 vec_foreach (fib, im4->fibs)
265 /* We may have bailed out due to control-plane activity */
266 while ((fib - im4->fibs) < start_at_fib_index)
/* Allocate the first batch buffer for this FIB (one-time / lazily) */
271 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
272 mp = vl_msg_api_alloc_as_if_client
274 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
275 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
277 mp->vrf_id = ntohl (fib->table_id);
278 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
282 /* happens if the last FIB was empty... */
283 ASSERT (mp->count == 0);
284 mp->vrf_id = ntohl (fib->table_id);
/* Freeze data structures while we snapshot this FIB's routes */
287 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
289 vec_reset_length (routes);
290 vec_reset_length (results);
/* One bucket per prefix length in the mtrie-style per-length hashes */
292 for (i = 0; i < ARRAY_LEN (fib->adj_index_by_dst_address); i++)
294 uword *hash = fib->adj_index_by_dst_address[i];
298 x.address_length = i;
301 hash_foreach_pair (p, hash,
303 x.address.data_u32 = p->key;
/* Multi-word lookup results are copied aside; x.index then refers
   into the results vector instead of being the adjacency index */
304 if (lm->fib_result_n_words > 1)
306 x.index = vec_len (results);
307 vec_add (results, p->value, lm->fib_result_n_words);
310 x.index = p->value[0];
312 vec_add1 (routes, x);
/* Control plane wants the lock: remember where we were, yield */
313 if (sm->data_structure_lock->release_hint)
315 start_at_fib_index = fib - im4->fibs;
317 ip46_fib_stats_delay (sm, 0 /* sec */,
318 STATS_RELEASE_DELAY_NS);
320 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
/* Second pass: accumulate counters per snapshotted route */
327 vec_foreach (r, routes)
329 vlib_counter_t c, sum;
330 uword i, j, n_left, n_nhs, adj_index, *result = 0;
332 ip_multipath_next_hop_t *nhs, tmp_nhs[1];
334 adj_index = r->index;
335 if (lm->fib_result_n_words > 1)
337 result = vec_elt_at_index (results, adj_index);
338 adj_index = result[0];
341 adj = ip_get_adjacency (lm, adj_index);
/* Single-path case: synthesize a one-element next-hop list */
345 nhs[0].next_hop_adj_index = ~0; /* not used */
351 ip_multipath_adjacency_t *madj;
352 madj = vec_elt_at_index (lm->multipath_adjacencies,
354 nhs = heap_elt_at_index
355 (lm->next_hop_heap, madj->normalized_next_hops.heap_offset);
356 n_nhs = madj->normalized_next_hops.count;
/* Walk adj block; n_left counts down the current next hop's weight */
359 n_left = nhs[0].weight;
360 vlib_counter_zero (&sum);
361 for (i = j = 0; i < adj->n_adj; i++)
364 vlib_get_combined_counter (&lm->adjacency_counters,
366 vlib_counter_add (&sum, &c);
368 * If we're done with this adj and it has actually
369 * seen at least one packet, send it.
371 if (n_left == 0 && sum.packets > 0)
374 /* already in net byte order */
375 ctrp->address = r->address.as_u32;
376 ctrp->address_length = r->address_length;
377 ctrp->packets = clib_host_to_net_u64 (sum.packets);
378 ctrp->bytes = clib_host_to_net_u64 (sum.bytes);
/* Batch full -> flush to main thread, then allocate the next batch */
382 if (mp->count == items_this_message)
384 mp->count = htonl (items_this_message);
386 * If the main thread's input queue is stuffed,
387 * drop the data structure lock (which the main thread
388 * may want), and take a pause.
390 unix_shared_memory_queue_lock (q);
391 if (unix_shared_memory_queue_is_full (q))
394 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
395 unix_shared_memory_queue_unlock (q);
397 ip46_fib_stats_delay (sm, 0 /* sec */ ,
398 STATS_RELEASE_DELAY_NS);
401 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
402 unix_shared_memory_queue_unlock (q);
404 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
405 mp = vl_msg_api_alloc_as_if_client
407 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
408 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
410 mp->vrf_id = ntohl (fib->table_id);
411 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
/* Move on to the next next-hop of this multipath adjacency */
417 n_left = nhs[j].weight;
418 vlib_counter_zero (&sum);
421 } /* for each (mp or single) adj */
/* Yield between routes too, if the control plane is waiting */
422 if (sm->data_structure_lock->release_hint)
424 start_at_fib_index = fib - im4->fibs;
426 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
428 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
431 } /* vec_foreach (routes) */
435 /* Flush any data from this fib */
438 mp->count = htonl (mp->count);
439 vl_msg_api_send_shmem (q, (u8 *) & mp);
442 } /* vec_foreach (fib) */
443 /* If e.g. the last FIB had no reportable routes, free the buffer */
445 vl_msg_api_free (mp);
/* IPv6 route snapshot (struct header elided in this view) ... */
450 ip6_address_t address;
/*
 * Callback argument for add_routes_in_fib: which FIB to collect,
 * where to append routes, and the stats_main for the release hint.
 */
458 ip6_route_t **routep;
460 } add_routes_in_fib_arg_t;
/*
 * Per-key-value callback for the ip6 lookup bihash walk: appends routes
 * belonging to ap->fib_index to *ap->routep.  Bails out via longjmp to
 * sm->jmp_buf when the control plane wants the data-structure lock, so
 * the caller can release it and restart the walk.
 */
463 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
465 add_routes_in_fib_arg_t *ap = arg;
466 stats_main_t *sm = ap->sm;
/* Abort the bihash walk; do_ip6_fibs catches this with clib_setjmp */
468 if (sm->data_structure_lock->release_hint)
469 clib_longjmp (&sm->jmp_buf, 1);
/* High 32 bits of key[2] carry the fib index for this entry */
471 if (kvp->key[2] >> 32 == ap->fib_index)
475 addr = (ip6_address_t *) kvp;
476 vec_add2 (*ap->routep, r, 1);
477 r->address = addr[0];
/* Low byte of key[2] is the prefix length */
478 r->address_length = kvp->key[2] & 0xFF;
479 r->index = kvp->value;
/*
 * IPv6 counterpart of do_ip4_fibs: snapshot each FIB's routes via a
 * bihash walk (interruptible through setjmp/longjmp), sum adjacency
 * counters per route, and ship VNET_IP6_FIB_COUNTERS batches to the
 * main thread.  Yields the data-structure lock on release_hint.
 * NOTE(review): many intervening lines are elided in this view.
 */
484 do_ip6_fibs (stats_main_t * sm)
486 ip6_main_t *im6 = &ip6_main;
487 api_main_t *am = sm->api_main;
488 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
489 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
/* static: route/result scratch vectors persist across calls */
490 static ip6_route_t *routes;
493 ip_lookup_main_t *lm = &im6->lookup_main;
494 static uword *results;
495 vl_api_vnet_ip6_fib_counters_t *mp = 0;
496 u32 items_this_message;
497 vl_api_ip6_fib_counter_t *ctrp = 0;
498 u32 start_at_fib_index = 0;
499 BVT (clib_bihash) * h = &im6->ip6_lookup_table;
500 add_routes_in_fib_arg_t _a, *a = &_a;
503 vec_foreach (fib, im6->fibs)
505 /* We may have bailed out due to control-plane activity */
506 while ((fib - im6->fibs) < start_at_fib_index)
/* Allocate a fresh batch buffer for this FIB */
511 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
512 mp = vl_msg_api_alloc_as_if_client
514 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
515 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
517 mp->vrf_id = ntohl (fib->table_id);
518 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Freeze data structures during the route snapshot */
521 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
523 vec_reset_length (routes);
524 vec_reset_length (results);
526 a->fib_index = fib - im6->fibs;
/* setjmp returns nonzero when add_routes_in_fib longjmps out because
   the control plane asked for the lock; then we pause and retry */
530 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
532 start_at_fib_index = fib - im6->fibs;
533 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
538 ip46_fib_stats_delay (sm, 0 /* sec */ ,
539 STATS_RELEASE_DELAY_NS);
541 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Second pass: accumulate adjacency counters per snapshotted route */
545 vec_foreach (r, routes)
547 vlib_counter_t c, sum;
548 uword i, j, n_left, n_nhs, adj_index, *result = 0;
550 ip_multipath_next_hop_t *nhs, tmp_nhs[1];
552 adj_index = r->index;
/* Multi-word results were copied aside during the snapshot */
553 if (lm->fib_result_n_words > 1)
555 result = vec_elt_at_index (results, adj_index);
556 adj_index = result[0];
559 adj = ip_get_adjacency (lm, adj_index);
/* Single-path case: synthesize a one-element next-hop list */
563 nhs[0].next_hop_adj_index = ~0; /* not used */
569 ip_multipath_adjacency_t *madj;
570 madj = vec_elt_at_index (lm->multipath_adjacencies,
572 nhs = heap_elt_at_index
573 (lm->next_hop_heap, madj->normalized_next_hops.heap_offset);
574 n_nhs = madj->normalized_next_hops.count;
/* n_left counts down the current next hop's weight */
577 n_left = nhs[0].weight;
578 vlib_counter_zero (&sum);
579 for (i = j = 0; i < adj->n_adj; i++)
582 vlib_get_combined_counter (&lm->adjacency_counters,
584 vlib_counter_add (&sum, &c);
/* Emit only routes that have actually seen traffic */
585 if (n_left == 0 && sum.packets > 0)
588 /* already in net byte order */
589 ctrp->address[0] = r->address.as_u64[0];
590 ctrp->address[1] = r->address.as_u64[1];
591 ctrp->address_length = (u8) r->address_length;
592 ctrp->packets = clib_host_to_net_u64 (sum.packets);
593 ctrp->bytes = clib_host_to_net_u64 (sum.bytes);
/* Batch full -> flush, pausing if the main thread's queue is full */
597 if (mp->count == items_this_message)
599 mp->count = htonl (items_this_message);
601 * If the main thread's input queue is stuffed,
602 * drop the data structure lock (which the main thread
603 * may want), and take a pause.
605 unix_shared_memory_queue_lock (q);
606 if (unix_shared_memory_queue_is_full (q))
609 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
610 unix_shared_memory_queue_unlock (q);
612 ip46_fib_stats_delay (sm, 0 /* sec */ ,
613 STATS_RELEASE_DELAY_NS);
616 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
617 unix_shared_memory_queue_unlock (q);
619 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
620 mp = vl_msg_api_alloc_as_if_client
622 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
623 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
625 mp->vrf_id = ntohl (fib->table_id);
626 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Move on to the next next-hop of this multipath adjacency */
632 n_left = nhs[j].weight;
633 vlib_counter_zero (&sum);
636 } /* for each (mp or single) adj */
/* Yield between routes too, if the control plane is waiting */
637 if (sm->data_structure_lock->release_hint)
639 start_at_fib_index = fib - im6->fibs;
641 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
643 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
646 } /* vec_foreach (routes) */
650 /* Flush any data from this fib */
653 mp->count = htonl (mp->count);
654 vl_msg_api_send_shmem (q, (u8 *) & mp);
657 } /* vec_foreach (fib) */
658 /* If e.g. the last FIB had no reportable routes, free the buffer */
660 vl_msg_api_free (mp);
/*
 * Entry point of the dedicated stats pthread (registered below via
 * VLIB_REGISTER_THREAD).  Blocks all signals, names the thread, sets
 * its heap, then loops forever: sleep 10 s, and — gating elided in this
 * view around sm->enable_poller — collect interface and FIB counters.
 */
664 stats_thread_fn (void *arg)
666 stats_main_t *sm = &stats_main;
667 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
668 vlib_thread_main_t *tm = vlib_get_thread_main ();
670 /* stats thread wants no signals. */
674 pthread_sigmask (SIG_SETMASK, &s, 0);
/* Name the thread "<prefix>_stats" for ps/top visibility */
677 if (vec_len (tm->thread_prefix))
678 vlib_set_thread_name ((char *)
679 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
/* Use this worker's private mheap for all allocations in this thread */
681 clib_mem_set_heap (w->thread_mheap);
685 /* 10 second poll interval */
686 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* Skip collection entirely when no client has registered */
688 if (!(sm->enable_poller))
690 do_simple_interface_counters (sm);
691 do_combined_interface_counters (sm);
/*
 * Main-thread handler for VNET_INTERFACE_COUNTERS messages produced by
 * the stats thread.  Fans the message out to every registered client
 * queue (copying for all but the last reachable queue so the original
 * buffer is reused), and also pretty-prints the counters to stdout.
 * Frees the message itself if no client queue could take it.
 */
698 vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t *
701 vpe_client_registration_t *reg;
702 stats_main_t *sm = &stats_main;
703 unix_shared_memory_queue_t *q, *q_prev = NULL;
704 vl_api_vnet_interface_counters_t *mp_copy = NULL;
709 u32 count, sw_if_index;
/* Message size depends on counter kind: combined entries are larger */
713 mp_size = sizeof (*mp) + (ntohl (mp->count) *
714 (mp->is_combined ? sizeof (vlib_counter_t) :
/* Fan-out: for each registered client, copy-and-send to the previous
   client's queue, keeping the original for the final send below */
718 pool_foreach(reg, sm->stats_registrations,
720 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Only enqueue if the previous queue has room (avoid blocking) */
723 if (q_prev && (q_prev->cursize < q_prev->maxsize))
725 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
726 clib_memcpy(mp_copy, mp, mp_size);
727 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Local pretty-print of the counters follows */
736 count = ntohl (mp->count);
737 sw_if_index = ntohl (mp->first_sw_if_index);
738 if (mp->is_combined == 0)
741 vp = (u64 *) mp->data;
743 switch (mp->vnet_counter_type)
745 case VNET_INTERFACE_COUNTER_DROP:
746 counter_name = "drop";
748 case VNET_INTERFACE_COUNTER_PUNT:
749 counter_name = "punt";
751 case VNET_INTERFACE_COUNTER_IP4:
752 counter_name = "ip4";
754 case VNET_INTERFACE_COUNTER_IP6:
755 counter_name = "ip6";
757 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
758 counter_name = "rx-no-buff";
760 case VNET_INTERFACE_COUNTER_RX_MISS:
/* NOTE(review): the leading commas on the next three statements look
   like extraction artifacts (expected plain assignments) — verify
   against the upstream source before relying on this text */
761 , counter_name = "rx-miss";
763 case VNET_INTERFACE_COUNTER_RX_ERROR:
764 , counter_name = "rx-error (fifo-full)";
766 case VNET_INTERFACE_COUNTER_TX_ERROR:
767 , counter_name = "tx-error (fifo-full)";
770 counter_name = "bogus";
/* Simple counters: one u64 per interface, network byte order */
773 for (i = 0; i < count; i++)
775 v = clib_mem_unaligned (vp, u64);
776 v = clib_net_to_host_u64 (v);
778 fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
779 sm->vnet_main, sw_if_index, counter_name, v);
/* Combined counters: packets + bytes per interface */
787 vp = (vlib_counter_t *) mp->data;
789 switch (mp->vnet_counter_type)
791 case VNET_INTERFACE_COUNTER_RX:
794 case VNET_INTERFACE_COUNTER_TX:
798 counter_name = "bogus";
801 for (i = 0; i < count; i++)
803 packets = clib_mem_unaligned (&vp->packets, u64);
804 packets = clib_net_to_host_u64 (packets);
805 bytes = clib_mem_unaligned (&vp->bytes, u64);
806 bytes = clib_net_to_host_u64 (bytes);
808 fformat (stdout, "%U.%s.packets %lld\n",
809 format_vnet_sw_if_index_name,
810 sm->vnet_main, sw_if_index, counter_name, packets);
811 fformat (stdout, "%U.%s.bytes %lld\n",
812 format_vnet_sw_if_index_name,
813 sm->vnet_main, sw_if_index, counter_name, bytes);
/* Send the original buffer to the last client queue, else free it */
818 if (q_prev && (q_prev->cursize < q_prev->maxsize))
820 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
824 vl_msg_api_free (mp);
/*
 * Main-thread handler for VNET_IP4_FIB_COUNTERS: relay the message to
 * every registered stats client (copy for all but the last reachable
 * queue), freeing the original only if no queue could take it.
 */
829 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
831 vpe_client_registration_t *reg;
832 stats_main_t *sm = &stats_main;
833 unix_shared_memory_queue_t *q, *q_prev = NULL;
834 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
/* Total size = header + count variable-length counter entries */
837 mp_size = sizeof (*mp_copy) +
838 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
841 pool_foreach(reg, sm->stats_registrations,
843 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Copy-and-send pattern: previous queue gets a copy if it has room */
846 if (q_prev && (q_prev->cursize < q_prev->maxsize))
848 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
849 clib_memcpy(mp_copy, mp, mp_size);
850 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Original buffer goes to the last reachable queue, else is freed */
857 if (q_prev && (q_prev->cursize < q_prev->maxsize))
859 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
863 vl_msg_api_free (mp);
/*
 * Main-thread handler for VNET_IP6_FIB_COUNTERS: identical relay logic
 * to the ip4 handler above, with ip6-sized counter entries.
 */
868 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
870 vpe_client_registration_t *reg;
871 stats_main_t *sm = &stats_main;
872 unix_shared_memory_queue_t *q, *q_prev = NULL;
873 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
/* Total size = header + count variable-length counter entries */
876 mp_size = sizeof (*mp_copy) +
877 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
880 pool_foreach(reg, sm->stats_registrations,
882 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Copy-and-send pattern: previous queue gets a copy if it has room */
885 if (q_prev && (q_prev->cursize < q_prev->maxsize))
887 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
888 clib_memcpy(mp_copy, mp, mp_size);
889 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Original buffer goes to the last reachable queue, else is freed */
896 if (q_prev && (q_prev->cursize < q_prev->maxsize))
898 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
902 vl_msg_api_free (mp);
/*
 * WANT_STATS_REPLY should never arrive at the stats process (it is a
 * reply we *send*); receiving one indicates a wiring bug.
 */
907 vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp)
909 clib_warning ("BUG");
/*
 * Handle a client's WANT_STATS (un)subscription.  Maintains the
 * registration pool + hash keyed by client_index, toggles the poller
 * based on whether any registrations remain, and replies with
 * WANT_STATS_REPLY (retval handling partially elided in this view).
 */
913 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
915 stats_main_t *sm = &stats_main;
916 vpe_client_registration_t *rp;
917 vl_api_want_stats_reply_t *rmp;
920 unix_shared_memory_queue_t *q;
/* Look up any existing registration for this client */
922 p = hash_get (sm->stats_registration_hash, mp->client_index);
925 if (mp->enable_disable)
927 clib_warning ("pid %d: already enabled...", mp->pid);
/* Disable path: drop the existing registration */
933 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
934 pool_put (sm->stats_registrations, rp);
935 hash_unset (sm->stats_registration_hash, mp->client_index);
/* No existing registration */
939 if (mp->enable_disable == 0)
941 clib_warning ("pid %d: already disabled...", mp->pid);
/* Enable path: create and index a new registration */
945 pool_get (sm->stats_registrations, rp);
946 rp->client_index = mp->client_index;
947 rp->client_pid = mp->pid;
948 hash_set (sm->stats_registration_hash, rp->client_index,
949 rp - sm->stats_registrations);
/* Poller runs iff at least one client is registered */
952 if (pool_elts (sm->stats_registrations))
953 sm->enable_poller = 1;
955 sm->enable_poller = 0;
957 q = vl_api_client_index_to_input_queue (mp->client_index);
/* Build and send the reply to the requesting client's queue */
962 rmp = vl_msg_api_alloc (sizeof (*rmp));
963 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
964 rmp->context = mp->context;
965 rmp->retval = retval;
967 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * API-client disconnect callback: if the departing client had a stats
 * registration, remove it from the pool and the lookup hash so we stop
 * relaying counter messages to its (now dead) queue.
 */
971 stats_memclnt_delete_callback (u32 client_index)
973 vpe_client_registration_t *rp;
974 stats_main_t *sm = &stats_main;
977 p = hash_get (sm->stats_registration_hash, client_index);
980 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
981 pool_put (sm->stats_registrations, rp);
982 hash_unset (sm->stats_registration_hash, client_index);
/* Variable-length fib-counter messages: skip generated endian/print
   handlers (entries are already converted / not printable generically) */
988 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
989 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
990 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
991 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
992 
/*
 * One-time init: wire up stats_main (vnet/interface pointers, poll
 * interval, data-structure lock), register the foreach_stats_msg
 * handlers, and mark counter messages as "bounce" so the API infra
 * does not free them before our handlers relay them.
 */
993 static clib_error_t *
994 stats_init (vlib_main_t * vm)
996 stats_main_t *sm = &stats_main;
997 api_main_t *am = &api_main;
998 void *vlib_worker_thread_bootstrap_fn (void *arg);
1001 sm->vnet_main = vnet_get_main ();
1002 sm->interface_main = &vnet_get_main ()->interface_main;
1004 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line-aligned lock shared with the main thread; zeroed below */
1005 sm->data_structure_lock =
1006 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
1007 CLIB_CACHE_LINE_BYTES);
1008 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Handler registration template expanded over foreach_stats_msg */
1011 vl_msg_api_set_handlers(VL_API_##N, #n, \
1012 vl_api_##n##_t_handler, \
1014 vl_api_##n##_t_endian, \
1015 vl_api_##n##_t_print, \
1016 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
1020 /* tell the msg infra not to free these messages... */
1021 am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1;
1022 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
1023 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
1028 VLIB_INIT_FUNCTION (stats_init);
/* Register the stats pthread; no per-thread data-structure clones —
   it reads the main thread's structures under dslock instead */
1031 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
1033 .function = stats_thread_fn,
1036 .no_data_structure_clone = 1,
1042 * fd.io coding-style-patch-verification: ON
1045 * eval: (c-set-style "gnu")