2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <stats/stats.h>
17 #include <vlib/threads.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/fib/fib_table.h>
20 #include <vnet/dpo/load_balance.h>
/* Global state shared by the stats thread and the API message handlers. */
24 stats_main_t stats_main;
26 #include <vnet/ip/ip.h>
28 #include <vpp-api/vpe_msg_enum.h>
31 #define f64_print(a,b)
33 #define vl_typedefs /* define message structures */
34 #include <vpp-api/vpe_all_api_h.h>
37 #define vl_endianfun /* define message structures */
38 #include <vpp-api/vpe_all_api_h.h>
41 /* instantiate all the print functions we know about */
42 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
44 #include <vpp-api/vpe_all_api_h.h>
/* Messages handled by this module; expanded in stats_init via
 * vl_msg_api_set_handlers (see stats_init below). */
47 #define foreach_stats_msg \
48 _(WANT_STATS, want_stats) \
49 _(WANT_STATS_REPLY, want_stats_reply) \
50 _(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
51 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
52 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters)
54 /* These constants ensure msg sizes <= 1024, aka ring allocation */
55 #define SIMPLE_COUNTER_BATCH_SIZE 126
56 #define COMBINED_COUNTER_BATCH_SIZE 63
57 #define IP4_FIB_COUNTER_BATCH_SIZE 48
58 #define IP6_FIB_COUNTER_BATCH_SIZE 30
/* 5 ms pause used when yielding the data-structure lock to the control plane */
61 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/*
 * dslock -- acquire the shared data-structure spin lock on behalf of the
 * calling thread.
 * NOTE(review): interior lines (declarations, early returns, recursion
 * handling) are missing from this extraction; only visible statements are
 * annotated. Confirm details against the full source.
 */
65 dslock (stats_main_t * sm, int release_hint, int tag)
68 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock structure not allocated yet -- presumably a no-op return here */
70 if (PREDICT_FALSE (l == 0))
73 thread_id = os_get_cpu_number ();
/* This thread already holds the lock -- presumably a recursive-acquire
 * path here (body not visible) */
74 if (l->lock && l->thread_id == thread_id)
/* Spin until the lock word is acquired (GCC atomic test-and-set) */
83 while (__sync_lock_test_and_set (&l->lock, 1))
/* Record the owner so the recursive check above works */
86 l->thread_id = thread_id;
/*
 * dsunlock -- release the shared data-structure spin lock.
 * NOTE(review): interior lines are missing from this extraction; the actual
 * clearing of l->lock is not visible here but presumably follows the
 * memory barrier.
 */
91 dsunlock (stats_main_t * sm)
94 data_structure_lock_t *l = sm->data_structure_lock;
/* No-op if the lock structure has not been allocated */
96 if (PREDICT_FALSE (l == 0))
99 thread_id = os_get_cpu_number ();
/* Only the owning thread may release the lock */
100 ASSERT (l->lock && l->thread_id == thread_id);
/* Ensure prior writes are visible before the lock word is cleared */
106 CLIB_MEMORY_BARRIER ();
/*
 * do_simple_interface_counters -- snapshot all simple (single-u64) per-
 * interface counters and ship them to the main thread in batched
 * VNET_INTERFACE_COUNTERS messages of at most SIMPLE_COUNTER_BATCH_SIZE
 * entries each (keeps messages <= 1024 bytes, per the constants above).
 * NOTE(review): interior lines are missing from this extraction.
 */
112 do_simple_interface_counters (stats_main_t * sm)
114 vl_api_vnet_interface_counters_t *mp = 0;
115 vnet_interface_main_t *im = sm->interface_main;
116 api_main_t *am = sm->api_main;
117 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
118 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
119 vlib_simple_counter_main_t *cm;
120 u32 items_this_message = 0;
125 * Prevent interface registration from expanding / moving the vectors...
126 * That tends never to happen, so we can hold this lock for a while.
128 vnet_interface_counter_lock (im);
/* One pass per simple counter type, one inner pass per sw_if_index */
130 vec_foreach (cm, im->sw_if_counters)
133 for (i = 0; i < vec_len (cm->maxi); i++)
137 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
138 vec_len (cm->maxi) - i);
/* Allocate a fresh message sized for this batch */
140 mp = vl_msg_api_alloc_as_if_client
141 (sizeof (*mp) + items_this_message * sizeof (v));
142 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
/* Counter type is the index of cm within the sw_if_counters vector */
143 mp->vnet_counter_type = cm - im->sw_if_counters;
145 mp->first_sw_if_index = htonl (i);
147 vp = (u64 *) mp->data;
/* Counter values are sent in network byte order, unaligned-safe */
149 v = vlib_get_simple_counter (cm, i);
150 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
/* Batch full: finalize the count and hand it to the main thread */
153 if (mp->count == items_this_message)
155 mp->count = htonl (items_this_message);
156 /* Send to the main thread... */
157 vl_msg_api_send_shmem (q, (u8 *) & mp);
163 vnet_interface_counter_unlock (im);
/*
 * do_combined_interface_counters -- same batching scheme as
 * do_simple_interface_counters, but for combined (packets + bytes)
 * per-interface counters, batched COMBINED_COUNTER_BATCH_SIZE at a time.
 * NOTE(review): interior lines are missing from this extraction.
 */
167 do_combined_interface_counters (stats_main_t * sm)
169 vl_api_vnet_interface_counters_t *mp = 0;
170 vnet_interface_main_t *im = sm->interface_main;
171 api_main_t *am = sm->api_main;
172 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
173 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
174 vlib_combined_counter_main_t *cm;
175 u32 items_this_message = 0;
176 vlib_counter_t v, *vp = 0;
/* Keep interface registration from resizing/moving the counter vectors */
179 vnet_interface_counter_lock (im);
181 vec_foreach (cm, im->combined_sw_if_counters)
184 for (i = 0; i < vec_len (cm->maxi); i++)
188 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
189 vec_len (cm->maxi) - i);
191 mp = vl_msg_api_alloc_as_if_client
192 (sizeof (*mp) + items_this_message * sizeof (v));
193 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
194 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
196 mp->first_sw_if_index = htonl (i);
198 vp = (vlib_counter_t *) mp->data;
/* Both fields go out in network byte order via unaligned stores */
200 vlib_get_combined_counter (cm, i, &v);
201 clib_mem_unaligned (&vp->packets, u64)
202 = clib_host_to_net_u64 (v.packets);
203 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
/* Batch full: finalize and send to the main thread */
206 if (mp->count == items_this_message)
208 mp->count = htonl (items_this_message);
209 /* Send to the main thread... */
210 vl_msg_api_send_shmem (q, (u8 *) & mp);
216 vnet_interface_counter_unlock (im);
/* Packed snapshot of one ip4 route, used while walking the FIB under the
 * data-structure lock. NOTE(review): the remaining members (and the
 * typedef name, presumably ip4_route_t -- see `static ip4_route_t *routes`
 * in do_ip4_fibs) are missing from this extraction. */
219 /* from .../vnet/vnet/ip/lookup.c. Yuck */
220 typedef CLIB_PACKED (struct
222 ip4_address_t address;
223 u32 address_length: 6;
/*
 * ip46_fib_stats_delay -- sleep for sec seconds + nsec nanoseconds.
 * NOTE(review): interior lines are missing; the presence of `rem` and the
 * warning suggest the loop retries with the remaining time after an
 * interrupted nanosleep -- confirm against the full source.
 */
228 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
230 struct timespec _req, *req = &_req;
231 struct timespec _rem, *rem = &_rem;
/* Successful (uninterrupted) sleep -- done */
237 if (nanosleep (req, rem) == 0)
/* nanosleep failed for a reason other than success */
242 clib_unix_warning ("nanosleep");
/*
 * do_ip4_fibs -- for every ip4 FIB, snapshot the routes under the
 * data-structure lock, read each route's to-counters from the load-balance
 * combined counters, and stream them to the main thread in
 * VNET_IP4_FIB_COUNTERS messages of IP4_FIB_COUNTER_BATCH_SIZE entries.
 * Cooperates with the control plane: whenever release_hint is set, it
 * remembers where it was (start_at_fib_index), drops the lock, sleeps
 * STATS_RELEASE_DELAY_NS, and restarts the current FIB.
 * NOTE(review): many interior lines (braces, declarations, continues) are
 * missing from this extraction; annotations cover visible statements only.
 */
248 do_ip4_fibs (stats_main_t * sm)
250 ip4_main_t *im4 = &ip4_main;
251 api_main_t *am = sm->api_main;
252 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
253 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
/* static: route/result vectors are reused across calls to avoid realloc */
254 static ip4_route_t *routes;
257 ip_lookup_main_t *lm = &im4->lookup_main;
258 static uword *results;
259 vl_api_vnet_ip4_fib_counters_t *mp = 0;
260 u32 items_this_message;
261 vl_api_ip4_fib_counter_t *ctrp = 0;
262 u32 start_at_fib_index = 0;
267 pool_foreach (fib, im4->fibs,
269 /* We may have bailed out due to control-plane activity */
270 while ((fib - im4->fibs) < start_at_fib_index)
/* Lazily allocate the first message for this run */
275 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
276 mp = vl_msg_api_alloc_as_if_client
278 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
279 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
281 mp->vrf_id = ntohl (fib->ft_table_id);
282 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
286 /* happens if the last FIB was empty... */
287 ASSERT (mp->count == 0);
288 mp->vrf_id = ntohl (fib->ft_table_id);
/* Freeze the FIB while we snapshot it into the routes vector */
291 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
293 vec_reset_length (routes);
294 vec_reset_length (results);
/* Walk every prefix-length bucket of the per-dst hash tables */
296 for (i = 0; i < ARRAY_LEN (fib->v4.fib_entry_by_dst_address); i++)
298 uword *hash = fib->v4.fib_entry_by_dst_address[i];
302 x.address_length = i;
304 hash_foreach_pair (p, hash,
306 x.address.data_u32 = p->key;
/* Multi-word lookup results are copied aside; x.index points into them */
307 if (lm->fib_result_n_words > 1)
309 x.index = vec_len (results);
310 vec_add (results, p->value, lm->fib_result_n_words);
313 x.index = p->value[0];
315 vec_add1 (routes, x);
/* Control plane wants the lock: remember position, yield, restart FIB */
316 if (sm->data_structure_lock->release_hint)
318 start_at_fib_index = fib - im4->fibs;
320 ip46_fib_stats_delay (sm, 0 /* sec */,
321 STATS_RELEASE_DELAY_NS);
/* Restart filling the current message from the beginning */
323 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
/* Snapshot taken -- now read counters for each collected route */
329 vec_foreach (r, routes)
333 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
337 * seen at least one packet, send it.
342 /* already in net byte order */
343 ctrp->address = r->address.as_u32;
344 ctrp->address_length = r->address_length;
345 ctrp->packets = clib_host_to_net_u64 (c.packets);
346 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Message full: send it (with backpressure handling) and start a new one */
350 if (mp->count == items_this_message)
352 mp->count = htonl (items_this_message);
354 * If the main thread's input queue is stuffed,
355 * drop the data structure lock (which the main thread
356 * may want), and take a pause.
358 unix_shared_memory_queue_lock (q);
359 if (unix_shared_memory_queue_is_full (q))
362 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
363 unix_shared_memory_queue_unlock (q);
365 ip46_fib_stats_delay (sm, 0 /* sec */ ,
366 STATS_RELEASE_DELAY_NS);
/* Queue not full: send while still holding the queue lock */
369 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
370 unix_shared_memory_queue_unlock (q);
/* Allocate the next batch message for this FIB */
372 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
373 mp = vl_msg_api_alloc_as_if_client
375 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
376 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
378 mp->vrf_id = ntohl (fib->ft_table_id);
379 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
381 } /* for each (mp or single) adj */
/* Mid-send yield point: same release-hint protocol as above */
382 if (sm->data_structure_lock->release_hint)
384 start_at_fib_index = fib - im4->fibs;
386 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
388 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
391 } /* vec_foreach (routes) */
395 /* Flush any data from this fib */
/* Partial batch: mp->count still holds the host-order running count */
398 mp->count = htonl (mp->count);
399 vl_msg_api_send_shmem (q, (u8 *) & mp);
405 /* If e.g. the last FIB had no reportable routes, free the buffer */
407 vl_msg_api_free (mp);
/* Fragments of the ip6 route record and the walk-callback argument
 * struct. NOTE(review): most members (sm, fib_index, results, etc.) are
 * missing from this extraction; see their uses in add_routes_in_fib and
 * do_ip6_fibs below. */
412 ip6_address_t address;
420 ip6_route_t **routep;
422 } add_routes_in_fib_arg_t;
/*
 * add_routes_in_fib -- bihash walk callback: append every route belonging
 * to ap->fib_index to the caller's routes vector. Aborts the entire walk
 * via longjmp when the control plane asks for the data-structure lock
 * (the setjmp lives in do_ip6_fibs).
 */
425 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
427 add_routes_in_fib_arg_t *ap = arg;
428 stats_main_t *sm = ap->sm;
/* Non-local bail-out back to do_ip6_fibs' clib_setjmp */
430 if (sm->data_structure_lock->release_hint)
431 clib_longjmp (&sm->jmp_buf, 1);
/* key[2] packs fib_index in the upper 32 bits, prefix length in low byte */
433 if (kvp->key[2] >> 32 == ap->fib_index)
437 addr = (ip6_address_t *) kvp;
438 vec_add2 (*ap->routep, r, 1);
439 r->address = addr[0];
440 r->address_length = kvp->key[2] & 0xFF;
441 r->index = kvp->value;
/*
 * do_ip6_fibs -- ip6 counterpart of do_ip4_fibs. Snapshots each FIB's
 * routes by walking the forwarding bihash via add_routes_in_fib (aborted
 * with setjmp/longjmp when the control plane wants the lock), then streams
 * counters to the main thread in VNET_IP6_FIB_COUNTERS batches of
 * IP6_FIB_COUNTER_BATCH_SIZE.
 * NOTE(review): many interior lines are missing from this extraction;
 * annotations cover visible statements only.
 */
446 do_ip6_fibs (stats_main_t * sm)
448 ip6_main_t *im6 = &ip6_main;
449 api_main_t *am = sm->api_main;
450 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
451 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
/* static: reused across calls to avoid reallocating every pass */
452 static ip6_route_t *routes;
455 static uword *results;
456 vl_api_vnet_ip6_fib_counters_t *mp = 0;
457 u32 items_this_message;
458 vl_api_ip6_fib_counter_t *ctrp = 0;
459 u32 start_at_fib_index = 0;
/* Single forwarding bihash shared by all ip6 FIBs */
460 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
461 add_routes_in_fib_arg_t _a, *a = &_a;
465 pool_foreach (fib, im6->fibs,
467 /* We may have bailed out due to control-plane activity */
468 while ((fib - im6->fibs) < start_at_fib_index)
/* Lazily allocate the first message */
473 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
474 mp = vl_msg_api_alloc_as_if_client
476 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
477 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
479 mp->vrf_id = ntohl (fib->ft_table_id);
480 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Freeze the FIB while the bihash walk snapshots it */
483 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
485 vec_reset_length (routes);
486 vec_reset_length (results);
488 a->fib_index = fib - im6->fibs;
/* setjmp returns 0 on the direct call; nonzero when add_routes_in_fib
 * longjmps out because the control plane wants the lock */
492 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
494 start_at_fib_index = fib - im6->fibs;
495 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
/* Walk was aborted: yield, then restart this FIB's message */
500 ip46_fib_stats_delay (sm, 0 /* sec */ ,
501 STATS_RELEASE_DELAY_NS);
503 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Snapshot taken -- read counters for each collected route */
507 vec_foreach (r, routes)
511 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
515 * seen at least one packet, send it.
519 /* already in net byte order */
520 ctrp->address[0] = r->address.as_u64[0];
521 ctrp->address[1] = r->address.as_u64[1];
522 ctrp->address_length = (u8) r->address_length;
523 ctrp->packets = clib_host_to_net_u64 (c.packets);
524 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Message full: send with backpressure handling, then start a new one */
528 if (mp->count == items_this_message)
530 mp->count = htonl (items_this_message);
532 * If the main thread's input queue is stuffed,
533 * drop the data structure lock (which the main thread
534 * may want), and take a pause.
536 unix_shared_memory_queue_lock (q);
537 if (unix_shared_memory_queue_is_full (q))
540 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
541 unix_shared_memory_queue_unlock (q);
543 ip46_fib_stats_delay (sm, 0 /* sec */ ,
544 STATS_RELEASE_DELAY_NS);
/* Queue not full: send while still holding the queue lock */
547 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
548 unix_shared_memory_queue_unlock (q);
550 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
551 mp = vl_msg_api_alloc_as_if_client
553 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
554 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
556 mp->vrf_id = ntohl (fib->ft_table_id);
557 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Mid-send yield point: same release-hint protocol */
561 if (sm->data_structure_lock->release_hint)
563 start_at_fib_index = fib - im6->fibs;
565 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
567 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
570 } /* vec_foreach (routes) */
574 /* Flush any data from this fib */
577 mp->count = htonl (mp->count);
578 vl_msg_api_send_shmem (q, (u8 *) & mp);
584 /* If e.g. the last FIB had no reportable routes, free the buffer */
586 vl_msg_api_free (mp);
/*
 * stats_thread_fn -- entry point of the dedicated stats pthread
 * (registered via VLIB_REGISTER_THREAD below). Loops forever, sleeping
 * 10 seconds per iteration, and collects counters only while at least one
 * client is registered (sm->enable_poller).
 * NOTE(review): interior lines (sigfillset, loop braces, the fib pollers)
 * are missing from this extraction.
 */
590 stats_thread_fn (void *arg)
592 stats_main_t *sm = &stats_main;
593 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
594 vlib_thread_main_t *tm = vlib_get_thread_main ();
596 /* stats thread wants no signals. */
600 pthread_sigmask (SIG_SETMASK, &s, 0);
/* Name the thread "<prefix>_stats" when a thread prefix is configured */
603 if (vec_len (tm->thread_prefix))
604 vlib_set_thread_name ((char *)
605 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
606
607 clib_mem_set_heap (w->thread_mheap);
611 /* 10 second poll interval */
612 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* Skip collection entirely when no client has requested stats */
614 if (!(sm->enable_poller))
616 do_simple_interface_counters (sm);
617 do_combined_interface_counters (sm);
/*
 * vl_api_vnet_interface_counters_t_handler -- relay a
 * VNET_INTERFACE_COUNTERS message from the stats thread to every
 * registered client. For all but the last client queue the message is
 * duplicated (alloc + memcpy); the original buffer is sent to the final
 * queue, or freed if no queue could take it. The tail of the function is
 * debug-only pretty-printing of the counters to stdout.
 * NOTE(review): interior lines (the #if STATS_DEBUG guards, braces,
 * breaks) are missing from this extraction.
 */
624 vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t *
627 vpe_client_registration_t *reg;
628 stats_main_t *sm = &stats_main;
629 unix_shared_memory_queue_t *q, *q_prev = NULL;
630 vl_api_vnet_interface_counters_t *mp_copy = NULL;
635 u32 count, sw_if_index;
/* Total on-wire size: header + count entries of u64 or vlib_counter_t */
639 mp_size = sizeof (*mp) + (ntohl (mp->count) *
640 (mp->is_combined ? sizeof (vlib_counter_t) :
/* Fan the message out: duplicate for every queue except the last */
644 pool_foreach(reg, sm->stats_registrations,
646 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Only copy/send when the previous queue has room */
649 if (q_prev && (q_prev->cursize < q_prev->maxsize))
651 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
652 clib_memcpy(mp_copy, mp, mp_size);
653 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* ---- debug dump of the message contents follows ---- */
662 count = ntohl (mp->count);
663 sw_if_index = ntohl (mp->first_sw_if_index);
/* Simple (single-u64) counters */
664 if (mp->is_combined == 0)
667 vp = (u64 *) mp->data;
669 switch (mp->vnet_counter_type)
671 case VNET_INTERFACE_COUNTER_DROP:
672 counter_name = "drop";
674 case VNET_INTERFACE_COUNTER_PUNT:
675 counter_name = "punt";
677 case VNET_INTERFACE_COUNTER_IP4:
678 counter_name = "ip4";
680 case VNET_INTERFACE_COUNTER_IP6:
681 counter_name = "ip6";
683 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
684 counter_name = "rx-no-buff";
686 case VNET_INTERFACE_COUNTER_RX_MISS:
/* NOTE(review): the leading commas on the next three assignments are
 * syntax errors -- almost certainly extraction artifacts; the statements
 * should read `counter_name = "...";`. Fix against the full source. */
687 , counter_name = "rx-miss";
689 case VNET_INTERFACE_COUNTER_RX_ERROR:
690 , counter_name = "rx-error (fifo-full)";
692 case VNET_INTERFACE_COUNTER_TX_ERROR:
693 , counter_name = "tx-error (fifo-full)";
696 counter_name = "bogus";
699 for (i = 0; i < count; i++)
701 v = clib_mem_unaligned (vp, u64);
702 v = clib_net_to_host_u64 (v);
704 fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
705 sm->vnet_main, sw_if_index, counter_name, v);
/* Combined (packets + bytes) counters */
713 vp = (vlib_counter_t *) mp->data;
715 switch (mp->vnet_counter_type)
717 case VNET_INTERFACE_COUNTER_RX:
720 case VNET_INTERFACE_COUNTER_TX:
724 counter_name = "bogus";
727 for (i = 0; i < count; i++)
729 packets = clib_mem_unaligned (&vp->packets, u64);
730 packets = clib_net_to_host_u64 (packets);
731 bytes = clib_mem_unaligned (&vp->bytes, u64);
732 bytes = clib_net_to_host_u64 (bytes);
734 fformat (stdout, "%U.%s.packets %lld\n",
735 format_vnet_sw_if_index_name,
736 sm->vnet_main, sw_if_index, counter_name, packets);
737 fformat (stdout, "%U.%s.bytes %lld\n",
738 format_vnet_sw_if_index_name,
739 sm->vnet_main, sw_if_index, counter_name, bytes);
/* Deliver the original buffer to the last queue, or free it */
744 if (q_prev && (q_prev->cursize < q_prev->maxsize))
746 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
750 vl_msg_api_free (mp);
/*
 * vl_api_vnet_ip4_fib_counters_t_handler -- fan a VNET_IP4_FIB_COUNTERS
 * message out to every registered client: copy for all but the last
 * receiving queue, send the original to the last one, free it if nobody
 * could take it. NOTE(review): interior lines (braces, mp = mp_copy,
 * q_prev = q) are missing from this extraction.
 */
755 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
757 vpe_client_registration_t *reg;
758 stats_main_t *sm = &stats_main;
759 unix_shared_memory_queue_t *q, *q_prev = NULL;
760 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
/* On-wire size: header + count ip4 fib counter entries */
763 mp_size = sizeof (*mp_copy) +
764 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
767 pool_foreach(reg, sm->stats_registrations,
769 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Duplicate for the previous queue only when it has room */
772 if (q_prev && (q_prev->cursize < q_prev->maxsize))
774 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
775 clib_memcpy(mp_copy, mp, mp_size);
776 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Original buffer goes to the last queue, or is freed */
783 if (q_prev && (q_prev->cursize < q_prev->maxsize))
785 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
789 vl_msg_api_free (mp);
/*
 * vl_api_vnet_ip6_fib_counters_t_handler -- identical relay pattern to the
 * ip4 handler above, for VNET_IP6_FIB_COUNTERS messages.
 * NOTE(review): interior lines are missing from this extraction.
 */
794 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
796 vpe_client_registration_t *reg;
797 stats_main_t *sm = &stats_main;
798 unix_shared_memory_queue_t *q, *q_prev = NULL;
799 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
/* On-wire size: header + count ip6 fib counter entries */
802 mp_size = sizeof (*mp_copy) +
803 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
806 pool_foreach(reg, sm->stats_registrations,
808 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Duplicate for the previous queue only when it has room */
811 if (q_prev && (q_prev->cursize < q_prev->maxsize))
813 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
814 clib_memcpy(mp_copy, mp, mp_size);
815 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Original buffer goes to the last queue, or is freed */
822 if (q_prev && (q_prev->cursize < q_prev->maxsize))
824 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
828 vl_msg_api_free (mp);
/* WANT_STATS_REPLY should only ever be received by clients; getting one
 * here indicates a wiring bug, hence the warning. */
833 vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp)
835 clib_warning ("BUG");
/*
 * vl_api_want_stats_t_handler -- register or deregister a client for
 * periodic stats. Maintains the registration pool plus a client_index ->
 * pool-index hash, flips sm->enable_poller based on whether any
 * registrations remain, and replies with WANT_STATS_REPLY.
 * NOTE(review): interior lines (braces, retval setup, the hash-miss
 * branch, goto/reply labels) are missing from this extraction.
 */
839 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
841 stats_main_t *sm = &stats_main;
842 vpe_client_registration_t *rp;
843 vl_api_want_stats_reply_t *rmp;
846 unix_shared_memory_queue_t *q;
/* Look up an existing registration for this client */
848 p = hash_get (sm->stats_registration_hash, mp->client_index);
/* Client found in hash: enable is a duplicate; disable removes it */
851 if (mp->enable_disable)
853 clib_warning ("pid %d: already enabled...", mp->pid);
859 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
860 pool_put (sm->stats_registrations, rp);
861 hash_unset (sm->stats_registration_hash, mp->client_index);
/* Client not registered: disable is a no-op; enable adds a registration */
865 if (mp->enable_disable == 0)
867 clib_warning ("pid %d: already disabled...", mp->pid);
871 pool_get (sm->stats_registrations, rp);
872 rp->client_index = mp->client_index;
873 rp->client_pid = mp->pid;
874 hash_set (sm->stats_registration_hash, rp->client_index,
875 rp - sm->stats_registrations);
/* Poll only while at least one client is registered */
878 if (pool_elts (sm->stats_registrations))
879 sm->enable_poller = 1;
881 sm->enable_poller = 0;
/* Send the reply back to the requesting client's queue */
883 q = vl_api_client_index_to_input_queue (mp->client_index);
888 rmp = vl_msg_api_alloc (sizeof (*rmp));
889 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
890 rmp->context = mp->context;
891 rmp->retval = retval;
893 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * stats_memclnt_delete_callback -- API-client-disconnect hook: drop any
 * stats registration belonging to the departing client so the poller
 * stops sending to a dead queue.
 * NOTE(review): interior lines (the `if (p)` guard and braces) are missing
 * from this extraction.
 */
897 stats_memclnt_delete_callback (u32 client_index)
899 vpe_client_registration_t *rp;
900 stats_main_t *sm = &stats_main;
903 p = hash_get (sm->stats_registration_hash, client_index);
/* Registration found: remove it from both the pool and the hash */
906 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
907 pool_put (sm->stats_registrations, rp);
908 hash_unset (sm->stats_registration_hash, client_index);
/* These messages are relayed verbatim, so endian/print handling is a no-op */
914 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
915 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
916 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
917 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
/*
 * stats_init -- one-time module setup: wire up main pointers, allocate the
 * data-structure lock, and register handlers for each message in
 * foreach_stats_msg. Returns a clib_error_t (NULL on success, per the
 * vlib init-function convention).
 * NOTE(review): interior lines (braces, the foreach_stats_msg expansion
 * site, the return) are missing from this extraction.
 */
919 static clib_error_t *
920 stats_init (vlib_main_t * vm)
922 stats_main_t *sm = &stats_main;
923 api_main_t *am = &api_main;
924 void *vlib_worker_thread_bootstrap_fn (void *arg);
927 sm->vnet_main = vnet_get_main ();
928 sm->interface_main = &vnet_get_main ()->interface_main;
930 sm->stats_poll_interval_in_seconds = 10;
/* Cache-line aligned, zero-initialized lock shared with worker threads */
931 sm->data_structure_lock =
932 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
933 CLIB_CACHE_LINE_BYTES);
934 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
/* Handler-registration template expanded once per foreach_stats_msg entry */
937 vl_msg_api_set_handlers(VL_API_##N, #n, \
938 vl_api_##n##_t_handler, \
940 vl_api_##n##_t_endian, \
941 vl_api_##n##_t_print, \
942 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
946 /* tell the msg infra not to free these messages... */
947 am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1;
948 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
949 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
/* Run stats_init during vlib startup */
954 VLIB_INIT_FUNCTION (stats_init);
/* Register the dedicated stats pthread; entry point is stats_thread_fn.
 * NOTE(review): remaining initializers (.name, etc.) are missing from
 * this extraction. */
957 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
959 .function = stats_thread_fn,
962 .no_data_structure_clone = 1,
968 * fd.io coding-style-patch-verification: ON
971 * eval: (c-set-style "gnu")