/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15 #include <vpp/stats/stats.h>
17 #include <vlib/threads.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/fib/fib_table.h>
20 #include <vnet/fib/ip4_fib.h>
21 #include <vnet/dpo/load_balance.h>
25 stats_main_t stats_main;
27 #include <vnet/ip/ip.h>
29 #include <vpp/api/vpe_msg_enum.h>
32 #define f64_print(a,b)
34 #define vl_typedefs /* define message structures */
35 #include <vpp/api/vpe_all_api_h.h>
38 #define vl_endianfun /* define message structures */
39 #include <vpp/api/vpe_all_api_h.h>
42 /* instantiate all the print functions we know about */
43 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
45 #include <vpp/api/vpe_all_api_h.h>
48 #define foreach_stats_msg \
49 _(WANT_STATS, want_stats) \
50 _(VNET_INTERFACE_SIMPLE_COUNTERS, vnet_interface_simple_counters) \
51 _(VNET_INTERFACE_COMBINED_COUNTERS, vnet_interface_combined_counters) \
52 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
53 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
54 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
55 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)
57 /* These constants ensure msg sizes <= 1024, aka ring allocation */
58 #define SIMPLE_COUNTER_BATCH_SIZE 126
59 #define COMBINED_COUNTER_BATCH_SIZE 63
60 #define IP4_FIB_COUNTER_BATCH_SIZE 48
61 #define IP6_FIB_COUNTER_BATCH_SIZE 30
64 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
68 dslock (stats_main_t * sm, int release_hint, int tag)
71 data_structure_lock_t *l = sm->data_structure_lock;
73 if (PREDICT_FALSE (l == 0))
76 thread_index = vlib_get_thread_index ();
77 if (l->lock && l->thread_index == thread_index)
86 while (__sync_lock_test_and_set (&l->lock, 1))
89 l->thread_index = thread_index;
94 stats_dslock_with_hint (int hint, int tag)
96 stats_main_t *sm = &stats_main;
97 dslock (sm, hint, tag);
101 dsunlock (stats_main_t * sm)
104 data_structure_lock_t *l = sm->data_structure_lock;
106 if (PREDICT_FALSE (l == 0))
109 thread_index = vlib_get_thread_index ();
110 ASSERT (l->lock && l->thread_index == thread_index);
116 CLIB_MEMORY_BARRIER ();
122 stats_dsunlock (int hint, int tag)
124 stats_main_t *sm = &stats_main;
129 do_simple_interface_counters (stats_main_t * sm)
131 vl_api_vnet_interface_simple_counters_t *mp = 0;
132 vnet_interface_main_t *im = sm->interface_main;
133 api_main_t *am = sm->api_main;
134 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
135 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
136 vlib_simple_counter_main_t *cm;
137 u32 items_this_message = 0;
142 * Prevent interface registration from expanding / moving the vectors...
143 * That tends never to happen, so we can hold this lock for a while.
145 vnet_interface_counter_lock (im);
147 vec_foreach (cm, im->sw_if_counters)
149 n_counts = vlib_simple_counter_n_counters (cm);
150 for (i = 0; i < n_counts; i++)
154 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
157 mp = vl_msg_api_alloc_as_if_client
158 (sizeof (*mp) + items_this_message * sizeof (v));
159 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_SIMPLE_COUNTERS);
160 mp->vnet_counter_type = cm - im->sw_if_counters;
161 mp->first_sw_if_index = htonl (i);
163 vp = (u64 *) mp->data;
165 v = vlib_get_simple_counter (cm, i);
166 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
169 if (mp->count == items_this_message)
171 mp->count = htonl (items_this_message);
172 /* Send to the main thread... */
173 vl_msg_api_send_shmem (q, (u8 *) & mp);
179 vnet_interface_counter_unlock (im);
183 do_combined_interface_counters (stats_main_t * sm)
185 vl_api_vnet_interface_combined_counters_t *mp = 0;
186 vnet_interface_main_t *im = sm->interface_main;
187 api_main_t *am = sm->api_main;
188 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
189 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
190 vlib_combined_counter_main_t *cm;
191 u32 items_this_message = 0;
192 vlib_counter_t v, *vp = 0;
195 vnet_interface_counter_lock (im);
197 vec_foreach (cm, im->combined_sw_if_counters)
199 n_counts = vlib_combined_counter_n_counters (cm);
200 for (i = 0; i < n_counts; i++)
204 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
207 mp = vl_msg_api_alloc_as_if_client
208 (sizeof (*mp) + items_this_message * sizeof (v));
209 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COMBINED_COUNTERS);
210 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
211 mp->first_sw_if_index = htonl (i);
213 vp = (vlib_counter_t *) mp->data;
215 vlib_get_combined_counter (cm, i, &v);
216 clib_mem_unaligned (&vp->packets, u64)
217 = clib_host_to_net_u64 (v.packets);
218 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
221 if (mp->count == items_this_message)
223 mp->count = htonl (items_this_message);
224 /* Send to the main thread... */
225 vl_msg_api_send_shmem (q, (u8 *) & mp);
231 vnet_interface_counter_unlock (im);
234 /* from .../vnet/vnet/ip/lookup.c. Yuck */
235 typedef CLIB_PACKED (struct
237 ip4_address_t address;
238 u32 address_length: 6;
243 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
245 struct timespec _req, *req = &_req;
246 struct timespec _rem, *rem = &_rem;
252 if (nanosleep (req, rem) == 0)
257 clib_unix_warning ("nanosleep");
263 * @brief The context passed when collecting adjacency counters
265 typedef struct ip4_nbr_stats_ctx_t_
268 * The SW IF index all these adjs belong to
273 * A vector of ip4 nbr counters
275 vl_api_ip4_nbr_counter_t *counters;
276 } ip4_nbr_stats_ctx_t;
279 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
281 vl_api_ip4_nbr_counter_t *vl_counter;
282 vlib_counter_t adj_counter;
283 ip4_nbr_stats_ctx_t *ctx;
287 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
289 if (0 != adj_counter.packets)
291 vec_add2 (ctx->counters, vl_counter, 1);
294 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
295 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
296 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
297 vl_counter->link_type = adj->ia_link;
299 return (ADJ_WALK_RC_CONTINUE);
302 #define MIN(x,y) (((x)<(y))?(x):(y))
305 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
307 api_main_t *am = sm->api_main;
308 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
309 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
310 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
314 * If the walk context has counters, which may be left over from the last
315 * suspend, then we continue from there.
317 while (0 != vec_len (ctx->counters))
319 u32 n_items = MIN (vec_len (ctx->counters),
320 IP4_FIB_COUNTER_BATCH_SIZE);
323 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
325 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
328 (vl_api_ip4_nbr_counter_t)));
329 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
330 mp->count = ntohl (n_items);
331 mp->sw_if_index = ntohl (ctx->sw_if_index);
336 * copy the counters from the back of the context, then we can easily
337 * 'erase' them by resetting the vector length.
338 * The order we push the stats to the caller is not important.
341 &ctx->counters[vec_len (ctx->counters) - n_items],
342 n_items * sizeof (*ctx->counters));
344 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
349 unix_shared_memory_queue_lock (q);
350 pause = unix_shared_memory_queue_is_full (q);
352 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
353 unix_shared_memory_queue_unlock (q);
357 ip46_fib_stats_delay (sm, 0 /* sec */ ,
358 STATS_RELEASE_DELAY_NS);
363 do_ip4_nbrs (stats_main_t * sm)
365 vnet_main_t *vnm = vnet_get_main ();
366 vnet_interface_main_t *im = &vnm->interface_main;
367 vnet_sw_interface_t *si;
369 ip4_nbr_stats_ctx_t ctx = {
375 pool_foreach (si, im->sw_interfaces,
378 * update the interface we are now concerned with
380 ctx.sw_if_index = si->sw_if_index;
383 * we are about to walk another interface, so we shouldn't have any pending
386 ASSERT(ctx.counters == NULL);
389 * visit each neighbour adjacency on the interface and collect
391 * Because we hold the lock the walk is synchronous, so safe to routing
392 * updates. It's limited in work by the number of adjacenies on an
393 * interface, which is typically not huge.
395 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
396 adj_nbr_walk (si->sw_if_index,
403 * if this interface has some adjacencies with counters then ship them,
404 * else continue to the next interface.
406 if (NULL != ctx.counters)
408 ip4_nbr_ship(sm, &ctx);
415 * @brief The context passed when collecting adjacency counters
417 typedef struct ip6_nbr_stats_ctx_t_
420 * The SW IF index all these adjs belong to
425 * A vector of ip6 nbr counters
427 vl_api_ip6_nbr_counter_t *counters;
428 } ip6_nbr_stats_ctx_t;
431 ip6_nbr_stats_cb (adj_index_t ai,
434 vl_api_ip6_nbr_counter_t *vl_counter;
435 vlib_counter_t adj_counter;
436 ip6_nbr_stats_ctx_t *ctx;
440 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
442 if (0 != adj_counter.packets)
444 vec_add2(ctx->counters, vl_counter, 1);
447 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
448 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
449 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
450 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
451 vl_counter->link_type = adj->ia_link;
453 return (ADJ_WALK_RC_CONTINUE);
456 #define MIN(x,y) (((x)<(y))?(x):(y))
459 ip6_nbr_ship (stats_main_t * sm,
460 ip6_nbr_stats_ctx_t *ctx)
462 api_main_t *am = sm->api_main;
463 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
464 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
465 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
469 * If the walk context has counters, which may be left over from the last
470 * suspend, then we continue from there.
472 while (0 != vec_len(ctx->counters))
474 u32 n_items = MIN (vec_len (ctx->counters),
475 IP6_FIB_COUNTER_BATCH_SIZE);
478 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
480 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
483 (vl_api_ip6_nbr_counter_t)));
484 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
485 mp->count = ntohl (n_items);
486 mp->sw_if_index = ntohl (ctx->sw_if_index);
491 * copy the counters from the back of the context, then we can easily
492 * 'erase' them by resetting the vector length.
493 * The order we push the stats to the caller is not important.
496 &ctx->counters[vec_len (ctx->counters) - n_items],
497 n_items * sizeof (*ctx->counters));
499 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
504 unix_shared_memory_queue_lock (q);
505 pause = unix_shared_memory_queue_is_full (q);
507 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
508 unix_shared_memory_queue_unlock (q);
512 ip46_fib_stats_delay (sm, 0 /* sec */ ,
513 STATS_RELEASE_DELAY_NS);
518 do_ip6_nbrs (stats_main_t * sm)
520 vnet_main_t *vnm = vnet_get_main ();
521 vnet_interface_main_t *im = &vnm->interface_main;
522 vnet_sw_interface_t *si;
524 ip6_nbr_stats_ctx_t ctx = {
530 pool_foreach (si, im->sw_interfaces,
533 * update the interface we are now concerned with
535 ctx.sw_if_index = si->sw_if_index;
538 * we are about to walk another interface, so we shouldn't have any pending
541 ASSERT(ctx.counters == NULL);
544 * visit each neighbour adjacency on the interface and collect
546 * Because we hold the lock the walk is synchronous, so safe to routing
547 * updates. It's limited in work by the number of adjacenies on an
548 * interface, which is typically not huge.
550 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
551 adj_nbr_walk (si->sw_if_index,
558 * if this interface has some adjacencies with counters then ship them,
559 * else continue to the next interface.
561 if (NULL != ctx.counters)
563 ip6_nbr_ship(sm, &ctx);
570 do_ip4_fibs (stats_main_t * sm)
572 ip4_main_t *im4 = &ip4_main;
573 api_main_t *am = sm->api_main;
574 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
575 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
576 static ip4_route_t *routes;
580 ip_lookup_main_t *lm = &im4->lookup_main;
581 static uword *results;
582 vl_api_vnet_ip4_fib_counters_t *mp = 0;
583 u32 items_this_message;
584 vl_api_ip4_fib_counter_t *ctrp = 0;
585 u32 start_at_fib_index = 0;
590 pool_foreach (fib, im4->fibs,
592 /* We may have bailed out due to control-plane activity */
593 while ((fib - im4->fibs) < start_at_fib_index)
596 v4_fib = pool_elt_at_index (im4->v4_fibs, fib->ft_index);
600 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
601 mp = vl_msg_api_alloc_as_if_client
603 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
604 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
606 mp->vrf_id = ntohl (fib->ft_table_id);
607 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
611 /* happens if the last FIB was empty... */
612 ASSERT (mp->count == 0);
613 mp->vrf_id = ntohl (fib->ft_table_id);
616 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
618 vec_reset_length (routes);
619 vec_reset_length (results);
621 for (i = 0; i < ARRAY_LEN (v4_fib->fib_entry_by_dst_address); i++)
623 uword *hash = v4_fib->fib_entry_by_dst_address[i];
627 x.address_length = i;
629 hash_foreach_pair (p, hash,
631 x.address.data_u32 = p->key;
632 x.index = p->value[0];
634 vec_add1 (routes, x);
635 if (sm->data_structure_lock->release_hint)
637 start_at_fib_index = fib - im4->fibs;
639 ip46_fib_stats_delay (sm, 0 /* sec */,
640 STATS_RELEASE_DELAY_NS);
642 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
648 vec_foreach (r, routes)
652 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
656 * seen at least one packet, send it.
661 /* already in net byte order */
662 ctrp->address = r->address.as_u32;
663 ctrp->address_length = r->address_length;
664 ctrp->packets = clib_host_to_net_u64 (c.packets);
665 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
669 if (mp->count == items_this_message)
671 mp->count = htonl (items_this_message);
673 * If the main thread's input queue is stuffed,
674 * drop the data structure lock (which the main thread
675 * may want), and take a pause.
677 unix_shared_memory_queue_lock (q);
678 if (unix_shared_memory_queue_is_full (q))
681 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
682 unix_shared_memory_queue_unlock (q);
684 ip46_fib_stats_delay (sm, 0 /* sec */ ,
685 STATS_RELEASE_DELAY_NS);
688 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
689 unix_shared_memory_queue_unlock (q);
691 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
692 mp = vl_msg_api_alloc_as_if_client
694 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
695 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
697 mp->vrf_id = ntohl (fib->ft_table_id);
698 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
700 } /* for each (mp or single) adj */
701 if (sm->data_structure_lock->release_hint)
703 start_at_fib_index = fib - im4->fibs;
705 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
707 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
710 } /* vec_foreach (routes) */
714 /* Flush any data from this fib */
717 mp->count = htonl (mp->count);
718 vl_msg_api_send_shmem (q, (u8 *) & mp);
724 /* If e.g. the last FIB had no reportable routes, free the buffer */
726 vl_msg_api_free (mp);
731 ip6_address_t address;
739 ip6_route_t **routep;
741 } add_routes_in_fib_arg_t;
744 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
746 add_routes_in_fib_arg_t *ap = arg;
747 stats_main_t *sm = ap->sm;
749 if (sm->data_structure_lock->release_hint)
750 clib_longjmp (&sm->jmp_buf, 1);
752 if (kvp->key[2] >> 32 == ap->fib_index)
756 addr = (ip6_address_t *) kvp;
757 vec_add2 (*ap->routep, r, 1);
758 r->address = addr[0];
759 r->address_length = kvp->key[2] & 0xFF;
760 r->index = kvp->value;
765 do_ip6_fibs (stats_main_t * sm)
767 ip6_main_t *im6 = &ip6_main;
768 api_main_t *am = sm->api_main;
769 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
770 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
771 static ip6_route_t *routes;
774 static uword *results;
775 vl_api_vnet_ip6_fib_counters_t *mp = 0;
776 u32 items_this_message;
777 vl_api_ip6_fib_counter_t *ctrp = 0;
778 u32 start_at_fib_index = 0;
779 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
780 add_routes_in_fib_arg_t _a, *a = &_a;
784 pool_foreach (fib, im6->fibs,
786 /* We may have bailed out due to control-plane activity */
787 while ((fib - im6->fibs) < start_at_fib_index)
792 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
793 mp = vl_msg_api_alloc_as_if_client
795 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
796 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
798 mp->vrf_id = ntohl (fib->ft_table_id);
799 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
802 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
804 vec_reset_length (routes);
805 vec_reset_length (results);
807 a->fib_index = fib - im6->fibs;
811 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
813 start_at_fib_index = fib - im6->fibs;
814 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
819 ip46_fib_stats_delay (sm, 0 /* sec */ ,
820 STATS_RELEASE_DELAY_NS);
822 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
826 vec_foreach (r, routes)
830 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
834 * seen at least one packet, send it.
838 /* already in net byte order */
839 ctrp->address[0] = r->address.as_u64[0];
840 ctrp->address[1] = r->address.as_u64[1];
841 ctrp->address_length = (u8) r->address_length;
842 ctrp->packets = clib_host_to_net_u64 (c.packets);
843 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
847 if (mp->count == items_this_message)
849 mp->count = htonl (items_this_message);
851 * If the main thread's input queue is stuffed,
852 * drop the data structure lock (which the main thread
853 * may want), and take a pause.
855 unix_shared_memory_queue_lock (q);
856 if (unix_shared_memory_queue_is_full (q))
859 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
860 unix_shared_memory_queue_unlock (q);
862 ip46_fib_stats_delay (sm, 0 /* sec */ ,
863 STATS_RELEASE_DELAY_NS);
866 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
867 unix_shared_memory_queue_unlock (q);
869 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
870 mp = vl_msg_api_alloc_as_if_client
872 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
873 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
875 mp->vrf_id = ntohl (fib->ft_table_id);
876 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
880 if (sm->data_structure_lock->release_hint)
882 start_at_fib_index = fib - im6->fibs;
884 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
886 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
889 } /* vec_foreach (routes) */
893 /* Flush any data from this fib */
896 mp->count = htonl (mp->count);
897 vl_msg_api_send_shmem (q, (u8 *) & mp);
903 /* If e.g. the last FIB had no reportable routes, free the buffer */
905 vl_msg_api_free (mp);
909 stats_thread_fn (void *arg)
911 stats_main_t *sm = &stats_main;
912 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
913 vlib_thread_main_t *tm = vlib_get_thread_main ();
915 /* stats thread wants no signals. */
919 pthread_sigmask (SIG_SETMASK, &s, 0);
922 if (vec_len (tm->thread_prefix))
923 vlib_set_thread_name ((char *)
924 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
926 clib_mem_set_heap (w->thread_mheap);
930 /* 10 second poll interval */
931 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
933 if (!(sm->enable_poller))
935 do_simple_interface_counters (sm);
936 do_combined_interface_counters (sm);
945 vl_api_vnet_interface_simple_counters_t_handler
946 (vl_api_vnet_interface_simple_counters_t * mp)
948 vpe_client_registration_t *reg;
949 stats_main_t *sm = &stats_main;
950 unix_shared_memory_queue_t *q, *q_prev = NULL;
951 vl_api_vnet_interface_simple_counters_t *mp_copy = NULL;
956 u32 count, sw_if_index;
960 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (u64));
963 pool_foreach(reg, sm->stats_registrations,
965 q = vl_api_client_index_to_input_queue (reg->client_index);
968 if (q_prev && (q_prev->cursize < q_prev->maxsize))
970 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
971 clib_memcpy(mp_copy, mp, mp_size);
972 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
981 count = ntohl (mp->count);
982 sw_if_index = ntohl (mp->first_sw_if_index);
984 vp = (u64 *) mp->data;
986 switch (mp->vnet_counter_type)
988 case VNET_INTERFACE_COUNTER_DROP:
989 counter_name = "drop";
991 case VNET_INTERFACE_COUNTER_PUNT:
992 counter_name = "punt";
994 case VNET_INTERFACE_COUNTER_IP4:
995 counter_name = "ip4";
997 case VNET_INTERFACE_COUNTER_IP6:
998 counter_name = "ip6";
1000 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
1001 counter_name = "rx-no-buff";
1003 case VNET_INTERFACE_COUNTER_RX_MISS:
1004 , counter_name = "rx-miss";
1006 case VNET_INTERFACE_COUNTER_RX_ERROR:
1007 , counter_name = "rx-error (fifo-full)";
1009 case VNET_INTERFACE_COUNTER_TX_ERROR:
1010 , counter_name = "tx-error (fifo-full)";
1013 counter_name = "bogus";
1016 for (i = 0; i < count; i++)
1018 v = clib_mem_unaligned (vp, u64);
1019 v = clib_net_to_host_u64 (v);
1021 fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
1022 sm->vnet_main, sw_if_index, counter_name, v);
1026 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1028 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1032 vl_msg_api_free (mp);
1037 vl_api_vnet_interface_combined_counters_t_handler
1038 (vl_api_vnet_interface_combined_counters_t * mp)
1040 vpe_client_registration_t *reg;
1041 stats_main_t *sm = &stats_main;
1042 unix_shared_memory_queue_t *q, *q_prev = NULL;
1043 vl_api_vnet_interface_combined_counters_t *mp_copy = NULL;
1048 u32 count, sw_if_index;
1052 mp_size = sizeof (*mp) + (ntohl (mp->count) * sizeof (vlib_counter_t));
1055 pool_foreach(reg, sm->stats_registrations,
1057 q = vl_api_client_index_to_input_queue (reg->client_index);
1060 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1062 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1063 clib_memcpy(mp_copy, mp, mp_size);
1064 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
1073 count = ntohl (mp->count);
1074 sw_if_index = ntohl (mp->first_sw_if_index);
1078 vp = (vlib_counter_t *) mp->data;
1080 switch (mp->vnet_counter_type)
1082 case VNET_INTERFACE_COUNTER_RX:
1083 counter_name = "rx";
1085 case VNET_INTERFACE_COUNTER_TX:
1086 counter_name = "tx";
1089 counter_name = "bogus";
1092 for (i = 0; i < count; i++)
1094 packets = clib_mem_unaligned (&vp->packets, u64);
1095 packets = clib_net_to_host_u64 (packets);
1096 bytes = clib_mem_unaligned (&vp->bytes, u64);
1097 bytes = clib_net_to_host_u64 (bytes);
1099 fformat (stdout, "%U.%s.packets %lld\n",
1100 format_vnet_sw_if_index_name,
1101 sm->vnet_main, sw_if_index, counter_name, packets);
1102 fformat (stdout, "%U.%s.bytes %lld\n",
1103 format_vnet_sw_if_index_name,
1104 sm->vnet_main, sw_if_index, counter_name, bytes);
1109 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1111 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1115 vl_msg_api_free (mp);
1120 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
1122 vpe_client_registration_t *reg;
1123 stats_main_t *sm = &stats_main;
1124 unix_shared_memory_queue_t *q, *q_prev = NULL;
1125 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
1128 mp_size = sizeof (*mp_copy) +
1129 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
1132 pool_foreach(reg, sm->stats_registrations,
1134 q = vl_api_client_index_to_input_queue (reg->client_index);
1137 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1139 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1140 clib_memcpy(mp_copy, mp, mp_size);
1141 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
1148 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1150 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1154 vl_msg_api_free (mp);
1159 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
1161 vpe_client_registration_t *reg;
1162 stats_main_t *sm = &stats_main;
1163 unix_shared_memory_queue_t *q, *q_prev = NULL;
1164 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
1167 mp_size = sizeof (*mp_copy) +
1168 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
1171 pool_foreach(reg, sm->stats_registrations,
1173 q = vl_api_client_index_to_input_queue (reg->client_index);
1176 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1178 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1179 clib_memcpy(mp_copy, mp, mp_size);
1180 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
1187 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1189 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1193 vl_msg_api_free (mp);
1198 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
1200 vpe_client_registration_t *reg;
1201 stats_main_t *sm = &stats_main;
1202 unix_shared_memory_queue_t *q, *q_prev = NULL;
1203 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
1206 mp_size = sizeof (*mp_copy) +
1207 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
1210 pool_foreach(reg, sm->stats_registrations,
1212 q = vl_api_client_index_to_input_queue (reg->client_index);
1215 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1217 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1218 clib_memcpy(mp_copy, mp, mp_size);
1219 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
1226 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1228 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1232 vl_msg_api_free (mp);
1237 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
1239 vpe_client_registration_t *reg;
1240 stats_main_t *sm = &stats_main;
1241 unix_shared_memory_queue_t *q, *q_prev = NULL;
1242 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
1245 mp_size = sizeof (*mp_copy) +
1246 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
1249 pool_foreach(reg, sm->stats_registrations,
1251 q = vl_api_client_index_to_input_queue (reg->client_index);
1254 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1256 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1257 clib_memcpy(mp_copy, mp, mp_size);
1258 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
1265 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1267 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1271 vl_msg_api_free (mp);
1276 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
1278 stats_main_t *sm = &stats_main;
1279 vpe_client_registration_t *rp;
1280 vl_api_want_stats_reply_t *rmp;
1283 unix_shared_memory_queue_t *q;
1285 p = hash_get (sm->stats_registration_hash, mp->client_index);
1288 if (mp->enable_disable)
1290 clib_warning ("pid %d: already enabled...", mp->pid);
1296 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1297 pool_put (sm->stats_registrations, rp);
1298 hash_unset (sm->stats_registration_hash, mp->client_index);
1302 if (mp->enable_disable == 0)
1304 clib_warning ("pid %d: already disabled...", mp->pid);
1308 pool_get (sm->stats_registrations, rp);
1309 rp->client_index = mp->client_index;
1310 rp->client_pid = mp->pid;
1311 hash_set (sm->stats_registration_hash, rp->client_index,
1312 rp - sm->stats_registrations);
1315 if (pool_elts (sm->stats_registrations))
1316 sm->enable_poller = 1;
1318 sm->enable_poller = 0;
1320 q = vl_api_client_index_to_input_queue (mp->client_index);
1325 rmp = vl_msg_api_alloc (sizeof (*rmp));
1326 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
1327 rmp->context = mp->context;
1328 rmp->retval = retval;
1330 vl_msg_api_send_shmem (q, (u8 *) & rmp);
1334 stats_memclnt_delete_callback (u32 client_index)
1336 vpe_client_registration_t *rp;
1337 stats_main_t *sm = &stats_main;
1340 p = hash_get (sm->stats_registration_hash, client_index);
1343 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1344 pool_put (sm->stats_registrations, rp);
1345 hash_unset (sm->stats_registration_hash, client_index);
1351 #define vl_api_vnet_interface_simple_counters_t_endian vl_noop_handler
1352 #define vl_api_vnet_interface_simple_counters_t_print vl_noop_handler
1353 #define vl_api_vnet_interface_combined_counters_t_endian vl_noop_handler
1354 #define vl_api_vnet_interface_combined_counters_t_print vl_noop_handler
1355 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
1356 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
1357 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
1358 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
1359 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
1360 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
1361 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
1362 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
1364 static clib_error_t *
1365 stats_init (vlib_main_t * vm)
1367 stats_main_t *sm = &stats_main;
1368 api_main_t *am = &api_main;
1369 void *vlib_worker_thread_bootstrap_fn (void *arg);
1372 sm->vnet_main = vnet_get_main ();
1373 sm->interface_main = &vnet_get_main ()->interface_main;
1375 sm->stats_poll_interval_in_seconds = 10;
1376 sm->data_structure_lock =
1377 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
1378 CLIB_CACHE_LINE_BYTES);
1379 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
1382 vl_msg_api_set_handlers(VL_API_##N, #n, \
1383 vl_api_##n##_t_handler, \
1385 vl_api_##n##_t_endian, \
1386 vl_api_##n##_t_print, \
1387 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
1391 /* tell the msg infra not to free these messages... */
1392 am->message_bounce[VL_API_VNET_INTERFACE_SIMPLE_COUNTERS] = 1;
1393 am->message_bounce[VL_API_VNET_INTERFACE_COMBINED_COUNTERS] = 1;
1394 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
1395 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
1396 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
1397 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
1402 VLIB_INIT_FUNCTION (stats_init);
1405 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
1407 .function = stats_thread_fn,
1410 .no_data_structure_clone = 1,
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */