2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #include <vpp/stats/stats.h>
17 #include <vlib/threads.h>
18 #include <vnet/fib/fib_entry.h>
19 #include <vnet/fib/fib_table.h>
20 #include <vnet/dpo/load_balance.h>
24 stats_main_t stats_main;
26 #include <vnet/ip/ip.h>
28 #include <vpp/api/vpe_msg_enum.h>
31 #define f64_print(a,b)
33 #define vl_typedefs /* define message structures */
34 #include <vpp/api/vpe_all_api_h.h>
37 #define vl_endianfun /* define message structures */
38 #include <vpp/api/vpe_all_api_h.h>
41 /* instantiate all the print functions we know about */
42 #define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
44 #include <vpp/api/vpe_all_api_h.h>
47 #define foreach_stats_msg \
48 _(WANT_STATS, want_stats) \
49 _(WANT_STATS_REPLY, want_stats_reply) \
50 _(VNET_INTERFACE_COUNTERS, vnet_interface_counters) \
51 _(VNET_IP4_FIB_COUNTERS, vnet_ip4_fib_counters) \
52 _(VNET_IP6_FIB_COUNTERS, vnet_ip6_fib_counters) \
53 _(VNET_IP4_NBR_COUNTERS, vnet_ip4_nbr_counters) \
54 _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters)
56 /* These constants ensure msg sizes <= 1024, aka ring allocation */
57 #define SIMPLE_COUNTER_BATCH_SIZE 126
58 #define COMBINED_COUNTER_BATCH_SIZE 63
59 #define IP4_FIB_COUNTER_BATCH_SIZE 48
60 #define IP6_FIB_COUNTER_BATCH_SIZE 30
63 #define STATS_RELEASE_DELAY_NS (1000 * 1000 * 5)
/*
 * dslock: acquire the shared stats data-structure lock.
 * Spins on an atomic test-and-set until the lock word is taken, then
 * records the owning thread id.  The early checks handle "lock not yet
 * allocated" and "this thread already holds the lock" (bodies elided in
 * this capture -- presumably early returns; TODO confirm against full
 * source).  release_hint/tag handling is in the elided lines.
 */
67 dslock (stats_main_t * sm, int release_hint, int tag)
70 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock structure not allocated yet -- nothing to take. */
72 if (PREDICT_FALSE (l == 0))
75 thread_id = os_get_cpu_number ();
/* Recursive acquisition by the current holder. */
76 if (l->lock && l->thread_id == thread_id)
/* Spin until we atomically flip the lock word from 0 to 1. */
85 while (__sync_lock_test_and_set (&l->lock, 1))
88 l->thread_id = thread_id;
/*
 * Public wrapper: take the stats data-structure lock on the global
 * stats_main, forwarding the caller's release hint and debug tag.
 */
93 stats_dslock_with_hint (int hint, int tag)
95 stats_main_t *sm = &stats_main;
96 dslock (sm, hint, tag);
/*
 * dsunlock: release the shared stats data-structure lock.
 * Asserts the caller is the current holder, then (in lines elided here)
 * presumably clears the lock word after the memory barrier -- TODO
 * confirm against full source.
 */
100 dsunlock (stats_main_t * sm)
103 data_structure_lock_t *l = sm->data_structure_lock;
/* Lock never allocated -- nothing to release. */
105 if (PREDICT_FALSE (l == 0))
108 thread_id = os_get_cpu_number ();
/* Only the owning thread may unlock. */
109 ASSERT (l->lock && l->thread_id == thread_id);
/* Ensure protected writes are globally visible before the release. */
115 CLIB_MEMORY_BARRIER ();
/*
 * Public wrapper: release the stats data-structure lock on the global
 * stats_main.  (The dsunlock call itself is in an elided line.)
 */
121 stats_dsunlock (int hint, int tag)
123 stats_main_t *sm = &stats_main;
/*
 * Collect every simple (single-u64) per-interface counter and ship the
 * values to the main thread's binary-API input queue in batches of at
 * most SIMPLE_COUNTER_BATCH_SIZE, so each message fits the shmem ring.
 * Values are sent in network byte order via unaligned 64-bit stores.
 * NOTE(review): several lines (loop braces, count bookkeeping) are
 * elided in this capture; comments describe only what is visible.
 */
128 do_simple_interface_counters (stats_main_t * sm)
130 vl_api_vnet_interface_counters_t *mp = 0;
131 vnet_interface_main_t *im = sm->interface_main;
132 api_main_t *am = sm->api_main;
133 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
134 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
135 vlib_simple_counter_main_t *cm;
136 u32 items_this_message = 0;
141 * Prevent interface registration from expanding / moving the vectors...
142 * That tends never to happen, so we can hold this lock for a while.
144 vnet_interface_counter_lock (im);
/* One pass per simple counter type (drops, punts, ip4, ...). */
146 vec_foreach (cm, im->sw_if_counters)
149 for (i = 0; i < vec_len (cm->maxi); i++)
/* Start a new message: batch whatever remains, up to the ring limit. */
153 items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE,
154 vec_len (cm->maxi) - i);
156 mp = vl_msg_api_alloc_as_if_client
157 (sizeof (*mp) + items_this_message * sizeof (v));
158 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
/* Counter type is the index of cm within the sw_if_counters vector. */
159 mp->vnet_counter_type = cm - im->sw_if_counters;
161 mp->first_sw_if_index = htonl (i);
163 vp = (u64 *) mp->data;
165 v = vlib_get_simple_counter (cm, i);
/* mp->data may be unaligned; store the big-endian value byte-wise. */
166 clib_mem_unaligned (vp, u64) = clib_host_to_net_u64 (v);
169 if (mp->count == items_this_message)
171 mp->count = htonl (items_this_message);
172 /* Send to the main thread... */
173 vl_msg_api_send_shmem (q, (u8 *) & mp);
179 vnet_interface_counter_unlock (im);
/*
 * Collect every combined (packets+bytes) per-interface counter and ship
 * the values to the main thread in batches of at most
 * COMBINED_COUNTER_BATCH_SIZE (half the simple batch, since each entry
 * is two u64s).  Mirrors do_simple_interface_counters.
 * NOTE(review): loop braces and count bookkeeping are elided in this
 * capture; comments describe only what is visible.
 */
185 vl_api_vnet_interface_counters_t *mp = 0;
186 vnet_interface_main_t *im = sm->interface_main;
187 api_main_t *am = sm->api_main;
188 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
189 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
190 vlib_combined_counter_main_t *cm;
191 u32 items_this_message = 0;
192 vlib_counter_t v, *vp = 0;
/* Hold the interface counter lock so the vectors cannot move. */
195 vnet_interface_counter_lock (im);
197 vec_foreach (cm, im->combined_sw_if_counters)
200 for (i = 0; i < vec_len (cm->maxi); i++)
/* Start a new message: batch whatever remains, up to the ring limit. */
204 items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE,
205 vec_len (cm->maxi) - i);
207 mp = vl_msg_api_alloc_as_if_client
208 (sizeof (*mp) + items_this_message * sizeof (v));
209 mp->_vl_msg_id = ntohs (VL_API_VNET_INTERFACE_COUNTERS);
210 mp->vnet_counter_type = cm - im->combined_sw_if_counters;
212 mp->first_sw_if_index = htonl (i);
214 vp = (vlib_counter_t *) mp->data;
216 vlib_get_combined_counter (cm, i, &v);
/* Unaligned big-endian stores of both halves of the counter. */
217 clib_mem_unaligned (&vp->packets, u64)
218 = clib_host_to_net_u64 (v.packets);
219 clib_mem_unaligned (&vp->bytes, u64) = clib_host_to_net_u64 (v.bytes);
222 if (mp->count == items_this_message)
224 mp->count = htonl (items_this_message);
225 /* Send to the main thread... */
226 vl_msg_api_send_shmem (q, (u8 *) & mp);
232 vnet_interface_counter_unlock (im);
/*
 * Local packed copy of the ip4 route record (presumably ip4_route_t --
 * trailing fields and closing of the typedef are elided in this
 * capture; TODO confirm against full source).  address_length is a
 * 6-bit field since an IPv4 prefix length fits in 0..32.
 */
235 /* from .../vnet/vnet/ip/lookup.c. Yuck */
236 typedef CLIB_PACKED (struct
238 ip4_address_t address;
239 u32 address_length: 6;
/*
 * Sleep for sec seconds + nsec nanoseconds via nanosleep(2).
 * Used both as the 10s poll interval and as the short back-off
 * (STATS_RELEASE_DELAY_NS) when yielding the data-structure lock.
 * NOTE(review): the req initialization and retry-on-EINTR loop lines
 * are elided in this capture.
 */
244 ip46_fib_stats_delay (stats_main_t * sm, u32 sec, u32 nsec)
246 struct timespec _req, *req = &_req;
247 struct timespec _rem, *rem = &_rem;
253 if (nanosleep (req, rem) == 0)
/* Interrupted or failed sleep: log, presumably retry with rem. */
258 clib_unix_warning ("nanosleep");
264 * @brief The context passed when collecting adjacency counters
266 typedef struct ip4_nbr_stats_ctx_t_
269 * The SW IF index all these adjs belong to
274 * A vector of ip4 nbr counters
/* Accumulated per-adjacency counters; drained in batches by ip4_nbr_ship. */
276 vl_api_ip4_nbr_counter_t *counters;
277 } ip4_nbr_stats_ctx_t;
/*
 * adj_nbr_walk callback: read the combined counter for adjacency ai and,
 * if it has seen any packets, append a network-byte-order record (next
 * hop address, link type, packets, bytes) to the walk context's vector.
 * Always continues the walk.
 * NOTE(review): the ctx/adj assignment lines are elided in this capture.
 */
280 ip4_nbr_stats_cb (adj_index_t ai, void *arg)
282 vl_api_ip4_nbr_counter_t *vl_counter;
283 vlib_counter_t adj_counter;
284 ip4_nbr_stats_ctx_t *ctx;
288 vlib_get_combined_counter (&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies -- only ship counters with traffic. */
290 if (0 != adj_counter.packets)
292 vec_add2 (ctx->counters, vl_counter, 1);
295 vl_counter->packets = clib_host_to_net_u64 (adj_counter.packets);
296 vl_counter->bytes = clib_host_to_net_u64 (adj_counter.bytes);
/* IPv4 address already stored in network byte order. */
297 vl_counter->address = adj->sub_type.nbr.next_hop.ip4.as_u32;
298 vl_counter->link_type = adj->ia_link;
300 return (ADJ_WALK_RC_CONTINUE);
303 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * Drain the collected ip4 neighbour counters to the main thread's API
 * queue in batches (reused IP4_FIB_COUNTER_BATCH_SIZE limit).  Takes
 * the data-structure lock around each message build, copies entries
 * from the BACK of the vector so truncating the length "erases" them,
 * and pauses (dropping queued work to the delay helper) when the shmem
 * queue is full.
 * NOTE(review): the memcpy call head and the dsunlock/pause plumbing
 * lines are elided in this capture.
 */
306 ip4_nbr_ship (stats_main_t * sm, ip4_nbr_stats_ctx_t * ctx)
308 api_main_t *am = sm->api_main;
309 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
310 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
311 vl_api_vnet_ip4_nbr_counters_t *mp = 0;
315 * If the walk context has counters, which may be left over from the last
316 * suspend, then we continue from there.
318 while (0 != vec_len (ctx->counters))
320 u32 n_items = MIN (vec_len (ctx->counters),
321 IP4_FIB_COUNTER_BATCH_SIZE);
324 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
326 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
329 (vl_api_ip4_nbr_counter_t)));
330 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_NBR_COUNTERS);
331 mp->count = ntohl (n_items);
332 mp->sw_if_index = ntohl (ctx->sw_if_index);
337 * copy the counters from the back of the context, then we can easily
338 * 'erase' them by resetting the vector length.
339 * The order we push the stats to the caller is not important.
342 &ctx->counters[vec_len (ctx->counters) - n_items],
343 n_items * sizeof (*ctx->counters));
/* Shrink the vector: the copied tail entries are now consumed. */
345 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Check fullness and send under one queue lock to avoid a race. */
350 unix_shared_memory_queue_lock (q);
351 pause = unix_shared_memory_queue_is_full (q);
353 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
354 unix_shared_memory_queue_unlock (q);
/* Queue was full: back off so the main thread can drain it. */
358 ip46_fib_stats_delay (sm, 0 /* sec */ ,
359 STATS_RELEASE_DELAY_NS);
/*
 * For every software interface, walk its IPv4 neighbour adjacencies
 * under the data-structure lock, collect non-zero counters into a walk
 * context, then ship them to the main thread.  The context's counter
 * vector must be empty between interfaces.
 * NOTE(review): the ctx initializer body, the walk callback argument and
 * the dsunlock are in elided lines.
 */
364 do_ip4_nbrs (stats_main_t * sm)
366 vnet_main_t *vnm = vnet_get_main ();
367 vnet_interface_main_t *im = &vnm->interface_main;
368 vnet_sw_interface_t *si;
370 ip4_nbr_stats_ctx_t ctx = {
376 pool_foreach (si, im->sw_interfaces,
379 * update the interface we are now concerned with
381 ctx.sw_if_index = si->sw_if_index;
384 * we are about to walk another interface, so we shouldn't have any pending
387 ASSERT(ctx.counters == NULL);
390 * visit each neighbour adjacency on the interface and collect
392 * Because we hold the lock the walk is synchronous, so safe to routing
393 * updates. It's limited in work by the number of adjacenies on an
394 * interface, which is typically not huge.
396 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
397 adj_nbr_walk (si->sw_if_index,
404 * if this interface has some adjacencies with counters then ship them,
405 * else continue to the next interface.
407 if (NULL != ctx.counters)
409 ip4_nbr_ship(sm, &ctx);
416 * @brief The context passed when collecting adjacency counters
418 typedef struct ip6_nbr_stats_ctx_t_
421 * The SW IF index all these adjs belong to
426 * A vector of ip6 nbr counters
/* Accumulated per-adjacency counters; drained in batches by ip6_nbr_ship. */
428 vl_api_ip6_nbr_counter_t *counters;
429 } ip6_nbr_stats_ctx_t;
/*
 * adj_nbr_walk callback (IPv6 variant of ip4_nbr_stats_cb): append a
 * network-byte-order record for adjacency ai to the context vector when
 * the adjacency has seen traffic.  Always continues the walk.
 * NOTE(review): the ctx/adj assignment lines are elided in this capture.
 */
432 ip6_nbr_stats_cb (adj_index_t ai,
435 vl_api_ip6_nbr_counter_t *vl_counter;
436 vlib_counter_t adj_counter;
437 ip6_nbr_stats_ctx_t *ctx;
441 vlib_get_combined_counter(&adjacency_counters, ai, &adj_counter);
/* Skip idle adjacencies -- only ship counters with traffic. */
443 if (0 != adj_counter.packets)
445 vec_add2(ctx->counters, vl_counter, 1);
448 vl_counter->packets = clib_host_to_net_u64(adj_counter.packets);
449 vl_counter->bytes = clib_host_to_net_u64(adj_counter.bytes);
/* 128-bit address copied as two u64 halves, already network order. */
450 vl_counter->address[0] = adj->sub_type.nbr.next_hop.ip6.as_u64[0];
451 vl_counter->address[1] = adj->sub_type.nbr.next_hop.ip6.as_u64[1];
452 vl_counter->link_type = adj->ia_link;
454 return (ADJ_WALK_RC_CONTINUE);
457 #define MIN(x,y) (((x)<(y))?(x):(y))
/*
 * IPv6 twin of ip4_nbr_ship: drain collected neighbour counters to the
 * main thread in IP6_FIB_COUNTER_BATCH_SIZE batches, copying from the
 * back of the vector and truncating its length, pausing when the shmem
 * queue is full.
 * NOTE(review): the memcpy call head and the dsunlock/pause plumbing
 * lines are elided in this capture.
 */
460 ip6_nbr_ship (stats_main_t * sm,
461 ip6_nbr_stats_ctx_t *ctx)
463 api_main_t *am = sm->api_main;
464 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
465 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
466 vl_api_vnet_ip6_nbr_counters_t *mp = 0;
470 * If the walk context has counters, which may be left over from the last
471 * suspend, then we continue from there.
473 while (0 != vec_len(ctx->counters))
475 u32 n_items = MIN (vec_len (ctx->counters),
476 IP6_FIB_COUNTER_BATCH_SIZE);
479 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
481 mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) +
484 (vl_api_ip6_nbr_counter_t)));
485 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_NBR_COUNTERS);
486 mp->count = ntohl (n_items);
487 mp->sw_if_index = ntohl (ctx->sw_if_index);
492 * copy the counters from the back of the context, then we can easily
493 * 'erase' them by resetting the vector length.
494 * The order we push the stats to the caller is not important.
497 &ctx->counters[vec_len (ctx->counters) - n_items],
498 n_items * sizeof (*ctx->counters));
/* Shrink the vector: the copied tail entries are now consumed. */
500 _vec_len (ctx->counters) = vec_len (ctx->counters) - n_items;
/* Check fullness and send under one queue lock to avoid a race. */
505 unix_shared_memory_queue_lock (q);
506 pause = unix_shared_memory_queue_is_full (q);
508 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
509 unix_shared_memory_queue_unlock (q);
/* Queue was full: back off so the main thread can drain it. */
513 ip46_fib_stats_delay (sm, 0 /* sec */ ,
514 STATS_RELEASE_DELAY_NS);
/*
 * IPv6 twin of do_ip4_nbrs: for every software interface, walk its
 * neighbour adjacencies under the data-structure lock, collect non-zero
 * counters, then ship them to the main thread.
 * NOTE(review): the ctx initializer body, the walk callback argument and
 * the dsunlock are in elided lines.
 */
519 do_ip6_nbrs (stats_main_t * sm)
521 vnet_main_t *vnm = vnet_get_main ();
522 vnet_interface_main_t *im = &vnm->interface_main;
523 vnet_sw_interface_t *si;
525 ip6_nbr_stats_ctx_t ctx = {
531 pool_foreach (si, im->sw_interfaces,
534 * update the interface we are now concerned with
536 ctx.sw_if_index = si->sw_if_index;
539 * we are about to walk another interface, so we shouldn't have any pending
542 ASSERT(ctx.counters == NULL);
545 * visit each neighbour adjacency on the interface and collect
547 * Because we hold the lock the walk is synchronous, so safe to routing
548 * updates. It's limited in work by the number of adjacenies on an
549 * interface, which is typically not huge.
551 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
552 adj_nbr_walk (si->sw_if_index,
559 * if this interface has some adjacencies with counters then ship them,
560 * else continue to the next interface.
562 if (NULL != ctx.counters)
564 ip6_nbr_ship(sm, &ctx);
/*
 * Walk every IPv4 FIB, snapshot its routes (under the data-structure
 * lock), read each route's load-balance combined counter, and ship
 * non-zero entries to the main thread in IP4_FIB_COUNTER_BATCH_SIZE
 * batches.  If the control plane signals release_hint mid-walk, the
 * function records where it was (start_at_fib_index), drops the lock,
 * sleeps briefly, and restarts from that FIB.  'routes'/'results' are
 * function-static scratch vectors reused across calls.
 * NOTE(review): many structural lines (braces, goto-style restart
 * labels, count increments) are elided in this capture; comments
 * describe only what is visible.
 */
571 do_ip4_fibs (stats_main_t * sm)
573 ip4_main_t *im4 = &ip4_main;
574 api_main_t *am = sm->api_main;
575 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
576 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
577 static ip4_route_t *routes;
580 ip_lookup_main_t *lm = &im4->lookup_main;
581 static uword *results;
582 vl_api_vnet_ip4_fib_counters_t *mp = 0;
583 u32 items_this_message;
584 vl_api_ip4_fib_counter_t *ctrp = 0;
585 u32 start_at_fib_index = 0;
590 pool_foreach (fib, im4->fibs,
592 /* We may have bailed out due to control-plane activity */
/* Skip FIBs already reported before the last restart. */
593 while ((fib - im4->fibs) < start_at_fib_index)
598 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
599 mp = vl_msg_api_alloc_as_if_client
601 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
602 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
604 mp->vrf_id = ntohl (fib->ft_table_id);
605 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
609 /* happens if the last FIB was empty... */
610 ASSERT (mp->count == 0);
611 mp->vrf_id = ntohl (fib->ft_table_id);
614 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
/* Reuse the static scratch vectors for this FIB's snapshot. */
616 vec_reset_length (routes);
617 vec_reset_length (results);
/* Snapshot routes per prefix-length bucket of the dst-address hash. */
619 for (i = 0; i < ARRAY_LEN (fib->v4.fib_entry_by_dst_address); i++)
621 uword *hash = fib->v4.fib_entry_by_dst_address[i];
625 x.address_length = i;
627 hash_foreach_pair (p, hash,
629 x.address.data_u32 = p->key;
630 x.index = p->value[0];
632 vec_add1 (routes, x);
/* Control plane wants the lock: remember position, yield, restart. */
633 if (sm->data_structure_lock->release_hint)
635 start_at_fib_index = fib - im4->fibs;
637 ip46_fib_stats_delay (sm, 0 /* sec */,
638 STATS_RELEASE_DELAY_NS);
640 ctrp = (vl_api_ip4_fib_counter_t *)mp->c;
646 vec_foreach (r, routes)
650 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
654 * seen at least one packet, send it.
659 /* already in net byte order */
660 ctrp->address = r->address.as_u32;
661 ctrp->address_length = r->address_length;
662 ctrp->packets = clib_host_to_net_u64 (c.packets);
663 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Message full: flush this batch and start another. */
667 if (mp->count == items_this_message)
669 mp->count = htonl (items_this_message);
671 * If the main thread's input queue is stuffed,
672 * drop the data structure lock (which the main thread
673 * may want), and take a pause.
675 unix_shared_memory_queue_lock (q);
676 if (unix_shared_memory_queue_is_full (q))
679 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
680 unix_shared_memory_queue_unlock (q);
682 ip46_fib_stats_delay (sm, 0 /* sec */ ,
683 STATS_RELEASE_DELAY_NS);
686 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
687 unix_shared_memory_queue_unlock (q);
689 items_this_message = IP4_FIB_COUNTER_BATCH_SIZE;
690 mp = vl_msg_api_alloc_as_if_client
692 items_this_message * sizeof (vl_api_ip4_fib_counter_t));
693 mp->_vl_msg_id = ntohs (VL_API_VNET_IP4_FIB_COUNTERS);
695 mp->vrf_id = ntohl (fib->ft_table_id);
696 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
698 } /* for each (mp or single) adj */
699 if (sm->data_structure_lock->release_hint)
701 start_at_fib_index = fib - im4->fibs;
703 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
705 ctrp = (vl_api_ip4_fib_counter_t *) mp->c;
708 } /* vec_foreach (routes) */
712 /* Flush any data from this fib */
715 mp->count = htonl (mp->count);
716 vl_msg_api_send_shmem (q, (u8 *) & mp);
722 /* If e.g. the last FIB had no reportable routes, free the buffer */
724 vl_msg_api_free (mp);
/*
 * Fragments of two local typedefs (openings elided in this capture):
 * an ip6 route record holding address/length/index, and the argument
 * struct passed through the bihash foreach to add_routes_in_fib --
 * TODO confirm full field lists against the complete source.
 */
729 ip6_address_t address;
737 ip6_route_t **routep;
739 } add_routes_in_fib_arg_t;
/*
 * Bihash foreach callback: collect every key/value pair belonging to
 * ap->fib_index into the caller's route vector.  If the control plane
 * signals release_hint, abort the whole walk via longjmp back to
 * do_ip6_fibs' setjmp so the snapshot can be retried.
 * Key layout (from the visible extraction): key[2] high 32 bits =
 * fib index, low byte = prefix length; value = lookup index.
 */
742 add_routes_in_fib (BVT (clib_bihash_kv) * kvp, void *arg)
744 add_routes_in_fib_arg_t *ap = arg;
745 stats_main_t *sm = ap->sm;
/* Non-local exit: unwind out of the bihash walk entirely. */
747 if (sm->data_structure_lock->release_hint)
748 clib_longjmp (&sm->jmp_buf, 1);
750 if (kvp->key[2] >> 32 == ap->fib_index)
754 addr = (ip6_address_t *) kvp;
755 vec_add2 (*ap->routep, r, 1);
756 r->address = addr[0];
757 r->address_length = kvp->key[2] & 0xFF;
758 r->index = kvp->value;
/*
 * IPv6 twin of do_ip4_fibs: walk every IPv6 FIB, snapshot its routes
 * from the forwarding bihash via add_routes_in_fib (which longjmps back
 * here on control-plane pressure), then ship non-zero load-balance
 * counters in IP6_FIB_COUNTER_BATCH_SIZE batches.  setjmp/longjmp is
 * used for the mid-walk restart; start_at_fib_index records where to
 * resume.  'routes'/'results' are function-static scratch vectors.
 * NOTE(review): many structural lines are elided in this capture;
 * comments describe only what is visible.
 */
763 do_ip6_fibs (stats_main_t * sm)
765 ip6_main_t *im6 = &ip6_main;
766 api_main_t *am = sm->api_main;
767 vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr;
768 unix_shared_memory_queue_t *q = shmem_hdr->vl_input_queue;
769 static ip6_route_t *routes;
772 static uword *results;
773 vl_api_vnet_ip6_fib_counters_t *mp = 0;
774 u32 items_this_message;
775 vl_api_ip6_fib_counter_t *ctrp = 0;
776 u32 start_at_fib_index = 0;
777 BVT (clib_bihash) * h = &im6->ip6_table[IP6_FIB_TABLE_FWDING].ip6_hash;
778 add_routes_in_fib_arg_t _a, *a = &_a;
782 pool_foreach (fib, im6->fibs,
784 /* We may have bailed out due to control-plane activity */
/* Skip FIBs already reported before the last restart. */
785 while ((fib - im6->fibs) < start_at_fib_index)
790 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
791 mp = vl_msg_api_alloc_as_if_client
793 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
794 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
796 mp->vrf_id = ntohl (fib->ft_table_id);
797 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
800 dslock (sm, 0 /* release hint */ , 1 /* tag */ );
/* Reuse the static scratch vectors for this FIB's snapshot. */
802 vec_reset_length (routes);
803 vec_reset_length (results);
805 a->fib_index = fib - im6->fibs;
/* setjmp == 0: normal path, walk the bihash; != 0: longjmp'd back
   because the control plane wanted the lock -- pause and retry. */
809 if (clib_setjmp (&sm->jmp_buf, 0) == 0)
811 start_at_fib_index = fib - im6->fibs;
812 BV (clib_bihash_foreach_key_value_pair) (h, add_routes_in_fib, a);
817 ip46_fib_stats_delay (sm, 0 /* sec */ ,
818 STATS_RELEASE_DELAY_NS);
820 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
824 vec_foreach (r, routes)
828 vlib_get_combined_counter (&load_balance_main.lbm_to_counters,
832 * seen at least one packet, send it.
836 /* already in net byte order */
837 ctrp->address[0] = r->address.as_u64[0];
838 ctrp->address[1] = r->address.as_u64[1];
839 ctrp->address_length = (u8) r->address_length;
840 ctrp->packets = clib_host_to_net_u64 (c.packets);
841 ctrp->bytes = clib_host_to_net_u64 (c.bytes);
/* Message full: flush this batch and start another. */
845 if (mp->count == items_this_message)
847 mp->count = htonl (items_this_message);
849 * If the main thread's input queue is stuffed,
850 * drop the data structure lock (which the main thread
851 * may want), and take a pause.
853 unix_shared_memory_queue_lock (q);
854 if (unix_shared_memory_queue_is_full (q))
857 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
858 unix_shared_memory_queue_unlock (q);
860 ip46_fib_stats_delay (sm, 0 /* sec */ ,
861 STATS_RELEASE_DELAY_NS);
864 vl_msg_api_send_shmem_nolock (q, (u8 *) & mp);
865 unix_shared_memory_queue_unlock (q);
867 items_this_message = IP6_FIB_COUNTER_BATCH_SIZE;
868 mp = vl_msg_api_alloc_as_if_client
870 items_this_message * sizeof (vl_api_ip6_fib_counter_t));
871 mp->_vl_msg_id = ntohs (VL_API_VNET_IP6_FIB_COUNTERS);
873 mp->vrf_id = ntohl (fib->ft_table_id);
874 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
/* Control plane wants the lock: remember position, yield, restart. */
878 if (sm->data_structure_lock->release_hint)
880 start_at_fib_index = fib - im6->fibs;
882 ip46_fib_stats_delay (sm, 0 /* sec */ , STATS_RELEASE_DELAY_NS);
884 ctrp = (vl_api_ip6_fib_counter_t *) mp->c;
887 } /* vec_foreach (routes) */
891 /* Flush any data from this fib */
894 mp->count = htonl (mp->count);
895 vl_msg_api_send_shmem (q, (u8 *) & mp);
901 /* If e.g. the last FIB had no reportable routes, free the buffer */
903 vl_msg_api_free (mp);
/*
 * Entry point for the dedicated stats pthread.  Blocks all signals,
 * names the thread "<prefix>_stats", switches to the worker's heap,
 * then loops: sleep 10 seconds, and when at least one client has
 * registered (enable_poller), collect and ship all counter families.
 * NOTE(review): the sigfillset/while(1) scaffolding and the fib/nbr
 * collection calls are in elided lines.
 */
907 stats_thread_fn (void *arg)
909 stats_main_t *sm = &stats_main;
910 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
911 vlib_thread_main_t *tm = vlib_get_thread_main ();
913 /* stats thread wants no signals. */
917 pthread_sigmask (SIG_SETMASK, &s, 0);
920 if (vec_len (tm->thread_prefix))
921 vlib_set_thread_name ((char *)
922 format (0, "%v_stats%c", tm->thread_prefix, '\0'));
/* All allocations below come from this worker's mheap. */
924 clib_mem_set_heap (w->thread_mheap);
928 /* 10 second poll interval */
929 ip46_fib_stats_delay (sm, 10 /* secs */ , 0 /* nsec */ );
/* Skip collection entirely while no client is registered. */
931 if (!(sm->enable_poller))
933 do_simple_interface_counters (sm);
934 do_combined_interface_counters (sm);
/*
 * Handler for VNET_INTERFACE_COUNTERS messages arriving back on the
 * main thread: fan the message out to every registered stats client
 * (copying it for all but the last reachable queue), then dump a
 * human-readable rendition of the counters to stdout.  The original
 * message is forwarded to the final client or freed if none remain.
 *
 * FIX(review): removed stray leading commas that had crept onto the
 * RX_MISS / RX_ERROR / TX_ERROR counter_name assignments (original
 * lines 1006/1009/1012) -- they were syntax errors; all other visible
 * lines are unchanged.
 *
 * NOTE(review): many structural lines (braces, break statements,
 * declarations) are elided in this capture; comments describe only
 * what is visible.
 */
943 vl_api_vnet_interface_counters_t_handler (vl_api_vnet_interface_counters_t *
946 vpe_client_registration_t *reg;
947 stats_main_t *sm = &stats_main;
948 unix_shared_memory_queue_t *q, *q_prev = NULL;
949 vl_api_vnet_interface_counters_t *mp_copy = NULL;
954 u32 count, sw_if_index;
/* Wire size depends on whether entries are simple u64s or packet+byte pairs. */
958 mp_size = sizeof (*mp) + (ntohl (mp->count) *
959 (mp->is_combined ? sizeof (vlib_counter_t) :
/* Fan-out: clone the message for each earlier client with queue room,
   keeping the original for the last recipient. */
963 pool_foreach(reg, sm->stats_registrations,
965 q = vl_api_client_index_to_input_queue (reg->client_index);
968 if (q_prev && (q_prev->cursize < q_prev->maxsize))
970 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
971 clib_memcpy(mp_copy, mp, mp_size);
972 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
981 count = ntohl (mp->count);
982 sw_if_index = ntohl (mp->first_sw_if_index);
/* Simple (single-u64) counters. */
983 if (mp->is_combined == 0)
986 vp = (u64 *) mp->data;
988 switch (mp->vnet_counter_type)
990 case VNET_INTERFACE_COUNTER_DROP:
991 counter_name = "drop";
993 case VNET_INTERFACE_COUNTER_PUNT:
994 counter_name = "punt";
996 case VNET_INTERFACE_COUNTER_IP4:
997 counter_name = "ip4";
999 case VNET_INTERFACE_COUNTER_IP6:
1000 counter_name = "ip6";
1002 case VNET_INTERFACE_COUNTER_RX_NO_BUF:
1003 counter_name = "rx-no-buff";
1005 case VNET_INTERFACE_COUNTER_RX_MISS:
1006 counter_name = "rx-miss";
1008 case VNET_INTERFACE_COUNTER_RX_ERROR:
1009 counter_name = "rx-error (fifo-full)";
1011 case VNET_INTERFACE_COUNTER_TX_ERROR:
1012 counter_name = "tx-error (fifo-full)";
1015 counter_name = "bogus";
1018 for (i = 0; i < count; i++)
/* Values arrive unaligned and big-endian. */
1020 v = clib_mem_unaligned (vp, u64);
1021 v = clib_net_to_host_u64 (v);
1023 fformat (stdout, "%U.%s %lld\n", format_vnet_sw_if_index_name,
1024 sm->vnet_main, sw_if_index, counter_name, v);
/* Combined (packets + bytes) counters. */
1032 vp = (vlib_counter_t *) mp->data;
1034 switch (mp->vnet_counter_type)
1036 case VNET_INTERFACE_COUNTER_RX:
1037 counter_name = "rx";
1039 case VNET_INTERFACE_COUNTER_TX:
1040 counter_name = "tx";
1043 counter_name = "bogus";
1046 for (i = 0; i < count; i++)
1048 packets = clib_mem_unaligned (&vp->packets, u64);
1049 packets = clib_net_to_host_u64 (packets);
1050 bytes = clib_mem_unaligned (&vp->bytes, u64);
1051 bytes = clib_net_to_host_u64 (bytes);
1053 fformat (stdout, "%U.%s.packets %lld\n",
1054 format_vnet_sw_if_index_name,
1055 sm->vnet_main, sw_if_index, counter_name, packets);
1056 fformat (stdout, "%U.%s.bytes %lld\n",
1057 format_vnet_sw_if_index_name,
1058 sm->vnet_main, sw_if_index, counter_name, bytes);
/* Forward the original message to the last client, or free it. */
1063 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1065 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1069 vl_msg_api_free (mp);
/*
 * Fan an IP4 FIB counters message out to every registered stats client:
 * clone it for each earlier client whose queue has room, forward the
 * original to the last reachable client, or free it if none.
 * NOTE(review): loop braces and the q_prev bookkeeping are elided in
 * this capture.
 */
1074 vl_api_vnet_ip4_fib_counters_t_handler (vl_api_vnet_ip4_fib_counters_t * mp)
1076 vpe_client_registration_t *reg;
1077 stats_main_t *sm = &stats_main;
1078 unix_shared_memory_queue_t *q, *q_prev = NULL;
1079 vl_api_vnet_ip4_fib_counters_t *mp_copy = NULL;
1082 mp_size = sizeof (*mp_copy) +
1083 ntohl (mp->count) * sizeof (vl_api_ip4_fib_counter_t);
1086 pool_foreach(reg, sm->stats_registrations,
1088 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Previous client's queue has room: give it a private copy. */
1091 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1093 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1094 clib_memcpy(mp_copy, mp, mp_size);
1095 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last client gets the original message; otherwise free it. */
1102 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1104 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1108 vl_msg_api_free (mp);
/*
 * Fan an IP4 neighbour counters message out to every registered stats
 * client (same clone-for-earlier / original-to-last pattern as the FIB
 * handler above).  NOTE(review): loop braces and q_prev bookkeeping are
 * elided in this capture.
 */
1113 vl_api_vnet_ip4_nbr_counters_t_handler (vl_api_vnet_ip4_nbr_counters_t * mp)
1115 vpe_client_registration_t *reg;
1116 stats_main_t *sm = &stats_main;
1117 unix_shared_memory_queue_t *q, *q_prev = NULL;
1118 vl_api_vnet_ip4_nbr_counters_t *mp_copy = NULL;
1121 mp_size = sizeof (*mp_copy) +
1122 ntohl (mp->count) * sizeof (vl_api_ip4_nbr_counter_t);
1125 pool_foreach(reg, sm->stats_registrations,
1127 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Previous client's queue has room: give it a private copy. */
1130 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1132 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1133 clib_memcpy(mp_copy, mp, mp_size);
1134 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last client gets the original message; otherwise free it. */
1141 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1143 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1147 vl_msg_api_free (mp);
/*
 * Fan an IP6 FIB counters message out to every registered stats client
 * (same clone-for-earlier / original-to-last pattern as above).
 * NOTE(review): loop braces and q_prev bookkeeping are elided in this
 * capture.
 */
1152 vl_api_vnet_ip6_fib_counters_t_handler (vl_api_vnet_ip6_fib_counters_t * mp)
1154 vpe_client_registration_t *reg;
1155 stats_main_t *sm = &stats_main;
1156 unix_shared_memory_queue_t *q, *q_prev = NULL;
1157 vl_api_vnet_ip6_fib_counters_t *mp_copy = NULL;
1160 mp_size = sizeof (*mp_copy) +
1161 ntohl (mp->count) * sizeof (vl_api_ip6_fib_counter_t);
1164 pool_foreach(reg, sm->stats_registrations,
1166 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Previous client's queue has room: give it a private copy. */
1169 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1171 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1172 clib_memcpy(mp_copy, mp, mp_size);
1173 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last client gets the original message; otherwise free it. */
1180 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1182 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1186 vl_msg_api_free (mp);
/*
 * Fan an IP6 neighbour counters message out to every registered stats
 * client (same clone-for-earlier / original-to-last pattern as above).
 * NOTE(review): loop braces and q_prev bookkeeping are elided in this
 * capture.
 */
1191 vl_api_vnet_ip6_nbr_counters_t_handler (vl_api_vnet_ip6_nbr_counters_t * mp)
1193 vpe_client_registration_t *reg;
1194 stats_main_t *sm = &stats_main;
1195 unix_shared_memory_queue_t *q, *q_prev = NULL;
1196 vl_api_vnet_ip6_nbr_counters_t *mp_copy = NULL;
1199 mp_size = sizeof (*mp_copy) +
1200 ntohl (mp->count) * sizeof (vl_api_ip6_nbr_counter_t);
1203 pool_foreach(reg, sm->stats_registrations,
1205 q = vl_api_client_index_to_input_queue (reg->client_index);
/* Previous client's queue has room: give it a private copy. */
1208 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1210 mp_copy = vl_msg_api_alloc_as_if_client(mp_size);
1211 clib_memcpy(mp_copy, mp, mp_size);
1212 vl_msg_api_send_shmem (q_prev, (u8 *)&mp);
/* Last client gets the original message; otherwise free it. */
1219 if (q_prev && (q_prev->cursize < q_prev->maxsize))
1221 vl_msg_api_send_shmem (q_prev, (u8 *) & mp);
1225 vl_msg_api_free (mp);
/*
 * WANT_STATS_REPLY should never arrive at the stats process -- replies
 * flow toward clients.  Receiving one indicates a wiring bug, so log it.
 */
1230 vl_api_want_stats_reply_t_handler (vl_api_want_stats_reply_t * mp)
1232 clib_warning ("BUG");
/*
 * Client (un)subscription: on enable, add the client to the
 * registration pool and hash (warning if already enabled); on disable,
 * remove it (warning if already disabled).  enable_poller tracks
 * whether any client remains, gating the stats thread's collection
 * loop.  Finishes by sending a WANT_STATS_REPLY with retval back to
 * the requesting client's queue.
 * NOTE(review): braces, the retval declaration and several early-out
 * lines are elided in this capture.
 */
1236 vl_api_want_stats_t_handler (vl_api_want_stats_t * mp)
1238 stats_main_t *sm = &stats_main;
1239 vpe_client_registration_t *rp;
1240 vl_api_want_stats_reply_t *rmp;
1243 unix_shared_memory_queue_t *q;
/* Look up any existing registration for this client. */
1245 p = hash_get (sm->stats_registration_hash, mp->client_index);
1248 if (mp->enable_disable)
1250 clib_warning ("pid %d: already enabled...", mp->pid);
/* Disable path: drop the existing registration. */
1256 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1257 pool_put (sm->stats_registrations, rp);
1258 hash_unset (sm->stats_registration_hash, mp->client_index);
1262 if (mp->enable_disable == 0)
1264 clib_warning ("pid %d: already disabled...", mp->pid);
/* Enable path: record the new registration. */
1268 pool_get (sm->stats_registrations, rp);
1269 rp->client_index = mp->client_index;
1270 rp->client_pid = mp->pid;
1271 hash_set (sm->stats_registration_hash, rp->client_index,
1272 rp - sm->stats_registrations);
/* Poller runs iff at least one client is registered. */
1275 if (pool_elts (sm->stats_registrations))
1276 sm->enable_poller = 1;
1278 sm->enable_poller = 0;
1280 q = vl_api_client_index_to_input_queue (mp->client_index);
1285 rmp = vl_msg_api_alloc (sizeof (*rmp));
1286 rmp->_vl_msg_id = ntohs (VL_API_WANT_STATS_REPLY);
1287 rmp->context = mp->context;
1288 rmp->retval = retval;
1290 vl_msg_api_send_shmem (q, (u8 *) & rmp);
/*
 * Memory-API client disconnect callback: drop the departing client's
 * stats registration so we stop cloning counter messages for it.
 * NOTE(review): the "p found" guard braces are elided in this capture;
 * presumably the pool/hash removal only runs when p != NULL.
 */
1294 stats_memclnt_delete_callback (u32 client_index)
1296 vpe_client_registration_t *rp;
1297 stats_main_t *sm = &stats_main;
1300 p = hash_get (sm->stats_registration_hash, client_index);
1303 rp = pool_elt_at_index (sm->stats_registrations, p[0]);
1304 pool_put (sm->stats_registrations, rp);
1305 hash_unset (sm->stats_registration_hash, client_index);
1311 #define vl_api_vnet_ip4_fib_counters_t_endian vl_noop_handler
1312 #define vl_api_vnet_ip4_fib_counters_t_print vl_noop_handler
1313 #define vl_api_vnet_ip6_fib_counters_t_endian vl_noop_handler
1314 #define vl_api_vnet_ip6_fib_counters_t_print vl_noop_handler
1315 #define vl_api_vnet_ip4_nbr_counters_t_endian vl_noop_handler
1316 #define vl_api_vnet_ip4_nbr_counters_t_print vl_noop_handler
1317 #define vl_api_vnet_ip6_nbr_counters_t_endian vl_noop_handler
1318 #define vl_api_vnet_ip6_nbr_counters_t_print vl_noop_handler
/*
 * VLIB init: wire up the stats subsystem.  Caches vnet/interface/api
 * mains, sets the 10s poll interval, allocates and zeroes the
 * cache-line-aligned data-structure lock, registers every handler in
 * foreach_stats_msg via the _() macro (trace disabled), and marks the
 * counter messages as "bounce" so the API infra does not free them --
 * the handlers above forward or free them explicitly.
 * NOTE(review): the #define _(N,n) opening line and the foreach
 * invocation are elided in this capture; the backslash-continued
 * macro body below is kept untouched.
 */
1320 static clib_error_t *
1321 stats_init (vlib_main_t * vm)
1323 stats_main_t *sm = &stats_main;
1324 api_main_t *am = &api_main;
1325 void *vlib_worker_thread_bootstrap_fn (void *arg);
1328 sm->vnet_main = vnet_get_main ();
1329 sm->interface_main = &vnet_get_main ()->interface_main;
1331 sm->stats_poll_interval_in_seconds = 10;
1332 sm->data_structure_lock =
1333 clib_mem_alloc_aligned (sizeof (data_structure_lock_t),
1334 CLIB_CACHE_LINE_BYTES);
1335 memset (sm->data_structure_lock, 0, sizeof (*sm->data_structure_lock));
1338 vl_msg_api_set_handlers(VL_API_##N, #n, \
1339 vl_api_##n##_t_handler, \
1341 vl_api_##n##_t_endian, \
1342 vl_api_##n##_t_print, \
1343 sizeof(vl_api_##n##_t), 0 /* do NOT trace! */);
1347 /* tell the msg infra not to free these messages... */
1348 am->message_bounce[VL_API_VNET_INTERFACE_COUNTERS] = 1;
1349 am->message_bounce[VL_API_VNET_IP4_FIB_COUNTERS] = 1;
1350 am->message_bounce[VL_API_VNET_IP6_FIB_COUNTERS] = 1;
1351 am->message_bounce[VL_API_VNET_IP4_NBR_COUNTERS] = 1;
1352 am->message_bounce[VL_API_VNET_IP6_NBR_COUNTERS] = 1;
1357 VLIB_INIT_FUNCTION (stats_init);
1360 VLIB_REGISTER_THREAD (stats_thread_reg, static) = {
1362 .function = stats_thread_fn,
1365 .no_data_structure_clone = 1,
1371 * fd.io coding-style-patch-verification: ON
1374 * eval: (c-set-style "gnu")